diff --git a/command/common.go b/command/common.go
index e35e1ee067..3eb262378c 100644
--- a/command/common.go
+++ b/command/common.go
@@ -2,17 +2,8 @@ package command
 
 import (
 	"errors"
-	"os"
-	"path/filepath"
-	"strings"
 
-	"github.com/0xPolygon/polygon-edge/crypto"
 	"github.com/0xPolygon/polygon-edge/helper/common"
-	"github.com/0xPolygon/polygon-edge/secrets"
-	"github.com/0xPolygon/polygon-edge/secrets/local"
-	"github.com/0xPolygon/polygon-edge/types"
-	"github.com/0xPolygon/polygon-edge/validators"
-	"github.com/hashicorp/go-hclog"
 )
 
 // Flags shared across multiple spaces
@@ -27,8 +18,11 @@ const (
 	ValidatorPrefixFlag   = "validators-prefix"
 	MinValidatorCountFlag = "min-validator-count"
 	MaxValidatorCountFlag = "max-validator-count"
+)
 
-	IBFTValidatorTypeFlag = "ibft-validator-type"
+var (
+	MinValidatorCount = uint64(1)
+	MaxValidatorCount = common.MaxSafeJSInt
 )
 
 const (
@@ -44,8 +38,6 @@ var (
 		"than MaxSafeJSInt (2^53 - 2)")
 
 	ErrValidatorNumberExceedsMax = errors.New("validator number exceeds max validator number")
-	ErrECDSAKeyNotFound = errors.New("ECDSA key not found in given path")
-	ErrBLSKeyNotFound   = errors.New("BLS key not found in given path")
 )
 
 func ValidateMinMaxValidatorsNumber(minValidatorCount uint64, maxValidatorCount uint64) error {
@@ -63,119 +55,3 @@ func ValidateMinMaxValidatorsNumber(minValidatorCount uint64, maxValidatorCount
 
 	return nil
 }
-
-// GetValidatorsFromPrefixPath extracts the addresses of the validators based on the directory
-// prefix. It scans the directories for validator private keys and compiles a list of addresses
-func GetValidatorsFromPrefixPath(
-	root string,
-	prefix string,
-	validatorType validators.ValidatorType,
-) (validators.Validators, error) {
-	files, err := os.ReadDir(root)
-	if err != nil {
-		return nil, err
-	}
-
-	fullRootPath, err := filepath.Abs(root)
-	if err != nil {
-		return nil, err
-	}
-
-	validatorSet := validators.NewValidatorSetFromType(validatorType)
-
-	for _, file := range files {
-		path := file.Name()
-
-		if !file.IsDir() || !strings.HasPrefix(path, prefix) {
-			continue
-		}
-
-		localSecretsManager, err := local.SecretsManagerFactory(
-			nil,
-			&secrets.SecretsManagerParams{
-				Logger: hclog.NewNullLogger(),
-				Extra: map[string]interface{}{
-					secrets.Path: filepath.Join(fullRootPath, path),
-				},
-			},
-		)
-		if err != nil {
-			return nil, err
-		}
-
-		address, err := getValidatorAddressFromSecretManager(localSecretsManager)
-		if err != nil {
-			return nil, err
-		}
-
-		switch validatorType {
-		case validators.ECDSAValidatorType:
-			if err := validatorSet.Add(&validators.ECDSAValidator{
-				Address: address,
-			}); err != nil {
-				return nil, err
-			}
-
-		case validators.BLSValidatorType:
-			blsPublicKey, err := getBLSPublicKeyBytesFromSecretManager(localSecretsManager)
-			if err != nil {
-				return nil, err
-			}
-
-			if err := validatorSet.Add(&validators.BLSValidator{
-				Address: address,
-				BLSPublicKey: blsPublicKey,
-			}); err != nil {
-				return nil, err
-			}
-		}
-	}
-
-	return validatorSet, nil
-}
-
-func getValidatorAddressFromSecretManager(manager secrets.SecretsManager) (types.Address, error) {
-	if !manager.HasSecret(secrets.ValidatorKey) {
-		return types.ZeroAddress, ErrECDSAKeyNotFound
-	}
-
-	keyBytes, err := manager.GetSecret(secrets.ValidatorKey)
-	if err != nil {
-		return types.ZeroAddress, err
-	}
-
-	privKey, err := crypto.BytesToECDSAPrivateKey(keyBytes)
-	if err != nil {
-		return types.ZeroAddress, err
-	}
-
-	return crypto.PubKeyToAddress(&privKey.PublicKey), nil
-}
-
-func getBLSPublicKeyBytesFromSecretManager(manager secrets.SecretsManager) ([]byte, error) {
-	if !manager.HasSecret(secrets.ValidatorBLSKey) {
-		return nil, ErrBLSKeyNotFound
-	}
-
-	keyBytes, err := manager.GetSecret(secrets.ValidatorBLSKey)
-	if err != nil {
-		return nil, err
-	}
-
-	secretKey, err := crypto.BytesToBLSSecretKey(keyBytes)
-	if err != nil {
-		return nil, err
-	}
-
-	pubKey, err := secretKey.GetPublicKey()
-	if err != nil {
-		return nil, err
-	}
-
-	pubKeyBytes, err := pubKey.MarshalBinary()
-	if err != nil {
-		return nil, err
-	}
-
-	return pubKeyBytes, nil
-}
diff --git a/command/default.go b/command/default.go
index 2b69cc721f..5b3758fa9a 100644
--- a/command/default.go
+++ b/command/default.go
@@ -18,6 +18,7 @@ const (
 	DefaultGenesisGasLimit = 5242880 // 0x500000
 	DefaultGenesisBaseFeeEM = chain.GenesisBaseFeeEM
 	DefaultGenesisBaseFeeChangeDenom = chain.BaseFeeChangeDenom
+	DefaultEpochSize = 10
 )
 
 var (
diff --git a/command/genesis/genesis.go b/command/genesis/genesis.go
index bb0ffdc37e..d84e5aeceb 100644
--- a/command/genesis/genesis.go
+++ b/command/genesis/genesis.go
@@ -7,10 +7,8 @@ import (
 	"github.com/0xPolygon/polygon-edge/command"
 	"github.com/0xPolygon/polygon-edge/command/genesis/predeploy"
-	"github.com/0xPolygon/polygon-edge/command/helper"
-	"github.com/0xPolygon/polygon-edge/consensus/ibft"
 	"github.com/0xPolygon/polygon-edge/helper/common"
-	"github.com/0xPolygon/polygon-edge/validators"
+	"github.com/0xPolygon/polygon-edge/server"
 )
 
 func GetCommand() *cobra.Command {
@@ -106,7 +104,7 @@ func setFlags(cmd *cobra.Command) {
 	cmd.Flags().Uint64Var(
 		&params.epochSize,
 		epochSizeFlag,
-		ibft.DefaultEpochSize,
+		command.DefaultEpochSize,
 		"the epoch size for the chain",
 	)
@@ -117,64 +115,43 @@ func setFlags(cmd *cobra.Command) {
 		"admin for proxy contracts",
 	)
 
-	// PoS
-	{
-		cmd.Flags().BoolVar(
-			&params.isPos,
-			posFlag,
-			false,
-			"the flag indicating that the client should use Proof of Stake IBFT. 
Defaults to "+ - "Proof of Authority if flag is not provided or false", - ) - - cmd.Flags().Uint64Var( - ¶ms.minNumValidators, - command.MinValidatorCountFlag, - 1, - "the minimum number of validators in the validator set for PoS", - ) - - cmd.Flags().Uint64Var( - ¶ms.maxNumValidators, - command.MaxValidatorCountFlag, - common.MaxSafeJSInt, - "the maximum number of validators in the validator set for PoS", - ) + cmd.Flags().Uint64Var( + ¶ms.minNumValidators, + command.MinValidatorCountFlag, + 1, + "the minimum number of validators in the validator set for PoS", + ) - cmd.Flags().StringVar( - ¶ms.validatorsPath, - command.ValidatorRootFlag, - command.DefaultValidatorRoot, - "root path containing validators secrets", - ) + cmd.Flags().Uint64Var( + ¶ms.maxNumValidators, + command.MaxValidatorCountFlag, + common.MaxSafeJSInt, + "the maximum number of validators in the validator set for PoS", + ) - cmd.Flags().StringVar( - ¶ms.validatorsPrefixPath, - command.ValidatorPrefixFlag, - command.DefaultValidatorPrefix, - "folder prefix names for validators secrets", - ) + cmd.Flags().StringVar( + ¶ms.validatorsPath, + command.ValidatorRootFlag, + command.DefaultValidatorRoot, + "root path containing validators secrets", + ) - cmd.Flags().StringArrayVar( - ¶ms.validators, - command.ValidatorFlag, - []string{}, - "validators defined by user (polybft format: ::)", - ) + cmd.Flags().StringVar( + ¶ms.validatorsPrefixPath, + command.ValidatorPrefixFlag, + command.DefaultValidatorPrefix, + "folder prefix names for validators secrets", + ) - cmd.MarkFlagsMutuallyExclusive(command.ValidatorFlag, command.ValidatorRootFlag) - cmd.MarkFlagsMutuallyExclusive(command.ValidatorFlag, command.ValidatorPrefixFlag) - } + cmd.Flags().StringArrayVar( + ¶ms.validators, + command.ValidatorFlag, + []string{}, + "validators defined by user (polybft format: ::)", + ) - // IBFT Validators - { - cmd.Flags().StringVar( - ¶ms.rawIBFTValidatorType, - command.IBFTValidatorTypeFlag, - string(validators.BLSValidatorType), - "the type of validators in IBFT", - ) - } + cmd.MarkFlagsMutuallyExclusive(command.ValidatorFlag, command.ValidatorRootFlag) + cmd.MarkFlagsMutuallyExclusive(command.ValidatorFlag, command.ValidatorPrefixFlag) // PolyBFT { @@ -351,9 +328,11 @@ func preRunCommand(cmd *cobra.Command, _ []string) error { return err } - helper.SetRequiredFlags(cmd, params.getRequiredFlags()) + //nolint:godox + // TODO: @Stefan-Ethernal Maybe it can be removed + params.consensus = server.ConsensusType(params.consensusRaw) - return params.initRawParams() + return nil } func runCommand(cmd *cobra.Command, _ []string) { diff --git a/command/genesis/params.go b/command/genesis/params.go index 4c55f5de8e..93f20932f6 100644 --- a/command/genesis/params.go +++ b/command/genesis/params.go @@ -13,16 +13,10 @@ import ( "github.com/0xPolygon/polygon-edge/chain" "github.com/0xPolygon/polygon-edge/command" "github.com/0xPolygon/polygon-edge/command/helper" - "github.com/0xPolygon/polygon-edge/consensus/ibft" - "github.com/0xPolygon/polygon-edge/consensus/ibft/fork" - "github.com/0xPolygon/polygon-edge/consensus/ibft/signer" "github.com/0xPolygon/polygon-edge/consensus/polybft" "github.com/0xPolygon/polygon-edge/contracts" - "github.com/0xPolygon/polygon-edge/contracts/staking" - stakingHelper "github.com/0xPolygon/polygon-edge/helper/staking" "github.com/0xPolygon/polygon-edge/server" "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" ) const ( @@ -35,7 +29,6 @@ const ( blockGasLimitFlag = "block-gas-limit" 
burnContractFlag = "burn-contract" genesisBaseFeeConfigFlag = "base-fee-config" - posFlag = "pos" nativeTokenConfigFlag = "native-token-config" rewardTokenCodeFlag = "reward-token-code" rewardWalletFlag = "reward-wallet" @@ -87,19 +80,12 @@ type genesisParams struct { baseFeeConfig string parsedBaseFeeConfig *baseFeeInfo - // PoS - isPos bool minNumValidators uint64 maxNumValidators uint64 validatorsPath string validatorsPrefixPath string validators []string - // IBFT - rawIBFTValidatorType string - ibftValidatorType validators.ValidatorType - ibftValidators validators.Validators - extraData []byte consensus server.ConsensusType @@ -149,14 +135,17 @@ func (p *genesisParams) validateFlags() error { return errUnsupportedConsensus } + // Check if the genesis file already exists + if err := verifyGenesisExistence(p.genesisPath); err != nil { + return errors.New(err.GetMessage()) + } + if err := p.validateGenesisBaseFeeConfig(); err != nil { return err } // Check if validator information is set at all - if p.isIBFTConsensus() && - !p.areValidatorsSetManually() && - !p.areValidatorsSetByPrefix() { + if !p.areValidatorsSetManually() && !p.areValidatorsSetByPrefix() { return errValidatorsNotSpecified } @@ -184,19 +173,13 @@ func (p *genesisParams) validateFlags() error { if err := p.validateProxyContractsAdmin(); err != nil { return err } - } - - // Check if the genesis file already exists - if generateError := verifyGenesisExistence(p.genesisPath); generateError != nil { - return errors.New(generateError.GetMessage()) - } - // Check that the epoch size is correct - if p.epochSize < 2 && (p.isIBFTConsensus() || p.isPolyBFTConsensus()) { - // Epoch size must be greater than 1, so new transactions have a chance to be added to a block. - // Otherwise, every block would be an endblock (meaning it will not have any transactions). - // Check is placed here to avoid additional parsing if epochSize < 2 - return errInvalidEpochSize + if p.epochSize < 2 { + // Epoch size must be greater than 1, so new transactions have a chance to be added to a block. + // Otherwise, every block would be an endblock (meaning it will not have any transactions). 
+ // Check is placed here to avoid additional parsing if epochSize < 2 + return errInvalidEpochSize + } } // Validate validatorsPath only if validators information were not provided via CLI flag @@ -210,10 +193,6 @@ func (p *genesisParams) validateFlags() error { return command.ValidateMinMaxValidatorsNumber(p.minNumValidators, p.maxNumValidators) } -func (p *genesisParams) isIBFTConsensus() bool { - return server.ConsensusType(p.consensusRaw) == server.IBFTConsensus -} - func (p *genesisParams) isPolyBFTConsensus() bool { return server.ConsensusType(p.consensusRaw) == server.PolyBFTConsensus } @@ -226,165 +205,6 @@ func (p *genesisParams) areValidatorsSetByPrefix() bool { return p.validatorsPrefixPath != "" } -func (p *genesisParams) getRequiredFlags() []string { - if p.isIBFTConsensus() { - return []string{ - command.BootnodeFlag, - } - } - - return []string{} -} - -func (p *genesisParams) initRawParams() error { - p.consensus = server.ConsensusType(p.consensusRaw) - - if p.consensus == server.PolyBFTConsensus { - return nil - } - - if err := p.initIBFTValidatorType(); err != nil { - return err - } - - if err := p.initValidatorSet(); err != nil { - return err - } - - p.initIBFTExtraData() - p.initConsensusEngineConfig() - - return nil -} - -// setValidatorSetFromCli sets validator set from cli command -func (p *genesisParams) setValidatorSetFromCli() error { - if len(p.validators) == 0 { - return nil - } - - newValidators, err := validators.ParseValidators(p.ibftValidatorType, p.validators) - if err != nil { - return err - } - - if err = p.ibftValidators.Merge(newValidators); err != nil { - return err - } - - return nil -} - -// setValidatorSetFromPrefixPath sets validator set from prefix path -func (p *genesisParams) setValidatorSetFromPrefixPath() error { - if !p.areValidatorsSetByPrefix() { - return nil - } - - validators, err := command.GetValidatorsFromPrefixPath( - p.validatorsPath, - p.validatorsPrefixPath, - p.ibftValidatorType, - ) - - if err != nil { - return fmt.Errorf("failed to read from prefix: %w", err) - } - - if err := p.ibftValidators.Merge(validators); err != nil { - return err - } - - return nil -} - -func (p *genesisParams) initIBFTValidatorType() error { - var err error - if p.ibftValidatorType, err = validators.ParseValidatorType(p.rawIBFTValidatorType); err != nil { - return err - } - - return nil -} - -func (p *genesisParams) initValidatorSet() error { - p.ibftValidators = validators.NewValidatorSetFromType(p.ibftValidatorType) - - // Set validator set - // Priority goes to cli command over prefix path - if err := p.setValidatorSetFromPrefixPath(); err != nil { - return err - } - - if err := p.setValidatorSetFromCli(); err != nil { - return err - } - - // Validate if validator number exceeds max number - if ok := p.isValidatorNumberValid(); !ok { - return command.ErrValidatorNumberExceedsMax - } - - return nil -} - -func (p *genesisParams) isValidatorNumberValid() bool { - return p.ibftValidators == nil || uint64(p.ibftValidators.Len()) <= p.maxNumValidators -} - -func (p *genesisParams) initIBFTExtraData() { - if p.consensus != server.IBFTConsensus { - return - } - - var committedSeal signer.Seals - - switch p.ibftValidatorType { - case validators.ECDSAValidatorType: - committedSeal = new(signer.SerializedSeal) - case validators.BLSValidatorType: - committedSeal = new(signer.AggregatedSeal) - } - - ibftExtra := &signer.IstanbulExtra{ - Validators: p.ibftValidators, - ProposerSeal: []byte{}, - CommittedSeals: committedSeal, - } - - p.extraData = make([]byte, 
signer.IstanbulExtraVanity) - p.extraData = ibftExtra.MarshalRLPTo(p.extraData) -} - -func (p *genesisParams) initConsensusEngineConfig() { - if p.consensus != server.IBFTConsensus { - p.consensusEngineConfig = map[string]interface{}{ - p.consensusRaw: map[string]interface{}{}, - } - - return - } - - if p.isPos { - p.initIBFTEngineMap(fork.PoS) - - return - } - - p.initIBFTEngineMap(fork.PoA) -} - -func (p *genesisParams) initIBFTEngineMap(ibftType fork.IBFTType) { - p.consensusEngineConfig = map[string]interface{}{ - string(server.IBFTConsensus): map[string]interface{}{ - fork.KeyType: ibftType, - fork.KeyValidatorType: p.ibftValidatorType, - fork.KeyBlockTime: p.blockTime, - ibft.KeyEpochSize: p.epochSize, - }, - } -} - func (p *genesisParams) generateGenesis() error { if err := p.initGenesisConfig(); err != nil { return err @@ -440,16 +260,6 @@ func (p *genesisParams) initGenesisConfig() error { chainConfig.Params.BurnContractDestinationAddress = burnContractInfo.DestinationAddress } - // Predeploy staking smart contract if needed - if p.shouldPredeployStakingSC() { - stakingAccount, err := p.predeployStakingSC() - if err != nil { - return err - } - - chainConfig.Genesis.Alloc[staking.AddrStakingContract] = stakingAccount - } - for _, premineInfo := range p.premineInfos { chainConfig.Genesis.Alloc[premineInfo.address] = &chain.GenesisAccount{ Balance: premineInfo.amount, @@ -461,26 +271,6 @@ func (p *genesisParams) initGenesisConfig() error { return nil } -func (p *genesisParams) shouldPredeployStakingSC() bool { - // If the consensus selected is IBFT / Dev and the mechanism is Proof of Stake, - // deploy the Staking SC - return p.isPos && (p.consensus == server.IBFTConsensus || p.consensus == server.DevConsensus) -} - -func (p *genesisParams) predeployStakingSC() (*chain.GenesisAccount, error) { - stakingAccount, predeployErr := stakingHelper.PredeployStakingSC( - p.ibftValidators, - stakingHelper.PredeployParams{ - MinValidatorCount: p.minNumValidators, - MaxValidatorCount: p.maxNumValidators, - }) - if predeployErr != nil { - return nil, predeployErr - } - - return stakingAccount, nil -} - // validateRewardWallet validates reward wallet flag func (p *genesisParams) validateRewardWallet() error { if p.rewardWallet == "" { diff --git a/command/genesis/polybft_params.go b/command/genesis/polybft_params.go index a71388405e..dd8e0a586b 100644 --- a/command/genesis/polybft_params.go +++ b/command/genesis/polybft_params.go @@ -24,6 +24,9 @@ import ( "github.com/0xPolygon/polygon-edge/types" ) +//nolint:godox +// TODO: @Stefan-Ethernal move this to params.go + const ( sprintSizeFlag = "sprint-size" blockTimeFlag = "block-time" @@ -69,7 +72,10 @@ type contractInfo struct { address types.Address } +// TODO: @Stefan-Ethernal Rename to generateChainConfig // generatePolyBftChainConfig creates and persists polybft chain configuration to the provided file path +// +//nolint:godox func (p *genesisParams) generatePolyBftChainConfig(o command.OutputFormatter) error { // populate premine balance map premineBalances := make(map[types.Address]*premineInfo, len(p.premine)) diff --git a/command/genesis/predeploy/params.go b/command/genesis/predeploy/params.go index 270e940774..f0611f1206 100644 --- a/command/genesis/predeploy/params.go +++ b/command/genesis/predeploy/params.go @@ -8,7 +8,6 @@ import ( "github.com/0xPolygon/polygon-edge/chain" "github.com/0xPolygon/polygon-edge/command" "github.com/0xPolygon/polygon-edge/command/helper" - "github.com/0xPolygon/polygon-edge/contracts/staking" 
"github.com/0xPolygon/polygon-edge/helper/hex" "github.com/0xPolygon/polygon-edge/helper/predeployment" "github.com/0xPolygon/polygon-edge/types" @@ -32,9 +31,7 @@ var ( var ( predeployAddressMin = types.StringToAddress("01100") - reservedAddresses = []types.Address{ - staking.AddrStakingContract, - } + reservedAddresses = []types.Address{} ) var ( diff --git a/command/helper/helper.go b/command/helper/helper.go index b934dbd2d4..4696039499 100644 --- a/command/helper/helper.go +++ b/command/helper/helper.go @@ -11,7 +11,6 @@ import ( "github.com/0xPolygon/polygon-edge/chain" "github.com/0xPolygon/polygon-edge/command" - ibftOp "github.com/0xPolygon/polygon-edge/consensus/ibft/proto" "github.com/0xPolygon/polygon-edge/helper/common" "github.com/0xPolygon/polygon-edge/server" "github.com/0xPolygon/polygon-edge/server/proto" @@ -127,19 +126,6 @@ func GetSystemClientConnection(address string) ( return proto.NewSystemClient(conn), nil } -// GetIBFTOperatorClientConnection returns the IBFT operator client connection -func GetIBFTOperatorClientConnection(address string) ( - ibftOp.IbftOperatorClient, - error, -) { - conn, err := GetGRPCConnection(address) - if err != nil { - return nil, err - } - - return ibftOp.NewIbftOperatorClient(conn), nil -} - // GetGRPCConnection returns a grpc client connection func GetGRPCConnection(address string) (*grpc.ClientConn, error) { conn, err := grpc.Dial(address, grpc.WithTransportCredentials(insecure.NewCredentials())) diff --git a/command/ibft/candidates/ibft_candidates.go b/command/ibft/candidates/ibft_candidates.go deleted file mode 100644 index 78bf50f9ed..0000000000 --- a/command/ibft/candidates/ibft_candidates.go +++ /dev/null @@ -1,46 +0,0 @@ -package candidates - -import ( - "context" - - "github.com/0xPolygon/polygon-edge/command" - "github.com/0xPolygon/polygon-edge/command/helper" - ibftOp "github.com/0xPolygon/polygon-edge/consensus/ibft/proto" - "github.com/spf13/cobra" - empty "google.golang.org/protobuf/types/known/emptypb" -) - -func GetCommand() *cobra.Command { - return &cobra.Command{ - Use: "candidates", - Short: "Queries the current set of proposed candidates, as well as candidates that have not been included yet", - Run: runCommand, - } -} - -func runCommand(cmd *cobra.Command, _ []string) { - outputter := command.InitializeOutputter(cmd) - defer outputter.WriteOutput() - - candidatesResponse, err := getIBFTCandidates(helper.GetGRPCAddress(cmd)) - if err != nil { - outputter.SetError(err) - - return - } - - outputter.SetCommandResult( - newIBFTCandidatesResult(candidatesResponse), - ) -} - -func getIBFTCandidates(grpcAddress string) (*ibftOp.CandidatesResp, error) { - client, err := helper.GetIBFTOperatorClientConnection( - grpcAddress, - ) - if err != nil { - return nil, err - } - - return client.Candidates(context.Background(), &empty.Empty{}) -} diff --git a/command/ibft/candidates/result.go b/command/ibft/candidates/result.go deleted file mode 100644 index 8defcc2952..0000000000 --- a/command/ibft/candidates/result.go +++ /dev/null @@ -1,60 +0,0 @@ -package candidates - -import ( - "bytes" - "fmt" - - "github.com/0xPolygon/polygon-edge/command/helper" - ibftHelper "github.com/0xPolygon/polygon-edge/command/ibft/helper" - ibftOp "github.com/0xPolygon/polygon-edge/consensus/ibft/proto" -) - -type IBFTCandidate struct { - Address string `json:"address"` - Vote ibftHelper.Vote `json:"vote"` -} - -type IBFTCandidatesResult struct { - Candidates []IBFTCandidate `json:"candidates"` -} - -func newIBFTCandidatesResult(resp 
*ibftOp.CandidatesResp) *IBFTCandidatesResult { - res := &IBFTCandidatesResult{ - Candidates: make([]IBFTCandidate, len(resp.Candidates)), - } - - for i, c := range resp.Candidates { - res.Candidates[i].Address = c.Address - res.Candidates[i].Vote = ibftHelper.BoolToVote(c.Auth) - } - - return res -} - -func (r *IBFTCandidatesResult) GetOutput() string { - var buffer bytes.Buffer - - buffer.WriteString("\n[IBFT CANDIDATES]\n") - - if num := len(r.Candidates); num == 0 { - buffer.WriteString("No candidates found") - } else { - buffer.WriteString(fmt.Sprintf("Number of candidates: %d\n\n", num)) - buffer.WriteString(formatCandidates(r.Candidates)) - } - - buffer.WriteString("\n") - - return buffer.String() -} - -func formatCandidates(candidates []IBFTCandidate) string { - generatedCandidates := make([]string, 0, len(candidates)+1) - - generatedCandidates = append(generatedCandidates, "Address|Vote") - for _, c := range candidates { - generatedCandidates = append(generatedCandidates, fmt.Sprintf("%s|%s", c.Address, c.Vote)) - } - - return helper.FormatKV(generatedCandidates) -} diff --git a/command/ibft/helper/vote.go b/command/ibft/helper/vote.go deleted file mode 100644 index 39f47f6191..0000000000 --- a/command/ibft/helper/vote.go +++ /dev/null @@ -1,20 +0,0 @@ -package helper - -type Vote string - -const ( - VoteAdd = "ADD" - VoteRemove = "REMOVE" -) - -func BoolToVote(vote bool) Vote { - if vote { - return VoteAdd - } - - return VoteRemove -} - -func VoteToString(vote Vote) string { - return string(vote) -} diff --git a/command/ibft/ibft.go b/command/ibft/ibft.go deleted file mode 100644 index 13b78ba0c8..0000000000 --- a/command/ibft/ibft.go +++ /dev/null @@ -1,42 +0,0 @@ -package ibft - -import ( - "github.com/0xPolygon/polygon-edge/command/helper" - "github.com/0xPolygon/polygon-edge/command/ibft/candidates" - "github.com/0xPolygon/polygon-edge/command/ibft/propose" - "github.com/0xPolygon/polygon-edge/command/ibft/quorum" - "github.com/0xPolygon/polygon-edge/command/ibft/snapshot" - "github.com/0xPolygon/polygon-edge/command/ibft/status" - _switch "github.com/0xPolygon/polygon-edge/command/ibft/switch" - "github.com/spf13/cobra" -) - -func GetCommand() *cobra.Command { - ibftCmd := &cobra.Command{ - Use: "ibft", - Short: "Top level IBFT command for interacting with the IBFT consensus. 
Only accepts subcommands.", - } - - helper.RegisterGRPCAddressFlag(ibftCmd) - - registerSubcommands(ibftCmd) - - return ibftCmd -} - -func registerSubcommands(baseCmd *cobra.Command) { - baseCmd.AddCommand( - // ibft status - status.GetCommand(), - // ibft snapshot - snapshot.GetCommand(), - // ibft propose - propose.GetCommand(), - // ibft candidates - candidates.GetCommand(), - // ibft switch - _switch.GetCommand(), - // ibft quorum - quorum.GetCommand(), - ) -} diff --git a/command/ibft/propose/ibft_propose.go b/command/ibft/propose/ibft_propose.go deleted file mode 100644 index 3c0b60c332..0000000000 --- a/command/ibft/propose/ibft_propose.go +++ /dev/null @@ -1,75 +0,0 @@ -package propose - -import ( - "fmt" - - "github.com/0xPolygon/polygon-edge/command" - "github.com/spf13/cobra" - - "github.com/0xPolygon/polygon-edge/command/helper" -) - -func GetCommand() *cobra.Command { - ibftSnapshotCmd := &cobra.Command{ - Use: "propose", - Short: "Proposes a new candidate to be added or removed from the validator set", - PreRunE: runPreRun, - Run: runCommand, - } - - setFlags(ibftSnapshotCmd) - - helper.SetRequiredFlags(ibftSnapshotCmd, params.getRequiredFlags()) - - return ibftSnapshotCmd -} - -func setFlags(cmd *cobra.Command) { - cmd.Flags().StringVar( - ¶ms.addressRaw, - addressFlag, - "", - "the address of the account to be voted for", - ) - - cmd.Flags().StringVar( - ¶ms.rawBLSPublicKey, - blsFlag, - "", - "the BLS Public Key of the account to be voted for", - ) - - cmd.Flags().StringVar( - ¶ms.vote, - voteFlag, - "", - fmt.Sprintf( - "requested change to the validator set. Possible values: [%s, %s]", - authVote, - dropVote, - ), - ) - - cmd.MarkFlagsRequiredTogether(addressFlag, voteFlag) -} - -func runPreRun(_ *cobra.Command, _ []string) error { - if err := params.validateFlags(); err != nil { - return err - } - - return params.initRawParams() -} - -func runCommand(cmd *cobra.Command, _ []string) { - outputter := command.InitializeOutputter(cmd) - defer outputter.WriteOutput() - - if err := params.proposeCandidate(helper.GetGRPCAddress(cmd)); err != nil { - outputter.SetError(err) - - return - } - - outputter.SetCommandResult(params.getResult()) -} diff --git a/command/ibft/propose/params.go b/command/ibft/propose/params.go deleted file mode 100644 index 1db40125ea..0000000000 --- a/command/ibft/propose/params.go +++ /dev/null @@ -1,139 +0,0 @@ -package propose - -import ( - "context" - "encoding/hex" - "errors" - "fmt" - "strings" - - "github.com/0xPolygon/polygon-edge/command" - "github.com/0xPolygon/polygon-edge/command/helper" - ibftOp "github.com/0xPolygon/polygon-edge/consensus/ibft/proto" - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/types" -) - -const ( - voteFlag = "vote" - addressFlag = "addr" - blsFlag = "bls" -) - -const ( - authVote = "auth" - dropVote = "drop" -) - -var ( - errInvalidVoteType = errors.New("invalid vote type") - errInvalidAddressFormat = errors.New("invalid address format") -) - -var ( - params = &proposeParams{} -) - -type proposeParams struct { - addressRaw string - rawBLSPublicKey string - - vote string - address types.Address - blsPublicKey []byte -} - -func (p *proposeParams) getRequiredFlags() []string { - return []string{ - voteFlag, - addressFlag, - } -} - -func (p *proposeParams) validateFlags() error { - if !isValidVoteType(p.vote) { - return errInvalidVoteType - } - - return nil -} - -func (p *proposeParams) initRawParams() error { - if err := p.initAddress(); err != nil { - return err - } - - if err := 
p.initBLSPublicKey(); err != nil { - return err - } - - return nil -} - -func (p *proposeParams) initAddress() error { - p.address = types.Address{} - if err := p.address.UnmarshalText([]byte(p.addressRaw)); err != nil { - return errInvalidAddressFormat - } - - return nil -} - -func (p *proposeParams) initBLSPublicKey() error { - if p.rawBLSPublicKey == "" { - return nil - } - - blsPubkeyBytes, err := hex.DecodeString(strings.TrimPrefix(p.rawBLSPublicKey, "0x")) - if err != nil { - return fmt.Errorf("failed to parse BLS Public Key: %w", err) - } - - if _, err := crypto.UnmarshalBLSPublicKey(blsPubkeyBytes); err != nil { - return err - } - - p.blsPublicKey = blsPubkeyBytes - - return nil -} - -func isValidVoteType(vote string) bool { - return vote == authVote || vote == dropVote -} - -func (p *proposeParams) proposeCandidate(grpcAddress string) error { - ibftClient, err := helper.GetIBFTOperatorClientConnection(grpcAddress) - if err != nil { - return err - } - - if _, err := ibftClient.Propose( - context.Background(), - p.getCandidate(), - ); err != nil { - return err - } - - return nil -} - -func (p *proposeParams) getCandidate() *ibftOp.Candidate { - res := &ibftOp.Candidate{ - Address: p.address.String(), - Auth: p.vote == authVote, - } - - if p.blsPublicKey != nil { - res.BlsPubkey = p.blsPublicKey - } - - return res -} - -func (p *proposeParams) getResult() command.CommandResult { - return &IBFTProposeResult{ - Address: p.address.String(), - Vote: p.vote, - } -} diff --git a/command/ibft/propose/result.go b/command/ibft/propose/result.go deleted file mode 100644 index c4c69569da..0000000000 --- a/command/ibft/propose/result.go +++ /dev/null @@ -1,39 +0,0 @@ -package propose - -import ( - "bytes" - "fmt" -) - -type IBFTProposeResult struct { - Address string `json:"-"` - Vote string `json:"-"` -} - -func (r *IBFTProposeResult) GetOutput() string { - var buffer bytes.Buffer - - buffer.WriteString("\n[IBFT PROPOSE]\n") - buffer.WriteString(r.Message()) - buffer.WriteString("\n") - - return buffer.String() -} - -func (r *IBFTProposeResult) Message() string { - if r.Vote == authVote { - return fmt.Sprintf( - "Successfully voted for the addition of address [%s] to the validator set", - r.Address, - ) - } - - return fmt.Sprintf( - "Successfully voted for the removal of validator at address [%s] from the validator set", - r.Address, - ) -} - -func (r *IBFTProposeResult) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`{"message": "%s"}`, r.Message())), nil -} diff --git a/command/ibft/quorum/ibft_quorum.go b/command/ibft/quorum/ibft_quorum.go deleted file mode 100644 index e54a0a9e43..0000000000 --- a/command/ibft/quorum/ibft_quorum.go +++ /dev/null @@ -1,62 +0,0 @@ -package quorum - -import ( - "fmt" - - "github.com/0xPolygon/polygon-edge/command" - "github.com/0xPolygon/polygon-edge/command/helper" - "github.com/spf13/cobra" -) - -func GetCommand() *cobra.Command { - ibftQuorumCmd := &cobra.Command{ - Use: "quorum", - Short: "Specify the block number after which quorum optimal will be used for reaching consensus", - PreRunE: runPreRun, - Run: runCommand, - } - - setFlags(ibftQuorumCmd) - helper.SetRequiredFlags(ibftQuorumCmd, params.getRequiredFlags()) - - return ibftQuorumCmd -} - -func setFlags(cmd *cobra.Command) { - cmd.Flags().StringVar( - ¶ms.genesisPath, - chainFlag, - fmt.Sprintf("./%s", command.DefaultGenesisFileName), - "the genesis file to update", - ) - - cmd.Flags().Uint64Var( - ¶ms.from, - fromFlag, - 0, - "the height to switch the quorum calculation", - ) -} - 
-func runPreRun(_ *cobra.Command, _ []string) error { - return params.initRawParams() -} - -func runCommand(cmd *cobra.Command, _ []string) { - outputter := command.InitializeOutputter(cmd) - defer outputter.WriteOutput() - - if err := params.updateGenesisConfig(); err != nil { - outputter.SetError(err) - - return - } - - if err := params.overrideGenesisConfig(); err != nil { - outputter.SetError(err) - - return - } - - outputter.SetCommandResult(params.getResult()) -} diff --git a/command/ibft/quorum/params.go b/command/ibft/quorum/params.go deleted file mode 100644 index 4ad469d710..0000000000 --- a/command/ibft/quorum/params.go +++ /dev/null @@ -1,99 +0,0 @@ -package quorum - -import ( - "errors" - "fmt" - "os" - - "github.com/0xPolygon/polygon-edge/chain" - "github.com/0xPolygon/polygon-edge/command" - "github.com/0xPolygon/polygon-edge/command/helper" - "github.com/0xPolygon/polygon-edge/helper/common" -) - -const ( - fromFlag = "from" - chainFlag = "chain" -) - -var ( - params = &quorumParams{} -) - -type quorumParams struct { - genesisConfig *chain.Chain - from uint64 - genesisPath string -} - -func (p *quorumParams) initChain() error { - cc, err := chain.Import(p.genesisPath) - if err != nil { - return fmt.Errorf( - "failed to load chain config from %s: %w", - p.genesisPath, - err, - ) - } - - p.genesisConfig = cc - - return nil -} - -func (p *quorumParams) initRawParams() error { - return p.initChain() -} - -func (p *quorumParams) getRequiredFlags() []string { - return []string{ - fromFlag, - } -} - -func (p *quorumParams) updateGenesisConfig() error { - return appendIBFTQuorum( - p.genesisConfig, - p.from, - ) -} - -func (p *quorumParams) overrideGenesisConfig() error { - // Remove the current genesis configuration from disk - if err := os.Remove(p.genesisPath); err != nil { - return err - } - - // Save the new genesis configuration - if err := helper.WriteGenesisConfigToDisk( - p.genesisConfig, - p.genesisPath, - ); err != nil { - return err - } - - return nil -} - -func (p *quorumParams) getResult() command.CommandResult { - return &IBFTQuorumResult{ - Chain: p.genesisPath, - From: common.JSONNumber{Value: p.from}, - } -} - -func appendIBFTQuorum( - cc *chain.Chain, - from uint64, -) error { - ibftConfig, ok := cc.Params.Engine["ibft"].(map[string]interface{}) - if !ok { - return errors.New(`"ibft" setting doesn't exist in "engine" of genesis.json'`) - } - - ibftConfig["quorumSizeBlockNum"] = from - - cc.Params.Engine["ibft"] = ibftConfig - - return nil -} diff --git a/command/ibft/quorum/result.go b/command/ibft/quorum/result.go deleted file mode 100644 index 848fb9a3de..0000000000 --- a/command/ibft/quorum/result.go +++ /dev/null @@ -1,30 +0,0 @@ -package quorum - -import ( - "bytes" - "fmt" - - "github.com/0xPolygon/polygon-edge/command/helper" - "github.com/0xPolygon/polygon-edge/helper/common" -) - -type IBFTQuorumResult struct { - Chain string `json:"chain"` - From common.JSONNumber `json:"from"` -} - -func (r *IBFTQuorumResult) GetOutput() string { - var buffer bytes.Buffer - - buffer.WriteString("\n[NEW IBFT QUORUM START]\n") - - outputs := []string{ - fmt.Sprintf("Chain|%s", r.Chain), - fmt.Sprintf("From|%d", r.From.Value), - } - - buffer.WriteString(helper.FormatKV(outputs)) - buffer.WriteString("\n") - - return buffer.String() -} diff --git a/command/ibft/snapshot/ibft_snapshot.go b/command/ibft/snapshot/ibft_snapshot.go deleted file mode 100644 index 17973cc255..0000000000 --- a/command/ibft/snapshot/ibft_snapshot.go +++ /dev/null @@ -1,50 +0,0 @@ -package snapshot 
- -import ( - "math" - - "github.com/0xPolygon/polygon-edge/command" - "github.com/0xPolygon/polygon-edge/command/helper" - "github.com/spf13/cobra" -) - -func GetCommand() *cobra.Command { - ibftSnapshotCmd := &cobra.Command{ - Use: "snapshot", - Short: "Returns the IBFT snapshot at the latest block number, unless a block number is specified", - Run: runCommand, - } - - setFlags(ibftSnapshotCmd) - - return ibftSnapshotCmd -} - -func setFlags(cmd *cobra.Command) { - cmd.Flags().Uint64Var( - ¶ms.blockNumber, - numberFlag, - math.MaxUint64, - "the block height (number) for the snapshot", - ) -} - -func runCommand(cmd *cobra.Command, _ []string) { - outputter := command.InitializeOutputter(cmd) - defer outputter.WriteOutput() - - if err := params.initSnapshot(helper.GetGRPCAddress(cmd)); err != nil { - outputter.SetError(err) - - return - } - - result, err := newIBFTSnapshotResult(params.snapshot) - if err != nil { - outputter.SetError(err) - - return - } - - outputter.SetCommandResult(result) -} diff --git a/command/ibft/snapshot/params.go b/command/ibft/snapshot/params.go deleted file mode 100644 index 1076388b05..0000000000 --- a/command/ibft/snapshot/params.go +++ /dev/null @@ -1,55 +0,0 @@ -package snapshot - -import ( - "context" - "math" - - "github.com/0xPolygon/polygon-edge/command/helper" - ibftOp "github.com/0xPolygon/polygon-edge/consensus/ibft/proto" -) - -const ( - numberFlag = "number" -) - -var ( - params = &snapshotParams{} -) - -type snapshotParams struct { - blockNumber uint64 - - snapshot *ibftOp.Snapshot -} - -func (p *snapshotParams) initSnapshot(grpcAddress string) error { - ibftClient, err := helper.GetIBFTOperatorClientConnection(grpcAddress) - if err != nil { - return err - } - - snapshot, err := ibftClient.GetSnapshot( - context.Background(), - p.getSnapshotRequest(), - ) - if err != nil { - return err - } - - p.snapshot = snapshot - - return nil -} - -func (p *snapshotParams) getSnapshotRequest() *ibftOp.SnapshotReq { - req := &ibftOp.SnapshotReq{ - Latest: true, - } - - if p.blockNumber != math.MaxUint64 { - req.Latest = false - req.Number = p.blockNumber - } - - return req -} diff --git a/command/ibft/snapshot/result.go b/command/ibft/snapshot/result.go deleted file mode 100644 index e959d18820..0000000000 --- a/command/ibft/snapshot/result.go +++ /dev/null @@ -1,123 +0,0 @@ -package snapshot - -import ( - "bytes" - "fmt" - - "github.com/0xPolygon/polygon-edge/command/helper" - ibftHelper "github.com/0xPolygon/polygon-edge/command/ibft/helper" - ibftOp "github.com/0xPolygon/polygon-edge/consensus/ibft/proto" - "github.com/0xPolygon/polygon-edge/validators" -) - -type IBFTSnapshotVote struct { - Proposer string `json:"proposer"` - Address string `json:"address"` - Vote ibftHelper.Vote `json:"vote"` -} - -type IBFTSnapshotResult struct { - Number uint64 `json:"number"` - Hash string `json:"hash"` - Votes []IBFTSnapshotVote `json:"votes"` - Validators []validators.Validator `json:"validators"` -} - -func newIBFTSnapshotResult(resp *ibftOp.Snapshot) (*IBFTSnapshotResult, error) { - res := &IBFTSnapshotResult{ - Number: resp.Number, - Hash: resp.Hash, - Votes: make([]IBFTSnapshotVote, len(resp.Votes)), - Validators: make([]validators.Validator, len(resp.Validators)), - } - - for i, v := range resp.Votes { - res.Votes[i].Proposer = v.Validator - res.Votes[i].Address = v.Proposed - res.Votes[i].Vote = ibftHelper.BoolToVote(v.Auth) - } - - var ( - validatorType validators.ValidatorType - err error - ) - - for i, v := range resp.Validators { - if validatorType, err = 
validators.ParseValidatorType(v.Type); err != nil { - return nil, err - } - - validator, err := validators.NewValidatorFromType(validatorType) - if err != nil { - return nil, err - } - - if err := validator.SetFromBytes(v.Data); err != nil { - return nil, err - } - - res.Validators[i] = validator - } - - return res, nil -} - -func (r *IBFTSnapshotResult) GetOutput() string { - var buffer bytes.Buffer - - buffer.WriteString("\n[IBFT SNAPSHOT]\n") - r.writeBlockData(&buffer) - r.writeVoteData(&buffer) - r.writeValidatorData(&buffer) - - return buffer.String() -} - -func (r *IBFTSnapshotResult) writeBlockData(buffer *bytes.Buffer) { - buffer.WriteString(helper.FormatKV([]string{ - fmt.Sprintf("Block|%d", r.Number), - fmt.Sprintf("Hash|%s", r.Hash), - })) - buffer.WriteString("\n") -} - -func (r *IBFTSnapshotResult) writeVoteData(buffer *bytes.Buffer) { - numVotes := len(r.Votes) - votes := make([]string, numVotes+1) - - votes[0] = "No votes found" - - if numVotes > 0 { - votes[0] = "PROPOSER|ADDRESS|VOTE TO ADD" - - for i, d := range r.Votes { - votes[i+1] = fmt.Sprintf( - "%s|%s|%s", - d.Proposer, - d.Address, - ibftHelper.VoteToString(d.Vote), - ) - } - } - - buffer.WriteString("\n[VOTES]\n") - buffer.WriteString(helper.FormatList(votes)) - buffer.WriteString("\n") -} - -func (r *IBFTSnapshotResult) writeValidatorData(buffer *bytes.Buffer) { - numValidators := len(r.Validators) - validators := make([]string, numValidators+1) - validators[0] = "No validators found" - - if numValidators > 0 { - validators[0] = "ADDRESS" - for i, d := range r.Validators { - validators[i+1] = d.String() - } - } - - buffer.WriteString("\n[VALIDATORS]\n") - buffer.WriteString(helper.FormatList(validators)) - buffer.WriteString("\n") -} diff --git a/command/ibft/status/ibft_status.go b/command/ibft/status/ibft_status.go deleted file mode 100644 index ba55b81104..0000000000 --- a/command/ibft/status/ibft_status.go +++ /dev/null @@ -1,46 +0,0 @@ -package status - -import ( - "context" - - "github.com/0xPolygon/polygon-edge/command" - "github.com/0xPolygon/polygon-edge/command/helper" - ibftOp "github.com/0xPolygon/polygon-edge/consensus/ibft/proto" - "github.com/spf13/cobra" - empty "google.golang.org/protobuf/types/known/emptypb" -) - -func GetCommand() *cobra.Command { - return &cobra.Command{ - Use: "status", - Short: "Returns the current validator key of the IBFT client", - Run: runCommand, - } -} - -func runCommand(cmd *cobra.Command, _ []string) { - outputter := command.InitializeOutputter(cmd) - defer outputter.WriteOutput() - - statusResponse, err := getIBFTStatus(helper.GetGRPCAddress(cmd)) - if err != nil { - outputter.SetError(err) - - return - } - - outputter.SetCommandResult(&IBFTStatusResult{ - ValidatorKey: statusResponse.Key, - }) -} - -func getIBFTStatus(grpcAddress string) (*ibftOp.IbftStatusResp, error) { - client, err := helper.GetIBFTOperatorClientConnection( - grpcAddress, - ) - if err != nil { - return nil, err - } - - return client.Status(context.Background(), &empty.Empty{}) -} diff --git a/command/ibft/status/result.go b/command/ibft/status/result.go deleted file mode 100644 index 64f018b576..0000000000 --- a/command/ibft/status/result.go +++ /dev/null @@ -1,24 +0,0 @@ -package status - -import ( - "bytes" - "fmt" - - "github.com/0xPolygon/polygon-edge/command/helper" -) - -type IBFTStatusResult struct { - ValidatorKey string `json:"validator_key"` -} - -func (r *IBFTStatusResult) GetOutput() string { - var buffer bytes.Buffer - - buffer.WriteString("\n[VALIDATOR STATUS]\n") - 
buffer.WriteString(helper.FormatKV([]string{ - fmt.Sprintf("Validator key|%s", r.ValidatorKey), - })) - buffer.WriteString("\n") - - return buffer.String() -} diff --git a/command/ibft/switch/ibft_switch.go b/command/ibft/switch/ibft_switch.go deleted file mode 100644 index 419a97bd8e..0000000000 --- a/command/ibft/switch/ibft_switch.go +++ /dev/null @@ -1,135 +0,0 @@ -package ibftswitch - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/0xPolygon/polygon-edge/command" - "github.com/0xPolygon/polygon-edge/command/helper" - "github.com/0xPolygon/polygon-edge/validators" -) - -func GetCommand() *cobra.Command { - ibftSwitchCmd := &cobra.Command{ - Use: "switch", - Short: "Add settings in genesis.json to switch IBFT type", - PreRunE: runPreRun, - Run: runCommand, - } - - setFlags(ibftSwitchCmd) - helper.SetRequiredFlags(ibftSwitchCmd, params.getRequiredFlags()) - - return ibftSwitchCmd -} - -func setFlags(cmd *cobra.Command) { - cmd.Flags().StringVar( - ¶ms.genesisPath, - chainFlag, - fmt.Sprintf("./%s", command.DefaultGenesisFileName), - "the genesis file to update", - ) - - cmd.Flags().StringVar( - ¶ms.typeRaw, - typeFlag, - "", - "the new IBFT type [PoA, PoS]", - ) - - { - // switch block height - cmd.Flags().StringVar( - ¶ms.deploymentRaw, - deploymentFlag, - "", - "the height to deploy the contract in PoS", - ) - - cmd.Flags().StringVar( - ¶ms.fromRaw, - fromFlag, - "", - "the height to switch the new type", - ) - } - - // IBFT - { - cmd.Flags().StringVar( - ¶ms.rawIBFTValidatorType, - command.IBFTValidatorTypeFlag, - string(validators.BLSValidatorType), - "the type of validators in IBFT", - ) - } - - { - // PoS Configuration - cmd.Flags().StringVar( - ¶ms.minValidatorCountRaw, - command.MinValidatorCountFlag, - "", - "the minimum number of validators in the validator set for PoS", - ) - - cmd.Flags().StringVar( - ¶ms.maxValidatorCountRaw, - command.MaxValidatorCountFlag, - "", - "the maximum number of validators in the validator set for PoS", - ) - - cmd.Flags().StringVar( - ¶ms.validatorRootPath, - command.ValidatorRootFlag, - command.DefaultValidatorRoot, - "root path for validator folder directory. "+ - "Needs to be present if validators is omitted", - ) - - cmd.Flags().StringVar( - ¶ms.validatorPrefixPath, - command.ValidatorPrefixFlag, - command.DefaultValidatorPrefix, - "prefix path for validator folder directory. "+ - "Needs to be present if validators is omitted", - ) - - cmd.Flags().StringArrayVar( - ¶ms.validatorsRaw, - command.ValidatorFlag, - []string{}, - "addresses to be used as IBFT validators, can be used multiple times. 
"+ - "Needs to be present if validators-prefix is omitted", - ) - - cmd.MarkFlagsMutuallyExclusive(command.ValidatorPrefixFlag, command.ValidatorFlag) - cmd.MarkFlagsMutuallyExclusive(command.ValidatorRootFlag, command.ValidatorFlag) - } -} - -func runPreRun(_ *cobra.Command, _ []string) error { - return params.initRawParams() -} - -func runCommand(cmd *cobra.Command, _ []string) { - outputter := command.InitializeOutputter(cmd) - defer outputter.WriteOutput() - - if err := params.updateGenesisConfig(); err != nil { - outputter.SetError(err) - - return - } - - if err := params.overrideGenesisConfig(); err != nil { - outputter.SetError(err) - - return - } - - outputter.SetCommandResult(params.getResult()) -} diff --git a/command/ibft/switch/params.go b/command/ibft/switch/params.go deleted file mode 100644 index 0f13d68314..0000000000 --- a/command/ibft/switch/params.go +++ /dev/null @@ -1,445 +0,0 @@ -package ibftswitch - -import ( - "errors" - "fmt" - "os" - - "github.com/0xPolygon/polygon-edge/chain" - "github.com/0xPolygon/polygon-edge/command" - "github.com/0xPolygon/polygon-edge/command/helper" - "github.com/0xPolygon/polygon-edge/consensus/ibft/fork" - "github.com/0xPolygon/polygon-edge/helper/common" - "github.com/0xPolygon/polygon-edge/validators" -) - -const ( - chainFlag = "chain" - typeFlag = "type" - deploymentFlag = "deployment" - fromFlag = "from" -) - -var ( - ErrFromPositive = errors.New(`"from" must be positive number`) - ErrIBFTConfigNotFound = errors.New(`"ibft" config doesn't exist in "engine" of genesis.json'`) - ErrSameIBFTAndValidatorType = errors.New("cannot specify same IBFT type and validator type as the last fork") - ErrLessFromThanLastFrom = errors.New(`"from" must be greater than the beginning height of last fork`) - ErrInvalidValidatorsUpdateHeight = errors.New(`cannot specify a less height than 2 for validators update`) -) - -var ( - params = &switchParams{} -) - -type switchParams struct { - genesisPath string - typeRaw string - ibftType fork.IBFTType - - // height - deploymentRaw string - deployment *uint64 - fromRaw string - from uint64 - - rawIBFTValidatorType string - ibftValidatorType validators.ValidatorType - - // PoA - validatorRootPath string - validatorPrefixPath string - validatorsRaw []string - ibftValidators validators.Validators - - // PoS - maxValidatorCountRaw string - maxValidatorCount *uint64 - minValidatorCountRaw string - minValidatorCount *uint64 - - genesisConfig *chain.Chain -} - -func (p *switchParams) getRequiredFlags() []string { - return []string{ - typeFlag, - fromFlag, - } -} - -func (p *switchParams) initRawParams() error { - if err := p.initMechanismType(); err != nil { - return err - } - - if err := p.initIBFTValidatorType(); err != nil { - return err - } - - if err := p.initDeployment(); err != nil { - return err - } - - if err := p.initFrom(); err != nil { - return err - } - - if err := p.initPoAConfig(); err != nil { - return err - } - - if err := p.initPoSConfig(); err != nil { - return err - } - - if err := p.initChain(); err != nil { - return err - } - - return nil -} - -func (p *switchParams) initMechanismType() error { - ibftType, err := fork.ParseIBFTType(p.typeRaw) - if err != nil { - return fmt.Errorf("unable to parse mechanism type: %w", err) - } - - p.ibftType = ibftType - - return nil -} - -func (p *switchParams) initIBFTValidatorType() error { - if p.rawIBFTValidatorType == "" { - return nil - } - - var err error - - if p.ibftValidatorType, err = validators.ParseValidatorType(p.rawIBFTValidatorType); err != nil 
{ - return err - } - - return nil -} - -func (p *switchParams) initDeployment() error { - if p.deploymentRaw != "" { - if p.ibftType != fork.PoS { - return fmt.Errorf( - "doesn't support contract deployment in %s", - string(p.ibftType), - ) - } - - d, err := common.ParseUint64orHex(&p.deploymentRaw) - if err != nil { - return fmt.Errorf( - "unable to parse deployment value, %w", - err, - ) - } - - p.deployment = &d - } - - return nil -} - -func (p *switchParams) initPoAConfig() error { - if p.ibftType != fork.PoA { - return nil - } - - p.ibftValidators = validators.NewValidatorSetFromType(p.ibftValidatorType) - - if err := p.setValidatorSetFromPrefixPath(); err != nil { - return err - } - - if err := p.setValidatorSetFromCli(); err != nil { - return err - } - - // Validate if validator number exceeds max number - if uint64(p.ibftValidators.Len()) > common.MaxSafeJSInt { - return command.ErrValidatorNumberExceedsMax - } - - return nil -} - -func (p *switchParams) setValidatorSetFromPrefixPath() error { - if p.validatorPrefixPath == "" { - return nil - } - - validators, err := command.GetValidatorsFromPrefixPath( - p.validatorRootPath, - p.validatorPrefixPath, - p.ibftValidatorType, - ) - if err != nil { - return fmt.Errorf("failed to read from prefix: %w", err) - } - - if err := p.ibftValidators.Merge(validators); err != nil { - return err - } - - return nil -} - -// setValidatorSetFromCli sets validator set from cli command -func (p *switchParams) setValidatorSetFromCli() error { - if len(p.validatorsRaw) == 0 { - return nil - } - - newSet, err := validators.ParseValidators(p.ibftValidatorType, p.validatorsRaw) - if err != nil { - return err - } - - if err = p.ibftValidators.Merge(newSet); err != nil { - return err - } - - return nil -} - -func (p *switchParams) initPoSConfig() error { - if p.ibftType != fork.PoS { - if p.minValidatorCountRaw != "" || p.maxValidatorCountRaw != "" { - return fmt.Errorf( - "doesn't support min validator count in %s", - string(p.ibftType), - ) - } - - return nil - } - - if p.minValidatorCountRaw != "" { - value, err := common.ParseUint64orHex(&p.minValidatorCountRaw) - if err != nil { - return fmt.Errorf( - "unable to parse min validator count value, %w", - err, - ) - } - - p.minValidatorCount = &value - } - - if p.maxValidatorCountRaw != "" { - value, err := common.ParseUint64orHex(&p.maxValidatorCountRaw) - if err != nil { - return fmt.Errorf( - "unable to parse max validator count value, %w", - err, - ) - } - - p.maxValidatorCount = &value - } - - if err := p.validateMinMaxValidatorNumber(); err != nil { - return err - } - - // Validate validatorRootPath only if validators information were not provided via CLI flag - if len(p.validatorsRaw) == 0 { - if _, err := os.Stat(p.validatorRootPath); err != nil { - return fmt.Errorf("invalid validators path ('%s') provided. 
Error: %w", p.validatorRootPath, err) - } - } - - return nil -} - -func (p *switchParams) validateMinMaxValidatorNumber() error { - // Validate min and max validators number if not nil - // If they are not defined they will get default values - // in PoSMechanism - minValidatorCount := uint64(1) - maxValidatorCount := common.MaxSafeJSInt - - if p.minValidatorCount != nil { - minValidatorCount = *p.minValidatorCount - } - - if p.maxValidatorCount != nil { - maxValidatorCount = *p.maxValidatorCount - } - - if err := command.ValidateMinMaxValidatorsNumber(minValidatorCount, maxValidatorCount); err != nil { - return err - } - - return nil -} - -func (p *switchParams) initFrom() error { - from, err := common.ParseUint64orHex(&p.fromRaw) - if err != nil { - return fmt.Errorf("unable to parse from value, %w", err) - } - - if from <= 0 { - return ErrFromPositive - } - - p.from = from - - return nil -} - -func (p *switchParams) initChain() error { - cc, err := chain.Import(p.genesisPath) - if err != nil { - return fmt.Errorf( - "failed to load chain config from %s: %w", - p.genesisPath, - err, - ) - } - - p.genesisConfig = cc - - return nil -} - -func (p *switchParams) updateGenesisConfig() error { - return appendIBFTForks( - p.genesisConfig, - p.ibftType, - p.ibftValidatorType, - p.from, - p.deployment, - p.ibftValidators, - p.maxValidatorCount, - p.minValidatorCount, - ) -} - -func (p *switchParams) overrideGenesisConfig() error { - // Remove the current genesis configuration from disk - if err := os.Remove(p.genesisPath); err != nil { - return err - } - - // Save the new genesis configuration - if err := helper.WriteGenesisConfigToDisk( - p.genesisConfig, - p.genesisPath, - ); err != nil { - return err - } - - return nil -} - -func (p *switchParams) getResult() command.CommandResult { - result := &IBFTSwitchResult{ - Chain: p.genesisPath, - Type: p.ibftType, - ValidatorType: p.ibftValidatorType, - From: common.JSONNumber{Value: p.from}, - } - - if p.deployment != nil { - result.Deployment = &common.JSONNumber{Value: *p.deployment} - } - - if p.minValidatorCount != nil { - result.MinValidatorCount = common.JSONNumber{Value: *p.minValidatorCount} - } else { - result.MinValidatorCount = common.JSONNumber{Value: 1} - } - - if p.maxValidatorCount != nil { - result.MaxValidatorCount = common.JSONNumber{Value: *p.maxValidatorCount} - } else { - result.MaxValidatorCount = common.JSONNumber{Value: common.MaxSafeJSInt} - } - - return result -} - -func appendIBFTForks( - cc *chain.Chain, - ibftType fork.IBFTType, - validatorType validators.ValidatorType, - from uint64, - deployment *uint64, - // PoA - validators validators.Validators, - // PoS - maxValidatorCount *uint64, - minValidatorCount *uint64, -) error { - ibftConfig, ok := cc.Params.Engine["ibft"].(map[string]interface{}) - if !ok { - return ErrIBFTConfigNotFound - } - - ibftForks, err := fork.GetIBFTForks(ibftConfig) - if err != nil { - return err - } - - lastFork := ibftForks[len(ibftForks)-1] - - if (ibftType == lastFork.Type) && - (validatorType == lastFork.ValidatorType) { - return ErrSameIBFTAndValidatorType - } - - if from <= lastFork.From.Value { - return ErrLessFromThanLastFrom - } - - if ibftType == fork.PoA && validators != nil && from <= 1 { - // can't update validators at block 0 - return ErrInvalidValidatorsUpdateHeight - } - - lastFork.To = &common.JSONNumber{Value: from - 1} - - newFork := fork.IBFTFork{ - Type: ibftType, - ValidatorType: validatorType, - From: common.JSONNumber{Value: from}, - BlockTime: lastFork.BlockTime, - } - - 
switch ibftType { - case fork.PoA: - newFork.Validators = validators - case fork.PoS: - if deployment != nil { - newFork.Deployment = &common.JSONNumber{Value: *deployment} - } - - if maxValidatorCount != nil { - newFork.MaxValidatorCount = &common.JSONNumber{Value: *maxValidatorCount} - } - - if minValidatorCount != nil { - newFork.MinValidatorCount = &common.JSONNumber{Value: *minValidatorCount} - } - } - - ibftForks = append(ibftForks, &newFork) - ibftConfig["types"] = ibftForks - - // remove leftover config - delete(ibftConfig, "type") - - cc.Params.Engine["ibft"] = ibftConfig - - return nil -} diff --git a/command/ibft/switch/result.go b/command/ibft/switch/result.go deleted file mode 100644 index afd4c07f6f..0000000000 --- a/command/ibft/switch/result.go +++ /dev/null @@ -1,51 +0,0 @@ -package ibftswitch - -import ( - "bytes" - "fmt" - - "github.com/0xPolygon/polygon-edge/command/helper" - "github.com/0xPolygon/polygon-edge/consensus/ibft/fork" - "github.com/0xPolygon/polygon-edge/helper/common" - "github.com/0xPolygon/polygon-edge/validators" -) - -type IBFTSwitchResult struct { - Chain string `json:"chain"` - Type fork.IBFTType `json:"type"` - ValidatorType validators.ValidatorType `json:"validator_type"` - From common.JSONNumber `json:"from"` - Deployment *common.JSONNumber `json:"deployment,omitempty"` - MaxValidatorCount common.JSONNumber `json:"maxValidatorCount"` - MinValidatorCount common.JSONNumber `json:"minValidatorCount"` -} - -func (r *IBFTSwitchResult) GetOutput() string { - var buffer bytes.Buffer - - buffer.WriteString("\n[NEW IBFT FORK]\n") - - outputs := []string{ - fmt.Sprintf("Chain|%s", r.Chain), - fmt.Sprintf("Type|%s", r.Type), - fmt.Sprintf("ValidatorType|%s", r.ValidatorType), - } - - if r.Deployment != nil { - outputs = append(outputs, fmt.Sprintf("Deployment|%d", r.Deployment.Value)) - } - - outputs = append(outputs, fmt.Sprintf("From|%d", r.From.Value)) - - if r.Type == fork.PoS { - outputs = append(outputs, - fmt.Sprintf("MaxValidatorCount|%d", r.MaxValidatorCount.Value), - fmt.Sprintf("MinValidatorCount|%d", r.MinValidatorCount.Value), - ) - } - - buffer.WriteString(helper.FormatKV(outputs)) - buffer.WriteString("\n") - - return buffer.String() -} diff --git a/command/root/root.go b/command/root/root.go index eeeeea4940..5fe6c372dd 100644 --- a/command/root/root.go +++ b/command/root/root.go @@ -10,7 +10,6 @@ import ( "github.com/0xPolygon/polygon-edge/command/bridge" "github.com/0xPolygon/polygon-edge/command/genesis" "github.com/0xPolygon/polygon-edge/command/helper" - "github.com/0xPolygon/polygon-edge/command/ibft" "github.com/0xPolygon/polygon-edge/command/license" "github.com/0xPolygon/polygon-edge/command/monitor" "github.com/0xPolygon/polygon-edge/command/peers" @@ -52,7 +51,6 @@ func (rc *RootCommand) registerSubCommands() { peers.GetCommand(), rootchain.GetCommand(), monitor.GetCommand(), - ibft.GetCommand(), backup.GetCommand(), genesis.GetCommand(), server.GetCommand(), diff --git a/consensus/ibft/consensus.go b/consensus/ibft/consensus.go deleted file mode 100644 index 74bcd03461..0000000000 --- a/consensus/ibft/consensus.go +++ /dev/null @@ -1,57 +0,0 @@ -package ibft - -import ( - "context" - "sync" - - "github.com/0xPolygon/go-ibft/core" -) - -// IBFTConsensus is a convenience wrapper for the go-ibft package -type IBFTConsensus struct { - *core.IBFT - - wg sync.WaitGroup - - cancelSequence context.CancelFunc -} - -func newIBFT( - logger core.Logger, - backend core.Backend, - transport core.Transport, -) *IBFTConsensus { - return 
&IBFTConsensus{ - IBFT: core.NewIBFT(logger, backend, transport), - wg: sync.WaitGroup{}, - } -} - -// runSequence starts the underlying consensus mechanism for the given height. -// It may be called by a single thread at any given time -func (c *IBFTConsensus) runSequence(height uint64) <-chan struct{} { - done := make(chan struct{}) - ctx, cancel := context.WithCancel(context.Background()) - - c.cancelSequence = cancel - - c.wg.Add(1) - - go func() { - defer func() { - cancel() - c.wg.Done() - close(done) - }() - - c.RunSequence(ctx, height) - }() - - return done -} - -// stopSequence terminates the running IBFT sequence gracefully and waits for it to return -func (c *IBFTConsensus) stopSequence() { - c.cancelSequence() - c.wg.Wait() -} diff --git a/consensus/ibft/consensus_backend.go b/consensus/ibft/consensus_backend.go deleted file mode 100644 index 3285be41ec..0000000000 --- a/consensus/ibft/consensus_backend.go +++ /dev/null @@ -1,424 +0,0 @@ -package ibft - -import ( - "context" - "fmt" - "math/big" - "time" - - "github.com/0xPolygon/go-ibft/messages" - "github.com/0xPolygon/go-ibft/messages/proto" - "github.com/0xPolygon/polygon-edge/consensus" - "github.com/0xPolygon/polygon-edge/consensus/ibft/signer" - "github.com/0xPolygon/polygon-edge/helper/hex" - "github.com/0xPolygon/polygon-edge/state" - "github.com/0xPolygon/polygon-edge/types" -) - -func (i *backendIBFT) BuildProposal(view *proto.View) []byte { - var ( - latestHeader = i.blockchain.Header() - latestBlockNumber = latestHeader.Number - ) - - if latestBlockNumber+1 != view.Height { - i.logger.Error( - "unable to build block, due to lack of parent block", - "num", - latestBlockNumber, - ) - - return nil - } - - block, err := i.buildBlock(latestHeader) - if err != nil { - i.logger.Error("cannot build block", "num", view.Height, "err", err) - - return nil - } - - return block.MarshalRLP() -} - -// InsertProposal inserts a proposal of which the consensus has been got -func (i *backendIBFT) InsertProposal( - proposal *proto.Proposal, - committedSeals []*messages.CommittedSeal, -) { - newBlock := &types.Block{} - if err := newBlock.UnmarshalRLP(proposal.RawProposal); err != nil { - i.logger.Error("cannot unmarshal proposal", "err", err) - - return - } - - committedSealsMap := make(map[types.Address][]byte, len(committedSeals)) - - for _, cm := range committedSeals { - committedSealsMap[types.BytesToAddress(cm.Signer)] = cm.Signature - } - - // Copy extra data for debugging purposes - extraDataOriginal := newBlock.Header.ExtraData - extraDataBackup := make([]byte, len(extraDataOriginal)) - copy(extraDataBackup, extraDataOriginal) - - // Push the committed seals to the header - header, err := i.currentSigner.WriteCommittedSeals(newBlock.Header, proposal.Round, committedSealsMap) - if err != nil { - i.logger.Error("cannot write committed seals", "err", err) - - return - } - - // WriteCommittedSeals alters the extra data before writing the block - // It doesn't handle errors while pushing changes which can result in - // corrupted extra data. 
- // We don't know exact circumstance of the unmarshalRLP error - // This is a safety net to help us narrow down and also recover before - // writing the block - if err := i.ValidateExtraDataFormat(newBlock.Header); err != nil { - //Format committed seals to make them more readable - committedSealsStr := make([]string, len(committedSealsMap)) - for i, seal := range committedSeals { - committedSealsStr[i] = fmt.Sprintf("{signer=%v signature=%v}", - hex.EncodeToHex(seal.Signer), - hex.EncodeToHex(seal.Signature)) - } - - i.logger.Error("cannot write block: corrupted extra data", - "err", err, - "before", hex.EncodeToHex(extraDataBackup), - "after", hex.EncodeToHex(header.ExtraData), - "committedSeals", committedSealsStr) - - return - } - - newBlock.Header = header - - // Save the block locally - if err := i.blockchain.WriteBlock(newBlock, "consensus"); err != nil { - i.logger.Error("cannot write block", "err", err) - - return - } - - i.updateMetrics(newBlock) - - i.logger.Info( - "block committed", - "number", newBlock.Number(), - "hash", newBlock.Hash(), - "validation_type", i.currentSigner.Type(), - "validators", i.currentValidators.Len(), - "committed", len(committedSeals), - ) - - if err := i.currentHooks.PostInsertBlock(newBlock); err != nil { - i.logger.Error( - "failed to call PostInsertBlock hook", - "height", newBlock.Number(), - "hash", newBlock.Hash(), - "err", err, - ) - - return - } - - // after the block has been written we reset the txpool so that - // the old transactions are removed - i.txpool.ResetWithHeaders(newBlock.Header) -} - -func (i *backendIBFT) ID() []byte { - return i.currentSigner.Address().Bytes() -} - -func (i *backendIBFT) MaximumFaultyNodes() uint64 { - return uint64(CalcMaxFaultyNodes(i.currentValidators)) -} - -// DISCLAIMER: IBFT will be deprecated so we set 1 as a voting power to all validators -func (i *backendIBFT) GetVotingPowers(height uint64) (map[string]*big.Int, error) { - validators, err := i.forkManager.GetValidators(height) - if err != nil { - return nil, err - } - - result := make(map[string]*big.Int, validators.Len()) - - for index := 0; index < validators.Len(); index++ { - strAddress := types.AddressToString(validators.At(uint64(index)).Addr()) - result[strAddress] = big.NewInt(1) // set 1 as voting power to everyone - } - - return result, nil -} - -// buildBlock builds the block, based on the passed in snapshot and parent header -func (i *backendIBFT) buildBlock(parent *types.Header) (*types.Block, error) { - header := &types.Header{ - ParentHash: parent.Hash, - Number: parent.Number + 1, - Miner: types.ZeroAddress.Bytes(), - Nonce: types.Nonce{}, - MixHash: signer.IstanbulDigest, - // this is required because blockchain needs difficulty to organize blocks and forks - Difficulty: parent.Number + 1, - StateRoot: types.EmptyRootHash, // this avoids needing state for now - Sha3Uncles: types.EmptyUncleHash, - GasLimit: parent.GasLimit, // Inherit from parent for now, will need to adjust dynamically later. 
- } - - // calculate gas limit based on parent header - gasLimit, err := i.blockchain.CalculateGasLimit(header.Number) - if err != nil { - return nil, err - } - - // calculate base fee - header.GasLimit = gasLimit - - if err := i.currentHooks.ModifyHeader(header, i.currentSigner.Address()); err != nil { - return nil, err - } - - // Set the header timestamp - potentialTimestamp := i.calcHeaderTimestamp(parent.Timestamp, time.Now().UTC()) - header.Timestamp = uint64(potentialTimestamp.Unix()) - - parentCommittedSeals, err := i.extractParentCommittedSeals(parent) - if err != nil { - return nil, err - } - - i.currentSigner.InitIBFTExtra(header, i.currentValidators, parentCommittedSeals) - - transition, err := i.executor.BeginTxn(parent.StateRoot, header, i.currentSigner.Address()) - if err != nil { - return nil, err - } - - // Get the block transactions - writeCtx, cancelFn := context.WithDeadline(context.Background(), potentialTimestamp) - defer cancelFn() - - txs := i.writeTransactions( - writeCtx, - gasLimit, - header.Number, - transition, - ) - - // provide dummy block instance to the PreCommitState - // (for the IBFT consensus, it is correct to have just a header, as only it is used) - if err := i.PreCommitState(&types.Block{Header: header}, transition); err != nil { - return nil, err - } - - _, root, err := transition.Commit() - if err != nil { - return nil, fmt.Errorf("failed to commit the state changes: %w", err) - } - - header.StateRoot = root - header.GasUsed = transition.TotalGas() - - // build the block - block := consensus.BuildBlock(consensus.BuildBlockParams{ - Header: header, - Txns: txs, - Receipts: transition.Receipts(), - }) - - // write the seal of the block after all the fields are completed - header, err = i.currentSigner.WriteProposerSeal(header) - if err != nil { - return nil, err - } - - block.Header = header - - // compute the hash, this is only a provisional hash since the final one - // is sealed after all the committed seals - block.Header.ComputeHash() - - i.logger.Info("build block", "number", header.Number, "txs", len(txs)) - - return block, nil -} - -// calcHeaderTimestamp calculates the new block timestamp, based -// on the block time and parent timestamp -func (i *backendIBFT) calcHeaderTimestamp(parentUnix uint64, currentTime time.Time) time.Time { - var ( - parentTimestamp = time.Unix(int64(parentUnix), 0) - potentialTimestamp = parentTimestamp.Add(i.blockTime) - ) - - if potentialTimestamp.Before(currentTime) { - // The deadline for creating this next block - // has passed, round it to the nearest - // multiple of block time - // t........t+blockT...x (t+blockT.x; now).....t+blockT (potential) - potentialTimestamp = roundUpTime(currentTime, i.blockTime) - } - - return potentialTimestamp -} - -// roundUpTime rounds up the specified time to the -// nearest higher multiple -func roundUpTime(t time.Time, roundOn time.Duration) time.Time { - return t.Add(roundOn / 2).Round(roundOn) -} - -type status uint8 - -const ( - success status = iota - fail - skip -) - -type txExeResult struct { - tx *types.Transaction - status status -} - -type transitionInterface interface { - Write(txn *types.Transaction) error -} - -func (i *backendIBFT) writeTransactions( - writeCtx context.Context, - gasLimit, - blockNumber uint64, - transition transitionInterface, -) (executed []*types.Transaction) { - executed = make([]*types.Transaction, 0) - - if !i.currentHooks.ShouldWriteTransactions(blockNumber) { - return - } - - var ( - successful = 0 - failed = 0 - skipped = 0 - ) - - 
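// NOTE (descriptive summary of the surrounding removed code, not original source): writeCtx is the
// deadline context created in buildBlock via context.WithDeadline(..., potentialTimestamp), so
// transaction packing is bounded by the target block timestamp. The loop below peeks transactions
// from the txpool one by one and classifies each write as success (included), fail (dropped, e.g.
// gas above the block gas limit or a non-recoverable execution error), or skip (demoted on a
// recoverable error); reaching the block gas limit during execution stops packing early. The
// trailing <-writeCtx.Done() wait presumably keeps block production on the block-time cadence
// even when the pool drains before the deadline.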
defer func() { - i.logger.Info( - "executed txs", - "successful", successful, - "failed", failed, - "skipped", skipped, - "remaining", i.txpool.Length(), - ) - }() - - i.txpool.Prepare() - -write: - for { - select { - case <-writeCtx.Done(): - return - default: - // execute transactions one by one - result, ok := i.writeTransaction( - i.txpool.Peek(), - transition, - gasLimit, - ) - - if !ok { - break write - } - - tx := result.tx - - switch result.status { - case success: - executed = append(executed, tx) - successful++ - case fail: - failed++ - case skip: - skipped++ - } - } - } - - // wait for the timer to expire - <-writeCtx.Done() - - return -} - -func (i *backendIBFT) writeTransaction( - tx *types.Transaction, - transition transitionInterface, - gasLimit uint64, -) (*txExeResult, bool) { - if tx == nil { - return nil, false - } - - if tx.Gas > gasLimit { - i.txpool.Drop(tx) - - // continue processing - return &txExeResult{tx, fail}, true - } - - if err := transition.Write(tx); err != nil { - if _, ok := err.(*state.GasLimitReachedTransitionApplicationError); ok { //nolint:errorlint - // stop processing - return nil, false - } else if appErr, ok := err.(*state.TransitionApplicationError); ok && appErr.IsRecoverable { //nolint:errorlint - i.txpool.Demote(tx) - - return &txExeResult{tx, skip}, true - } else { - i.txpool.Drop(tx) - - return &txExeResult{tx, fail}, true - } - } - - i.txpool.Pop(tx) - - return &txExeResult{tx, success}, true -} - -// extractCommittedSeals extracts CommittedSeals from header -func (i *backendIBFT) extractCommittedSeals( - header *types.Header, -) (signer.Seals, error) { - signer, err := i.forkManager.GetSigner(header.Number) - if err != nil { - return nil, err - } - - extra, err := signer.GetIBFTExtra(header) - if err != nil { - return nil, err - } - - return extra.CommittedSeals, nil -} - -// extractParentCommittedSeals extracts ParentCommittedSeals from header -func (i *backendIBFT) extractParentCommittedSeals( - header *types.Header, -) (signer.Seals, error) { - if header.Number == 0 { - return nil, nil - } - - return i.extractCommittedSeals(header) -} diff --git a/consensus/ibft/consensus_backend_test.go b/consensus/ibft/consensus_backend_test.go deleted file mode 100644 index 756644305d..0000000000 --- a/consensus/ibft/consensus_backend_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package ibft - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -// TestIBFTBackend_CalculateHeaderTimestamp verifies that the header timestamp -// is successfully calculated -func TestIBFTBackend_CalculateHeaderTimestamp(t *testing.T) { - t.Parallel() - - // Reference time - now := time.Unix(time.Now().UTC().Unix(), 0) // Round down - - testTable := []struct { - name string - parentTimestamp int64 - currentTime time.Time - blockTime uint64 - - expectedTimestamp time.Time - }{ - { - "Valid clock block timestamp", - now.Add(time.Duration(-1) * time.Second).Unix(), // 1s before - now, - 1, - now, // 1s after - }, - { - "Next multiple block clock", - now.Add(time.Duration(-4) * time.Second).Unix(), // 4s before - now, - 3, - roundUpTime(now, 3*time.Second), - }, - } - - for _, testCase := range testTable { - testCase := testCase - - t.Run(testCase.name, func(t *testing.T) { - t.Parallel() - - i := &backendIBFT{ - blockTime: time.Duration(testCase.blockTime) * time.Second, - } - - assert.Equal( - t, - testCase.expectedTimestamp.Unix(), - i.calcHeaderTimestamp( - uint64(testCase.parentTimestamp), - testCase.currentTime, - ).Unix(), - ) - }) - } -} - -// 
TestIBFTBackend_RoundUpTime verifies time is rounded up correctly -func TestIBFTBackend_RoundUpTime(t *testing.T) { - t.Parallel() - - // Reference time - now := time.Now().UTC() - - calcExpected := func(time int64, multiple int64) int64 { - if time%multiple == 0 { - return time + multiple - } - - return ((time + multiple/2) / multiple) * multiple - } - - testTable := []struct { - name string - time time.Time - multiple time.Duration - - expectedTime int64 - }{ - { - "No rounding needed", - now, - 0 * time.Second, - now.Unix(), - }, - { - "Rounded up time even", - now, - 2 * time.Second, - calcExpected(now.Unix(), 2), - }, - } - - for _, testCase := range testTable { - testCase := testCase - - t.Run(testCase.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - testCase.expectedTime, - roundUpTime(testCase.time, testCase.multiple).Unix(), - ) - }) - } -} diff --git a/consensus/ibft/fork/fork.go b/consensus/ibft/fork/fork.go deleted file mode 100644 index 9d5695a680..0000000000 --- a/consensus/ibft/fork/fork.go +++ /dev/null @@ -1,173 +0,0 @@ -package fork - -import ( - "encoding/json" - "errors" - - "github.com/0xPolygon/polygon-edge/helper/common" - "github.com/0xPolygon/polygon-edge/validators" -) - -const ( - // Keys in IBFT Configuration - KeyType = "type" - KeyTypes = "types" - KeyValidatorType = "validator_type" - KeyBlockTime = "blockTime" -) - -var ( - ErrUndefinedIBFTConfig = errors.New("IBFT config is not defined") - errInvalidBlockTime = errors.New("invalid block time provided") -) - -// IBFT Fork represents setting in params.engine.ibft of genesis.json -type IBFTFork struct { - Type IBFTType `json:"type"` - ValidatorType validators.ValidatorType `json:"validator_type"` - Deployment *common.JSONNumber `json:"deployment,omitempty"` - From common.JSONNumber `json:"from"` - To *common.JSONNumber `json:"to,omitempty"` - BlockTime *common.Duration `json:"blockTime,omitempty"` - - // PoA - Validators validators.Validators `json:"validators,omitempty"` - - // PoS - MaxValidatorCount *common.JSONNumber `json:"maxValidatorCount,omitempty"` - MinValidatorCount *common.JSONNumber `json:"minValidatorCount,omitempty"` -} - -func (f *IBFTFork) UnmarshalJSON(data []byte) error { - raw := struct { - Type IBFTType `json:"type"` - ValidatorType *validators.ValidatorType `json:"validator_type,omitempty"` - Deployment *common.JSONNumber `json:"deployment,omitempty"` - From common.JSONNumber `json:"from"` - To *common.JSONNumber `json:"to,omitempty"` - BlockTime *common.Duration `json:"blockTime,omitempty"` - Validators interface{} `json:"validators,omitempty"` - MaxValidatorCount *common.JSONNumber `json:"maxValidatorCount,omitempty"` - MinValidatorCount *common.JSONNumber `json:"minValidatorCount,omitempty"` - }{} - - if err := json.Unmarshal(data, &raw); err != nil { - return err - } - - f.Type = raw.Type - f.Deployment = raw.Deployment - f.From = raw.From - f.To = raw.To - f.BlockTime = raw.BlockTime - f.MaxValidatorCount = raw.MaxValidatorCount - f.MinValidatorCount = raw.MinValidatorCount - - f.ValidatorType = validators.ECDSAValidatorType - if raw.ValidatorType != nil { - f.ValidatorType = *raw.ValidatorType - } - - if raw.Validators == nil { - return nil - } - - f.Validators = validators.NewValidatorSetFromType(f.ValidatorType) - - validatorsBytes, err := json.Marshal(raw.Validators) - if err != nil { - return err - } - - return json.Unmarshal(validatorsBytes, f.Validators) -} - -// GetIBFTForks returns IBFT fork configurations from chain config -func GetIBFTForks(ibftConfig 
map[string]interface{}) (IBFTForks, error) { - // no fork, only specifying IBFT type in chain config - if originalType, ok := ibftConfig[KeyType].(string); ok { - typ, err := ParseIBFTType(originalType) - if err != nil { - return nil, err - } - - validatorType := validators.ECDSAValidatorType - - if rawValType, ok := ibftConfig[KeyValidatorType].(string); ok { - if validatorType, err = validators.ParseValidatorType(rawValType); err != nil { - return nil, err - } - } - - var blockTime *common.Duration = nil - - blockTimeGeneric, ok := ibftConfig[KeyBlockTime] - if ok { - blockTimeRaw, err := json.Marshal(blockTimeGeneric) - if err != nil { - return nil, errInvalidBlockTime - } - - if err := json.Unmarshal(blockTimeRaw, &blockTime); err != nil { - return nil, errInvalidBlockTime - } - } - - return IBFTForks{ - { - Type: typ, - Deployment: nil, - ValidatorType: validatorType, - From: common.JSONNumber{Value: 0}, - To: nil, - BlockTime: blockTime, - }, - }, nil - } - - // with forks - if types, ok := ibftConfig[KeyTypes].([]interface{}); ok { - bytes, err := json.Marshal(types) - if err != nil { - return nil, err - } - - var forks IBFTForks - if err := json.Unmarshal(bytes, &forks); err != nil { - return nil, err - } - - return forks, nil - } - - return nil, ErrUndefinedIBFTConfig -} - -type IBFTForks []*IBFTFork - -// getByFork returns the fork in which the given height is -// it doesn't use binary search for now because number of IBFTFork is not so many -func (fs *IBFTForks) getFork(height uint64) *IBFTFork { - for idx := len(*fs) - 1; idx >= 0; idx-- { - fork := (*fs)[idx] - - if fork.From.Value <= height && (fork.To == nil || height <= fork.To.Value) { - return fork - } - } - - return nil -} - -// filterByType returns new list of IBFTFork whose type matches with the given type -func (fs *IBFTForks) filterByType(ibftType IBFTType) IBFTForks { - filteredForks := make(IBFTForks, 0) - - for _, fork := range *fs { - if fork.Type == ibftType { - filteredForks = append(filteredForks, fork) - } - } - - return filteredForks -} diff --git a/consensus/ibft/fork/fork_test.go b/consensus/ibft/fork/fork_test.go deleted file mode 100644 index f77e6c5f91..0000000000 --- a/consensus/ibft/fork/fork_test.go +++ /dev/null @@ -1,337 +0,0 @@ -package fork - -import ( - "encoding/json" - "errors" - "fmt" - "testing" - "time" - - "github.com/0xPolygon/polygon-edge/helper/common" - testHelper "github.com/0xPolygon/polygon-edge/helper/tests" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/stretchr/testify/assert" -) - -func TestIBFTForkUnmarshalJSON(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - data string - expected *IBFTFork - err error - }{ - { - name: "should parse without validator type and validators", - data: fmt.Sprintf(`{ - "type": "%s", - "from": %d - }`, PoA, 0), - expected: &IBFTFork{ - Type: PoA, - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 0}, - To: nil, - Validators: nil, - MaxValidatorCount: nil, - MinValidatorCount: nil, - }, - }, - { - name: "should parse without validators", - data: fmt.Sprintf(`{ - "type": "%s", - "from": %d, - "to": %d, - "maxValidatorCount": %d, - "MinValidatorCount": %d - }`, PoS, 10, 15, 100, 1), - expected: &IBFTFork{ - Type: PoS, - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 10}, - To: &common.JSONNumber{Value: 15}, - Validators: nil, - MaxValidatorCount: &common.JSONNumber{Value: 100}, - MinValidatorCount: 
&common.JSONNumber{Value: 1}, - }, - }, - { - name: "should parse without validators", - data: fmt.Sprintf(`{ - "type": "%s", - "validator_type": "%s", - "blockTime": %d, - "validators": [ - { - "Address": "%s" - }, - { - "Address": "%s" - } - ], - "from": %d, - "to": %d - }`, - PoA, validators.ECDSAValidatorType, - 3000000000, - types.StringToAddress("1"), types.StringToAddress("2"), - 16, 20, - ), - expected: &IBFTFork{ - Type: PoA, - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 16}, - To: &common.JSONNumber{Value: 20}, - Validators: validators.NewECDSAValidatorSet( - validators.NewECDSAValidator(types.StringToAddress("1")), - validators.NewECDSAValidator(types.StringToAddress("2")), - ), - BlockTime: &common.Duration{Duration: 3 * time.Second}, - MaxValidatorCount: nil, - MinValidatorCount: nil, - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - fork := &IBFTFork{} - - err := json.Unmarshal([]byte(test.data), fork) - - testHelper.AssertErrorMessageContains(t, test.err, err) - - assert.Equal(t, test.expected, fork) - }) - } -} - -func TestGetIBFTForks(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - config map[string]interface{} - res IBFTForks - err error - }{ - { - name: "should return error if invalid type is set in type", - config: map[string]interface{}{ - "type": "invalid", - }, - res: nil, - err: errors.New("invalid IBFT type invalid"), - }, - { - name: "should return a single fork for ECDSA if IBFTConfig has type but it doesn't have validator type", - config: map[string]interface{}{ - "type": "PoA", - }, - res: IBFTForks{ - { - Type: PoA, - ValidatorType: validators.ECDSAValidatorType, - Deployment: nil, - From: common.JSONNumber{Value: 0}, - To: nil, - }, - }, - err: nil, - }, - { - name: "should return a single fork for ECDSA if IBFTConfig has type and validator type", - config: map[string]interface{}{ - "type": "PoS", - "validator_type": "bls", - }, - res: IBFTForks{ - { - Type: PoS, - ValidatorType: validators.BLSValidatorType, - Deployment: nil, - From: common.JSONNumber{Value: 0}, - To: nil, - }, - }, - err: nil, - }, - { - name: "should return multiple forks", - config: map[string]interface{}{ - "types": []interface{}{ - map[string]interface{}{ - "type": "PoA", - "validator_type": "ecdsa", - "from": 0, - "to": 10, - }, - map[string]interface{}{ - "type": "PoA", - "validator_type": "bls", - "from": 11, - }, - }, - }, - res: IBFTForks{ - { - Type: PoA, - ValidatorType: validators.ECDSAValidatorType, - Deployment: nil, - From: common.JSONNumber{Value: 0}, - To: &common.JSONNumber{Value: 10}, - }, - { - Type: PoA, - ValidatorType: validators.BLSValidatorType, - Deployment: nil, - From: common.JSONNumber{Value: 11}, - To: nil, - }, - }, - err: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := GetIBFTForks(test.config) - - assert.Equal( - t, - test.res, - res, - ) - - testHelper.AssertErrorMessageContains( - t, - test.err, - err, - ) - }) - } -} - -func TestIBFTForks_getFork(t *testing.T) { - t.Parallel() - - forks := IBFTForks{ - { - From: common.JSONNumber{Value: 0}, - To: &common.JSONNumber{Value: 10}, - }, - { - From: common.JSONNumber{Value: 11}, - To: &common.JSONNumber{Value: 50}, - }, - { - From: common.JSONNumber{Value: 51}, - }, - } - - tests := []struct { - name string - height uint64 - expected *IBFTFork - }{ - { - name: "should return the first fork if 
height is 0", - height: 0, - expected: forks[0], - }, - { - name: "should return the first fork if height is 1", - height: 1, - expected: forks[0], - }, - { - name: "should return the first fork if height is 10", - height: 10, - expected: forks[0], - }, - { - name: "should return the first fork if height is 11", - height: 11, - expected: forks[1], - }, - { - name: "should return the first fork if height is 50", - height: 50, - expected: forks[1], - }, - { - name: "should return the first fork if height is 51", - height: 51, - expected: forks[2], - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.expected, - forks.getFork(test.height), - ) - }) - } -} - -func TestIBFTForks_filterByType(t *testing.T) { - t.Parallel() - - forks := IBFTForks{ - { - Type: PoA, - From: common.JSONNumber{Value: 0}, - To: &common.JSONNumber{Value: 10}, - }, - { - Type: PoS, - From: common.JSONNumber{Value: 11}, - To: &common.JSONNumber{Value: 20}, - }, - { - Type: PoA, - From: common.JSONNumber{Value: 21}, - To: &common.JSONNumber{Value: 30}, - }, - { - Type: PoS, - From: common.JSONNumber{Value: 31}, - }, - } - - assert.Equal( - t, - IBFTForks{ - forks[0], - forks[2], - }, - forks.filterByType(PoA), - ) - - assert.Equal( - t, - IBFTForks{ - forks[1], - forks[3], - }, - forks.filterByType(PoS), - ) -} diff --git a/consensus/ibft/fork/helper.go b/consensus/ibft/fork/helper.go deleted file mode 100644 index bc00ac31b6..0000000000 --- a/consensus/ibft/fork/helper.go +++ /dev/null @@ -1,62 +0,0 @@ -package fork - -import ( - "encoding/json" - "os" - - "github.com/0xPolygon/polygon-edge/helper/common" - "github.com/0xPolygon/polygon-edge/validators/store/snapshot" -) - -// loadSnapshotMetadata loads Metadata from file -func loadSnapshotMetadata(path string) (*snapshot.SnapshotMetadata, error) { - var meta *snapshot.SnapshotMetadata - if err := readDataStore(path, &meta); err != nil { - return nil, err - } - - return meta, nil -} - -// loadSnapshots loads Snapshots from file -func loadSnapshots(path string) ([]*snapshot.Snapshot, error) { - snaps := []*snapshot.Snapshot{} - if err := readDataStore(path, &snaps); err != nil { - return nil, err - } - - return snaps, nil -} - -// readDataStore attempts to read the specific file from file storage -// return nil if the file doesn't exist -func readDataStore(path string, obj interface{}) error { - if _, err := os.Stat(path); os.IsNotExist(err) { - return nil - } - - data, err := os.ReadFile(path) - if err != nil { - return err - } - - if err := json.Unmarshal(data, obj); err != nil { - return err - } - - return nil -} - -// writeDataStore attempts to write the specific file to file storage -func writeDataStore(path string, obj interface{}) error { - data, err := json.Marshal(obj) - if err != nil { - return err - } - - if err := common.SaveFileSafe(path, data, 0660); err != nil { - return err - } - - return nil -} diff --git a/consensus/ibft/fork/helper_test.go b/consensus/ibft/fork/helper_test.go deleted file mode 100644 index fb79a12a8e..0000000000 --- a/consensus/ibft/fork/helper_test.go +++ /dev/null @@ -1,256 +0,0 @@ -package fork - -import ( - "encoding/json" - "errors" - "os" - "path" - "testing" - - "github.com/0xPolygon/polygon-edge/crypto" - testHelper "github.com/0xPolygon/polygon-edge/helper/tests" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/0xPolygon/polygon-edge/validators/store" - 
"github.com/0xPolygon/polygon-edge/validators/store/snapshot" - "github.com/stretchr/testify/assert" -) - -var ( - sampleJSON = `{"Hello":"World"}` - sampleMap = map[string]interface{}{ - "Hello": "World", - } -) - -func createTestTempDirectory(t *testing.T) string { - t.Helper() - - path, err := os.MkdirTemp("", "temp") - if err != nil { - t.Logf("failed to create temp directory, err=%+v", err) - - t.FailNow() - } - - t.Cleanup(func() { - os.RemoveAll(path) - }) - - return path -} - -func Test_loadSnapshotMetadata(t *testing.T) { - t.Parallel() - - t.Run("should return error if the file doesn't exist", func(t *testing.T) { - t.Parallel() - - dirPath := createTestTempDirectory(t) - filePath := path.Join(dirPath, "test.dat") - - res, err := loadSnapshotMetadata(filePath) - - assert.NoError(t, err) - assert.Nil(t, res) - }) - - t.Run("should load metadata", func(t *testing.T) { - t.Parallel() - - metadata := &snapshot.SnapshotMetadata{ - LastBlock: 100, - } - - fileData, err := json.Marshal(metadata) - assert.NoError(t, err) - - dirPath := createTestTempDirectory(t) - filePath := path.Join(dirPath, "test.dat") - assert.NoError(t, os.WriteFile(filePath, fileData, 0775)) - - res, err := loadSnapshotMetadata(filePath) - - assert.NoError( - t, - err, - ) - - assert.Equal( - t, - metadata, - res, - ) - }) -} - -func Test_loadSnapshots(t *testing.T) { - t.Parallel() - - t.Run("should return error if the file doesn't exist", func(t *testing.T) { - t.Parallel() - - dirPath := createTestTempDirectory(t) - filePath := path.Join(dirPath, "test.dat") - - res, err := loadSnapshots(filePath) - - assert.NoError(t, err) - assert.Equal(t, []*snapshot.Snapshot{}, res) - }) - - t.Run("should load metadata", func(t *testing.T) { - t.Parallel() - - snapshots := []*snapshot.Snapshot{ - { - Number: 10, - Hash: types.BytesToHash(crypto.Keccak256([]byte{0x10})).String(), - Set: validators.NewECDSAValidatorSet( - validators.NewECDSAValidator(types.StringToAddress("1")), - ), - Votes: []*store.Vote{ - { - Candidate: validators.NewECDSAValidator(types.StringToAddress("2")), - Validator: types.StringToAddress("1"), - Authorize: true, - }, - }, - }, - } - - fileData, err := json.Marshal(snapshots) - assert.NoError(t, err) - - dirPath := createTestTempDirectory(t) - filePath := path.Join(dirPath, "test.dat") - assert.NoError(t, os.WriteFile(filePath, fileData, 0775)) - - res, err := loadSnapshots(filePath) - - assert.NoError( - t, - err, - ) - - assert.Equal( - t, - snapshots, - res, - ) - }) -} - -func Test_readDataStore(t *testing.T) { - t.Parallel() - - t.Run("should return error if the file doesn't exist", func(t *testing.T) { - t.Parallel() - - dirPath := createTestTempDirectory(t) - filePath := path.Join(dirPath, "test.dat") - var data interface{} - - assert.Equal( - t, - nil, - readDataStore(filePath, data), - ) - }) - - t.Run("should return error if the content is not json", func(t *testing.T) { - t.Parallel() - - dirPath := createTestTempDirectory(t) - filePath := path.Join(dirPath, "test.dat") - - assert.NoError( - t, - os.WriteFile(filePath, []byte("hello: world"), 0775), - ) - - data := map[string]interface{}{} - - assert.IsType( - t, - &json.SyntaxError{}, - readDataStore(filePath, data), - ) - }) - - t.Run("should read and map to object", func(t *testing.T) { - t.Parallel() - - dirPath := createTestTempDirectory(t) - filePath := path.Join(dirPath, "test.dat") - - assert.NoError( - t, - os.WriteFile(filePath, []byte(sampleJSON), 0775), - ) - - data := map[string]interface{}{} - - assert.NoError( - t, - 
readDataStore(filePath, &data), - ) - - assert.Equal( - t, - sampleMap, - data, - ) - }) -} - -func Test_writeDataStore(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - data interface{} - expectedStoredData string - expectedErr error - }{ - { - name: "should return error if json.Marshal failed", - data: func() {}, - expectedStoredData: "", - expectedErr: errors.New("json: unsupported type: func()"), - }, - { - name: "should return error if WriteFile failed", - data: map[string]interface{}{ - "Hello": "World", - }, - expectedStoredData: sampleJSON, - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - dirPath := createTestTempDirectory(t) - filePath := path.Join(dirPath, "test.dat") - - testHelper.AssertErrorMessageContains( - t, - test.expectedErr, - writeDataStore(filePath, test.data), - ) - - readData, _ := os.ReadFile(filePath) - - assert.Equal( - t, - test.expectedStoredData, - string(readData), - ) - }) - } -} diff --git a/consensus/ibft/fork/hookregister.go b/consensus/ibft/fork/hookregister.go deleted file mode 100644 index 5d2f291c4a..0000000000 --- a/consensus/ibft/fork/hookregister.go +++ /dev/null @@ -1,102 +0,0 @@ -package fork - -import ( - "github.com/0xPolygon/polygon-edge/consensus/ibft/hook" -) - -// PoAHookRegisterer that registers hooks for PoA mode -type PoAHookRegister struct { - getValidatorsStore func(*IBFTFork) ValidatorStore - poaForks IBFTForks - updateValidatorsForks map[uint64]*IBFTFork -} - -// NewPoAHookRegisterer is a constructor of PoAHookRegister -func NewPoAHookRegisterer( - getValidatorsStore func(*IBFTFork) ValidatorStore, - forks IBFTForks, -) *PoAHookRegister { - poaForks := forks.filterByType(PoA) - - updateValidatorsForks := make(map[uint64]*IBFTFork) - - for _, fork := range poaForks { - if fork.Validators == nil { - continue - } - - updateValidatorsForks[fork.From.Value] = fork - } - - return &PoAHookRegister{ - getValidatorsStore: getValidatorsStore, - poaForks: poaForks, - updateValidatorsForks: updateValidatorsForks, - } -} - -// RegisterHooks registers hooks of PoA for voting and validators updating -func (r *PoAHookRegister) RegisterHooks(hooks *hook.Hooks, height uint64) { - if currentFork := r.poaForks.getFork(height); currentFork != nil { - // in PoA mode currently - validatorStore := r.getValidatorsStore(currentFork) - - registerHeaderModifierHooks(hooks, validatorStore) - } - - // update validators in the end of the last block - if updateValidatorsFork, ok := r.updateValidatorsForks[height+1]; ok { - validatorStore := r.getValidatorsStore(updateValidatorsFork) - - registerUpdateValidatorsHooks( - hooks, - validatorStore, - updateValidatorsFork.Validators, - updateValidatorsFork.From.Value, - ) - } -} - -// PoAHookRegisterer that registers hooks for PoS mode -type PoSHookRegister struct { - posForks IBFTForks - epochSize uint64 - deployContractForks map[uint64]*IBFTFork -} - -// NewPoSHookRegister is a constructor of PoSHookRegister -func NewPoSHookRegister( - forks IBFTForks, - epochSize uint64, -) *PoSHookRegister { - posForks := forks.filterByType(PoS) - - deployContractForks := make(map[uint64]*IBFTFork) - - for _, fork := range posForks { - if fork.Deployment == nil { - continue - } - - deployContractForks[fork.Deployment.Value] = fork - } - - return &PoSHookRegister{ - posForks: posForks, - epochSize: epochSize, - deployContractForks: deployContractForks, - } -} - -// RegisterHooks registers hooks of PoA for additional block 
verification and contract deployment -func (r *PoSHookRegister) RegisterHooks(hooks *hook.Hooks, height uint64) { - if currentFork := r.posForks.getFork(height); currentFork != nil { - // in PoS mode currently - registerTxInclusionGuardHooks(hooks, r.epochSize) - } - - if deploymentFork, ok := r.deployContractForks[height]; ok { - // deploy or update staking contract in deployment height - registerStakingContractDeploymentHooks(hooks, deploymentFork) - } -} diff --git a/consensus/ibft/fork/hooks.go b/consensus/ibft/fork/hooks.go deleted file mode 100644 index 74d9435d3b..0000000000 --- a/consensus/ibft/fork/hooks.go +++ /dev/null @@ -1,136 +0,0 @@ -package fork - -import ( - "errors" - - "github.com/0xPolygon/polygon-edge/consensus/ibft/hook" - "github.com/0xPolygon/polygon-edge/contracts/staking" - "github.com/0xPolygon/polygon-edge/helper/hex" - stakingHelper "github.com/0xPolygon/polygon-edge/helper/staking" - "github.com/0xPolygon/polygon-edge/state" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/0xPolygon/polygon-edge/validators/store" -) - -var ( - ErrTxInLastEpochOfBlock = errors.New("block must not have transactions in the last of epoch") -) - -// HeaderModifier is an interface for the struct that modifies block header for additional process -type HeaderModifier interface { - ModifyHeader(*types.Header, types.Address) error - VerifyHeader(*types.Header) error - ProcessHeader(*types.Header) error -} - -// registerHeaderModifierHooks registers hooks to modify header by validator store -func registerHeaderModifierHooks( - hooks *hook.Hooks, - validatorStore store.ValidatorStore, -) { - if modifier, ok := validatorStore.(HeaderModifier); ok { - hooks.ModifyHeaderFunc = modifier.ModifyHeader - hooks.VerifyHeaderFunc = modifier.VerifyHeader - hooks.ProcessHeaderFunc = modifier.ProcessHeader - } -} - -// Updatable is an interface for the struct that updates validators in the middle -type Updatable interface { - // UpdateValidatorSet updates validators forcibly - // in order that new validators are available from the given height - UpdateValidatorSet(validators.Validators, uint64) error -} - -// registerUpdateValidatorsHooks registers hooks to update validators in the middle -func registerUpdateValidatorsHooks( - hooks *hook.Hooks, - validatorStore store.ValidatorStore, - validators validators.Validators, - fromHeight uint64, -) { - if us, ok := validatorStore.(Updatable); ok { - hooks.PostInsertBlockFunc = func(b *types.Block) error { - if fromHeight != b.Number()+1 { - return nil - } - - // update validators if the block height is the one before beginning height - return us.UpdateValidatorSet(validators, fromHeight) - } - } -} - -// registerPoSVerificationHooks registers that hooks to prevent the last epoch block from having transactions -func registerTxInclusionGuardHooks(hooks *hook.Hooks, epochSize uint64) { - isLastEpoch := func(height uint64) bool { - return height > 0 && height%epochSize == 0 - } - - hooks.ShouldWriteTransactionFunc = func(height uint64) bool { - return !isLastEpoch(height) - } - - hooks.VerifyBlockFunc = func(block *types.Block) error { - if isLastEpoch(block.Number()) && len(block.Transactions) > 0 { - return ErrTxInLastEpochOfBlock - } - - return nil - } -} - -// registerStakingContractDeploymentHooks registers hooks -// to deploy or update staking contract -func registerStakingContractDeploymentHooks( - hooks *hook.Hooks, - fork *IBFTFork, -) { - hooks.PreCommitStateFunc = func(header *types.Header, 
txn *state.Transition) error { - // safe check - if header.Number != fork.Deployment.Value { - return nil - } - - if txn.AccountExists(staking.AddrStakingContract) { - // update bytecode of deployed contract - codeBytes, err := hex.DecodeHex(stakingHelper.StakingSCBytecode) - if err != nil { - return err - } - - return txn.SetCodeDirectly(staking.AddrStakingContract, codeBytes) - } else { - // deploy contract - contractState, err := stakingHelper.PredeployStakingSC( - fork.Validators, - getPreDeployParams(fork), - ) - - if err != nil { - return err - } - - return txn.SetAccountDirectly(staking.AddrStakingContract, contractState) - } - } -} - -// getPreDeployParams returns PredeployParams for Staking Contract from IBFTFork -func getPreDeployParams(fork *IBFTFork) stakingHelper.PredeployParams { - params := stakingHelper.PredeployParams{ - MinValidatorCount: stakingHelper.MinValidatorCount, - MaxValidatorCount: stakingHelper.MaxValidatorCount, - } - - if fork.MinValidatorCount != nil { - params.MinValidatorCount = fork.MinValidatorCount.Value - } - - if fork.MaxValidatorCount != nil { - params.MaxValidatorCount = fork.MaxValidatorCount.Value - } - - return params -} diff --git a/consensus/ibft/fork/hooks_test.go b/consensus/ibft/fork/hooks_test.go deleted file mode 100644 index db5869a557..0000000000 --- a/consensus/ibft/fork/hooks_test.go +++ /dev/null @@ -1,402 +0,0 @@ -package fork - -import ( - "errors" - "testing" - - "github.com/0xPolygon/polygon-edge/chain" - "github.com/0xPolygon/polygon-edge/consensus/ibft/hook" - "github.com/0xPolygon/polygon-edge/contracts/staking" - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/helper/common" - stakingHelper "github.com/0xPolygon/polygon-edge/helper/staking" - "github.com/0xPolygon/polygon-edge/state" - itrie "github.com/0xPolygon/polygon-edge/state/immutable-trie" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/0xPolygon/polygon-edge/validators/store" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/assert" -) - -type mockHeaderModifierStore struct { - store.ValidatorStore - - ModifyHeaderFunc func(*types.Header, types.Address) error - VerifyHeaderFunc func(*types.Header) error - ProcessHeaderFunc func(*types.Header) error -} - -func (m *mockHeaderModifierStore) ModifyHeader(header *types.Header, addr types.Address) error { - return m.ModifyHeaderFunc(header, addr) -} - -func (m *mockHeaderModifierStore) VerifyHeader(header *types.Header) error { - return m.VerifyHeaderFunc(header) -} - -func (m *mockHeaderModifierStore) ProcessHeader(header *types.Header) error { - return m.ProcessHeaderFunc(header) -} - -type mockUpdatableStore struct { - store.ValidatorStore - - UpdateValidatorStoreFunc func(validators.Validators, uint64) error -} - -func (m *mockUpdatableStore) UpdateValidatorSet(validators validators.Validators, height uint64) error { - return m.UpdateValidatorStoreFunc(validators, height) -} - -func Test_registerHeaderModifierHooks(t *testing.T) { - t.Parallel() - - t.Run("should do nothing if validator store doesn't implement HeaderModifier", func(t *testing.T) { - t.Parallel() - - type invalidValidatorStoreMock struct { - store.ValidatorStore - } - - hooks := &hook.Hooks{} - mockStore := &invalidValidatorStoreMock{} - - registerHeaderModifierHooks(hooks, mockStore) - - assert.Equal( - t, - &hook.Hooks{}, - hooks, - ) - }) - - t.Run("should register functions to the hooks", func(t *testing.T) { - t.Parallel() - - var ( - header = 
&types.Header{ - Number: 100, - Hash: types.BytesToHash(crypto.Keccak256([]byte{0x10, 0x0})), - } - addr = types.StringToAddress("1") - - err1 = errors.New("error 1") - err2 = errors.New("error 1") - err3 = errors.New("error 1") - ) - - hooks := &hook.Hooks{} - mockStore := &mockHeaderModifierStore{ - ModifyHeaderFunc: func(h *types.Header, a types.Address) error { - assert.Equal(t, header, h) - assert.Equal(t, addr, a) - - return err1 - }, - VerifyHeaderFunc: func(h *types.Header) error { - assert.Equal(t, header, h) - - return err2 - }, - ProcessHeaderFunc: func(h *types.Header) error { - assert.Equal(t, header, h) - - return err3 - }, - } - - registerHeaderModifierHooks(hooks, mockStore) - - assert.Nil(t, hooks.ShouldWriteTransactionFunc) - assert.Nil(t, hooks.VerifyBlockFunc) - assert.Nil(t, hooks.PreCommitStateFunc) - assert.Nil(t, hooks.PostInsertBlockFunc) - - assert.Equal( - t, - hooks.ModifyHeader(header, addr), - err1, - ) - assert.Equal( - t, - hooks.VerifyHeader(header), - err2, - ) - assert.Equal( - t, - hooks.ProcessHeader(header), - err3, - ) - }) -} - -func Test_registerUpdateValidatorsHooks(t *testing.T) { - t.Parallel() - - var ( - vals = validators.NewECDSAValidatorSet( - validators.NewECDSAValidator(types.StringToAddress("1")), - validators.NewECDSAValidator(types.StringToAddress("2")), - ) - ) - - t.Run("should do nothing if validator store doesn't implement Updatable", func(t *testing.T) { - t.Parallel() - - type invalidValidatorStoreMock struct { - store.ValidatorStore - } - - hooks := &hook.Hooks{} - mockStore := &invalidValidatorStoreMock{} - - registerUpdateValidatorsHooks(hooks, mockStore, vals, 0) - - assert.Equal( - t, - &hook.Hooks{}, - hooks, - ) - }) - - t.Run("should register UpdateValidatorSet to the hooks", func(t *testing.T) { - t.Parallel() - - var ( - fromHeight uint64 = 10 - err = errors.New("test") - - block = &types.Block{ - Header: &types.Header{}, - Transactions: []*types.Transaction{}, - Uncles: []*types.Header{}, - } - ) - - hooks := &hook.Hooks{} - mockStore := &mockUpdatableStore{ - UpdateValidatorStoreFunc: func(v validators.Validators, h uint64) error { - assert.Equal(t, vals, v) - assert.Equal(t, fromHeight, h) - - return err - }, - } - - registerUpdateValidatorsHooks(hooks, mockStore, vals, fromHeight) - - assert.Nil(t, hooks.ModifyHeaderFunc) - assert.Nil(t, hooks.VerifyHeaderFunc) - assert.Nil(t, hooks.ProcessHeaderFunc) - assert.Nil(t, hooks.ShouldWriteTransactionFunc) - assert.Nil(t, hooks.VerifyBlockFunc) - assert.Nil(t, hooks.PreCommitStateFunc) - - // case 1: the block number is not the one before fromHeight - assert.NoError( - t, - hooks.PostInsertBlockFunc(block), - ) - - // case 2: the block number is the one before fromHeight - block.Header.Number = fromHeight - 1 - - assert.Equal( - t, - hooks.PostInsertBlockFunc(block), - err, - ) - }) -} - -func Test_registerTxInclusionGuardHooks(t *testing.T) { - t.Parallel() - - epochSize := uint64(10) - hooks := &hook.Hooks{} - - registerTxInclusionGuardHooks(hooks, epochSize) - - assert.Nil(t, hooks.ModifyHeaderFunc) - assert.Nil(t, hooks.VerifyHeaderFunc) - assert.Nil(t, hooks.ProcessHeaderFunc) - assert.Nil(t, hooks.PreCommitStateFunc) - assert.Nil(t, hooks.PostInsertBlockFunc) - - var ( - cases = map[uint64]bool{ - 0: true, - epochSize - 1: true, - epochSize: false, - epochSize + 1: true, - epochSize*2 - 1: true, - epochSize * 2: false, - epochSize*2 + 1: true, - } - - blockWithoutTransactions = &types.Block{ - Header: &types.Header{}, - Transactions: []*types.Transaction{}, - } - - 
blockWithTransactions = &types.Block{ - Header: &types.Header{}, - Transactions: []*types.Transaction{ - { - Nonce: 0, - }, - }, - } - ) - - for h, ok := range cases { - assert.Equal( - t, - ok, - hooks.ShouldWriteTransactions(h), - ) - - blockWithTransactions.Header.Number = h - blockWithoutTransactions.Header.Number = h - - if ok { - assert.NoError(t, hooks.VerifyBlock(blockWithoutTransactions)) - assert.NoError(t, hooks.VerifyBlock(blockWithTransactions)) - } else { - assert.NoError(t, hooks.VerifyBlock(blockWithoutTransactions)) - assert.ErrorIs(t, ErrTxInLastEpochOfBlock, hooks.VerifyBlock(blockWithTransactions)) - } - } -} - -func newTestTransition( - t *testing.T, -) *state.Transition { - t.Helper() - - st := itrie.NewState(itrie.NewMemoryStorage()) - - ex := state.NewExecutor(&chain.Params{ - Forks: chain.AllForksEnabled, - BurnContract: map[uint64]types.Address{ - 0: types.ZeroAddress, - }, - }, st, hclog.NewNullLogger()) - - rootHash, err := ex.WriteGenesis(nil, types.Hash{}) - assert.NoError(t, err) - - ex.GetHash = func(h *types.Header) state.GetHashByNumber { - return func(i uint64) types.Hash { - return rootHash - } - } - - transition, err := ex.BeginTxn( - rootHash, - &types.Header{}, - types.ZeroAddress, - ) - assert.NoError(t, err) - - return transition -} - -func Test_registerStakingContractDeploymentHooks(t *testing.T) { - t.Parallel() - - hooks := &hook.Hooks{} - fork := &IBFTFork{ - Deployment: &common.JSONNumber{ - Value: 10, - }, - } - - registerStakingContractDeploymentHooks(hooks, fork) - - assert.Nil(t, hooks.ShouldWriteTransactionFunc) - assert.Nil(t, hooks.ModifyHeaderFunc) - assert.Nil(t, hooks.VerifyHeaderFunc) - assert.Nil(t, hooks.ProcessHeaderFunc) - assert.Nil(t, hooks.PostInsertBlockFunc) - - txn := newTestTransition(t) - - // deployment should not happen - assert.NoError( - t, - hooks.PreCommitState(&types.Header{Number: 5}, txn), - ) - - assert.False( - t, - txn.AccountExists(staking.AddrStakingContract), - ) - - // should deploy contract - assert.NoError( - t, - hooks.PreCommitState(&types.Header{Number: 10}, txn), - ) - - assert.True( - t, - txn.AccountExists(staking.AddrStakingContract), - ) - - // should update only bytecode (if contract is deployed again, it returns error) - assert.NoError( - t, - hooks.PreCommitState(&types.Header{Number: 10}, txn), - ) - - assert.True( - t, - txn.AccountExists(staking.AddrStakingContract), - ) -} - -func Test_getPreDeployParams(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - fork *IBFTFork - params stakingHelper.PredeployParams - }{ - { - name: "should use the given heights", - fork: &IBFTFork{ - MinValidatorCount: &common.JSONNumber{Value: 10}, - MaxValidatorCount: &common.JSONNumber{Value: 20}, - }, - params: stakingHelper.PredeployParams{ - MinValidatorCount: 10, - MaxValidatorCount: 20, - }, - }, - { - name: "should use the default values", - fork: &IBFTFork{}, - params: stakingHelper.PredeployParams{ - MinValidatorCount: stakingHelper.MinValidatorCount, - MaxValidatorCount: stakingHelper.MaxValidatorCount, - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.params, - getPreDeployParams(test.fork), - ) - }) - } -} diff --git a/consensus/ibft/fork/manager.go b/consensus/ibft/fork/manager.go deleted file mode 100644 index 4e91e8f84f..0000000000 --- a/consensus/ibft/fork/manager.go +++ /dev/null @@ -1,326 +0,0 @@ -package fork - -import ( - "errors" - - 
"github.com/0xPolygon/polygon-edge/consensus/ibft/hook" - "github.com/0xPolygon/polygon-edge/consensus/ibft/signer" - "github.com/0xPolygon/polygon-edge/secrets" - "github.com/0xPolygon/polygon-edge/state" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/0xPolygon/polygon-edge/validators/store" - "github.com/0xPolygon/polygon-edge/validators/store/contract" - "github.com/hashicorp/go-hclog" -) - -const ( - loggerName = "fork_manager" - snapshotMetadataFilename = "metadata" - snapshotSnapshotsFilename = "snapshots" -) - -var ( - ErrForkNotFound = errors.New("fork not found") - ErrSignerNotFound = errors.New("signer not found") - ErrValidatorStoreNotFound = errors.New("validator set not found") - ErrKeyManagerNotFound = errors.New("key manager not found") -) - -// ValidatorStore is an interface that ForkManager calls for Validator Store -type ValidatorStore interface { - store.ValidatorStore - // Close defines termination process - Close() error - // GetValidators is a method to return validators at the given height - GetValidators(height, epochSize, forkFrom uint64) (validators.Validators, error) -} - -// HookRegister is an interface that ForkManager calls for hook registrations -type HooksRegister interface { - // RegisterHooks register hooks for the given block height - RegisterHooks(hooks *hook.Hooks, height uint64) -} - -// HooksInterface is an interface of hooks to be called by IBFT -// This interface is referred from fork and ibft package -type HooksInterface interface { - ShouldWriteTransactions(uint64) bool - ModifyHeader(*types.Header, types.Address) error - VerifyHeader(*types.Header) error - VerifyBlock(*types.Block) error - ProcessHeader(*types.Header) error - PreCommitState(*types.Header, *state.Transition) error - PostInsertBlock(*types.Block) error -} - -// ForkManager is the module that has Fork configuration and multiple version of submodules -// and returns the proper submodule at specified height -type ForkManager struct { - logger hclog.Logger - blockchain store.HeaderGetter - executor contract.Executor - secretsManager secrets.SecretsManager - - // configuration - forks IBFTForks - filePath string - epochSize uint64 - - // submodule lookup - keyManagers map[validators.ValidatorType]signer.KeyManager - validatorStores map[store.SourceType]ValidatorStore - hooksRegisters map[IBFTType]HooksRegister -} - -// NewForkManager is a constructor of ForkManager -func NewForkManager( - logger hclog.Logger, - blockchain store.HeaderGetter, - executor contract.Executor, - secretManager secrets.SecretsManager, - filePath string, - epochSize uint64, - ibftConfig map[string]interface{}, -) (*ForkManager, error) { - forks, err := GetIBFTForks(ibftConfig) - if err != nil { - return nil, err - } - - fm := &ForkManager{ - logger: logger.Named(loggerName), - blockchain: blockchain, - executor: executor, - secretsManager: secretManager, - filePath: filePath, - epochSize: epochSize, - forks: forks, - keyManagers: make(map[validators.ValidatorType]signer.KeyManager), - validatorStores: make(map[store.SourceType]ValidatorStore), - hooksRegisters: make(map[IBFTType]HooksRegister), - } - - // Need initialization of signers in the constructor - // because hash calculation is called from blockchain initialization - if err := fm.initializeKeyManagers(); err != nil { - return nil, err - } - - return fm, nil -} - -// Initialize initializes ForkManager on initialization phase -func (m *ForkManager) Initialize() error { - if err := 
m.initializeValidatorStores(); err != nil { - return err - } - - m.initializeHooksRegisters() - - return nil -} - -// Close calls termination process of submodules -func (m *ForkManager) Close() error { - for _, store := range m.validatorStores { - if err := store.Close(); err != nil { - return err - } - } - - return nil -} - -// GetSigner returns a proper signer at specified height -func (m *ForkManager) GetSigner(height uint64) (signer.Signer, error) { - keyManager, err := m.getKeyManager(height) - if err != nil { - return nil, err - } - - var parentKeyManager signer.KeyManager - - if height > 1 { - if parentKeyManager, err = m.getKeyManager(height - 1); err != nil { - return nil, err - } - } - - return signer.NewSigner( - keyManager, - parentKeyManager, - ), nil -} - -// GetValidatorStore returns a proper validator set at specified height -func (m *ForkManager) GetValidatorStore(height uint64) (ValidatorStore, error) { - fork := m.forks.getFork(height) - if fork == nil { - return nil, ErrForkNotFound - } - - set := m.getValidatorStoreByIBFTFork(fork) - if set == nil { - return nil, ErrValidatorStoreNotFound - } - - return set, nil -} - -// GetValidators returns validators at specified height -func (m *ForkManager) GetValidators(height uint64) (validators.Validators, error) { - fork := m.forks.getFork(height) - if fork == nil { - return nil, ErrForkNotFound - } - - set := m.getValidatorStoreByIBFTFork(fork) - if set == nil { - return nil, ErrValidatorStoreNotFound - } - - return set.GetValidators( - height, - m.epochSize, - fork.From.Value, - ) -} - -// GetHooks returns a hooks at specified height -func (m *ForkManager) GetHooks(height uint64) HooksInterface { - hooks := &hook.Hooks{} - - for _, r := range m.hooksRegisters { - r.RegisterHooks(hooks, height) - } - - return hooks -} - -func (m *ForkManager) getValidatorStoreByIBFTFork(fork *IBFTFork) ValidatorStore { - set, ok := m.validatorStores[ibftTypesToSourceType[fork.Type]] - if !ok { - return nil - } - - return set -} - -func (m *ForkManager) getKeyManager(height uint64) (signer.KeyManager, error) { - fork := m.forks.getFork(height) - if fork == nil { - return nil, ErrForkNotFound - } - - keyManager, ok := m.keyManagers[fork.ValidatorType] - if !ok { - return nil, ErrKeyManagerNotFound - } - - return keyManager, nil -} - -// initializeKeyManagers initialize all key managers based on Fork configuration -func (m *ForkManager) initializeKeyManagers() error { - for _, fork := range m.forks { - if err := m.initializeKeyManager(fork.ValidatorType); err != nil { - return err - } - } - - return nil -} - -// initializeKeyManager initializes the sp -func (m *ForkManager) initializeKeyManager(valType validators.ValidatorType) error { - if _, ok := m.keyManagers[valType]; ok { - return nil - } - - keyManager, err := signer.NewKeyManagerFromType(m.secretsManager, valType) - if err != nil { - return err - } - - m.keyManagers[valType] = keyManager - - return nil -} - -// initializeValidatorStores initializes all validator sets based on Fork configuration -func (m *ForkManager) initializeValidatorStores() error { - for _, fork := range m.forks { - sourceType := ibftTypesToSourceType[fork.Type] - if err := m.initializeValidatorStore(sourceType); err != nil { - return err - } - } - - return nil -} - -// initializeValidatorStore initializes the specified validator set -func (m *ForkManager) initializeValidatorStore(setType store.SourceType) error { - if _, ok := m.validatorStores[setType]; ok { - return nil - } - - var ( - valStore ValidatorStore - 
err error - ) - - switch setType { - case store.Snapshot: - valStore, err = NewSnapshotValidatorStoreWrapper( - m.logger, - m.blockchain, - m.GetSigner, - m.filePath, - m.epochSize, - ) - case store.Contract: - valStore, err = NewContractValidatorStoreWrapper( - m.logger, - m.blockchain, - m.executor, - m.GetSigner, - ) - } - - if err != nil { - return err - } - - m.validatorStores[setType] = valStore - - return nil -} - -// initializeHooksRegisters initialize all HookRegisters to be used -func (m *ForkManager) initializeHooksRegisters() { - for _, fork := range m.forks { - m.initializeHooksRegister(fork.Type) - } -} - -// initializeHooksRegister initialize HookRegister by IBFTType -func (m *ForkManager) initializeHooksRegister(ibftType IBFTType) { - if _, ok := m.hooksRegisters[ibftType]; ok { - return - } - - switch ibftType { - case PoA: - m.hooksRegisters[PoA] = NewPoAHookRegisterer( - m.getValidatorStoreByIBFTFork, - m.forks, - ) - case PoS: - m.hooksRegisters[PoS] = NewPoSHookRegister( - m.forks, - m.epochSize, - ) - } -} diff --git a/consensus/ibft/fork/manager_test.go b/consensus/ibft/fork/manager_test.go deleted file mode 100644 index b56bf098eb..0000000000 --- a/consensus/ibft/fork/manager_test.go +++ /dev/null @@ -1,926 +0,0 @@ -package fork - -import ( - "errors" - "path" - "testing" - - "github.com/0xPolygon/polygon-edge/consensus/ibft/hook" - "github.com/0xPolygon/polygon-edge/consensus/ibft/signer" - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/helper/common" - testHelper "github.com/0xPolygon/polygon-edge/helper/tests" - "github.com/0xPolygon/polygon-edge/secrets" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/0xPolygon/polygon-edge/validators/store" - "github.com/0xPolygon/polygon-edge/validators/store/snapshot" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/assert" -) - -type mockValidatorStore struct { - store.ValidatorStore - - CloseFunc func() error - GetValidatorsFunc func(uint64, uint64, uint64) (validators.Validators, error) -} - -func (m *mockValidatorStore) Close() error { - return m.CloseFunc() -} - -func (m *mockValidatorStore) GetValidators(height, epoch, from uint64) (validators.Validators, error) { - return m.GetValidatorsFunc(height, epoch, from) -} - -type mockHooksRegister struct { - RegisterHooksFunc func(hooks *hook.Hooks, height uint64) -} - -func (m *mockHooksRegister) RegisterHooks(hooks *hook.Hooks, height uint64) { - m.RegisterHooksFunc(hooks, height) -} - -type mockSecretManager struct { - secrets.SecretsManager - - HasSecretFunc func(name string) bool - GetSecretFunc func(name string) ([]byte, error) -} - -func (m *mockSecretManager) HasSecret(name string) bool { - return m.HasSecretFunc(name) -} - -func (m *mockSecretManager) GetSecret(name string) ([]byte, error) { - return m.GetSecretFunc(name) -} - -func TestNewForkManager(t *testing.T) { - t.Parallel() - - _, ecdsaKeyBytes, err := crypto.GenerateAndEncodeECDSAPrivateKey() - assert.NoError(t, err) - - _, blsKeyBytes, err := crypto.GenerateAndEncodeBLSSecretKey() - assert.NoError(t, err) - - logger := hclog.NewNullLogger() - - t.Run("should return error if ibftConfig is empty", func(t *testing.T) { - t.Parallel() - - _, err := NewForkManager( - logger, - nil, - nil, - nil, - "", - 0, - map[string]interface{}{}, - ) - - assert.ErrorIs(t, ErrUndefinedIBFTConfig, err) - }) - - t.Run("should return error if key manager initialization fails", func(t *testing.T) { - t.Parallel() - - var ( 
- epochSize uint64 = 10 - - secretManager = &mockSecretManager{ - HasSecretFunc: func(name string) bool { - return true - }, - GetSecretFunc: func(name string) ([]byte, error) { - return nil, errTest - }, - } - ) - - _, err := NewForkManager( - logger, - nil, - nil, - secretManager, - "", - epochSize, - map[string]interface{}{ - "type": "PoS", - "validator_type": "bls", - }, - ) - - assert.ErrorIs(t, errTest, err) - }) - - t.Run("should return error if validator store initialization fails", func(t *testing.T) { - t.Parallel() - - var ( - latestNumber uint64 = 50 - epochSize uint64 = 10 - - blockchain = &store.MockBlockchain{ - HeaderFn: func() *types.Header { - return &types.Header{Number: latestNumber} - }, - GetHeaderByNumberFn: func(u uint64) (*types.Header, bool) { - return nil, false - }, - } - - secretManager = &mockSecretManager{ - HasSecretFunc: func(name string) bool { - assert.Equal(t, secrets.ValidatorKey, name) - - return true - }, - GetSecretFunc: func(name string) ([]byte, error) { - assert.Equal(t, secrets.ValidatorKey, name) - - return ecdsaKeyBytes, nil - }, - } - ) - - dirPath := createTestTempDirectory(t) - - fm, err := NewForkManager( - logger, - blockchain, - nil, - secretManager, - dirPath, - epochSize, - map[string]interface{}{ - "type": "PoA", - "validator_type": "ecdsa", - }, - ) - - assert.NoError(t, err) - testHelper.AssertErrorMessageContains( - t, - errors.New("header at 50 not found"), - fm.Initialize(), - ) - }) - - t.Run("PoA and ECDSA", func(t *testing.T) { - t.Parallel() - - var ( - latestNumber uint64 = 50 - epochSize uint64 = 10 - - metadata = &snapshot.SnapshotMetadata{ - LastBlock: latestNumber, - } - - snapshots = []*snapshot.Snapshot{ - { - Number: latestNumber, - Hash: types.BytesToHash(crypto.Keccak256([]byte{0x10})).String(), - Set: validators.NewECDSAValidatorSet( - validators.NewECDSAValidator(types.StringToAddress("1")), - ), - Votes: []*store.Vote{ - { - Candidate: validators.NewECDSAValidator(types.StringToAddress("2")), - Validator: types.StringToAddress("1"), - Authorize: true, - }, - }, - }, - } - - blockchain = &store.MockBlockchain{ - HeaderFn: func() *types.Header { - return &types.Header{Number: latestNumber} - }, - } - - secretManager = &mockSecretManager{ - HasSecretFunc: func(name string) bool { - assert.Equal(t, secrets.ValidatorKey, name) - - return true - }, - GetSecretFunc: func(name string) ([]byte, error) { - assert.Equal(t, secrets.ValidatorKey, name) - - return ecdsaKeyBytes, nil - }, - } - ) - - dirPath := createTestTempDirectory(t) - - assert.NoError( - t, - writeDataStore(path.Join(dirPath, snapshotMetadataFilename), metadata), - ) - - assert.NoError( - t, - writeDataStore(path.Join(dirPath, snapshotSnapshotsFilename), snapshots), - ) - - fm, err := NewForkManager( - logger, - blockchain, - nil, - secretManager, - dirPath, - epochSize, - map[string]interface{}{ - "type": "PoA", - "validator_type": "ecdsa", - }, - ) - - assert.NoError(t, err) - assert.NoError(t, fm.Initialize()) - - assert.NotNil(t, fm.keyManagers[validators.ECDSAValidatorType]) - assert.NotNil(t, fm.validatorStores[store.Snapshot]) - assert.NotNil(t, fm.hooksRegisters[PoA]) - }) - - t.Run("PoS and BLS", func(t *testing.T) { - t.Parallel() - - var ( - epochSize uint64 = 10 - - secretManager = &mockSecretManager{ - HasSecretFunc: func(name string) bool { - assert.True(t, name == secrets.ValidatorKey || name == secrets.ValidatorBLSKey) - - return true - }, - GetSecretFunc: func(name string) ([]byte, error) { - assert.True(t, name == secrets.ValidatorKey || 
name == secrets.ValidatorBLSKey) - - if name == secrets.ValidatorKey { - return ecdsaKeyBytes, nil - } else { - return blsKeyBytes, nil - } - }, - } - ) - - fm, err := NewForkManager( - logger, - nil, - nil, - secretManager, - "", - epochSize, - map[string]interface{}{ - "type": "PoS", - "validator_type": "bls", - }, - ) - - assert.NoError(t, err) - assert.NoError(t, fm.Initialize()) - - assert.NotNil(t, fm.keyManagers[validators.BLSValidatorType]) - assert.NotNil(t, fm.validatorStores[store.Contract]) - assert.NotNil(t, fm.hooksRegisters[PoS]) - }) -} - -func TestForkManagerClose(t *testing.T) { - t.Parallel() - - t.Run("should call all Close methods of ValidatorStore", func(t *testing.T) { - t.Parallel() - - numCalls := 0 - - fm := &ForkManager{ - validatorStores: map[store.SourceType]ValidatorStore{ - store.Contract: &mockValidatorStore{ - CloseFunc: func() error { - numCalls++ - - return nil - }, - }, - store.Snapshot: &mockValidatorStore{ - CloseFunc: func() error { - numCalls++ - - return nil - }, - }, - }, - } - - assert.NoError(t, fm.Close()) - assert.Equal(t, 2, numCalls) - }) - - t.Run("should return error if one of Close method returns error", func(t *testing.T) { - t.Parallel() - - numCalls := 0 - - fm := &ForkManager{ - validatorStores: map[store.SourceType]ValidatorStore{ - // should call the either - store.Contract: &mockValidatorStore{ - CloseFunc: func() error { - numCalls++ - - return errTest - }, - }, - store.Snapshot: &mockValidatorStore{ - CloseFunc: func() error { - numCalls++ - - return errTest - }, - }, - }, - } - - assert.Equal(t, errTest, fm.Close()) - assert.Equal(t, 1, numCalls) - }) -} - -type MockKeyManager struct { - signer.KeyManager - ValType validators.ValidatorType -} - -func TestForkManagerGetSigner(t *testing.T) { - t.Parallel() - - var ( - ecdsaKeyManager = &MockKeyManager{ - ValType: validators.ECDSAValidatorType, - } - blsKeyManager = &MockKeyManager{ - ValType: validators.BLSValidatorType, - } - ) - - tests := []struct { - name string - forks IBFTForks - keyManagers map[validators.ValidatorType]signer.KeyManager - height uint64 - expectedSigner signer.Signer - expectedErr error - }{ - { - name: "should return ErrForkNotFound if fork not found", - forks: IBFTForks{ - { - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 0}, - To: &common.JSONNumber{Value: 10}, - }, - { - ValidatorType: validators.BLSValidatorType, - From: common.JSONNumber{Value: 11}, - To: &common.JSONNumber{Value: 20}, - }, - }, - keyManagers: map[validators.ValidatorType]signer.KeyManager{}, - height: 22, - expectedSigner: nil, - expectedErr: ErrForkNotFound, - }, - { - name: "should return ErrKeyManagerNotFound if fork managers is nil", - forks: IBFTForks{ - { - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 0}, - }, - }, - keyManagers: map[validators.ValidatorType]signer.KeyManager{}, - height: 10, - expectedSigner: nil, - expectedErr: ErrKeyManagerNotFound, - }, - { - name: "should return err if fork not found for parent key manager", - forks: IBFTForks{ - { - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 2}, - }, - }, - keyManagers: map[validators.ValidatorType]signer.KeyManager{ - validators.ECDSAValidatorType: MockKeyManager{}, - }, - height: 2, - expectedSigner: nil, - expectedErr: ErrForkNotFound, - }, - { - name: "should return the signer with single key manager if the height is 1", - forks: IBFTForks{ - { - ValidatorType: validators.ECDSAValidatorType, - From: 
common.JSONNumber{Value: 0}, - To: &common.JSONNumber{Value: 10}, - }, - { - ValidatorType: validators.BLSValidatorType, - From: common.JSONNumber{Value: 11}, - }, - }, - keyManagers: map[validators.ValidatorType]signer.KeyManager{ - validators.ECDSAValidatorType: ecdsaKeyManager, - validators.BLSValidatorType: blsKeyManager, - }, - height: 1, - expectedSigner: signer.NewSigner(ecdsaKeyManager, nil), - }, - { - name: "should return the signer with different key manager and parent key manager", - forks: IBFTForks{ - { - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 0}, - To: &common.JSONNumber{Value: 10}, - }, - { - ValidatorType: validators.BLSValidatorType, - From: common.JSONNumber{Value: 11}, - }, - }, - keyManagers: map[validators.ValidatorType]signer.KeyManager{ - validators.ECDSAValidatorType: ecdsaKeyManager, - validators.BLSValidatorType: blsKeyManager, - }, - height: 11, - expectedSigner: signer.NewSigner(blsKeyManager, ecdsaKeyManager), - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - fm := &ForkManager{ - forks: test.forks, - keyManagers: test.keyManagers, - } - - signer, err := fm.GetSigner(test.height) - - assert.Equal( - t, - test.expectedSigner, - signer, - ) - - assert.Equal( - t, - test.expectedErr, - err, - ) - }) - } -} - -func TestForkManagerGetValidatorStore(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - forks IBFTForks - validatorStores map[store.SourceType]ValidatorStore - height uint64 - expectedStore ValidatorStore - expectedErr error - }{ - { - name: "should return ErrForkNotFound if fork not found", - forks: IBFTForks{ - { - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 0}, - To: &common.JSONNumber{Value: 10}, - }, - { - ValidatorType: validators.BLSValidatorType, - From: common.JSONNumber{Value: 11}, - To: &common.JSONNumber{Value: 20}, - }, - }, - validatorStores: map[store.SourceType]ValidatorStore{}, - height: 25, - expectedStore: nil, - expectedErr: ErrForkNotFound, - }, - { - name: "should return ErrValidatorStoreNotFound if validator store not found", - forks: IBFTForks{ - { - Type: PoA, - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 0}, - }, - }, - validatorStores: map[store.SourceType]ValidatorStore{}, - height: 25, - expectedStore: nil, - expectedErr: ErrValidatorStoreNotFound, - }, - { - name: "should return Snapshot store for PoA", - forks: IBFTForks{ - { - Type: PoA, - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 0}, - }, - }, - validatorStores: map[store.SourceType]ValidatorStore{ - store.Snapshot: &mockValidatorStore{}, - }, - height: 25, - expectedStore: &mockValidatorStore{}, - expectedErr: nil, - }, - { - name: "should return Contract store for PoS", - forks: IBFTForks{ - { - Type: PoS, - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 0}, - }, - }, - validatorStores: map[store.SourceType]ValidatorStore{ - store.Contract: &mockValidatorStore{}, - }, - height: 25, - expectedStore: &mockValidatorStore{}, - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - fm := &ForkManager{ - forks: test.forks, - validatorStores: test.validatorStores, - } - - store, err := fm.GetValidatorStore(test.height) - - assert.Equal( - t, - test.expectedStore, - store, - ) - - assert.Equal( - t, - test.expectedErr, - err, - ) - }) - } 
-} - -func TestForkManagerGetValidators(t *testing.T) { - t.Parallel() - - var epochSize uint64 = 10 - - tests := []struct { - name string - forks IBFTForks - validatorStores map[store.SourceType]ValidatorStore - height uint64 - expectedValidators validators.Validators - expectedErr error - }{ - { - name: "should return ErrForkNotFound if fork not found", - forks: IBFTForks{ - { - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 0}, - To: &common.JSONNumber{Value: 10}, - }, - { - ValidatorType: validators.BLSValidatorType, - From: common.JSONNumber{Value: 11}, - To: &common.JSONNumber{Value: 20}, - }, - }, - validatorStores: map[store.SourceType]ValidatorStore{}, - height: 25, - expectedValidators: nil, - expectedErr: ErrForkNotFound, - }, - { - name: "should return ErrValidatorStoreNotFound if validator store not found", - forks: IBFTForks{ - { - Type: PoA, - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 0}, - }, - }, - validatorStores: map[store.SourceType]ValidatorStore{}, - height: 25, - expectedValidators: nil, - expectedErr: ErrValidatorStoreNotFound, - }, - { - name: "should return Validators", - forks: IBFTForks{ - { - Type: PoA, - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 10}, - }, - }, - validatorStores: map[store.SourceType]ValidatorStore{ - store.Snapshot: &mockValidatorStore{ - GetValidatorsFunc: func(u1, u2, u3 uint64) (validators.Validators, error) { - assert.Equal(t, uint64(25), u1) // height - assert.Equal(t, epochSize, u2) // epochSize - assert.Equal(t, uint64(10), u3) // from - - return validators.NewECDSAValidatorSet( - validators.NewECDSAValidator(types.StringToAddress("1")), - validators.NewECDSAValidator(types.StringToAddress("2")), - ), nil - }, - }, - }, - height: 25, - expectedValidators: validators.NewECDSAValidatorSet( - validators.NewECDSAValidator(types.StringToAddress("1")), - validators.NewECDSAValidator(types.StringToAddress("2")), - ), - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - fm := &ForkManager{ - forks: test.forks, - validatorStores: test.validatorStores, - epochSize: epochSize, - } - - validators, err := fm.GetValidators(test.height) - - assert.Equal( - t, - test.expectedValidators, - validators, - ) - - assert.Equal( - t, - test.expectedErr, - err, - ) - }) - } -} - -func TestForkManagerGetHooks(t *testing.T) { - t.Parallel() - - var ( - height uint64 = 25 - - err1 = errors.New("error 1") - err2 = errors.New("error 2") - ) - - fm := &ForkManager{ - hooksRegisters: map[IBFTType]HooksRegister{ - PoA: &mockHooksRegister{ - RegisterHooksFunc: func(hooks *hook.Hooks, h uint64) { - assert.Equal(t, height, h) - - hooks.ModifyHeaderFunc = func(h *types.Header, a types.Address) error { - return err1 - } - }, - }, - PoS: &mockHooksRegister{ - RegisterHooksFunc: func(hooks *hook.Hooks, h uint64) { - assert.Equal(t, height, h) - - hooks.VerifyBlockFunc = func(b *types.Block) error { - return err2 - } - }, - }, - }, - } - - hooks := fm.GetHooks(height) - - assert.Equal(t, err1, hooks.ModifyHeader(&types.Header{}, types.StringToAddress("1"))) - assert.Equal(t, err2, hooks.VerifyBlock(&types.Block{}), nil) -} - -func TestForkManager_initializeKeyManagers(t *testing.T) { - t.Parallel() - - key, keyBytes, err := crypto.GenerateAndEncodeECDSAPrivateKey() - assert.NoError(t, err) - - tests := []struct { - name string - forks IBFTForks - secretManager *mockSecretManager - 
expectedErr error - expectedKeyManagers map[validators.ValidatorType]signer.KeyManager - }{ - { - name: "should return error if NewKeyManagerFromType fails", - forks: IBFTForks{ - { - Type: PoA, - ValidatorType: validators.ValidatorType("fake"), - From: common.JSONNumber{Value: 0}, - }, - }, - secretManager: nil, - expectedErr: errors.New("unsupported validator type: fake"), - expectedKeyManagers: map[validators.ValidatorType]signer.KeyManager{}, - }, - { - name: "should initializes key manager", - forks: IBFTForks{ - { - Type: PoA, - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 0}, - To: &common.JSONNumber{Value: 49}, - }, - { - Type: PoS, - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 50}, - }, - }, - secretManager: &mockSecretManager{ - HasSecretFunc: func(name string) bool { - assert.Equal(t, secrets.ValidatorKey, name) - - return true - }, - GetSecretFunc: func(name string) ([]byte, error) { - assert.Equal(t, secrets.ValidatorKey, name) - - return keyBytes, nil - }, - }, - expectedErr: nil, - expectedKeyManagers: map[validators.ValidatorType]signer.KeyManager{ - validators.ECDSAValidatorType: signer.NewECDSAKeyManagerFromKey(key), - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - fm := &ForkManager{ - forks: test.forks, - secretsManager: test.secretManager, - keyManagers: map[validators.ValidatorType]signer.KeyManager{}, - } - - testHelper.AssertErrorMessageContains( - t, - fm.initializeKeyManagers(), - test.expectedErr, - ) - - assert.Equal( - t, - test.expectedKeyManagers, - fm.keyManagers, - ) - }) - } -} - -func TestForkManager_initializeValidatorStores(t *testing.T) { - t.Parallel() - - var ( - logger = hclog.NewNullLogger() - blockchain = &store.MockBlockchain{} - executor = &MockExecutor{} - - forks = IBFTForks{ - { - Type: PoS, - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 0}, - To: &common.JSONNumber{Value: 49}, - }, - { - Type: PoS, - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 50}, - }, - } - ) - - fm := &ForkManager{ - forks: forks, - validatorStores: map[store.SourceType]ValidatorStore{}, - logger: logger, - blockchain: blockchain, - executor: executor, - } - - assert.NoError(t, fm.initializeValidatorStores()) - - assert.NotNil( - t, - fm.validatorStores[store.Contract], - ) - - assert.Nil( - t, - fm.validatorStores[store.Snapshot], - ) -} - -func TestForkManager_initializeHooksRegisters(t *testing.T) { - t.Parallel() - - var ( - forks = IBFTForks{ - { - Type: PoA, - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 0}, - To: &common.JSONNumber{Value: 49}, - }, - { - Type: PoS, - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 50}, - To: &common.JSONNumber{Value: 100}, - }, - { - Type: PoS, - ValidatorType: validators.ECDSAValidatorType, - From: common.JSONNumber{Value: 101}, - }, - } - ) - - fm := &ForkManager{ - forks: forks, - hooksRegisters: map[IBFTType]HooksRegister{}, - } - - fm.initializeHooksRegisters() - - assert.NotNil( - t, - fm.hooksRegisters[PoA], - ) - - assert.NotNil( - t, - fm.hooksRegisters[PoS], - ) -} diff --git a/consensus/ibft/fork/storewrapper.go b/consensus/ibft/fork/storewrapper.go deleted file mode 100644 index 47e5710fe1..0000000000 --- a/consensus/ibft/fork/storewrapper.go +++ /dev/null @@ -1,194 +0,0 @@ -package fork - -import ( - "encoding/json" - "errors" - "path/filepath" - 
- "github.com/0xPolygon/polygon-edge/consensus/ibft/signer" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/0xPolygon/polygon-edge/validators/store" - "github.com/0xPolygon/polygon-edge/validators/store/contract" - "github.com/0xPolygon/polygon-edge/validators/store/snapshot" - "github.com/hashicorp/go-hclog" -) - -// isJSONSyntaxError returns bool indicating the giving error is json.SyntaxError or not -func isJSONSyntaxError(err error) bool { - var expected *json.SyntaxError - - if err == nil { - return false - } - - return errors.As(err, &expected) -} - -// SnapshotValidatorStoreWrapper is a wrapper of store.SnapshotValidatorStore -// in order to add initialization and closer process with side effect -type SnapshotValidatorStoreWrapper struct { - *snapshot.SnapshotValidatorStore - dirPath string -} - -// Close saves SnapshotValidator data into local storage -func (w *SnapshotValidatorStoreWrapper) Close() error { - // save data - var ( - metadata = w.GetSnapshotMetadata() - snapshots = w.GetSnapshots() - ) - - if err := writeDataStore(filepath.Join(w.dirPath, snapshotMetadataFilename), metadata); err != nil { - return err - } - - if err := writeDataStore(filepath.Join(w.dirPath, snapshotSnapshotsFilename), snapshots); err != nil { - return err - } - - return nil -} - -// GetValidators returns validators at the specific height -func (w *SnapshotValidatorStoreWrapper) GetValidators(height, _, _ uint64) (validators.Validators, error) { - // the biggest height of blocks that have been processed before the given height - return w.GetValidatorsByHeight(height - 1) -} - -// NewSnapshotValidatorStoreWrapper loads data from local storage and creates *SnapshotValidatorStoreWrapper -func NewSnapshotValidatorStoreWrapper( - logger hclog.Logger, - blockchain store.HeaderGetter, - getSigner func(uint64) (signer.Signer, error), - dirPath string, - epochSize uint64, -) (*SnapshotValidatorStoreWrapper, error) { - var ( - snapshotMetadataPath = filepath.Join(dirPath, snapshotMetadataFilename) - snapshotsPath = filepath.Join(dirPath, snapshotSnapshotsFilename) - ) - - snapshotMeta, err := loadSnapshotMetadata(snapshotMetadataPath) - if isJSONSyntaxError(err) { - logger.Warn("Snapshot metadata file is broken, recover metadata from local chain", "filepath", snapshotMetadataPath) - - snapshotMeta = nil - } else if err != nil { - return nil, err - } - - snapshots, err := loadSnapshots(snapshotsPath) - if isJSONSyntaxError(err) { - logger.Warn("Snapshots file is broken, recover snapshots from local chain", "filepath", snapshotsPath) - - snapshots = nil - } else if err != nil { - return nil, err - } - - snapshotStore, err := snapshot.NewSnapshotValidatorStore( - logger, - blockchain, - func(height uint64) (snapshot.SignerInterface, error) { - rawSigner, err := getSigner(height) - if err != nil { - return nil, err - } - - return snapshot.SignerInterface(rawSigner), nil - }, - epochSize, - snapshotMeta, - snapshots, - ) - - if err != nil { - return nil, err - } - - return &SnapshotValidatorStoreWrapper{ - SnapshotValidatorStore: snapshotStore, - dirPath: dirPath, - }, nil -} - -// ContractValidatorStoreWrapper is a wrapper of *contract.ContractValidatorStore -// in order to add Close and GetValidators -type ContractValidatorStoreWrapper struct { - *contract.ContractValidatorStore - getSigner func(uint64) (signer.Signer, error) -} - -// NewContractValidatorStoreWrapper creates *ContractValidatorStoreWrapper -func NewContractValidatorStoreWrapper( - logger hclog.Logger, - blockchain 
store.HeaderGetter, - executor contract.Executor, - getSigner func(uint64) (signer.Signer, error), -) (*ContractValidatorStoreWrapper, error) { - contractStore, err := contract.NewContractValidatorStore( - logger, - blockchain, - executor, - contract.DefaultValidatorSetCacheSize, - ) - - if err != nil { - return nil, err - } - - return &ContractValidatorStoreWrapper{ - ContractValidatorStore: contractStore, - getSigner: getSigner, - }, nil -} - -// Close is closer process -func (w *ContractValidatorStoreWrapper) Close() error { - return nil -} - -// GetValidators gets and returns validators at the given height -func (w *ContractValidatorStoreWrapper) GetValidators( - height, epochSize, forkFrom uint64, -) (validators.Validators, error) { - signer, err := w.getSigner(height) - if err != nil { - return nil, err - } - - return w.GetValidatorsByHeight( - signer.Type(), - calculateContractStoreFetchingHeight( - height, - epochSize, - forkFrom, - ), - ) -} - -// calculateContractStoreFetchingHeight calculates the block height at which ContractStore fetches validators -// based on height, epoch, and fork beginning height -func calculateContractStoreFetchingHeight(height, epochSize, forkFrom uint64) uint64 { - // calculates the beginning of the epoch the given height is in - beginningEpoch := (height / epochSize) * epochSize - - // calculates the end of the previous epoch - // to determine the height to fetch validators - fetchingHeight := uint64(0) - if beginningEpoch > 0 { - fetchingHeight = beginningEpoch - 1 - } - - // use the calculated height if it's bigger than or equal to from - if fetchingHeight >= forkFrom { - return fetchingHeight - } - - if forkFrom > 0 { - return forkFrom - 1 - } - - return forkFrom -} diff --git a/consensus/ibft/fork/storewrapper_test.go b/consensus/ibft/fork/storewrapper_test.go deleted file mode 100644 index ce81cfd0b5..0000000000 --- a/consensus/ibft/fork/storewrapper_test.go +++ /dev/null @@ -1,552 +0,0 @@ -package fork - -import ( - "encoding/json" - "errors" - "fmt" - "os" - "path" - "testing" - - "github.com/0xPolygon/polygon-edge/consensus/ibft/signer" - testHelper "github.com/0xPolygon/polygon-edge/helper/tests" - "github.com/0xPolygon/polygon-edge/state" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/0xPolygon/polygon-edge/validators/store" - "github.com/0xPolygon/polygon-edge/validators/store/snapshot" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/assert" -) - -var ( - errTest = errors.New("test") -) - -// a mock returning an error in UnmarshalJSON -type fakeUnmarshalerStruct struct{} - -func (s *fakeUnmarshalerStruct) UnmarshalJSON(data []byte) error { - return errTest -} - -type mockSigner struct { - signer.Signer - - TypeFn func() validators.ValidatorType - EcrecoverFromHeaderFn func(*types.Header) (types.Address, error) - GetValidatorsFn func(*types.Header) (validators.Validators, error) -} - -func (m *mockSigner) Type() validators.ValidatorType { - return m.TypeFn() -} - -func (m *mockSigner) EcrecoverFromHeader(h *types.Header) (types.Address, error) { - return m.EcrecoverFromHeaderFn(h) -} - -func (m *mockSigner) GetValidators(h *types.Header) (validators.Validators, error) { - return m.GetValidatorsFn(h) -} - -func Test_isJSONSyntaxError(t *testing.T) { - t.Parallel() - - var ( - // create some special errors - snaps = []*snapshot.Snapshot{} - fakeStr = &fakeUnmarshalerStruct{} - - invalidJSONErr = json.Unmarshal([]byte("foo"), &snaps) - invalidUnmarshalErr = 
json.Unmarshal([]byte("{}"), fakeStr) - ) - - tests := []struct { - name string - err error - expected bool - }{ - { - name: "should return false for nil", - err: nil, - expected: false, - }, - { - name: "should return false for custom error", - err: errTest, - expected: false, - }, - { - name: "should return marshal for json.InvalidUnmarshalError", - err: invalidUnmarshalErr, - expected: false, - }, - { - name: "should return json.SyntaxError", - err: invalidJSONErr, - expected: true, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.expected, - isJSONSyntaxError(test.err), - ) - }) - } -} - -func createTestMetadataJSON(height uint64) string { - return fmt.Sprintf(`{"LastBlock": %d}`, height) -} - -func createTestSnapshotJSON(t *testing.T, snapshot *snapshot.Snapshot) string { - t.Helper() - - res, err := json.Marshal(snapshot) - assert.NoError(t, err) - - return string(res) -} - -func TestSnapshotValidatorStoreWrapper(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - storedSnapshotMetadata string - storedSnapshots string - blockchain store.HeaderGetter - signer signer.Signer - epochSize uint64 - err error - }{ - { - name: "should return error if initialize fails", - storedSnapshotMetadata: createTestMetadataJSON(0), - storedSnapshots: "[]", - blockchain: &store.MockBlockchain{ - HeaderFn: func() *types.Header { - return &types.Header{Number: 0} - }, - }, - signer: nil, - epochSize: 10, - err: fmt.Errorf("signer not found %d", 0), - }, - { - name: "should succeed", - storedSnapshotMetadata: createTestMetadataJSON(10), - storedSnapshots: fmt.Sprintf("[%s]", createTestSnapshotJSON( - t, - &snapshot.Snapshot{ - Number: 10, - Hash: types.BytesToHash([]byte{0x10}).String(), - Set: validators.NewECDSAValidatorSet(), - Votes: []*store.Vote{}, - }, - )), - blockchain: &store.MockBlockchain{ - HeaderFn: func() *types.Header { - return &types.Header{Number: 10} - }, - }, - signer: nil, - epochSize: 10, - err: nil, - }, - // the below cases recover snapshots from local chain, - // but this test just makes sure constructor doesn't return an error - // because snapshot package has tests covering such cases - { - name: "should succeed and recover snapshots from headers when the files don't exist", - storedSnapshotMetadata: "", - storedSnapshots: "", - blockchain: &store.MockBlockchain{ - HeaderFn: func() *types.Header { - return &types.Header{Number: 0} - }, - }, - signer: &mockSigner{ - GetValidatorsFn: func(h *types.Header) (validators.Validators, error) { - // height of the header HeaderFn returns - assert.Equal(t, uint64(0), h.Number) - - return &validators.Set{}, nil - }, - }, - epochSize: 10, - err: nil, - }, - { - name: "should succeed and recover snapshots from headers when the metadata file is broken", - storedSnapshotMetadata: "broken data", - storedSnapshots: fmt.Sprintf("[%s]", createTestSnapshotJSON( - t, - &snapshot.Snapshot{ - Number: 10, - Hash: types.BytesToHash([]byte{0x10}).String(), - Set: validators.NewECDSAValidatorSet(), - Votes: []*store.Vote{}, - }, - )), - blockchain: &store.MockBlockchain{ - HeaderFn: func() *types.Header { - return &types.Header{Number: 0} - }, - }, - signer: &mockSigner{ - GetValidatorsFn: func(h *types.Header) (validators.Validators, error) { - // height of the header HeaderFn returns - assert.Equal(t, uint64(0), h.Number) - - return &validators.Set{}, nil - }, - }, - epochSize: 10, - err: nil, - }, - { - name: "should succeed and recover 
snapshots from headers when the snapshots file is broken", - storedSnapshotMetadata: createTestMetadataJSON(0), - storedSnapshots: "broken", - blockchain: &store.MockBlockchain{ - HeaderFn: func() *types.Header { - return &types.Header{Number: 0} - }, - }, - signer: &mockSigner{ - GetValidatorsFn: func(h *types.Header) (validators.Validators, error) { - // height of the header HeaderFn returns - assert.Equal(t, uint64(0), h.Number) - - return &validators.Set{}, nil - }, - }, - epochSize: 10, - err: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - dirPath := createTestTempDirectory(t) - - if len(test.storedSnapshotMetadata) != 0 { - assert.NoError( - t, - os.WriteFile(path.Join(dirPath, snapshotMetadataFilename), []byte(test.storedSnapshotMetadata), 0775), - ) - } - - if len(test.storedSnapshots) != 0 { - assert.NoError( - t, - os.WriteFile(path.Join(dirPath, snapshotSnapshotsFilename), []byte(test.storedSnapshots), 0775), - ) - } - - store, err := NewSnapshotValidatorStoreWrapper( - hclog.NewNullLogger(), - test.blockchain, - func(u uint64) (signer.Signer, error) { - return test.signer, nil - }, - dirPath, - test.epochSize, - ) - - testHelper.AssertErrorMessageContains( - t, - test.err, - err, - ) - - if store != nil { - assert.Equal( - t, - dirPath, - store.dirPath, - ) - } - }) - } -} - -func TestSnapshotValidatorStoreWrapperGetValidators(t *testing.T) { - t.Parallel() - - var ( - epochSize uint64 = 10 - metadata = &snapshot.SnapshotMetadata{ - LastBlock: 10, - } - snapshots = []*snapshot.Snapshot{ - { - Number: 10, - Hash: types.StringToHash("1").String(), - Set: validators.NewECDSAValidatorSet( - validators.NewECDSAValidator(types.StringToAddress("1")), - ), - Votes: []*store.Vote{}, - }, - } - ) - - store, err := snapshot.NewSnapshotValidatorStore( - hclog.NewNullLogger(), - &store.MockBlockchain{ - HeaderFn: func() *types.Header { - return &types.Header{Number: 10} - }, - }, - func(u uint64) (snapshot.SignerInterface, error) { - return nil, nil - }, - epochSize, - metadata, - snapshots, - ) - - assert.NoError(t, err) - - wrapper := SnapshotValidatorStoreWrapper{ - SnapshotValidatorStore: store, - } - - vals, err := wrapper.GetValidators(11, 0, 0) - assert.NoError(t, err) - assert.Equal(t, snapshots[0].Set, vals) -} - -func TestSnapshotValidatorStoreWrapperClose(t *testing.T) { - t.Parallel() - - var ( - dirPath = createTestTempDirectory(t) - - epochSize uint64 = 10 - metadata = &snapshot.SnapshotMetadata{ - LastBlock: 10, - } - snapshots = []*snapshot.Snapshot{ - { - Number: 10, - Hash: types.StringToHash("1").String(), - Set: validators.NewECDSAValidatorSet( - validators.NewECDSAValidator(types.StringToAddress("1")), - ), - Votes: []*store.Vote{}, - }, - } - ) - - store, err := snapshot.NewSnapshotValidatorStore( - hclog.NewNullLogger(), - &store.MockBlockchain{ - HeaderFn: func() *types.Header { - return &types.Header{Number: 10} - }, - }, - func(u uint64) (snapshot.SignerInterface, error) { - return nil, nil - }, - epochSize, - metadata, - snapshots, - ) - - assert.NoError(t, err) - - wrapper := SnapshotValidatorStoreWrapper{ - dirPath: dirPath, - SnapshotValidatorStore: store, - } - - assert.NoError(t, wrapper.Close()) - - savedMetadataFile, err := os.ReadFile(path.Join(dirPath, snapshotMetadataFilename)) - assert.NoError(t, err) - assert.JSONEq( - t, - createTestMetadataJSON(metadata.LastBlock), - string(savedMetadataFile), - ) - - savedSnapshots, err := os.ReadFile(path.Join(dirPath, 
snapshotSnapshotsFilename)) - assert.NoError(t, err) - assert.JSONEq( - t, - fmt.Sprintf("[%s]", createTestSnapshotJSON(t, snapshots[0])), - string(savedSnapshots), - ) -} - -type MockExecutor struct { - BeginTxnFunc func(types.Hash, *types.Header, types.Address) (*state.Transition, error) -} - -func (m *MockExecutor) BeginTxn(hash types.Hash, header *types.Header, addr types.Address) (*state.Transition, error) { - return m.BeginTxnFunc(hash, header, addr) -} - -func TestNewContractValidatorStoreWrapper(t *testing.T) { - t.Parallel() - - _, err := NewContractValidatorStoreWrapper( - hclog.NewNullLogger(), - &store.MockBlockchain{}, - &MockExecutor{}, - func(u uint64) (signer.Signer, error) { - return nil, nil - }, - ) - - assert.NoError(t, err) -} - -func TestNewContractValidatorStoreWrapperClose(t *testing.T) { - t.Parallel() - - wrapper, err := NewContractValidatorStoreWrapper( - hclog.NewNullLogger(), - &store.MockBlockchain{}, - &MockExecutor{}, - func(u uint64) (signer.Signer, error) { - return nil, nil - }, - ) - - assert.NoError(t, err) - assert.NoError(t, wrapper.Close()) -} - -func TestNewContractValidatorStoreWrapperGetValidators(t *testing.T) { - t.Parallel() - - t.Run("should return error if getSigner returns error", func(t *testing.T) { - t.Parallel() - - wrapper, err := NewContractValidatorStoreWrapper( - hclog.NewNullLogger(), - &store.MockBlockchain{}, - &MockExecutor{}, - func(u uint64) (signer.Signer, error) { - return nil, errTest - }, - ) - - assert.NoError(t, err) - - res, err := wrapper.GetValidators(0, 0, 0) - assert.Nil(t, res) - assert.ErrorIs(t, errTest, err) - }) - - t.Run("should return error if GetValidatorsByHeight returns error", func(t *testing.T) { - t.Parallel() - - wrapper, err := NewContractValidatorStoreWrapper( - hclog.NewNullLogger(), - &store.MockBlockchain{ - GetHeaderByNumberFn: func(u uint64) (*types.Header, bool) { - return nil, false - }, - }, - &MockExecutor{}, - func(u uint64) (signer.Signer, error) { - return signer.NewSigner( - &signer.ECDSAKeyManager{}, - nil, - ), nil - }, - ) - - assert.NoError(t, err) - - res, err := wrapper.GetValidators(10, 10, 0) - assert.Nil(t, res) - assert.ErrorContains(t, err, "header not found at 9") - }) -} - -func Test_calculateContractStoreFetchingHeight(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - height uint64 - epochSize uint64 - forkFrom uint64 - expected uint64 - }{ - { - name: "should return 0 if the height is 2 (in the first epoch)", - height: 2, - epochSize: 10, - forkFrom: 0, - expected: 0, - }, - { - name: "should return 0 if the height is 9 (in the first epoch)", - height: 9, - epochSize: 10, - forkFrom: 0, - expected: 0, - }, - { - name: "should return 9 if the height is 10 (in the second epoch)", - height: 10, - epochSize: 10, - forkFrom: 0, - expected: 9, - }, - { - name: "should return 9 if the height is 19 (in the second epoch)", - height: 19, - epochSize: 10, - forkFrom: 0, - expected: 9, - }, - { - name: "should return 49 if the height is 10 but forkFrom is 50", - height: 10, - epochSize: 10, - forkFrom: 50, - expected: 49, - }, - { - name: "should return 59 if the height is 60 and forkFrom is 50", - height: 60, - epochSize: 10, - forkFrom: 50, - expected: 59, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.expected, - calculateContractStoreFetchingHeight(test.height, test.epochSize, test.forkFrom), - ) - }) - } -} diff --git a/consensus/ibft/fork/type.go 
b/consensus/ibft/fork/type.go deleted file mode 100644 index 9c66362259..0000000000 --- a/consensus/ibft/fork/type.go +++ /dev/null @@ -1,51 +0,0 @@ -package fork - -import ( - "fmt" - - "github.com/0xPolygon/polygon-edge/validators/store" -) - -// Define the type of the IBFT consensus -type IBFTType string - -const ( - // PoA defines the Proof of Authority IBFT type, - // where the validator set is changed through voting / pre-set in genesis - PoA IBFTType = "PoA" - - // PoS defines the Proof of Stake IBFT type, - // where the validator set it changed through staking on the Staking Smart Contract - PoS IBFTType = "PoS" -) - -// ibftTypes is the map used for easy string -> IBFTType lookups -var ibftTypes = map[string]IBFTType{ - "PoA": PoA, - "PoS": PoS, -} - -// ibftTypesToSourceType defines validator set type used under each IBFT Type -// Right now each IBFT Type is correspond one-to-one with ValidatorStore -// In other words, PoA always uses SnapshotValidatorStore while PoS uses ContractValidatorStore -// By definition, PoA can fetch validators from ContractValidatorStore -var ibftTypesToSourceType = map[IBFTType]store.SourceType{ - PoA: store.Snapshot, - PoS: store.Contract, -} - -// String is a helper method for casting a IBFTType to a string representation -func (t IBFTType) String() string { - return string(t) -} - -// ParseIBFTType converts a ibftType string representation to a IBFTType -func ParseIBFTType(ibftType string) (IBFTType, error) { - // Check if the cast is possible - castType, ok := ibftTypes[ibftType] - if !ok { - return castType, fmt.Errorf("invalid IBFT type %s", ibftType) - } - - return castType, nil -} diff --git a/consensus/ibft/fork/type_test.go b/consensus/ibft/fork/type_test.go deleted file mode 100644 index e617749623..0000000000 --- a/consensus/ibft/fork/type_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package fork - -import ( - "errors" - "testing" - - testHelper "github.com/0xPolygon/polygon-edge/helper/tests" - "github.com/stretchr/testify/assert" -) - -func TestIBFTTypeString(t *testing.T) { - t.Parallel() - - cases := map[IBFTType]string{ - PoA: "PoA", - PoS: "PoS", - } - - for typ, expected := range cases { - assert.Equal( - t, - expected, - typ.String(), - ) - } -} - -func TestParseIBFTType(t *testing.T) { - t.Parallel() - - tests := []struct { - value string - res IBFTType - err error - }{ - { - value: "PoA", - res: PoA, - err: nil, - }, - { - value: "PoS", - res: PoS, - err: nil, - }, - { - value: "hoge", - res: IBFTType(""), - err: errors.New("invalid IBFT type hoge"), - }, - } - - for _, test := range tests { - test := test - - t.Run(test.value, func(t *testing.T) { - t.Parallel() - - res, err := ParseIBFTType(test.value) - - assert.Equal( - t, - test.res, - res, - ) - - testHelper.AssertErrorMessageContains( - t, - test.err, - err, - ) - }) - } -} diff --git a/consensus/ibft/helper_test.go b/consensus/ibft/helper_test.go deleted file mode 100644 index c6761f430c..0000000000 --- a/consensus/ibft/helper_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package ibft - -import ( - "crypto/ecdsa" - "strconv" - "testing" - - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/stretchr/testify/require" -) - -type testerAccount struct { - alias string - priv *ecdsa.PrivateKey -} - -func (t *testerAccount) Address() types.Address { - return crypto.PubKeyToAddress(&t.priv.PublicKey) -} - -type testerAccountPool struct { - t *testing.T - accounts []*testerAccount -} - -func 
newTesterAccountPool(t *testing.T, num ...int) *testerAccountPool { - t.Helper() - - pool := &testerAccountPool{ - t: t, - accounts: []*testerAccount{}, - } - - if len(num) == 1 { - for i := 0; i < num[0]; i++ { - key, _ := crypto.GenerateECDSAKey() - - pool.accounts = append(pool.accounts, &testerAccount{ - alias: strconv.Itoa(i), - priv: key, - }) - } - } - - return pool -} - -func (ap *testerAccountPool) add(accounts ...string) { - ap.t.Helper() - - for _, account := range accounts { - if acct := ap.get(account); acct != nil { - continue - } - - priv, err := crypto.GenerateECDSAKey() - require.NoError(ap.t, err) - - ap.accounts = append(ap.accounts, &testerAccount{ - alias: account, - priv: priv, - }) - } -} - -func (ap *testerAccountPool) get(name string) *testerAccount { - ap.t.Helper() - - for _, i := range ap.accounts { - if i.alias == name { - return i - } - } - - return nil -} - -func (ap *testerAccountPool) ValidatorSet() validators.Validators { - ap.t.Helper() - - v := validators.NewECDSAValidatorSet() - for _, i := range ap.accounts { - _ = v.Add(&validators.ECDSAValidator{ - Address: i.Address(), - }) - } - - return v -} diff --git a/consensus/ibft/hook/hook.go b/consensus/ibft/hook/hook.go deleted file mode 100644 index ca4b2c3af7..0000000000 --- a/consensus/ibft/hook/hook.go +++ /dev/null @@ -1,86 +0,0 @@ -package hook - -import ( - "github.com/0xPolygon/polygon-edge/state" - "github.com/0xPolygon/polygon-edge/types" -) - -type ShouldWriteTransactionsFunc func(uint64) bool - -type ModifyHeaderFunc func(*types.Header, types.Address) error - -type VerifyHeaderFunc func(*types.Header) error - -type VerifyBlockFunc func(*types.Block) error - -type ProcessHeaderFunc func(*types.Header) error - -type PreCommitStateFunc func(*types.Header, *state.Transition) error - -type PostInsertBlockFunc func(*types.Block) error - -type Hooks struct { - ShouldWriteTransactionFunc ShouldWriteTransactionsFunc - ModifyHeaderFunc ModifyHeaderFunc - VerifyHeaderFunc VerifyHeaderFunc - VerifyBlockFunc VerifyBlockFunc - ProcessHeaderFunc ProcessHeaderFunc - PreCommitStateFunc PreCommitStateFunc - PostInsertBlockFunc PostInsertBlockFunc -} - -func (m *Hooks) ShouldWriteTransactions(height uint64) bool { - if m.ShouldWriteTransactionFunc != nil { - return m.ShouldWriteTransactionFunc(height) - } - - return true -} - -func (m *Hooks) ModifyHeader(header *types.Header, proposer types.Address) error { - if m.ModifyHeaderFunc != nil { - return m.ModifyHeaderFunc(header, proposer) - } - - return nil -} - -func (m *Hooks) VerifyHeader(header *types.Header) error { - if m.VerifyHeaderFunc != nil { - return m.VerifyHeaderFunc(header) - } - - return nil -} - -func (m *Hooks) VerifyBlock(block *types.Block) error { - if m.VerifyBlockFunc != nil { - return m.VerifyBlockFunc(block) - } - - return nil -} - -func (m *Hooks) ProcessHeader(header *types.Header) error { - if m.ProcessHeaderFunc != nil { - return m.ProcessHeaderFunc(header) - } - - return nil -} - -func (m *Hooks) PreCommitState(header *types.Header, txn *state.Transition) error { - if m.PreCommitStateFunc != nil { - return m.PreCommitStateFunc(header, txn) - } - - return nil -} - -func (m *Hooks) PostInsertBlock(block *types.Block) error { - if m.PostInsertBlockFunc != nil { - return m.PostInsertBlockFunc(block) - } - - return nil -} diff --git a/consensus/ibft/hook/hook_test.go b/consensus/ibft/hook/hook_test.go deleted file mode 100644 index 5d06a8d205..0000000000 --- a/consensus/ibft/hook/hook_test.go +++ /dev/null @@ -1,343 +0,0 @@ -package hook - 
-import ( - "errors" - "testing" - - "github.com/0xPolygon/polygon-edge/state" - "github.com/0xPolygon/polygon-edge/types" - "github.com/stretchr/testify/assert" -) - -var ( - testHeader = &types.Header{ - Number: 1, - Miner: []byte{}, - ExtraData: []byte{}, - } - - testBlock = &types.Block{ - Header: testHeader, - Transactions: []*types.Transaction{}, - Uncles: []*types.Header{}, - } - - addr1 = types.StringToAddress("1") - - errTest = errors.New("error test") -) - -func newTestHooks( - shouldWriteTransactions ShouldWriteTransactionsFunc, - modifyHeader ModifyHeaderFunc, - verifyHeader VerifyHeaderFunc, - verifyBlock VerifyBlockFunc, - processHeaderFunc ProcessHeaderFunc, - preCommitState PreCommitStateFunc, - postInsertBlock PostInsertBlockFunc, -) *Hooks { - return &Hooks{ - ShouldWriteTransactionFunc: shouldWriteTransactions, - ModifyHeaderFunc: modifyHeader, - VerifyHeaderFunc: verifyHeader, - VerifyBlockFunc: verifyBlock, - ProcessHeaderFunc: processHeaderFunc, - PreCommitStateFunc: preCommitState, - PostInsertBlockFunc: postInsertBlock, - } -} - -func TestShouldWriteTransactions(t *testing.T) { - t.Parallel() - - t.Run("should return true if the function is not set", func(t *testing.T) { - t.Parallel() - - hooks := newTestHooks(nil, nil, nil, nil, nil, nil, nil) - - assert.True(t, hooks.ShouldWriteTransactions(0)) - }) - - t.Run("should call ShouldWriteTransactionFunc", func(t *testing.T) { - t.Parallel() - - shouldWriteTransaction := func(x uint64) bool { - assert.LessOrEqual(t, x, uint64(1)) - - return x != 0 - } - - hooks := newTestHooks(shouldWriteTransaction, nil, nil, nil, nil, nil, nil) - - assert.False(t, hooks.ShouldWriteTransactions(0)) - assert.True(t, hooks.ShouldWriteTransactions(1)) - }) -} - -func TestModifyHeader(t *testing.T) { - t.Parallel() - - t.Run("should do nothing if the function is not set", func(t *testing.T) { - t.Parallel() - - header := testHeader.Copy() - - hooks := newTestHooks(nil, nil, nil, nil, nil, nil, nil) - - assert.Nil(t, hooks.ModifyHeader(header, addr1)) - assert.Equal(t, testHeader, header) - }) - - t.Run("should call ModifyHeader", func(t *testing.T) { - t.Parallel() - - header := testHeader.Copy() - - modifyHeader := func(h *types.Header, proposer types.Address) error { - assert.Equal(t, header, h) - assert.Equal(t, addr1, proposer) - - h.Miner = proposer.Bytes() - - return nil - } - - hooks := newTestHooks(nil, modifyHeader, nil, nil, nil, nil, nil) - - assert.Nil(t, hooks.ModifyHeader(header, addr1)) - assert.Equal( - t, - &types.Header{ - Number: 1, - Miner: addr1.Bytes(), - ExtraData: []byte{}, - }, - header, - ) - }) -} - -//nolint:dupl -func TestVerifyHeader(t *testing.T) { - t.Parallel() - - t.Run("should return nil if the function is not set", func(t *testing.T) { - t.Parallel() - - header := testHeader.Copy() - - hooks := newTestHooks(nil, nil, nil, nil, nil, nil, nil) - - assert.Nil(t, hooks.VerifyHeader(header)) - assert.Equal(t, testHeader, header) - }) - - t.Run("should call VerifyHeader", func(t *testing.T) { - t.Parallel() - - header := testHeader.Copy() - - verifyHeader := func(h *types.Header) error { - assert.Equal(t, header, h) - - return errTest - } - - hooks := newTestHooks(nil, nil, verifyHeader, nil, nil, nil, nil) - - assert.Equal( - t, - errTest, - hooks.VerifyHeader(header), - ) - assert.Equal( - t, - testHeader, - header, - ) - }) -} - -//nolint:dupl -func TestVerifyBlock(t *testing.T) { - t.Parallel() - - t.Run("should return nil if the function is not set", func(t *testing.T) { - t.Parallel() - - block := 
&types.Block{ - Header: testBlock.Header.Copy(), - Transactions: []*types.Transaction{}, - Uncles: []*types.Header{}, - } - - hooks := newTestHooks(nil, nil, nil, nil, nil, nil, nil) - - assert.Nil(t, hooks.VerifyBlock(testBlock)) - assert.Equal(t, testBlock, block) - }) - - t.Run("should call VerifyHeader", func(t *testing.T) { - t.Parallel() - - block := &types.Block{ - Header: testBlock.Header.Copy(), - Transactions: []*types.Transaction{}, - Uncles: []*types.Header{}, - } - - verifyBlock := func(b *types.Block) error { - assert.Equal(t, block, b) - - return errTest - } - - hooks := newTestHooks(nil, nil, nil, verifyBlock, nil, nil, nil) - - assert.Equal( - t, - errTest, - hooks.VerifyBlock(block), - ) - assert.Equal( - t, - testBlock, - block, - ) - }) -} - -//nolint:dupl -func TestProcessHeader(t *testing.T) { - t.Parallel() - - t.Run("should do nothing if the function is not set", func(t *testing.T) { - t.Parallel() - - header := testHeader.Copy() - - hooks := newTestHooks(nil, nil, nil, nil, nil, nil, nil) - - assert.Nil(t, hooks.ProcessHeader(header)) - assert.Equal(t, testHeader, header) - }) - - t.Run("should call ProcessHeader", func(t *testing.T) { - t.Parallel() - - header := testHeader.Copy() - - processHeader := func(h *types.Header) error { - assert.Equal(t, header, h) - - return errTest - } - - hooks := newTestHooks(nil, nil, nil, nil, processHeader, nil, nil) - - assert.Equal( - t, - errTest, - hooks.ProcessHeader(header), - ) - assert.Equal( - t, - testHeader, - header, - ) - }) -} - -func TestPreCommitState(t *testing.T) { - t.Parallel() - - var ( - txn = &state.Transition{} - ) - - t.Run("should do nothing if the function is not set", func(t *testing.T) { - t.Parallel() - - header := testHeader.Copy() - - hooks := newTestHooks(nil, nil, nil, nil, nil, nil, nil) - - assert.Nil(t, hooks.PreCommitState(header, txn)) - assert.Equal(t, testHeader, header) - }) - - t.Run("should call ProcessHeader", func(t *testing.T) { - t.Parallel() - - header := testHeader.Copy() - - preCommitState := func(h *types.Header, x *state.Transition) error { - assert.Equal(t, header, h) - assert.Equal(t, txn, x) - - return errTest - } - - hooks := newTestHooks(nil, nil, nil, nil, nil, preCommitState, nil) - - assert.Equal( - t, - errTest, - hooks.PreCommitState(header, txn), - ) - assert.Equal( - t, - testHeader, - header, - ) - }) -} - -//nolint:dupl -func TestPostInsertBlock(t *testing.T) { - t.Parallel() - - t.Run("should do nothing if the function is not set", func(t *testing.T) { - t.Parallel() - - block := &types.Block{ - Header: testBlock.Header.Copy(), - Transactions: []*types.Transaction{}, - Uncles: []*types.Header{}, - } - - hooks := newTestHooks(nil, nil, nil, nil, nil, nil, nil) - - assert.Nil(t, hooks.PostInsertBlock(block)) - assert.Equal(t, testBlock, block) - }) - - t.Run("should call ProcessHeader", func(t *testing.T) { - t.Parallel() - - block := &types.Block{ - Header: testBlock.Header.Copy(), - Transactions: []*types.Transaction{}, - Uncles: []*types.Header{}, - } - - postBlock := func(b *types.Block) error { - assert.Equal(t, block, b) - - return errTest - } - - hooks := newTestHooks(nil, nil, nil, nil, nil, nil, postBlock) - - assert.Equal( - t, - errTest, - hooks.PostInsertBlock(block), - ) - assert.Equal( - t, - testBlock, - block, - ) - }) -} diff --git a/consensus/ibft/ibft.go b/consensus/ibft/ibft.go deleted file mode 100644 index acb608173d..0000000000 --- a/consensus/ibft/ibft.go +++ /dev/null @@ -1,692 +0,0 @@ -package ibft - -import ( - "errors" - "fmt" - "time" - 
- "github.com/0xPolygon/polygon-edge/blockchain" - "github.com/0xPolygon/polygon-edge/consensus" - "github.com/0xPolygon/polygon-edge/consensus/ibft/fork" - "github.com/0xPolygon/polygon-edge/consensus/ibft/proto" - "github.com/0xPolygon/polygon-edge/consensus/ibft/signer" - "github.com/0xPolygon/polygon-edge/helper/progress" - "github.com/0xPolygon/polygon-edge/network" - "github.com/0xPolygon/polygon-edge/secrets" - "github.com/0xPolygon/polygon-edge/state" - "github.com/0xPolygon/polygon-edge/syncer" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/armon/go-metrics" - "github.com/hashicorp/go-hclog" - "google.golang.org/grpc" -) - -const ( - DefaultEpochSize = 100000 - IbftKeyName = "validator.key" - KeyEpochSize = "epochSize" - - ibftProto = "/ibft/0.2" - - // consensusMetrics is a prefix used for consensus-related metrics - consensusMetrics = "consensus" -) - -var ( - ErrInvalidHookParam = errors.New("invalid IBFT hook param passed in") - ErrProposerSealByNonValidator = errors.New("proposer seal by non-validator") - ErrInvalidMixHash = errors.New("invalid mixhash") - ErrInvalidSha3Uncles = errors.New("invalid sha3 uncles") - ErrWrongDifficulty = errors.New("wrong difficulty") -) - -type txPoolInterface interface { - Prepare() - Length() uint64 - Peek() *types.Transaction - Pop(tx *types.Transaction) - Drop(tx *types.Transaction) - Demote(tx *types.Transaction) - ResetWithHeaders(headers ...*types.Header) - SetSealing(bool) -} - -type forkManagerInterface interface { - Initialize() error - Close() error - GetSigner(uint64) (signer.Signer, error) - GetValidatorStore(uint64) (fork.ValidatorStore, error) - GetValidators(uint64) (validators.Validators, error) - GetHooks(uint64) fork.HooksInterface -} - -// backendIBFT represents the IBFT consensus mechanism object -type backendIBFT struct { - consensus *IBFTConsensus - - // Static References - logger hclog.Logger // Reference to the logging - blockchain *blockchain.Blockchain // Reference to the blockchain layer - network *network.Server // Reference to the networking layer - executor *state.Executor // Reference to the state executor - txpool txPoolInterface // Reference to the transaction pool - syncer syncer.Syncer // Reference to the sync protocol - secretsManager secrets.SecretsManager // Reference to the secret manager - Grpc *grpc.Server // Reference to the gRPC manager - operator *operator // Reference to the gRPC service of IBFT - transport transport // Reference to the transport protocol - - // Dynamic References - forkManager forkManagerInterface // Manager to hold IBFT Forks - currentSigner signer.Signer // Signer at current sequence - currentValidators validators.Validators // signer at current sequence - currentHooks fork.HooksInterface // Hooks at current sequence - - // Configurations - config *consensus.Config // Consensus configuration - epochSize uint64 - quorumSizeBlockNum uint64 - blockTime time.Duration // Minimum block generation time in seconds - - // Channels - closeCh chan struct{} // Channel for closing -} - -// Factory implements the base consensus Factory method -func Factory(params *consensus.Params) (consensus.Consensus, error) { - // defaults for user set fields in genesis - var ( - epochSize = uint64(DefaultEpochSize) - quorumSizeBlockNum = uint64(0) - ) - - if definedEpochSize, ok := params.Config.Config[KeyEpochSize]; ok { - // Epoch size is defined, use the passed in one - readSize, ok := definedEpochSize.(float64) - if !ok { - return nil, 
errors.New("invalid type assertion") - } - - epochSize = uint64(readSize) - } - - if rawBlockNum, ok := params.Config.Config["quorumSizeBlockNum"]; ok { - // Block number specified for quorum size switch - readBlockNum, ok := rawBlockNum.(float64) - if !ok { - return nil, errors.New("invalid type assertion") - } - - quorumSizeBlockNum = uint64(readBlockNum) - } - - logger := params.Logger.Named("ibft") - - forkManager, err := fork.NewForkManager( - logger, - params.Blockchain, - params.Executor, - params.SecretsManager, - params.Config.Path, - epochSize, - params.Config.Config, - ) - - if err != nil { - return nil, err - } - - p := &backendIBFT{ - // References - logger: logger, - blockchain: params.Blockchain, - network: params.Network, - executor: params.Executor, - txpool: params.TxPool, - syncer: syncer.NewSyncer( - params.Logger, - params.Network, - params.Blockchain, - time.Duration(params.BlockTime)*3*time.Second, - ), - secretsManager: params.SecretsManager, - Grpc: params.Grpc, - forkManager: forkManager, - - // Configurations - config: params.Config, - epochSize: epochSize, - quorumSizeBlockNum: quorumSizeBlockNum, - blockTime: time.Duration(params.BlockTime) * time.Second, - - // Channels - closeCh: make(chan struct{}), - } - - // Istanbul requires a different header hash function - p.SetHeaderHash() - - return p, nil -} - -func (i *backendIBFT) Initialize() error { - // register the grpc operator - if i.Grpc != nil { - i.operator = &operator{ibft: i} - proto.RegisterIbftOperatorServer(i.Grpc, i.operator) - } - - // start the transport protocol - if err := i.setupTransport(); err != nil { - return err - } - - // initialize fork manager - if err := i.forkManager.Initialize(); err != nil { - return err - } - - if err := i.updateCurrentModules(i.blockchain.Header().Number + 1); err != nil { - return err - } - - i.logger.Info("validator key", "addr", i.currentSigner.Address().String()) - - i.consensus = newIBFT( - i.logger.Named("consensus"), - i, - i, - ) - - // Ensure consensus takes into account user configured block production time - i.consensus.ExtendRoundTimeout(i.blockTime) - - return nil -} - -// sync runs the syncer in the background to receive blocks from advanced peers -func (i *backendIBFT) startSyncing() { - callInsertBlockHook := func(fullBlock *types.FullBlock) bool { - if err := i.currentHooks.PostInsertBlock(fullBlock.Block); err != nil { - i.logger.Error("failed to call PostInsertBlock", "height", fullBlock.Block.Header.Number, "error", err) - } - - if err := i.updateCurrentModules(fullBlock.Block.Number() + 1); err != nil { - i.logger.Error("failed to update sub modules", "height", fullBlock.Block.Number()+1, "err", err) - } - - i.txpool.ResetWithHeaders(fullBlock.Block.Header) - - return false - } - - if err := i.syncer.Sync( - callInsertBlockHook, - ); err != nil { - i.logger.Error("watch sync failed", "err", err) - } -} - -// Start starts the IBFT consensus -func (i *backendIBFT) Start() error { - // Start the syncer - if err := i.syncer.Start(); err != nil { - return err - } - - // Start syncing blocks from other peers - go i.startSyncing() - - // Start the actual consensus protocol - go i.startConsensus() - - return nil -} - -// GetSyncProgression gets the latest sync progression, if any -func (i *backendIBFT) GetSyncProgression() *progress.Progression { - return i.syncer.GetSyncProgression() -} - -func (i *backendIBFT) startConsensus() { - var ( - newBlockSub = i.blockchain.SubscribeEvents() - syncerBlockCh = make(chan struct{}) - ) - - // Receive a 
notification every time syncer manages - // to insert a valid block. Used for cancelling active consensus - // rounds for a specific height - go func() { - eventCh := newBlockSub.GetEventCh() - - for { - if ev := <-eventCh; ev.Source == "syncer" { - if ev.NewChain[0].Number < i.blockchain.Header().Number { - // The blockchain notification system can eventually deliver - // stale block notifications. These should be ignored - continue - } - - syncerBlockCh <- struct{}{} - } - } - }() - - defer i.blockchain.UnsubscribeEvents(newBlockSub) - - var ( - sequenceCh = make(<-chan struct{}) - isValidator bool - ) - - for { - var ( - latest = i.blockchain.Header().Number - pending = latest + 1 - ) - - if err := i.updateCurrentModules(pending); err != nil { - i.logger.Error( - "failed to update submodules", - "height", pending, - "err", err, - ) - } - - // Update the No.of validator metric - metrics.SetGauge([]string{consensusMetrics, "validators"}, float32(i.currentValidators.Len())) - - isValidator = i.isActiveValidator() - - i.txpool.SetSealing(isValidator) - - if isValidator { - sequenceCh = i.consensus.runSequence(pending) - } - - select { - case <-syncerBlockCh: - if isValidator { - i.consensus.stopSequence() - i.logger.Info("canceled sequence", "sequence", pending) - } - case <-sequenceCh: - case <-i.closeCh: - if isValidator { - i.consensus.stopSequence() - } - - return - } - } -} - -// isActiveValidator returns whether my signer belongs to current validators -func (i *backendIBFT) isActiveValidator() bool { - return i.currentValidators.Includes(i.currentSigner.Address()) -} - -// updateMetrics will update various metrics based on the given block -// currently we capture No.of Txs and block interval metrics using this function -func (i *backendIBFT) updateMetrics(block *types.Block) { - // get previous header - prvHeader, _ := i.blockchain.GetHeaderByNumber(block.Number() - 1) - parentTime := time.Unix(int64(prvHeader.Timestamp), 0) - headerTime := time.Unix(int64(block.Header.Timestamp), 0) - - // Update the block interval metric - if block.Number() > 1 { - metrics.SetGauge([]string{consensusMetrics, "block_interval"}, float32(headerTime.Sub(parentTime).Seconds())) - } - - // Update the Number of transactions in the block metric - metrics.SetGauge([]string{consensusMetrics, "num_txs"}, float32(len(block.Body().Transactions))) - - // Update the base fee metric - metrics.SetGauge([]string{consensusMetrics, "base_fee"}, float32(block.Header.BaseFee)) -} - -// verifyHeaderImpl verifies fields including Extra -// for the past or being proposed header -func (i *backendIBFT) verifyHeaderImpl( - parent, header *types.Header, - headerSigner signer.Signer, - validators validators.Validators, - hooks fork.HooksInterface, - shouldVerifyParentCommittedSeals bool, -) error { - if header.MixHash != signer.IstanbulDigest { - return ErrInvalidMixHash - } - - if header.Sha3Uncles != types.EmptyUncleHash { - return ErrInvalidSha3Uncles - } - - // difficulty has to match number - if header.Difficulty != header.Number { - return ErrWrongDifficulty - } - - // ensure the extra data is correctly formatted - if _, err := headerSigner.GetIBFTExtra(header); err != nil { - return err - } - - // verify the ProposerSeal - if err := verifyProposerSeal( - header, - headerSigner, - validators, - ); err != nil { - return err - } - - // verify the ParentCommittedSeals - if err := i.verifyParentCommittedSeals( - parent, header, - shouldVerifyParentCommittedSeals, - ); err != nil { - return err - } - - // Additional header 
verification - if err := hooks.VerifyHeader(header); err != nil { - return err - } - - return nil -} - -// VerifyHeader wrapper for verifying headers -func (i *backendIBFT) VerifyHeader(header *types.Header) error { - parent, ok := i.blockchain.GetHeaderByNumber(header.Number - 1) - if !ok { - return fmt.Errorf( - "unable to get parent header for block number %d", - header.Number, - ) - } - - headerSigner, validators, hooks, err := getModulesFromForkManager( - i.forkManager, - header.Number, - ) - if err != nil { - return err - } - - // verify all the header fields - if err := i.verifyHeaderImpl( - parent, - header, - headerSigner, - validators, - hooks, - false, - ); err != nil { - return err - } - - extra, err := headerSigner.GetIBFTExtra(header) - if err != nil { - return err - } - - hashForCommittedSeal, err := i.calculateProposalHash( - headerSigner, - header, - extra.RoundNumber, - ) - if err != nil { - return err - } - - // verify the Committed Seals - // CommittedSeals exists only in the finalized header - if err := headerSigner.VerifyCommittedSeals( - hashForCommittedSeal, - extra.CommittedSeals, - validators, - i.quorumSize(header.Number)(validators), - ); err != nil { - return err - } - - return nil -} - -// quorumSize returns a callback that when executed on a Validators computes -// number of votes required to reach quorum based on the size of the set. -// The blockNumber argument indicates which formula was used to calculate the result (see PRs #513, #549) -func (i *backendIBFT) quorumSize(blockNumber uint64) QuorumImplementation { - if blockNumber < i.quorumSizeBlockNum { - return LegacyQuorumSize - } - - return OptimalQuorumSize -} - -// ProcessHeaders updates the snapshot based on previously verified headers -func (i *backendIBFT) ProcessHeaders(headers []*types.Header) error { - for _, header := range headers { - hooks := i.forkManager.GetHooks(header.Number) - - if err := hooks.ProcessHeader(header); err != nil { - return err - } - } - - return nil -} - -// GetBlockCreator retrieves the block signer from the extra data field -func (i *backendIBFT) GetBlockCreator(header *types.Header) (types.Address, error) { - signer, err := i.forkManager.GetSigner(header.Number) - if err != nil { - return types.ZeroAddress, err - } - - return signer.EcrecoverFromHeader(header) -} - -// PreCommitState a hook to be called before finalizing state transition on inserting block -func (i *backendIBFT) PreCommitState(block *types.Block, txn *state.Transition) error { - hooks := i.forkManager.GetHooks(block.Number()) - - return hooks.PreCommitState(block.Header, txn) -} - -// GetEpoch returns the current epoch -func (i *backendIBFT) GetEpoch(number uint64) uint64 { - if number%i.epochSize == 0 { - return number / i.epochSize - } - - return number/i.epochSize + 1 -} - -// IsLastOfEpoch checks if the block number is the last of the epoch -func (i *backendIBFT) IsLastOfEpoch(number uint64) bool { - return number > 0 && number%i.epochSize == 0 -} - -// Close closes the IBFT consensus mechanism, and does write back to disk -func (i *backendIBFT) Close() error { - close(i.closeCh) - - if i.syncer != nil { - if err := i.syncer.Close(); err != nil { - return err - } - } - - if i.forkManager != nil { - if err := i.forkManager.Close(); err != nil { - return err - } - } - - return nil -} - -// SetHeaderHash updates hash calculation function for IBFT -func (i *backendIBFT) SetHeaderHash() { - types.HeaderHash = func(h *types.Header) types.Hash { - signer, err := i.forkManager.GetSigner(h.Number) - if err 
!= nil { - return types.ZeroHash - } - - hash, err := signer.CalculateHeaderHash(h) - if err != nil { - return types.ZeroHash - } - - return hash - } -} - -// GetBridgeProvider returns an instance of BridgeDataProvider -func (i *backendIBFT) GetBridgeProvider() consensus.BridgeDataProvider { - return nil -} - -// FilterExtra is the implementation of Consensus interface -func (i *backendIBFT) FilterExtra(extra []byte) ([]byte, error) { - return extra, nil -} - -// updateCurrentModules updates Signer, Hooks, and Validators -// that are used at specified height -// by fetching from ForkManager -func (i *backendIBFT) updateCurrentModules(height uint64) error { - lastSigner := i.currentSigner - - signer, validators, hooks, err := getModulesFromForkManager(i.forkManager, height) - if err != nil { - return err - } - - i.currentSigner = signer - i.currentValidators = validators - i.currentHooks = hooks - - i.logFork(lastSigner, signer) - - return nil -} - -// logFork logs validation type switch -func (i *backendIBFT) logFork( - lastSigner, signer signer.Signer, -) { - if lastSigner != nil && signer != nil && lastSigner.Type() != signer.Type() { - i.logger.Info("IBFT validation type switched", "old", lastSigner.Type(), "new", signer.Type()) - } -} - -func (i *backendIBFT) verifyParentCommittedSeals( - parent, header *types.Header, - shouldVerifyParentCommittedSeals bool, -) error { - if parent.IsGenesis() { - return nil - } - - parentSigner, parentValidators, _, err := getModulesFromForkManager( - i.forkManager, - parent.Number, - ) - if err != nil { - return err - } - - parentHeader, ok := i.blockchain.GetHeaderByHash(parent.Hash) - if !ok { - return fmt.Errorf("header %s not found", parent.Hash) - } - - parentExtra, err := parentSigner.GetIBFTExtra(parentHeader) - if err != nil { - return err - } - - parentHash, err := i.calculateProposalHash( - parentSigner, - parentHeader, - parentExtra.RoundNumber, - ) - if err != nil { - return err - } - - // if shouldVerifyParentCommittedSeals is false, skip the verification - // when header doesn't have Parent Committed Seals (Backward Compatibility) - return parentSigner.VerifyParentCommittedSeals( - parentHash, - header, - parentValidators, - i.quorumSize(parent.Number)(parentValidators), - shouldVerifyParentCommittedSeals, - ) -} - -// getModulesFromForkManager is a helper function to get all modules from ForkManager -func getModulesFromForkManager(forkManager forkManagerInterface, height uint64) ( - signer.Signer, - validators.Validators, - fork.HooksInterface, - error, -) { - signer, err := forkManager.GetSigner(height) - if err != nil { - return nil, nil, nil, err - } - - validators, err := forkManager.GetValidators(height) - if err != nil { - return nil, nil, nil, err - } - - hooks := forkManager.GetHooks(height) - - return signer, validators, hooks, nil -} - -// verifyProposerSeal verifies ProposerSeal in IBFT Extra of header -// and make sure signer belongs to validators -func verifyProposerSeal( - header *types.Header, - signer signer.Signer, - validators validators.Validators, -) error { - proposer, err := signer.EcrecoverFromHeader(header) - if err != nil { - return err - } - - if !validators.Includes(proposer) { - return ErrProposerSealByNonValidator - } - - return nil -} - -// ValidateExtraDataFormat Verifies that extra data can be unmarshaled -func (i *backendIBFT) ValidateExtraDataFormat(header *types.Header) error { - blockSigner, _, _, err := getModulesFromForkManager( - i.forkManager, - header.Number, - ) - - if err != nil { - return err 
- } - - _, err = blockSigner.GetIBFTExtra(header) - - return err -} diff --git a/consensus/ibft/messages.go b/consensus/ibft/messages.go deleted file mode 100644 index f4cf0a2169..0000000000 --- a/consensus/ibft/messages.go +++ /dev/null @@ -1,108 +0,0 @@ -package ibft - -import ( - "google.golang.org/protobuf/proto" - - protoIBFT "github.com/0xPolygon/go-ibft/messages/proto" -) - -func (i *backendIBFT) signMessage(msg *protoIBFT.Message) *protoIBFT.Message { - raw, err := proto.Marshal(msg) - if err != nil { - return nil - } - - if msg.Signature, err = i.currentSigner.SignIBFTMessage(raw); err != nil { - return nil - } - - return msg -} - -func (i *backendIBFT) BuildPrePrepareMessage( - rawProposal []byte, - certificate *protoIBFT.RoundChangeCertificate, - view *protoIBFT.View, -) *protoIBFT.Message { - proposedBlock := &protoIBFT.Proposal{ - RawProposal: rawProposal, - Round: view.Round, - } - - // hash calculation begins - proposalHash, err := i.calculateProposalHashFromBlockBytes(rawProposal, &view.Round) - if err != nil { - return nil - } - - msg := &protoIBFT.Message{ - View: view, - From: i.ID(), - Type: protoIBFT.MessageType_PREPREPARE, - Payload: &protoIBFT.Message_PreprepareData{ - PreprepareData: &protoIBFT.PrePrepareMessage{ - Proposal: proposedBlock, - ProposalHash: proposalHash.Bytes(), - Certificate: certificate, - }, - }, - } - - return i.signMessage(msg) -} - -func (i *backendIBFT) BuildPrepareMessage(proposalHash []byte, view *protoIBFT.View) *protoIBFT.Message { - msg := &protoIBFT.Message{ - View: view, - From: i.ID(), - Type: protoIBFT.MessageType_PREPARE, - Payload: &protoIBFT.Message_PrepareData{ - PrepareData: &protoIBFT.PrepareMessage{ - ProposalHash: proposalHash, - }, - }, - } - - return i.signMessage(msg) -} - -func (i *backendIBFT) BuildCommitMessage(proposalHash []byte, view *protoIBFT.View) *protoIBFT.Message { - committedSeal, err := i.currentSigner.CreateCommittedSeal(proposalHash) - if err != nil { - i.logger.Error("Unable to build commit message, %v", err) - - return nil - } - - msg := &protoIBFT.Message{ - View: view, - From: i.ID(), - Type: protoIBFT.MessageType_COMMIT, - Payload: &protoIBFT.Message_CommitData{ - CommitData: &protoIBFT.CommitMessage{ - ProposalHash: proposalHash, - CommittedSeal: committedSeal, - }, - }, - } - - return i.signMessage(msg) -} - -func (i *backendIBFT) BuildRoundChangeMessage( - proposal *protoIBFT.Proposal, - certificate *protoIBFT.PreparedCertificate, - view *protoIBFT.View, -) *protoIBFT.Message { - msg := &protoIBFT.Message{ - View: view, - From: i.ID(), - Type: protoIBFT.MessageType_ROUND_CHANGE, - Payload: &protoIBFT.Message_RoundChangeData{RoundChangeData: &protoIBFT.RoundChangeMessage{ - LastPreparedProposal: proposal, - LatestPreparedCertificate: certificate, - }}, - } - - return i.signMessage(msg) -} diff --git a/consensus/ibft/operator_service.go b/consensus/ibft/operator_service.go deleted file mode 100644 index 82f1c38217..0000000000 --- a/consensus/ibft/operator_service.go +++ /dev/null @@ -1,240 +0,0 @@ -package ibft - -import ( - "context" - "errors" - "fmt" - - "github.com/0xPolygon/polygon-edge/consensus/ibft/proto" - "github.com/0xPolygon/polygon-edge/consensus/ibft/signer" - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/0xPolygon/polygon-edge/validators/store" - empty "google.golang.org/protobuf/types/known/emptypb" -) - -var ( - ErrVotingNotSupported = errors.New("voting is not supported") - 
ErrHeaderNotFound = errors.New("header not found") -) - -type operator struct { - proto.UnimplementedIbftOperatorServer - - ibft *backendIBFT -} - -// Votable is an interface of the ValidatorStore with vote function -type Votable interface { - Votes(uint64) ([]*store.Vote, error) - Candidates() []*store.Candidate - Propose(validators.Validator, bool, types.Address) error -} - -// Status returns the status of the IBFT client -func (o *operator) Status(ctx context.Context, req *empty.Empty) (*proto.IbftStatusResp, error) { - signer, err := o.getLatestSigner() - if err != nil { - return nil, err - } - - return &proto.IbftStatusResp{ - Key: signer.Address().String(), - }, nil -} - -// GetSnapshot returns the snapshot, based on the passed in request -func (o *operator) GetSnapshot(ctx context.Context, req *proto.SnapshotReq) (*proto.Snapshot, error) { - height := req.Number - if req.Latest { - height = o.ibft.blockchain.Header().Number - } - - header, ok := o.ibft.blockchain.GetHeaderByNumber(height) - if !ok { - return nil, ErrHeaderNotFound - } - - validatorsStore, err := o.ibft.forkManager.GetValidatorStore(height) - if err != nil { - return nil, err - } - - validators, err := o.ibft.forkManager.GetValidators(height) - if err != nil { - return nil, err - } - - resp := &proto.Snapshot{ - Number: height, - Hash: header.Hash.String(), - Validators: validatorsToProtoValidators(validators), - } - - votes, err := getVotes(validatorsStore, height) - if err != nil { - return nil, err - } - - if votes == nil { - // current ValidatorStore doesn't have voting function - return resp, nil - } - - resp.Votes = votesToProtoVotes(votes) - - return resp, nil -} - -// Propose proposes a new candidate to be added / removed from the validator set -func (o *operator) Propose(ctx context.Context, req *proto.Candidate) (*empty.Empty, error) { - votableSet, err := o.getVotableValidatorStore() - if err != nil { - return nil, err - } - - candidate, err := o.parseCandidate(req) - if err != nil { - return nil, err - } - - if err := votableSet.Propose(candidate, req.Auth, o.ibft.currentSigner.Address()); err != nil { - return nil, err - } - - return &empty.Empty{}, nil -} - -// Candidates returns the validator candidates list -func (o *operator) Candidates(ctx context.Context, req *empty.Empty) (*proto.CandidatesResp, error) { - votableValSet, err := o.getVotableValidatorStore() - if err != nil { - return nil, err - } - - candidates := votableValSet.Candidates() - - return &proto.CandidatesResp{ - Candidates: candidatesToProtoCandidates(candidates), - }, nil -} - -// parseCandidate parses proto.Candidate and maps to validator -func (o *operator) parseCandidate(req *proto.Candidate) (validators.Validator, error) { - signer, err := o.getLatestSigner() - if err != nil { - return nil, err - } - - switch signer.Type() { - case validators.ECDSAValidatorType: - return &validators.ECDSAValidator{ - Address: types.StringToAddress(req.Address), - }, nil - - case validators.BLSValidatorType: - // safe check - if req.Auth { - // BLS public key is necessary but the command is not required - if req.BlsPubkey == nil { - return nil, errors.New("BLS public key required") - } - - if _, err := crypto.UnmarshalBLSPublicKey(req.BlsPubkey); err != nil { - return nil, err - } - } - - // BLS Public Key doesn't have to be given in case of removal - return &validators.BLSValidator{ - Address: types.StringToAddress(req.Address), - BLSPublicKey: req.BlsPubkey, - }, nil - } - - return nil, fmt.Errorf("invalid validator type: %s", signer.Type()) -} 
- -// getVotableValidatorStore gets current validator set and convert its type to Votable -func (o *operator) getVotableValidatorStore() (Votable, error) { - valSet, err := o.ibft.forkManager.GetValidatorStore(o.ibft.blockchain.Header().Number) - if err != nil { - return nil, err - } - - votableValSet, ok := valSet.(Votable) - if !ok { - return nil, ErrVotingNotSupported - } - - return votableValSet, nil -} - -// getLatestSigner gets the latest signer IBFT uses -func (o *operator) getLatestSigner() (signer.Signer, error) { - if o.ibft.currentSigner != nil { - return o.ibft.currentSigner, nil - } - - return o.ibft.forkManager.GetSigner(o.ibft.blockchain.Header().Number) -} - -// validatorsToProtoValidators converts validators to response of validators -func validatorsToProtoValidators(validators validators.Validators) []*proto.Snapshot_Validator { - protoValidators := make([]*proto.Snapshot_Validator, validators.Len()) - - for idx := 0; idx < validators.Len(); idx++ { - validator := validators.At(uint64(idx)) - - protoValidators[idx] = &proto.Snapshot_Validator{ - Type: string(validator.Type()), - Address: validator.Addr().String(), - Data: validator.Bytes(), - } - } - - return protoValidators -} - -// votesToProtoVotes converts votes to response of votes -func votesToProtoVotes(votes []*store.Vote) []*proto.Snapshot_Vote { - protoVotes := make([]*proto.Snapshot_Vote, len(votes)) - - for idx := range votes { - protoVotes[idx] = &proto.Snapshot_Vote{ - Validator: votes[idx].Validator.String(), - Proposed: votes[idx].Candidate.String(), - Auth: votes[idx].Authorize, - } - } - - return protoVotes -} - -func candidatesToProtoCandidates(candidates []*store.Candidate) []*proto.Candidate { - protoCandidates := make([]*proto.Candidate, len(candidates)) - - for idx, candidate := range candidates { - protoCandidates[idx] = &proto.Candidate{ - Address: candidate.Validator.Addr().String(), - Auth: candidate.Authorize, - } - - if blsVal, ok := candidate.Validator.(*validators.BLSValidator); ok { - protoCandidates[idx].BlsPubkey = blsVal.BLSPublicKey - } - } - - return protoCandidates -} - -// getVotes gets votes from validator store only if store supports voting -func getVotes(validatorStore store.ValidatorStore, height uint64) ([]*store.Vote, error) { - votableStore, ok := validatorStore.(Votable) - if !ok { - return nil, nil - } - - return votableStore.Votes(height) -} diff --git a/consensus/ibft/proto/ibft_operator.pb.go b/consensus/ibft/proto/ibft_operator.pb.go deleted file mode 100644 index 2863467466..0000000000 --- a/consensus/ibft/proto/ibft_operator.pb.go +++ /dev/null @@ -1,718 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.7 -// source: consensus/ibft/proto/ibft_operator.proto - -package proto - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type IbftStatusResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` -} - -func (x *IbftStatusResp) Reset() { - *x = IbftStatusResp{} - if protoimpl.UnsafeEnabled { - mi := &file_consensus_ibft_proto_ibft_operator_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *IbftStatusResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*IbftStatusResp) ProtoMessage() {} - -func (x *IbftStatusResp) ProtoReflect() protoreflect.Message { - mi := &file_consensus_ibft_proto_ibft_operator_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use IbftStatusResp.ProtoReflect.Descriptor instead. -func (*IbftStatusResp) Descriptor() ([]byte, []int) { - return file_consensus_ibft_proto_ibft_operator_proto_rawDescGZIP(), []int{0} -} - -func (x *IbftStatusResp) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -type SnapshotReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Latest bool `protobuf:"varint,1,opt,name=latest,proto3" json:"latest,omitempty"` - Number uint64 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"` -} - -func (x *SnapshotReq) Reset() { - *x = SnapshotReq{} - if protoimpl.UnsafeEnabled { - mi := &file_consensus_ibft_proto_ibft_operator_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SnapshotReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SnapshotReq) ProtoMessage() {} - -func (x *SnapshotReq) ProtoReflect() protoreflect.Message { - mi := &file_consensus_ibft_proto_ibft_operator_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SnapshotReq.ProtoReflect.Descriptor instead. 
-func (*SnapshotReq) Descriptor() ([]byte, []int) { - return file_consensus_ibft_proto_ibft_operator_proto_rawDescGZIP(), []int{1} -} - -func (x *SnapshotReq) GetLatest() bool { - if x != nil { - return x.Latest - } - return false -} - -func (x *SnapshotReq) GetNumber() uint64 { - if x != nil { - return x.Number - } - return 0 -} - -type Snapshot struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Validators []*Snapshot_Validator `protobuf:"bytes,1,rep,name=validators,proto3" json:"validators,omitempty"` - Number uint64 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"` - Hash string `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash,omitempty"` - Votes []*Snapshot_Vote `protobuf:"bytes,4,rep,name=votes,proto3" json:"votes,omitempty"` -} - -func (x *Snapshot) Reset() { - *x = Snapshot{} - if protoimpl.UnsafeEnabled { - mi := &file_consensus_ibft_proto_ibft_operator_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Snapshot) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Snapshot) ProtoMessage() {} - -func (x *Snapshot) ProtoReflect() protoreflect.Message { - mi := &file_consensus_ibft_proto_ibft_operator_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Snapshot.ProtoReflect.Descriptor instead. -func (*Snapshot) Descriptor() ([]byte, []int) { - return file_consensus_ibft_proto_ibft_operator_proto_rawDescGZIP(), []int{2} -} - -func (x *Snapshot) GetValidators() []*Snapshot_Validator { - if x != nil { - return x.Validators - } - return nil -} - -func (x *Snapshot) GetNumber() uint64 { - if x != nil { - return x.Number - } - return 0 -} - -func (x *Snapshot) GetHash() string { - if x != nil { - return x.Hash - } - return "" -} - -func (x *Snapshot) GetVotes() []*Snapshot_Vote { - if x != nil { - return x.Votes - } - return nil -} - -type ProposeReq struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` - Auth bool `protobuf:"varint,2,opt,name=auth,proto3" json:"auth,omitempty"` -} - -func (x *ProposeReq) Reset() { - *x = ProposeReq{} - if protoimpl.UnsafeEnabled { - mi := &file_consensus_ibft_proto_ibft_operator_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ProposeReq) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProposeReq) ProtoMessage() {} - -func (x *ProposeReq) ProtoReflect() protoreflect.Message { - mi := &file_consensus_ibft_proto_ibft_operator_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProposeReq.ProtoReflect.Descriptor instead. 
-func (*ProposeReq) Descriptor() ([]byte, []int) { - return file_consensus_ibft_proto_ibft_operator_proto_rawDescGZIP(), []int{3} -} - -func (x *ProposeReq) GetAddress() string { - if x != nil { - return x.Address - } - return "" -} - -func (x *ProposeReq) GetAuth() bool { - if x != nil { - return x.Auth - } - return false -} - -type CandidatesResp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Candidates []*Candidate `protobuf:"bytes,1,rep,name=candidates,proto3" json:"candidates,omitempty"` -} - -func (x *CandidatesResp) Reset() { - *x = CandidatesResp{} - if protoimpl.UnsafeEnabled { - mi := &file_consensus_ibft_proto_ibft_operator_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CandidatesResp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CandidatesResp) ProtoMessage() {} - -func (x *CandidatesResp) ProtoReflect() protoreflect.Message { - mi := &file_consensus_ibft_proto_ibft_operator_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CandidatesResp.ProtoReflect.Descriptor instead. -func (*CandidatesResp) Descriptor() ([]byte, []int) { - return file_consensus_ibft_proto_ibft_operator_proto_rawDescGZIP(), []int{4} -} - -func (x *CandidatesResp) GetCandidates() []*Candidate { - if x != nil { - return x.Candidates - } - return nil -} - -type Candidate struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` - BlsPubkey []byte `protobuf:"bytes,2,opt,name=bls_pubkey,json=blsPubkey,proto3" json:"bls_pubkey,omitempty"` - Auth bool `protobuf:"varint,3,opt,name=auth,proto3" json:"auth,omitempty"` -} - -func (x *Candidate) Reset() { - *x = Candidate{} - if protoimpl.UnsafeEnabled { - mi := &file_consensus_ibft_proto_ibft_operator_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Candidate) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Candidate) ProtoMessage() {} - -func (x *Candidate) ProtoReflect() protoreflect.Message { - mi := &file_consensus_ibft_proto_ibft_operator_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Candidate.ProtoReflect.Descriptor instead. 
-func (*Candidate) Descriptor() ([]byte, []int) { - return file_consensus_ibft_proto_ibft_operator_proto_rawDescGZIP(), []int{5} -} - -func (x *Candidate) GetAddress() string { - if x != nil { - return x.Address - } - return "" -} - -func (x *Candidate) GetBlsPubkey() []byte { - if x != nil { - return x.BlsPubkey - } - return nil -} - -func (x *Candidate) GetAuth() bool { - if x != nil { - return x.Auth - } - return false -} - -type Snapshot_Validator struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` - Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` -} - -func (x *Snapshot_Validator) Reset() { - *x = Snapshot_Validator{} - if protoimpl.UnsafeEnabled { - mi := &file_consensus_ibft_proto_ibft_operator_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Snapshot_Validator) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Snapshot_Validator) ProtoMessage() {} - -func (x *Snapshot_Validator) ProtoReflect() protoreflect.Message { - mi := &file_consensus_ibft_proto_ibft_operator_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Snapshot_Validator.ProtoReflect.Descriptor instead. -func (*Snapshot_Validator) Descriptor() ([]byte, []int) { - return file_consensus_ibft_proto_ibft_operator_proto_rawDescGZIP(), []int{2, 0} -} - -func (x *Snapshot_Validator) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Snapshot_Validator) GetAddress() string { - if x != nil { - return x.Address - } - return "" -} - -func (x *Snapshot_Validator) GetData() []byte { - if x != nil { - return x.Data - } - return nil -} - -type Snapshot_Vote struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Validator string `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator,omitempty"` - Proposed string `protobuf:"bytes,2,opt,name=proposed,proto3" json:"proposed,omitempty"` - Auth bool `protobuf:"varint,3,opt,name=auth,proto3" json:"auth,omitempty"` -} - -func (x *Snapshot_Vote) Reset() { - *x = Snapshot_Vote{} - if protoimpl.UnsafeEnabled { - mi := &file_consensus_ibft_proto_ibft_operator_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Snapshot_Vote) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Snapshot_Vote) ProtoMessage() {} - -func (x *Snapshot_Vote) ProtoReflect() protoreflect.Message { - mi := &file_consensus_ibft_proto_ibft_operator_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Snapshot_Vote.ProtoReflect.Descriptor instead. 
-func (*Snapshot_Vote) Descriptor() ([]byte, []int) { - return file_consensus_ibft_proto_ibft_operator_proto_rawDescGZIP(), []int{2, 1} -} - -func (x *Snapshot_Vote) GetValidator() string { - if x != nil { - return x.Validator - } - return "" -} - -func (x *Snapshot_Vote) GetProposed() string { - if x != nil { - return x.Proposed - } - return "" -} - -func (x *Snapshot_Vote) GetAuth() bool { - if x != nil { - return x.Auth - } - return false -} - -var File_consensus_ibft_proto_ibft_operator_proto protoreflect.FileDescriptor - -var file_consensus_ibft_proto_ibft_operator_proto_rawDesc = []byte{ - 0x0a, 0x28, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, 0x73, 0x75, 0x73, 0x2f, 0x69, 0x62, 0x66, 0x74, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x69, 0x62, 0x66, 0x74, 0x5f, 0x6f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x76, 0x31, 0x1a, 0x1b, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x22, 0x0a, 0x0e, 0x49, - 0x62, 0x66, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, - 0x3d, 0x0a, 0x0b, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x12, 0x16, - 0x0a, 0x06, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xbc, - 0x02, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x36, 0x0a, 0x0a, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x6f, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x68, - 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, - 0x27, 0x0a, 0x05, 0x76, 0x6f, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x56, 0x6f, 0x74, - 0x65, 0x52, 0x05, 0x76, 0x6f, 0x74, 0x65, 0x73, 0x1a, 0x4d, 0x0a, 0x09, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x54, 0x0a, 0x04, 0x56, 0x6f, 0x74, 0x65, 0x12, - 0x1c, 0x0a, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1a, 0x0a, - 0x08, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x75, 0x74, - 0x68, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x22, 0x3a, 0x0a, - 0x0a, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x12, 0x18, 0x0a, 0x07, 0x61, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x22, 0x3f, 0x0a, 0x0e, 0x43, 0x61, 0x6e, - 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2d, 0x0a, 0x0a, 0x63, - 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x0d, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x0a, - 0x63, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x73, 0x22, 0x58, 0x0a, 0x09, 0x43, 0x61, - 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x73, 0x5f, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x62, 0x6c, 0x73, 0x50, 0x75, 0x62, 0x6b, 0x65, 0x79, - 0x12, 0x12, 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, - 0x61, 0x75, 0x74, 0x68, 0x32, 0xde, 0x01, 0x0a, 0x0c, 0x49, 0x62, 0x66, 0x74, 0x4f, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x2c, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x12, 0x0f, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x0c, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x12, 0x30, 0x0a, 0x07, 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x12, 0x0d, - 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x38, 0x0a, 0x0a, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x76, 0x31, - 0x2e, 0x43, 0x61, 0x6e, 0x64, 0x69, 0x64, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x12, - 0x34, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x12, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x62, 0x66, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x42, 0x17, 0x5a, 0x15, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x6e, - 0x73, 0x75, 0x73, 0x2f, 0x69, 0x62, 0x66, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_consensus_ibft_proto_ibft_operator_proto_rawDescOnce sync.Once - file_consensus_ibft_proto_ibft_operator_proto_rawDescData = file_consensus_ibft_proto_ibft_operator_proto_rawDesc -) - -func file_consensus_ibft_proto_ibft_operator_proto_rawDescGZIP() []byte { - file_consensus_ibft_proto_ibft_operator_proto_rawDescOnce.Do(func() { - file_consensus_ibft_proto_ibft_operator_proto_rawDescData = protoimpl.X.CompressGZIP(file_consensus_ibft_proto_ibft_operator_proto_rawDescData) - }) - return file_consensus_ibft_proto_ibft_operator_proto_rawDescData -} - -var 
file_consensus_ibft_proto_ibft_operator_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_consensus_ibft_proto_ibft_operator_proto_goTypes = []interface{}{ - (*IbftStatusResp)(nil), // 0: v1.IbftStatusResp - (*SnapshotReq)(nil), // 1: v1.SnapshotReq - (*Snapshot)(nil), // 2: v1.Snapshot - (*ProposeReq)(nil), // 3: v1.ProposeReq - (*CandidatesResp)(nil), // 4: v1.CandidatesResp - (*Candidate)(nil), // 5: v1.Candidate - (*Snapshot_Validator)(nil), // 6: v1.Snapshot.Validator - (*Snapshot_Vote)(nil), // 7: v1.Snapshot.Vote - (*emptypb.Empty)(nil), // 8: google.protobuf.Empty -} -var file_consensus_ibft_proto_ibft_operator_proto_depIdxs = []int32{ - 6, // 0: v1.Snapshot.validators:type_name -> v1.Snapshot.Validator - 7, // 1: v1.Snapshot.votes:type_name -> v1.Snapshot.Vote - 5, // 2: v1.CandidatesResp.candidates:type_name -> v1.Candidate - 1, // 3: v1.IbftOperator.GetSnapshot:input_type -> v1.SnapshotReq - 5, // 4: v1.IbftOperator.Propose:input_type -> v1.Candidate - 8, // 5: v1.IbftOperator.Candidates:input_type -> google.protobuf.Empty - 8, // 6: v1.IbftOperator.Status:input_type -> google.protobuf.Empty - 2, // 7: v1.IbftOperator.GetSnapshot:output_type -> v1.Snapshot - 8, // 8: v1.IbftOperator.Propose:output_type -> google.protobuf.Empty - 4, // 9: v1.IbftOperator.Candidates:output_type -> v1.CandidatesResp - 0, // 10: v1.IbftOperator.Status:output_type -> v1.IbftStatusResp - 7, // [7:11] is the sub-list for method output_type - 3, // [3:7] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_consensus_ibft_proto_ibft_operator_proto_init() } -func file_consensus_ibft_proto_ibft_operator_proto_init() { - if File_consensus_ibft_proto_ibft_operator_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_consensus_ibft_proto_ibft_operator_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IbftStatusResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_consensus_ibft_proto_ibft_operator_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SnapshotReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_consensus_ibft_proto_ibft_operator_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Snapshot); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_consensus_ibft_proto_ibft_operator_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProposeReq); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_consensus_ibft_proto_ibft_operator_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CandidatesResp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_consensus_ibft_proto_ibft_operator_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Candidate); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return 
nil - } - } - file_consensus_ibft_proto_ibft_operator_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Snapshot_Validator); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_consensus_ibft_proto_ibft_operator_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Snapshot_Vote); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_consensus_ibft_proto_ibft_operator_proto_rawDesc, - NumEnums: 0, - NumMessages: 8, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_consensus_ibft_proto_ibft_operator_proto_goTypes, - DependencyIndexes: file_consensus_ibft_proto_ibft_operator_proto_depIdxs, - MessageInfos: file_consensus_ibft_proto_ibft_operator_proto_msgTypes, - }.Build() - File_consensus_ibft_proto_ibft_operator_proto = out.File - file_consensus_ibft_proto_ibft_operator_proto_rawDesc = nil - file_consensus_ibft_proto_ibft_operator_proto_goTypes = nil - file_consensus_ibft_proto_ibft_operator_proto_depIdxs = nil -} diff --git a/consensus/ibft/proto/ibft_operator.pb.validate.go b/consensus/ibft/proto/ibft_operator.pb.validate.go deleted file mode 100644 index 8569a959d3..0000000000 --- a/consensus/ibft/proto/ibft_operator.pb.validate.go +++ /dev/null @@ -1,968 +0,0 @@ -// Code generated by protoc-gen-validate. DO NOT EDIT. -// source: consensus/ibft/proto/ibft_operator.proto - -package proto - -import ( - "bytes" - "errors" - "fmt" - "net" - "net/mail" - "net/url" - "regexp" - "sort" - "strings" - "time" - "unicode/utf8" - - "google.golang.org/protobuf/types/known/anypb" -) - -// ensure the imports are used -var ( - _ = bytes.MinRead - _ = errors.New("") - _ = fmt.Print - _ = utf8.UTFMax - _ = (*regexp.Regexp)(nil) - _ = (*strings.Reader)(nil) - _ = net.IPv4len - _ = time.Duration(0) - _ = (*url.URL)(nil) - _ = (*mail.Address)(nil) - _ = anypb.Any{} - _ = sort.Sort -) - -// Validate checks the field values on IbftStatusResp with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *IbftStatusResp) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on IbftStatusResp with the rules defined -// in the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in IbftStatusRespMultiError, -// or nil if none found. -func (m *IbftStatusResp) ValidateAll() error { - return m.validate(true) -} - -func (m *IbftStatusResp) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Key - - if len(errors) > 0 { - return IbftStatusRespMultiError(errors) - } - - return nil -} - -// IbftStatusRespMultiError is an error wrapping multiple validation errors -// returned by IbftStatusResp.ValidateAll() if the designated constraints -// aren't met. -type IbftStatusRespMultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m IbftStatusRespMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m IbftStatusRespMultiError) AllErrors() []error { return m } - -// IbftStatusRespValidationError is the validation error returned by -// IbftStatusResp.Validate if the designated constraints aren't met. -type IbftStatusRespValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e IbftStatusRespValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e IbftStatusRespValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e IbftStatusRespValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e IbftStatusRespValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e IbftStatusRespValidationError) ErrorName() string { return "IbftStatusRespValidationError" } - -// Error satisfies the builtin error interface -func (e IbftStatusRespValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sIbftStatusResp.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = IbftStatusRespValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = IbftStatusRespValidationError{} - -// Validate checks the field values on SnapshotReq with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *SnapshotReq) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on SnapshotReq with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in SnapshotReqMultiError, or -// nil if none found. -func (m *SnapshotReq) ValidateAll() error { - return m.validate(true) -} - -func (m *SnapshotReq) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Latest - - // no validation rules for Number - - if len(errors) > 0 { - return SnapshotReqMultiError(errors) - } - - return nil -} - -// SnapshotReqMultiError is an error wrapping multiple validation errors -// returned by SnapshotReq.ValidateAll() if the designated constraints aren't met. -type SnapshotReqMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m SnapshotReqMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m SnapshotReqMultiError) AllErrors() []error { return m } - -// SnapshotReqValidationError is the validation error returned by -// SnapshotReq.Validate if the designated constraints aren't met. -type SnapshotReqValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. 
-func (e SnapshotReqValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e SnapshotReqValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e SnapshotReqValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SnapshotReqValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e SnapshotReqValidationError) ErrorName() string { return "SnapshotReqValidationError" } - -// Error satisfies the builtin error interface -func (e SnapshotReqValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSnapshotReq.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SnapshotReqValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SnapshotReqValidationError{} - -// Validate checks the field values on Snapshot with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *Snapshot) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on Snapshot with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in SnapshotMultiError, or nil -// if none found. -func (m *Snapshot) ValidateAll() error { - return m.validate(true) -} - -func (m *Snapshot) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - for idx, item := range m.GetValidators() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, SnapshotValidationError{ - field: fmt.Sprintf("Validators[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, SnapshotValidationError{ - field: fmt.Sprintf("Validators[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SnapshotValidationError{ - field: fmt.Sprintf("Validators[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - // no validation rules for Number - - // no validation rules for Hash - - for idx, item := range m.GetVotes() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, SnapshotValidationError{ - field: fmt.Sprintf("Votes[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, SnapshotValidationError{ - field: fmt.Sprintf("Votes[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return SnapshotValidationError{ - field: fmt.Sprintf("Votes[%v]", idx), - 
reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return SnapshotMultiError(errors) - } - - return nil -} - -// SnapshotMultiError is an error wrapping multiple validation errors returned -// by Snapshot.ValidateAll() if the designated constraints aren't met. -type SnapshotMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m SnapshotMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m SnapshotMultiError) AllErrors() []error { return m } - -// SnapshotValidationError is the validation error returned by -// Snapshot.Validate if the designated constraints aren't met. -type SnapshotValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e SnapshotValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e SnapshotValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e SnapshotValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e SnapshotValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e SnapshotValidationError) ErrorName() string { return "SnapshotValidationError" } - -// Error satisfies the builtin error interface -func (e SnapshotValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSnapshot.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = SnapshotValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = SnapshotValidationError{} - -// Validate checks the field values on ProposeReq with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *ProposeReq) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on ProposeReq with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in ProposeReqMultiError, or -// nil if none found. -func (m *ProposeReq) ValidateAll() error { - return m.validate(true) -} - -func (m *ProposeReq) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Address - - // no validation rules for Auth - - if len(errors) > 0 { - return ProposeReqMultiError(errors) - } - - return nil -} - -// ProposeReqMultiError is an error wrapping multiple validation errors -// returned by ProposeReq.ValidateAll() if the designated constraints aren't met. -type ProposeReqMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m ProposeReqMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. 
-func (m ProposeReqMultiError) AllErrors() []error { return m } - -// ProposeReqValidationError is the validation error returned by -// ProposeReq.Validate if the designated constraints aren't met. -type ProposeReqValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e ProposeReqValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e ProposeReqValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e ProposeReqValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e ProposeReqValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e ProposeReqValidationError) ErrorName() string { return "ProposeReqValidationError" } - -// Error satisfies the builtin error interface -func (e ProposeReqValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sProposeReq.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = ProposeReqValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = ProposeReqValidationError{} - -// Validate checks the field values on CandidatesResp with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *CandidatesResp) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on CandidatesResp with the rules defined -// in the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in CandidatesRespMultiError, -// or nil if none found. -func (m *CandidatesResp) ValidateAll() error { - return m.validate(true) -} - -func (m *CandidatesResp) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - for idx, item := range m.GetCandidates() { - _, _ = idx, item - - if all { - switch v := interface{}(item).(type) { - case interface{ ValidateAll() error }: - if err := v.ValidateAll(); err != nil { - errors = append(errors, CandidatesRespValidationError{ - field: fmt.Sprintf("Candidates[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - case interface{ Validate() error }: - if err := v.Validate(); err != nil { - errors = append(errors, CandidatesRespValidationError{ - field: fmt.Sprintf("Candidates[%v]", idx), - reason: "embedded message failed validation", - cause: err, - }) - } - } - } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { - if err := v.Validate(); err != nil { - return CandidatesRespValidationError{ - field: fmt.Sprintf("Candidates[%v]", idx), - reason: "embedded message failed validation", - cause: err, - } - } - } - - } - - if len(errors) > 0 { - return CandidatesRespMultiError(errors) - } - - return nil -} - -// CandidatesRespMultiError is an error wrapping multiple validation errors -// returned by CandidatesResp.ValidateAll() if the designated constraints -// aren't met. -type CandidatesRespMultiError []error - -// Error returns a concatenation of all the error messages it wraps. 
-func (m CandidatesRespMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m CandidatesRespMultiError) AllErrors() []error { return m } - -// CandidatesRespValidationError is the validation error returned by -// CandidatesResp.Validate if the designated constraints aren't met. -type CandidatesRespValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e CandidatesRespValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e CandidatesRespValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e CandidatesRespValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e CandidatesRespValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e CandidatesRespValidationError) ErrorName() string { return "CandidatesRespValidationError" } - -// Error satisfies the builtin error interface -func (e CandidatesRespValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sCandidatesResp.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = CandidatesRespValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = CandidatesRespValidationError{} - -// Validate checks the field values on Candidate with the rules defined in the -// proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *Candidate) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on Candidate with the rules defined in -// the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in CandidateMultiError, or nil -// if none found. -func (m *Candidate) ValidateAll() error { - return m.validate(true) -} - -func (m *Candidate) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Address - - // no validation rules for BlsPubkey - - // no validation rules for Auth - - if len(errors) > 0 { - return CandidateMultiError(errors) - } - - return nil -} - -// CandidateMultiError is an error wrapping multiple validation errors returned -// by Candidate.ValidateAll() if the designated constraints aren't met. -type CandidateMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m CandidateMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m CandidateMultiError) AllErrors() []error { return m } - -// CandidateValidationError is the validation error returned by -// Candidate.Validate if the designated constraints aren't met. -type CandidateValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. 
-func (e CandidateValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e CandidateValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e CandidateValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e CandidateValidationError) Key() bool { return e.key } - -// ErrorName returns error name. -func (e CandidateValidationError) ErrorName() string { return "CandidateValidationError" } - -// Error satisfies the builtin error interface -func (e CandidateValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sCandidate.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = CandidateValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = CandidateValidationError{} - -// Validate checks the field values on Snapshot_Validator with the rules -// defined in the proto definition for this message. If any rules are -// violated, the first error encountered is returned, or nil if there are no violations. -func (m *Snapshot_Validator) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on Snapshot_Validator with the rules -// defined in the proto definition for this message. If any rules are -// violated, the result is a list of violation errors wrapped in -// Snapshot_ValidatorMultiError, or nil if none found. -func (m *Snapshot_Validator) ValidateAll() error { - return m.validate(true) -} - -func (m *Snapshot_Validator) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Type - - // no validation rules for Address - - // no validation rules for Data - - if len(errors) > 0 { - return Snapshot_ValidatorMultiError(errors) - } - - return nil -} - -// Snapshot_ValidatorMultiError is an error wrapping multiple validation errors -// returned by Snapshot_Validator.ValidateAll() if the designated constraints -// aren't met. -type Snapshot_ValidatorMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m Snapshot_ValidatorMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m Snapshot_ValidatorMultiError) AllErrors() []error { return m } - -// Snapshot_ValidatorValidationError is the validation error returned by -// Snapshot_Validator.Validate if the designated constraints aren't met. -type Snapshot_ValidatorValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e Snapshot_ValidatorValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e Snapshot_ValidatorValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e Snapshot_ValidatorValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e Snapshot_ValidatorValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
-func (e Snapshot_ValidatorValidationError) ErrorName() string { - return "Snapshot_ValidatorValidationError" -} - -// Error satisfies the builtin error interface -func (e Snapshot_ValidatorValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSnapshot_Validator.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = Snapshot_ValidatorValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = Snapshot_ValidatorValidationError{} - -// Validate checks the field values on Snapshot_Vote with the rules defined in -// the proto definition for this message. If any rules are violated, the first -// error encountered is returned, or nil if there are no violations. -func (m *Snapshot_Vote) Validate() error { - return m.validate(false) -} - -// ValidateAll checks the field values on Snapshot_Vote with the rules defined -// in the proto definition for this message. If any rules are violated, the -// result is a list of violation errors wrapped in Snapshot_VoteMultiError, or -// nil if none found. -func (m *Snapshot_Vote) ValidateAll() error { - return m.validate(true) -} - -func (m *Snapshot_Vote) validate(all bool) error { - if m == nil { - return nil - } - - var errors []error - - // no validation rules for Validator - - // no validation rules for Proposed - - // no validation rules for Auth - - if len(errors) > 0 { - return Snapshot_VoteMultiError(errors) - } - - return nil -} - -// Snapshot_VoteMultiError is an error wrapping multiple validation errors -// returned by Snapshot_Vote.ValidateAll() if the designated constraints -// aren't met. -type Snapshot_VoteMultiError []error - -// Error returns a concatenation of all the error messages it wraps. -func (m Snapshot_VoteMultiError) Error() string { - var msgs []string - for _, err := range m { - msgs = append(msgs, err.Error()) - } - return strings.Join(msgs, "; ") -} - -// AllErrors returns a list of validation violation errors. -func (m Snapshot_VoteMultiError) AllErrors() []error { return m } - -// Snapshot_VoteValidationError is the validation error returned by -// Snapshot_Vote.Validate if the designated constraints aren't met. -type Snapshot_VoteValidationError struct { - field string - reason string - cause error - key bool -} - -// Field function returns field value. -func (e Snapshot_VoteValidationError) Field() string { return e.field } - -// Reason function returns reason value. -func (e Snapshot_VoteValidationError) Reason() string { return e.reason } - -// Cause function returns cause value. -func (e Snapshot_VoteValidationError) Cause() error { return e.cause } - -// Key function returns key value. -func (e Snapshot_VoteValidationError) Key() bool { return e.key } - -// ErrorName returns error name. 
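The Validate/ValidateAll methods being deleted in this file follow the usual protoc-gen-validate shape: Validate returns only the first violation, while ValidateAll collects every violation into a MultiError slice exposed through AllErrors. A minimal, hedged sketch of how a caller of the removed API could have consumed both forms (the import alias, the checkSnapshot helper, and the field values are illustrative, not taken from this repository):

package main

import (
	"errors"
	"fmt"
	"log"

	// Assumed import alias for the generated package removed in this diff.
	ibftproto "github.com/0xPolygon/polygon-edge/consensus/ibft/proto"
)

// checkSnapshot is a hypothetical caller of the removed API: Validate stops at
// the first violation, ValidateAll gathers every violation into a MultiError.
func checkSnapshot(snap *ibftproto.Snapshot) {
	if err := snap.Validate(); err != nil {
		log.Printf("first violation: %v", err)
	}

	if err := snap.ValidateAll(); err != nil {
		var multi ibftproto.SnapshotMultiError
		if errors.As(err, &multi) {
			for _, violation := range multi.AllErrors() {
				fmt.Println("violation:", violation)
			}
		}
	}
}

func main() {
	checkSnapshot(&ibftproto.Snapshot{Number: 1, Hash: "0x0"})
}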
-func (e Snapshot_VoteValidationError) ErrorName() string { return "Snapshot_VoteValidationError" } - -// Error satisfies the builtin error interface -func (e Snapshot_VoteValidationError) Error() string { - cause := "" - if e.cause != nil { - cause = fmt.Sprintf(" | caused by: %v", e.cause) - } - - key := "" - if e.key { - key = "key for " - } - - return fmt.Sprintf( - "invalid %sSnapshot_Vote.%s: %s%s", - key, - e.field, - e.reason, - cause) -} - -var _ error = Snapshot_VoteValidationError{} - -var _ interface { - Field() string - Reason() string - Key() bool - Cause() error - ErrorName() string -} = Snapshot_VoteValidationError{} diff --git a/consensus/ibft/proto/ibft_operator.proto b/consensus/ibft/proto/ibft_operator.proto deleted file mode 100644 index 8c75026434..0000000000 --- a/consensus/ibft/proto/ibft_operator.proto +++ /dev/null @@ -1,60 +0,0 @@ -syntax = "proto3"; - -package v1; - -option go_package = "/consensus/ibft/proto"; - -import "google/protobuf/empty.proto"; - -service IbftOperator { - rpc GetSnapshot(SnapshotReq) returns (Snapshot); - rpc Propose(Candidate) returns (google.protobuf.Empty); - rpc Candidates(google.protobuf.Empty) returns (CandidatesResp); - rpc Status(google.protobuf.Empty) returns (IbftStatusResp); -} - -message IbftStatusResp { - string key = 1; -} - -message SnapshotReq { - bool latest = 1; - uint64 number = 2; -} - -message Snapshot { - repeated Validator validators = 1; - - uint64 number = 2; - - string hash = 3; - - repeated Vote votes = 4; - - message Validator { - string type = 1; - string address = 2; - bytes data = 3; - } - - message Vote { - string validator = 1; - string proposed = 2; - bool auth = 3; - } -} - -message ProposeReq { - string address = 1; - bool auth = 2; -} - -message CandidatesResp { - repeated Candidate candidates = 1; -} - -message Candidate { - string address = 1; - bytes bls_pubkey = 2; - bool auth = 3; -} diff --git a/consensus/ibft/proto/ibft_operator_grpc.pb.go b/consensus/ibft/proto/ibft_operator_grpc.pb.go deleted file mode 100644 index dac75a466f..0000000000 --- a/consensus/ibft/proto/ibft_operator_grpc.pb.go +++ /dev/null @@ -1,214 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.21.7 -// source: consensus/ibft/proto/ibft_operator.proto - -package proto - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// IbftOperatorClient is the client API for IbftOperator service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
-type IbftOperatorClient interface { - GetSnapshot(ctx context.Context, in *SnapshotReq, opts ...grpc.CallOption) (*Snapshot, error) - Propose(ctx context.Context, in *Candidate, opts ...grpc.CallOption) (*emptypb.Empty, error) - Candidates(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*CandidatesResp, error) - Status(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*IbftStatusResp, error) -} - -type ibftOperatorClient struct { - cc grpc.ClientConnInterface -} - -func NewIbftOperatorClient(cc grpc.ClientConnInterface) IbftOperatorClient { - return &ibftOperatorClient{cc} -} - -func (c *ibftOperatorClient) GetSnapshot(ctx context.Context, in *SnapshotReq, opts ...grpc.CallOption) (*Snapshot, error) { - out := new(Snapshot) - err := c.cc.Invoke(ctx, "/v1.IbftOperator/GetSnapshot", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ibftOperatorClient) Propose(ctx context.Context, in *Candidate, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/v1.IbftOperator/Propose", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ibftOperatorClient) Candidates(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*CandidatesResp, error) { - out := new(CandidatesResp) - err := c.cc.Invoke(ctx, "/v1.IbftOperator/Candidates", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *ibftOperatorClient) Status(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*IbftStatusResp, error) { - out := new(IbftStatusResp) - err := c.cc.Invoke(ctx, "/v1.IbftOperator/Status", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// IbftOperatorServer is the server API for IbftOperator service. -// All implementations must embed UnimplementedIbftOperatorServer -// for forward compatibility -type IbftOperatorServer interface { - GetSnapshot(context.Context, *SnapshotReq) (*Snapshot, error) - Propose(context.Context, *Candidate) (*emptypb.Empty, error) - Candidates(context.Context, *emptypb.Empty) (*CandidatesResp, error) - Status(context.Context, *emptypb.Empty) (*IbftStatusResp, error) - mustEmbedUnimplementedIbftOperatorServer() -} - -// UnimplementedIbftOperatorServer must be embedded to have forward compatible implementations. -type UnimplementedIbftOperatorServer struct { -} - -func (UnimplementedIbftOperatorServer) GetSnapshot(context.Context, *SnapshotReq) (*Snapshot, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSnapshot not implemented") -} -func (UnimplementedIbftOperatorServer) Propose(context.Context, *Candidate) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Propose not implemented") -} -func (UnimplementedIbftOperatorServer) Candidates(context.Context, *emptypb.Empty) (*CandidatesResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method Candidates not implemented") -} -func (UnimplementedIbftOperatorServer) Status(context.Context, *emptypb.Empty) (*IbftStatusResp, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") -} -func (UnimplementedIbftOperatorServer) mustEmbedUnimplementedIbftOperatorServer() {} - -// UnsafeIbftOperatorServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to IbftOperatorServer will -// result in compilation errors. 
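The generated client above was the public surface of the service being removed: NewIbftOperatorClient wraps a grpc.ClientConnInterface and exposes GetSnapshot, Propose, Candidates and Status. A hedged sketch of a one-shot caller, assuming standard grpc-go dialing; the endpoint address, timeout, and printed fields are illustrative placeholders:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/types/known/emptypb"

	// Assumed import alias for the generated package removed in this diff.
	ibftop "github.com/0xPolygon/polygon-edge/consensus/ibft/proto"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:9632", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := ibftop.NewIbftOperatorClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Fetch the latest snapshot (SnapshotReq.Latest selects the head snapshot).
	snap, err := client.GetSnapshot(ctx, &ibftop.SnapshotReq{Latest: true})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("snapshot #%d with %d validators\n", snap.Number, len(snap.Validators))

	// Query the node's validator key via Status.
	status, err := client.Status(ctx, &emptypb.Empty{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("validator key:", status.Key)
}

The Snapshot and SnapshotReq field names follow the usual protoc-gen-go mapping of the deleted ibft_operator.proto messages.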
-type UnsafeIbftOperatorServer interface { - mustEmbedUnimplementedIbftOperatorServer() -} - -func RegisterIbftOperatorServer(s grpc.ServiceRegistrar, srv IbftOperatorServer) { - s.RegisterService(&IbftOperator_ServiceDesc, srv) -} - -func _IbftOperator_GetSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SnapshotReq) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IbftOperatorServer).GetSnapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v1.IbftOperator/GetSnapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IbftOperatorServer).GetSnapshot(ctx, req.(*SnapshotReq)) - } - return interceptor(ctx, in, info, handler) -} - -func _IbftOperator_Propose_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Candidate) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IbftOperatorServer).Propose(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v1.IbftOperator/Propose", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IbftOperatorServer).Propose(ctx, req.(*Candidate)) - } - return interceptor(ctx, in, info, handler) -} - -func _IbftOperator_Candidates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IbftOperatorServer).Candidates(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v1.IbftOperator/Candidates", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IbftOperatorServer).Candidates(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _IbftOperator_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IbftOperatorServer).Status(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/v1.IbftOperator/Status", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IbftOperatorServer).Status(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -// IbftOperator_ServiceDesc is the grpc.ServiceDesc for IbftOperator service. 
-// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var IbftOperator_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "v1.IbftOperator", - HandlerType: (*IbftOperatorServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetSnapshot", - Handler: _IbftOperator_GetSnapshot_Handler, - }, - { - MethodName: "Propose", - Handler: _IbftOperator_Propose_Handler, - }, - { - MethodName: "Candidates", - Handler: _IbftOperator_Candidates_Handler, - }, - { - MethodName: "Status", - Handler: _IbftOperator_Status_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "consensus/ibft/proto/ibft_operator.proto", -} diff --git a/consensus/ibft/sign_test.go b/consensus/ibft/sign_test.go deleted file mode 100644 index b3924bd937..0000000000 --- a/consensus/ibft/sign_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package ibft - -import ( - "testing" - - "github.com/0xPolygon/polygon-edge/consensus/ibft/signer" - "github.com/0xPolygon/polygon-edge/types" - "github.com/stretchr/testify/assert" -) - -func TestSign_Sealer(t *testing.T) { - t.Parallel() - - pool := newTesterAccountPool(t) - pool.add("A") - - correctValset := pool.ValidatorSet() - - h := &types.Header{} - - signerA := signer.NewSigner( - signer.NewECDSAKeyManagerFromKey(pool.get("A").priv), - signer.NewECDSAKeyManagerFromKey(pool.get("A").priv), - ) - - signer.UseIstanbulHeaderHashInTest(t, signerA) - - signerA.InitIBFTExtra(h, correctValset, nil) - - h = h.ComputeHash() - - // non-validator address - pool.add("X") - - signerX := signer.NewSigner( - signer.NewECDSAKeyManagerFromKey(pool.get("X").priv), - signer.NewECDSAKeyManagerFromKey(pool.get("A").priv), - ) - - badSealedBlock, _ := signerX.WriteProposerSeal(h) - assert.Error(t, verifyProposerSeal(badSealedBlock, signerA, correctValset)) - - // seal the block with a validator - goodSealedBlock, _ := signerA.WriteProposerSeal(h) - assert.NoError(t, verifyProposerSeal(goodSealedBlock, signerA, correctValset)) -} - -func TestSign_CommittedSeals(t *testing.T) { - t.Parallel() - - pool := newTesterAccountPool(t) - pool.add("A", "B", "C", "D", "E") - - var ( - h = &types.Header{} - err error - - roundNumber uint64 = 1 - ) - - correctValSet := pool.ValidatorSet() - - signerA := signer.NewSigner( - signer.NewECDSAKeyManagerFromKey(pool.get("A").priv), - signer.NewECDSAKeyManagerFromKey(pool.get("A").priv), - ) - - signerA.InitIBFTExtra(h, correctValSet, nil) - - h.Hash, err = signerA.CalculateHeaderHash(h) - if err != nil { - t.Fatalf("Unable to calculate hash, %v", err) - } - - // non-validator address - pool.add("X") - - buildCommittedSeal := func(names []string) error { - seals := map[types.Address][]byte{} - - for _, name := range names { - acc := pool.get(name) - - signer := signer.NewSigner( - signer.NewECDSAKeyManagerFromKey( - acc.priv, - ), - signer.NewECDSAKeyManagerFromKey( - acc.priv, - ), - ) - - seal, err := signer.CreateCommittedSeal(h.Hash.Bytes()) - - assert.NoError(t, err) - - seals[acc.Address()] = seal - } - - sealed, err := signerA.WriteCommittedSeals(h, roundNumber, seals) - assert.NoError(t, err) - - committedSeal, err := signerA.GetIBFTExtra(sealed) - assert.NoError(t, err) - - return signerA.VerifyCommittedSeals( - sealed.Hash, - committedSeal.CommittedSeals, - correctValSet, - OptimalQuorumSize(correctValSet), - ) - } - - // Correct - assert.NoError(t, buildCommittedSeal([]string{"A", "B", "C", "D"})) - - // Failed - Non validator signature - assert.Error(t, buildCommittedSeal([]string{"A", "X"})) 
- - // Failed - Not enough signatures - assert.Error(t, buildCommittedSeal([]string{"A"})) -} diff --git a/consensus/ibft/signer/bls.go b/consensus/ibft/signer/bls.go deleted file mode 100644 index 39cf7b07c5..0000000000 --- a/consensus/ibft/signer/bls.go +++ /dev/null @@ -1,313 +0,0 @@ -package signer - -import ( - "crypto/ecdsa" - "fmt" - "math/big" - - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/secrets" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/coinbase/kryptology/pkg/signatures/bls/bls_sig" - "github.com/umbracle/fastrlp" -) - -// BLSKeyManager is a module that holds ECDSA and BLS keys -// and implements methods of signing by these keys -type BLSKeyManager struct { - ecdsaKey *ecdsa.PrivateKey - blsKey *bls_sig.SecretKey - address types.Address -} - -// NewBLSKeyManager initializes BLSKeyManager by the ECDSA key and BLS key which are loaded from SecretsManager -func NewBLSKeyManager(manager secrets.SecretsManager) (KeyManager, error) { - ecdsaKey, err := getOrCreateECDSAKey(manager) - if err != nil { - return nil, err - } - - blsKey, err := getOrCreateBLSKey(manager) - if err != nil { - return nil, err - } - - return NewBLSKeyManagerFromKeys(ecdsaKey, blsKey), nil -} - -// NewBLSKeyManagerFromKeys initializes BLSKeyManager from the given ECDSA and BLS keys -func NewBLSKeyManagerFromKeys(ecdsaKey *ecdsa.PrivateKey, blsKey *bls_sig.SecretKey) KeyManager { - return &BLSKeyManager{ - ecdsaKey: ecdsaKey, - blsKey: blsKey, - address: crypto.PubKeyToAddress(&ecdsaKey.PublicKey), - } -} - -// Type returns the validator type KeyManager supports -func (s *BLSKeyManager) Type() validators.ValidatorType { - return validators.BLSValidatorType -} - -// Address returns the address of KeyManager -func (s *BLSKeyManager) Address() types.Address { - return s.address -} - -// NewEmptyValidators returns empty validator collection BLSKeyManager uses -func (s *BLSKeyManager) NewEmptyValidators() validators.Validators { - return validators.NewBLSValidatorSet() -} - -// NewEmptyCommittedSeals returns empty CommittedSeals BLSKeyManager uses -func (s *BLSKeyManager) NewEmptyCommittedSeals() Seals { - return &AggregatedSeal{} -} - -func (s *BLSKeyManager) SignProposerSeal(data []byte) ([]byte, error) { - return crypto.Sign(s.ecdsaKey, data) -} - -func (s *BLSKeyManager) SignCommittedSeal(data []byte) ([]byte, error) { - return crypto.SignByBLS(s.blsKey, data) -} - -func (s *BLSKeyManager) VerifyCommittedSeal( - set validators.Validators, - addr types.Address, - rawSignature []byte, - hash []byte, -) error { - if set.Type() != s.Type() { - return ErrInvalidValidators - } - - validatorIndex := set.Index(addr) - if validatorIndex == -1 { - return ErrValidatorNotFound - } - - validator, ok := set.At(uint64(validatorIndex)).(*validators.BLSValidator) - if !ok { - return ErrInvalidValidators - } - - if err := crypto.VerifyBLSSignatureFromBytes( - validator.BLSPublicKey, - rawSignature, - hash, - ); err != nil { - return err - } - - return nil -} - -func (s *BLSKeyManager) GenerateCommittedSeals( - sealMap map[types.Address][]byte, - set validators.Validators, -) (Seals, error) { - if set.Type() != s.Type() { - return nil, ErrInvalidValidators - } - - blsSignatures, bitMap, err := getBLSSignatures(sealMap, set) - if err != nil { - return nil, err - } - - multiSignature, err := bls_sig.NewSigPop().AggregateSignatures(blsSignatures...) 
- if err != nil { - return nil, err - } - - multiSignatureBytes, err := multiSignature.MarshalBinary() - if err != nil { - return nil, err - } - - return &AggregatedSeal{ - Bitmap: bitMap, - Signature: multiSignatureBytes, - }, nil -} - -func (s *BLSKeyManager) VerifyCommittedSeals( - rawCommittedSeal Seals, - message []byte, - vals validators.Validators, -) (int, error) { - committedSeal, ok := rawCommittedSeal.(*AggregatedSeal) - if !ok { - return 0, ErrInvalidCommittedSealType - } - - if vals.Type() != s.Type() { - return 0, ErrInvalidValidators - } - - return verifyBLSCommittedSealsImpl(committedSeal, message, vals) -} - -func (s *BLSKeyManager) SignIBFTMessage(msg []byte) ([]byte, error) { - return crypto.Sign(s.ecdsaKey, msg) -} - -func (s *BLSKeyManager) Ecrecover(sig, digest []byte) (types.Address, error) { - return ecrecover(sig, digest) -} - -type AggregatedSeal struct { - Bitmap *big.Int - Signature []byte -} - -func (s *AggregatedSeal) Num() int { - return s.Bitmap.BitLen() -} - -func (s *AggregatedSeal) MarshalRLPWith(ar *fastrlp.Arena) *fastrlp.Value { - x := ar.NewArray() - - if s.Bitmap == nil { - x.Set(ar.NewNull()) - } else { - x.Set(ar.NewBytes(s.Bitmap.Bytes())) - } - - if s.Signature == nil { - x.Set(ar.NewNull()) - } else { - x.Set(ar.NewCopyBytes(s.Signature)) - } - - return x -} - -func (s *AggregatedSeal) UnmarshalRLPFrom(p *fastrlp.Parser, v *fastrlp.Value) error { - vals, err := v.GetElems() - - if err != nil { - return fmt.Errorf("mismatch of RLP type for CommittedSeal, expected list but found %s", v.Type()) - } - - if len(vals) == 0 { - return nil - } - - if len(vals) < 2 { - return fmt.Errorf("mismatch of RLP type for AggregatedCommittedSeal") - } - - var rawBitMap []byte - - rawBitMap, err = vals[0].GetBytes(rawBitMap) - if err != nil { - return err - } - - s.Bitmap = new(big.Int).SetBytes(rawBitMap) - - if s.Signature, err = vals[1].GetBytes(s.Signature); err != nil { - return err - } - - return nil -} - -func getBLSSignatures( - sealMap map[types.Address][]byte, - validators validators.Validators, -) ([]*bls_sig.Signature, *big.Int, error) { - blsSignatures := make([]*bls_sig.Signature, 0, len(sealMap)) - bitMap := new(big.Int) - - for addr, seal := range sealMap { - index := validators.Index(addr) - if index == -1 { - return nil, nil, ErrNonValidatorCommittedSeal - } - - bsig := &bls_sig.Signature{} - if err := bsig.UnmarshalBinary(seal); err != nil { - return nil, nil, err - } - - bitMap = bitMap.SetBit(bitMap, int(index), 1) - - blsSignatures = append(blsSignatures, bsig) - } - - return blsSignatures, bitMap, nil -} - -func createAggregatedBLSPubKeys( - vals validators.Validators, - bitMap *big.Int, -) (*bls_sig.MultiPublicKey, int, error) { - pubkeys := make([]*bls_sig.PublicKey, 0, vals.Len()) - - for idx := 0; idx < vals.Len(); idx++ { - if bitMap.Bit(idx) == 0 { - continue - } - - validator := vals.At(uint64(idx)) - if validator == nil { - return nil, 0, ErrValidatorNotFound - } - - blsValidator, ok := validator.(*validators.BLSValidator) - if !ok { - return nil, 0, ErrInvalidValidator - } - - pubKey, err := crypto.UnmarshalBLSPublicKey(blsValidator.BLSPublicKey) - if err != nil { - return nil, 0, err - } - - pubkeys = append(pubkeys, pubKey) - } - - key, err := bls_sig.NewSigPop().AggregatePublicKeys(pubkeys...) 
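Stepping back from the helper bodies above: the deleted BLS seal logic is an aggregate-then-verify round trip over github.com/coinbase/kryptology's bls_sig package. The sketch below reuses the same AggregateSignatures, AggregatePublicKeys and VerifyMultiSignature calls that appear in this file; Keygen and Sign are assumed kryptology helpers used here only to produce test material:

package main

import (
	"fmt"
	"log"

	"github.com/coinbase/kryptology/pkg/signatures/bls/bls_sig"
)

func main() {
	scheme := bls_sig.NewSigPop()
	msg := []byte("committed seal digest")

	var (
		sigs []*bls_sig.Signature
		pubs []*bls_sig.PublicKey
	)

	// Three "validators" each sign the same digest.
	for i := 0; i < 3; i++ {
		pub, priv, err := scheme.Keygen() // assumed kryptology helper
		if err != nil {
			log.Fatal(err)
		}

		sig, err := scheme.Sign(priv, msg) // assumed kryptology helper
		if err != nil {
			log.Fatal(err)
		}

		pubs = append(pubs, pub)
		sigs = append(sigs, sig)
	}

	// Aggregate the individual signatures and public keys, mirroring
	// GenerateCommittedSeals and createAggregatedBLSPubKeys above.
	multiSig, err := scheme.AggregateSignatures(sigs...)
	if err != nil {
		log.Fatal(err)
	}

	multiPub, err := scheme.AggregatePublicKeys(pubs...)
	if err != nil {
		log.Fatal(err)
	}

	// Verification succeeds only if every aggregated key contributed a
	// signature over exactly this message.
	ok, err := scheme.VerifyMultiSignature(multiPub, msg, multiSig)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("aggregated seal valid:", ok)
}

Aggregation keeps the committed-seal payload down to a single signature plus a signer bitmap, which is why the AggregatedSeal type removed in this file carries only a Bitmap and a Signature.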
- if err != nil { - return nil, 0, err - } - - return key, len(pubkeys), nil -} - -func verifyBLSCommittedSealsImpl( - committedSeal *AggregatedSeal, - msg []byte, - vals validators.Validators, -) (int, error) { - if len(committedSeal.Signature) == 0 || - committedSeal.Bitmap == nil || - committedSeal.Bitmap.BitLen() == 0 { - return 0, ErrEmptyCommittedSeals - } - - aggregatedPubKey, numKeys, err := createAggregatedBLSPubKeys(vals, committedSeal.Bitmap) - if err != nil { - return 0, fmt.Errorf("failed to aggregate BLS Public Keys: %w", err) - } - - signature := &bls_sig.MultiSignature{} - if err := signature.UnmarshalBinary(committedSeal.Signature); err != nil { - return 0, err - } - - ok, err := bls_sig.NewSigPop().VerifyMultiSignature(aggregatedPubKey, msg, signature) - if err != nil { - return 0, err - } - - if !ok { - return 0, ErrInvalidSignature - } - - return numKeys, nil -} diff --git a/consensus/ibft/signer/bls_test.go b/consensus/ibft/signer/bls_test.go deleted file mode 100644 index 836d2580db..0000000000 --- a/consensus/ibft/signer/bls_test.go +++ /dev/null @@ -1,1073 +0,0 @@ -package signer - -import ( - "crypto/ecdsa" - "errors" - "math/big" - "testing" - - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/helper/hex" - testHelper "github.com/0xPolygon/polygon-edge/helper/tests" - "github.com/0xPolygon/polygon-edge/secrets" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/coinbase/kryptology/pkg/signatures/bls/bls_sig" - "github.com/stretchr/testify/assert" -) - -func newTestBLSKeyManager(t *testing.T) (KeyManager, *ecdsa.PrivateKey, *bls_sig.SecretKey) { - t.Helper() - - testECDSAKey, _ := newTestECDSAKey(t) - testBLSKey, _ := newTestBLSKey(t) - - return NewBLSKeyManagerFromKeys(testECDSAKey, testBLSKey), testECDSAKey, testBLSKey -} - -func testAggregateBLSSignatureBytes(t *testing.T, sigs ...[]byte) []byte { - t.Helper() - - blsSignatures := make([]*bls_sig.Signature, len(sigs)) - - for idx, sigBytes := range sigs { - blsSig, err := crypto.UnmarshalBLSSignature(sigBytes) - assert.NoError(t, err) - - blsSignatures[idx] = blsSig - } - - aggregatedBLSSig, err := bls_sig.NewSigPop().AggregateSignatures(blsSignatures...) - assert.NoError(t, err) - - aggregatedBLSSigBytes, err := aggregatedBLSSig.MarshalBinary() - assert.NoError(t, err) - - return aggregatedBLSSigBytes -} - -func testBLSKeyManagerToBLSValidator(t *testing.T, keyManager KeyManager) *validators.BLSValidator { - t.Helper() - - blsKeyManager, ok := keyManager.(*BLSKeyManager) - assert.True(t, ok) - - pubkeyBytes, err := crypto.BLSSecretKeyToPubkeyBytes(blsKeyManager.blsKey) - assert.NoError(t, err) - - return validators.NewBLSValidator( - blsKeyManager.Address(), - pubkeyBytes, - ) -} - -func testCreateAggregatedSignature(t *testing.T, msg []byte, keyManagers ...KeyManager) []byte { - t.Helper() - - signatures := make([][]byte, len(keyManagers)) - - for idx, km := range keyManagers { - sig, err := km.SignCommittedSeal(msg) - assert.NoError(t, err) - - signatures[idx] = sig - } - - return testAggregateBLSSignatureBytes(t, signatures...) 
-} - -// assert equality of marshalled aggregated BLS Public Keys -// because the field values in MultiPublicKey may be different for the same keys -func assertEqualAggregatedBLSPublicKeys(t *testing.T, apk1, apk2 *bls_sig.MultiPublicKey) { - t.Helper() - - apkBytes1, err := apk1.MarshalBinary() - assert.NoError(t, err) - - apkBytes2, err := apk2.MarshalBinary() - assert.NoError(t, err) - - assert.Equal(t, apkBytes1, apkBytes2) -} - -func TestNewBLSKeyManager(t *testing.T) { - t.Parallel() - - testECDSAKey, testECDSAKeyEncoded := newTestECDSAKey(t) - testBLSKey, testBLSKeyEncoded := newTestBLSKey(t) - - testSecretName := func(name string) { - t.Helper() - - // make sure that the correct key is given - assert.Contains( - t, - []string{secrets.ValidatorKey, secrets.ValidatorBLSKey}, - name, - ) - } - - //lint:ignore dupl - tests := []struct { - name string - mockSecretManager *MockSecretManager - expectedResult KeyManager - expectedErr error - }{ - { - name: "should initialize BLSKeyManager from the loaded ECDSA and BLS key", - mockSecretManager: &MockSecretManager{ - HasSecretFn: func(name string) bool { - testSecretName(name) - - return true - }, - GetSecretFn: func(name string) ([]byte, error) { - testSecretName(name) - - switch name { - case secrets.ValidatorKey: - return testECDSAKeyEncoded, nil - case secrets.ValidatorBLSKey: - return testBLSKeyEncoded, nil - } - - return nil, nil - }, - }, - expectedResult: &BLSKeyManager{ - ecdsaKey: testECDSAKey, - blsKey: testBLSKey, - address: crypto.PubKeyToAddress(&testECDSAKey.PublicKey), - }, - expectedErr: nil, - }, - { - name: "should return error if getOrCreateECDSAKey returns error", - mockSecretManager: &MockSecretManager{ - HasSecretFn: func(name string) bool { - testSecretName(name) - - return true - }, - GetSecretFn: func(name string) ([]byte, error) { - testSecretName(name) - - switch name { - case secrets.ValidatorKey: - // return error instead of key - return nil, errTest - case secrets.ValidatorBLSKey: - return testBLSKeyEncoded, nil - } - - return nil, nil - }, - }, - expectedResult: nil, - expectedErr: errTest, - }, - { - name: "should return error if getOrCreateBLSKey returns error", - mockSecretManager: &MockSecretManager{ - HasSecretFn: func(name string) bool { - testSecretName(name) - - return true - }, - GetSecretFn: func(name string) ([]byte, error) { - testSecretName(name) - - switch name { - case secrets.ValidatorKey: - return testECDSAKeyEncoded, nil - case secrets.ValidatorBLSKey: - // return error instead of key - return nil, errTest - } - - return nil, nil - }, - }, - expectedResult: nil, - expectedErr: errTest, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := NewBLSKeyManager(test.mockSecretManager) - - assert.Equal(t, test.expectedResult, res) - assert.ErrorIs(t, test.expectedErr, err) - }) - } -} - -func TestNewECDSAKeyManagerFromKeys(t *testing.T) { - t.Parallel() - - testKey, _ := newTestECDSAKey(t) - testBLSKey, _ := newTestBLSKey(t) - - assert.Equal( - t, - &BLSKeyManager{ - ecdsaKey: testKey, - blsKey: testBLSKey, - address: crypto.PubKeyToAddress(&testKey.PublicKey), - }, - NewBLSKeyManagerFromKeys(testKey, testBLSKey), - ) -} - -func TestBLSKeyManagerType(t *testing.T) { - t.Parallel() - - blsKeyManager, _, _ := newTestBLSKeyManager(t) - - assert.Equal( - t, - validators.BLSValidatorType, - blsKeyManager.Type(), - ) -} - -func TestBLSKeyManagerAddress(t *testing.T) { - t.Parallel() - - ecdsaKey, _ := newTestECDSAKey(t) - blsKey, 
_ := newTestBLSKey(t) - blsKeyManager := NewBLSKeyManagerFromKeys(ecdsaKey, blsKey) - - assert.Equal( - t, - crypto.PubKeyToAddress(&ecdsaKey.PublicKey), - blsKeyManager.Address(), - ) -} - -func TestBLSKeyManagerNewEmptyValidators(t *testing.T) { - t.Parallel() - - blsKeyManager, _, _ := newTestBLSKeyManager(t) - - assert.Equal( - t, - validators.NewBLSValidatorSet(), - blsKeyManager.NewEmptyValidators(), - ) -} - -func TestBLSKeyManagerNewEmptyCommittedSeals(t *testing.T) { - t.Parallel() - - blsKeyManager, _, _ := newTestBLSKeyManager(t) - - assert.Equal( - t, - &AggregatedSeal{}, - blsKeyManager.NewEmptyCommittedSeals(), - ) -} - -func TestBLSKeyManagerSignProposerSeal(t *testing.T) { - t.Parallel() - - blsKeyManager, _, _ := newTestBLSKeyManager(t) - msg := crypto.Keccak256( - hex.MustDecodeHex(testHeaderHashHex), - ) - - proposerSeal, err := blsKeyManager.SignProposerSeal(msg) - assert.NoError(t, err) - - recoveredAddress, err := ecrecover(proposerSeal, msg) - assert.NoError(t, err) - - assert.Equal( - t, - blsKeyManager.Address(), - recoveredAddress, - ) -} - -func TestBLSKeyManagerSignCommittedSeal(t *testing.T) { - t.Parallel() - - ecdsaKeyManager, _, blsKey := newTestBLSKeyManager(t) - blsPubKey, err := blsKey.GetPublicKey() - assert.NoError(t, err) - - msg := crypto.Keccak256( - wrapCommitHash( - hex.MustDecodeHex(testHeaderHashHex), - ), - ) - - proposerSealBytes, err := ecdsaKeyManager.SignCommittedSeal(msg) - assert.NoError(t, err) - - proposerSeal, err := crypto.UnmarshalBLSSignature(proposerSealBytes) - assert.NoError(t, err) - - assert.NoError( - t, - crypto.VerifyBLSSignature( - blsPubKey, - proposerSeal, - msg, - ), - ) -} - -func TestBLSKeyManagerVerifyCommittedSeal(t *testing.T) { - t.Parallel() - - blsKeyManager1, _, blsSecretKey1 := newTestBLSKeyManager(t) - blsKeyManager2, _, _ := newTestBLSKeyManager(t) - - msg := crypto.Keccak256( - wrapCommitHash( - hex.MustDecodeHex(testHeaderHashHex), - ), - ) - - correctSignature, err := blsKeyManager1.SignCommittedSeal(msg) - assert.NoError(t, err) - - wrongSignature, err := blsKeyManager2.SignCommittedSeal(msg) - assert.NoError(t, err) - - blsPublicKey1, err := blsSecretKey1.GetPublicKey() - assert.NoError(t, err) - - blsPublicKeyBytes, err := blsPublicKey1.MarshalBinary() - assert.NoError(t, err) - - tests := []struct { - name string - validators validators.Validators - address types.Address - signature []byte - message []byte - expectedErr error - }{ - { - name: "should return ErrInvalidValidators if validators is wrong type", - validators: validators.NewECDSAValidatorSet(), - address: blsKeyManager1.Address(), - signature: []byte{}, - message: []byte{}, - expectedErr: ErrInvalidValidators, - }, - { - name: "should return ErrInvalidSignature if the address is not in the validators", - validators: validators.NewBLSValidatorSet( - testBLSKeyManagerToBLSValidator(t, blsKeyManager2), - ), - address: blsKeyManager1.Address(), - signature: []byte{}, - message: []byte{}, - expectedErr: ErrValidatorNotFound, - }, - { - name: "should return crypto.ErrInvalidBLSSignature if it's wrong signature", - validators: validators.NewBLSValidatorSet( - validators.NewBLSValidator( - blsKeyManager1.Address(), - blsPublicKeyBytes, - ), - ), - address: blsKeyManager1.Address(), - signature: wrongSignature, - message: msg, - expectedErr: crypto.ErrInvalidBLSSignature, - }, - { - name: "should return nil if it's correct signature", - validators: validators.NewBLSValidatorSet( - validators.NewBLSValidator( - blsKeyManager1.Address(), - 
blsPublicKeyBytes, - ), - ), - address: blsKeyManager1.Address(), - signature: correctSignature, - message: msg, - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.ErrorIs( - t, - test.expectedErr, - blsKeyManager1.VerifyCommittedSeal( - test.validators, - test.address, - test.signature, - test.message, - ), - ) - }) - } -} - -func TestBLSKeyManagerGenerateCommittedSeals(t *testing.T) { - t.Parallel() - - blsKeyManager1, _, _ := newTestBLSKeyManager(t) - - msg := crypto.Keccak256( - wrapCommitHash( - hex.MustDecodeHex(testHeaderHashHex), - ), - ) - - correctCommittedSeal, err := blsKeyManager1.SignCommittedSeal(msg) - assert.NoError(t, err) - - aggregatedBLSSigBytes := testCreateAggregatedSignature( - t, - msg, - blsKeyManager1, - ) - - tests := []struct { - name string - sealMap map[types.Address][]byte - validators validators.Validators - expectedRes Seals - expectedErr error - }{ - { - name: "should return ErrInvalidValidators if rawValidators is not *BLSValidators", - sealMap: nil, - validators: validators.NewECDSAValidatorSet(), - expectedRes: nil, - expectedErr: ErrInvalidValidators, - }, - { - name: "should return error if getBLSSignatures returns error", - sealMap: map[types.Address][]byte{ - blsKeyManager1.Address(): correctCommittedSeal, - }, - validators: validators.NewBLSValidatorSet(), - expectedRes: nil, - expectedErr: ErrNonValidatorCommittedSeal, - }, - { - name: "should return error if sealMap is empty", - sealMap: map[types.Address][]byte{}, - validators: validators.NewBLSValidatorSet(), - expectedRes: nil, - expectedErr: errors.New("at least one signature is required"), - }, - { - name: "should return AggregatedSeal if it's successful", - sealMap: map[types.Address][]byte{ - blsKeyManager1.Address(): correctCommittedSeal, - }, - validators: validators.NewBLSValidatorSet( - testBLSKeyManagerToBLSValidator( - t, - blsKeyManager1, - ), - ), - expectedRes: &AggregatedSeal{ - Bitmap: big.NewInt(0).SetBit(new(big.Int), 0, 1), - Signature: aggregatedBLSSigBytes, - }, - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := blsKeyManager1.GenerateCommittedSeals( - test.sealMap, - test.validators, - ) - - assert.Equal(t, test.expectedRes, res) - testHelper.AssertErrorMessageContains(t, test.expectedErr, err) - }) - } -} - -func TestBLSKeyManagerVerifyCommittedSeals(t *testing.T) { - t.Parallel() - - blsKeyManager1, _, _ := newTestBLSKeyManager(t) - - msg := crypto.Keccak256( - wrapCommitHash( - hex.MustDecodeHex(testHeaderHashHex), - ), - ) - - aggregatedBLSSigBytes := testCreateAggregatedSignature( - t, - msg, - blsKeyManager1, - ) - - tests := []struct { - name string - rawCommittedSeals Seals - hash []byte - validators validators.Validators - expectedRes int - expectedErr error - }{ - { - name: "should return ErrInvalidCommittedSealType if rawCommittedSeal is not *AggregatedSeal", - rawCommittedSeals: &SerializedSeal{}, - hash: nil, - validators: nil, - expectedRes: 0, - expectedErr: ErrInvalidCommittedSealType, - }, - { - name: "should return ErrInvalidValidators if rawValidators is not *BLSValidators", - rawCommittedSeals: &AggregatedSeal{ - Bitmap: big.NewInt(0).SetBit(new(big.Int), 0, 1), - Signature: aggregatedBLSSigBytes, - }, - validators: validators.NewECDSAValidatorSet(), - expectedRes: 0, - expectedErr: ErrInvalidValidators, - }, - { - name: "should return size of AggregatedSeal if 
it's successful", - rawCommittedSeals: &AggregatedSeal{ - Bitmap: big.NewInt(0).SetBit(new(big.Int), 0, 1), - Signature: aggregatedBLSSigBytes, - }, - validators: validators.NewBLSValidatorSet( - testBLSKeyManagerToBLSValidator( - t, - blsKeyManager1, - ), - ), - expectedRes: 1, - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := blsKeyManager1.VerifyCommittedSeals( - test.rawCommittedSeals, - msg, - test.validators, - ) - - assert.Equal(t, test.expectedRes, res) - testHelper.AssertErrorMessageContains(t, test.expectedErr, err) - }) - } -} - -func TestBLSKeyManagerSignIBFTMessageAndEcrecover(t *testing.T) { - t.Parallel() - - blsKeyManager, _, _ := newTestBLSKeyManager(t) - msg := crypto.Keccak256([]byte("message")) - - proposerSeal, err := blsKeyManager.SignIBFTMessage(msg) - assert.NoError(t, err) - - recoveredAddress, err := blsKeyManager.Ecrecover(proposerSeal, msg) - assert.NoError(t, err) - - assert.Equal( - t, - blsKeyManager.Address(), - recoveredAddress, - ) -} - -func Test_getBLSSignatures(t *testing.T) { - t.Parallel() - - validatorKeyManager, _, _ := newTestBLSKeyManager(t) - nonValidatorKeyManager, _, _ := newTestBLSKeyManager(t) - - msg := crypto.Keccak256( - wrapCommitHash( - hex.MustDecodeHex(testHeaderHashHex), - ), - ) - - validatorCommittedSeal, err := validatorKeyManager.SignCommittedSeal(msg) - assert.NoError(t, err) - - nonValidatorCommittedSeal, err := nonValidatorKeyManager.SignCommittedSeal(msg) - assert.NoError(t, err) - - wrongCommittedSeal := []byte("fake committed seal") - - validatorSignature, err := crypto.UnmarshalBLSSignature(validatorCommittedSeal) - assert.NoError(t, err) - - tests := []struct { - name string - sealMap map[types.Address][]byte - validators validators.Validators - expectedSignatures []*bls_sig.Signature - expectedBitMap *big.Int - expectedErr error - }{ - { - name: "should return ErrNonValidatorCommittedSeal if sealMap has committed seal signed by non validator", - sealMap: map[types.Address][]byte{ - nonValidatorKeyManager.Address(): nonValidatorCommittedSeal, - }, - validators: validators.NewBLSValidatorSet( - testBLSKeyManagerToBLSValidator( - t, - validatorKeyManager, - ), - ), - expectedSignatures: nil, - expectedBitMap: nil, - expectedErr: ErrNonValidatorCommittedSeal, - }, - { - name: "should return error if unmarshalling committed seal is failed", - sealMap: map[types.Address][]byte{ - validatorKeyManager.Address(): wrongCommittedSeal, - }, - validators: validators.NewBLSValidatorSet( - testBLSKeyManagerToBLSValidator( - t, - validatorKeyManager, - ), - ), - expectedSignatures: nil, - expectedBitMap: nil, - expectedErr: errors.New("signature must be 96 bytes"), - }, - { - name: "should return signatures and bitmap if all committed seals are right and signed by validators", - sealMap: map[types.Address][]byte{ - validatorKeyManager.Address(): validatorCommittedSeal, - }, - validators: validators.NewBLSValidatorSet( - testBLSKeyManagerToBLSValidator( - t, - validatorKeyManager, - ), - ), - expectedSignatures: []*bls_sig.Signature{ - validatorSignature, - }, - expectedBitMap: new(big.Int).SetBit(new(big.Int), 0, 1), - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - sigs, bitmap, err := getBLSSignatures( - test.sealMap, - test.validators, - ) - - assert.ElementsMatch( - t, - test.expectedSignatures, - sigs, - ) - assert.Equal(t, test.expectedBitMap, 
bitmap) - testHelper.AssertErrorMessageContains(t, test.expectedErr, err) - }) - } - - t.Run("multiple committed seals by validators", func(t *testing.T) { - t.Parallel() - - // which validator signed committed seals - signerFlags := []bool{ - false, - true, - false, - true, - true, - } - - msg := crypto.Keccak256( - wrapCommitHash( - hex.MustDecodeHex(testHeaderHashHex), - ), - ) - - sealMap := make(map[types.Address][]byte) - validators := validators.NewBLSValidatorSet() - - expectedSignatures := make([]*bls_sig.Signature, 0, len(signerFlags)) - expectedBitMap := new(big.Int) - - for idx, signed := range signerFlags { - blsKeyManager, _, _ := newTestBLSKeyManager(t) - - // add to validators - assert.NoError( - t, - validators.Add( - testBLSKeyManagerToBLSValidator( - t, - blsKeyManager, - ), - ), - ) - - if !signed { - continue - } - - committedSeal, err := blsKeyManager.SignCommittedSeal(msg) - assert.NoError(t, err) - - // set committed seals to sealMap - sealMap[blsKeyManager.Address()] = committedSeal - - // build expected signatures - signature, err := crypto.UnmarshalBLSSignature(committedSeal) - assert.NoError(t, err) - - expectedSignatures = append(expectedSignatures, signature) - - // build expected bit map - expectedBitMap = expectedBitMap.SetBit(expectedBitMap, idx, 1) - } - - signatures, bitmap, err := getBLSSignatures( - sealMap, - validators, - ) - - // the order might be different due to scanning sealMap - assert.ElementsMatch( - t, - expectedSignatures, - signatures, - ) - assert.Equal(t, expectedBitMap, bitmap) - assert.NoError(t, err) - }) -} - -func Test_createAggregatedBLSPubKeys(t *testing.T) { - t.Parallel() - - t.Run("multiple validators", func(t *testing.T) { - t.Parallel() - - // which validator signed committed seals - signerFlags := []bool{ - false, - true, - false, - true, - true, - } - - validators := validators.NewBLSValidatorSet() - bitMap := new(big.Int) - - expectedBLSPublicKeys := []*bls_sig.PublicKey{} - expectedNumSigners := 0 - - for idx, signed := range signerFlags { - blsKeyManager, _, blsSecretKey := newTestBLSKeyManager(t) - - // add to validators - assert.NoError( - t, - validators.Add( - testBLSKeyManagerToBLSValidator( - t, - blsKeyManager, - ), - ), - ) - - if !signed { - continue - } - - // set bit in bitmap - bitMap = bitMap.SetBit(bitMap, idx, 1) - - blsPubKey, err := blsSecretKey.GetPublicKey() - assert.NoError(t, err) - - expectedBLSPublicKeys = append(expectedBLSPublicKeys, blsPubKey) - expectedNumSigners++ - } - - expectedAggregatedBLSPublicKeys, err := bls_sig.NewSigPop().AggregatePublicKeys( - expectedBLSPublicKeys..., - ) - assert.NoError(t, err) - - aggregatedPubKey, num, err := createAggregatedBLSPubKeys( - validators, - bitMap, - ) - - assert.NoError(t, err) - assert.Equal(t, expectedNumSigners, num) - - assertEqualAggregatedBLSPublicKeys(t, expectedAggregatedBLSPublicKeys, aggregatedPubKey) - }) - - t.Run("should return error if bitMap is empty", func(t *testing.T) { - t.Parallel() - - aggrecatedPubKeys, num, err := createAggregatedBLSPubKeys( - validators.NewBLSValidatorSet(), - new(big.Int), - ) - - assert.Nil(t, aggrecatedPubKeys) - assert.Zero(t, num) - assert.ErrorContains(t, err, "at least one public key is required") - }) - - t.Run("should return error if public key is wrong", func(t *testing.T) { - t.Parallel() - - aggrecatedPubKeys, num, err := createAggregatedBLSPubKeys( - validators.NewBLSValidatorSet( - validators.NewBLSValidator( - types.StringToAddress("0"), - []byte("fake"), - ), - ), - 
new(big.Int).SetBit(new(big.Int), 0, 1), - ) - - assert.Nil(t, aggrecatedPubKeys) - assert.Zero(t, num) - assert.ErrorContains(t, err, "public key must be 48 bytes") - }) -} - -func Test_verifyBLSCommittedSealsImpl(t *testing.T) { - t.Parallel() - - validatorKeyManager1, _, _ := newTestBLSKeyManager(t) - validatorKeyManager2, _, _ := newTestBLSKeyManager(t) - validatorKeyManager3, _, _ := newTestBLSKeyManager(t) - validatorKeyManager4, _, _ := newTestBLSKeyManager(t) - - msg := crypto.Keccak256( - wrapCommitHash( - hex.MustDecodeHex(testHeaderHashHex), - ), - ) - - correctAggregatedSig := testCreateAggregatedSignature( - t, - msg, - validatorKeyManager1, - validatorKeyManager2, - ) - - wrongAggregatedSig := testCreateAggregatedSignature( - t, - []byte("fake"), - validatorKeyManager1, - validatorKeyManager2, - ) - - tests := []struct { - name string - committedSeal *AggregatedSeal - msg []byte - validators validators.Validators - expectedRes int - expectedErr error - }{ - { - name: "should return ErrEmptyCommittedSeals if committedSeal.Signature is empty", - committedSeal: &AggregatedSeal{ - Signature: []byte{}, - Bitmap: new(big.Int).SetBit(new(big.Int), 0, 1), - }, - expectedRes: 0, - expectedErr: ErrEmptyCommittedSeals, - }, - { - name: "should return ErrEmptyCommittedSeals if committedSeal.BitMap is nil", - committedSeal: &AggregatedSeal{ - Signature: []byte("test"), - Bitmap: nil, - }, - expectedRes: 0, - expectedErr: ErrEmptyCommittedSeals, - }, - { - name: "should return ErrEmptyCommittedSeals if committedSeal.BitMap is zero", - committedSeal: &AggregatedSeal{ - Signature: []byte("test"), - Bitmap: new(big.Int), - }, - expectedRes: 0, - expectedErr: ErrEmptyCommittedSeals, - }, - { - name: "should return error if failed to aggregate public keys", - committedSeal: &AggregatedSeal{ - Signature: []byte("test"), - Bitmap: new(big.Int).SetBit(new(big.Int), 0, 1), - }, - validators: validators.NewBLSValidatorSet( - &validators.BLSValidator{ - BLSPublicKey: []byte("test"), - }, - ), - expectedRes: 0, - expectedErr: errors.New("failed to aggregate BLS Public Keys: public key must be 48 bytes"), - }, - { - name: "should return error if failed to unmarshal aggregated signature", - committedSeal: &AggregatedSeal{ - Signature: []byte("test"), - Bitmap: new(big.Int).SetBit(new(big.Int), 0, 1), - }, - validators: validators.NewBLSValidatorSet( - testBLSKeyManagerToBLSValidator(t, validatorKeyManager1), - testBLSKeyManagerToBLSValidator(t, validatorKeyManager2), - ), - expectedRes: 0, - expectedErr: errors.New("multi signature must be 96 bytes"), - }, - { - name: "should return error if message is nil", - committedSeal: &AggregatedSeal{ - Signature: correctAggregatedSig, - Bitmap: new(big.Int).SetBit(new(big.Int), 0, 1), - }, - validators: validators.NewBLSValidatorSet( - testBLSKeyManagerToBLSValidator(t, validatorKeyManager1), - testBLSKeyManagerToBLSValidator(t, validatorKeyManager2), - ), - msg: nil, - expectedRes: 0, - expectedErr: errors.New("signature and message and public key cannot be nil or zero"), - }, - { - name: "should return ErrInvalidSignature if verification failed (different message)", - committedSeal: &AggregatedSeal{ - Signature: wrongAggregatedSig, - Bitmap: new(big.Int).SetBytes([]byte{0x3}), // validator1 & validator2 - }, - validators: validators.NewBLSValidatorSet( - testBLSKeyManagerToBLSValidator(t, validatorKeyManager1), - testBLSKeyManagerToBLSValidator(t, validatorKeyManager2), - ), - msg: msg, - expectedRes: 0, - expectedErr: ErrInvalidSignature, - }, - { - name: 
"should return ErrInvalidSignature if verification failed (wrong validator set)", - committedSeal: &AggregatedSeal{ - Signature: correctAggregatedSig, - Bitmap: new(big.Int).SetBytes([]byte{0x3}), // validator1 & validator 2 - }, - validators: validators.NewBLSValidatorSet( - testBLSKeyManagerToBLSValidator(t, validatorKeyManager3), - testBLSKeyManagerToBLSValidator(t, validatorKeyManager4), - ), - msg: msg, - expectedRes: 0, - expectedErr: ErrInvalidSignature, - }, - { - name: "should return ErrInvalidSignature if verification failed (smaller validator set)", - committedSeal: &AggregatedSeal{ - Signature: correctAggregatedSig, - Bitmap: new(big.Int).SetBytes([]byte{0x1}), // validator1 - }, - validators: validators.NewBLSValidatorSet( - testBLSKeyManagerToBLSValidator(t, validatorKeyManager1), - ), - msg: msg, - expectedRes: 0, - expectedErr: ErrInvalidSignature, - }, - { - name: "should return ErrInvalidSignature if verification failed (bigger validator set)", - committedSeal: &AggregatedSeal{ - Signature: correctAggregatedSig, - Bitmap: new(big.Int).SetBytes([]byte{0x7}), // validator1 & validator 2 & validator 3 - }, - validators: validators.NewBLSValidatorSet( - testBLSKeyManagerToBLSValidator(t, validatorKeyManager1), - testBLSKeyManagerToBLSValidator(t, validatorKeyManager2), - testBLSKeyManagerToBLSValidator(t, validatorKeyManager3), - ), - msg: msg, - expectedRes: 0, - expectedErr: ErrInvalidSignature, - }, - { - name: "should succeed", - committedSeal: &AggregatedSeal{ - Signature: correctAggregatedSig, - Bitmap: new(big.Int).SetBytes([]byte{0x3}), // validator1 & validator 2 - }, - validators: validators.NewBLSValidatorSet( - testBLSKeyManagerToBLSValidator(t, validatorKeyManager1), - testBLSKeyManagerToBLSValidator(t, validatorKeyManager2), - ), - msg: msg, - expectedRes: 2, - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := verifyBLSCommittedSealsImpl( - test.committedSeal, - test.msg, - test.validators, - ) - - assert.Equal(t, test.expectedRes, res) - testHelper.AssertErrorMessageContains(t, test.expectedErr, err) - }) - } -} diff --git a/consensus/ibft/signer/ecdsa.go b/consensus/ibft/signer/ecdsa.go deleted file mode 100644 index 4e7377ed7b..0000000000 --- a/consensus/ibft/signer/ecdsa.go +++ /dev/null @@ -1,211 +0,0 @@ -package signer - -import ( - "crypto/ecdsa" - "fmt" - - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/secrets" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/umbracle/fastrlp" -) - -// ECDSAKeyManager is a module that holds ECDSA key -// and implements methods of signing by this key -type ECDSAKeyManager struct { - key *ecdsa.PrivateKey - address types.Address -} - -// NewECDSAKeyManager initializes ECDSAKeyManager by the ECDSA key loaded from SecretsManager -func NewECDSAKeyManager(manager secrets.SecretsManager) (KeyManager, error) { - key, err := getOrCreateECDSAKey(manager) - if err != nil { - return nil, err - } - - return NewECDSAKeyManagerFromKey(key), nil -} - -// NewECDSAKeyManagerFromKey initializes ECDSAKeyManager from the given ECDSA key -func NewECDSAKeyManagerFromKey(key *ecdsa.PrivateKey) KeyManager { - return &ECDSAKeyManager{ - key: key, - address: crypto.PubKeyToAddress(&key.PublicKey), - } -} - -// Type returns the validator type KeyManager supports -func (s *ECDSAKeyManager) Type() validators.ValidatorType { - return validators.ECDSAValidatorType 
-} - -// Address returns the address of KeyManager -func (s *ECDSAKeyManager) Address() types.Address { - return s.address -} - -// NewEmptyValidators returns empty validator collection ECDSAKeyManager uses -func (s *ECDSAKeyManager) NewEmptyValidators() validators.Validators { - return validators.NewECDSAValidatorSet() -} - -// NewEmptyCommittedSeals returns empty CommittedSeals ECDSAKeyManager uses -func (s *ECDSAKeyManager) NewEmptyCommittedSeals() Seals { - return &SerializedSeal{} -} - -// SignProposerSeal signs the given message by ECDSA key the ECDSAKeyManager holds for ProposerSeal -func (s *ECDSAKeyManager) SignProposerSeal(message []byte) ([]byte, error) { - return crypto.Sign(s.key, message) -} - -// SignProposerSeal signs the given message by ECDSA key the ECDSAKeyManager holds for committed seal -func (s *ECDSAKeyManager) SignCommittedSeal(message []byte) ([]byte, error) { - return crypto.Sign(s.key, message) -} - -// VerifyCommittedSeal verifies a committed seal -func (s *ECDSAKeyManager) VerifyCommittedSeal( - vals validators.Validators, - address types.Address, - signature []byte, - message []byte, -) error { - if vals.Type() != s.Type() { - return ErrInvalidValidators - } - - signer, err := s.Ecrecover(signature, message) - if err != nil { - return ErrInvalidSignature - } - - if address != signer { - return ErrSignerMismatch - } - - if !vals.Includes(address) { - return ErrNonValidatorCommittedSeal - } - - return nil -} - -func (s *ECDSAKeyManager) GenerateCommittedSeals( - sealMap map[types.Address][]byte, - _ validators.Validators, -) (Seals, error) { - seals := [][]byte{} - - for _, seal := range sealMap { - if len(seal) != IstanbulExtraSeal { - return nil, ErrInvalidCommittedSealLength - } - - seals = append(seals, seal) - } - - serializedSeal := SerializedSeal(seals) - - return &serializedSeal, nil -} - -func (s *ECDSAKeyManager) VerifyCommittedSeals( - rawCommittedSeal Seals, - digest []byte, - vals validators.Validators, -) (int, error) { - committedSeal, ok := rawCommittedSeal.(*SerializedSeal) - if !ok { - return 0, ErrInvalidCommittedSealType - } - - if vals.Type() != s.Type() { - return 0, ErrInvalidValidators - } - - return s.verifyCommittedSealsImpl(committedSeal, digest, vals) -} - -func (s *ECDSAKeyManager) SignIBFTMessage(msg []byte) ([]byte, error) { - return crypto.Sign(s.key, msg) -} - -func (s *ECDSAKeyManager) Ecrecover(sig, digest []byte) (types.Address, error) { - return ecrecover(sig, digest) -} - -func (s *ECDSAKeyManager) verifyCommittedSealsImpl( - committedSeal *SerializedSeal, - msg []byte, - validators validators.Validators, -) (int, error) { - numSeals := committedSeal.Num() - if numSeals == 0 { - return 0, ErrEmptyCommittedSeals - } - - visited := make(map[types.Address]bool) - - for _, seal := range *committedSeal { - addr, err := s.Ecrecover(seal, msg) - if err != nil { - return 0, err - } - - if visited[addr] { - return 0, ErrRepeatedCommittedSeal - } - - if !validators.Includes(addr) { - return 0, ErrNonValidatorCommittedSeal - } - - visited[addr] = true - } - - return numSeals, nil -} - -type SerializedSeal [][]byte - -func (s *SerializedSeal) Num() int { - return len(*s) -} - -func (s *SerializedSeal) MarshalRLPWith(ar *fastrlp.Arena) *fastrlp.Value { - if len(*s) == 0 { - return ar.NewNullArray() - } - - committed := ar.NewArray() - - for _, a := range *s { - if len(a) == 0 { - committed.Set(ar.NewNull()) - } else { - committed.Set(ar.NewCopyBytes(a)) - } - } - - return committed -} - -func (s *SerializedSeal) UnmarshalRLPFrom(p 
*fastrlp.Parser, v *fastrlp.Value) error { - vals, err := v.GetElems() - if err != nil { - return fmt.Errorf("mismatch of RLP type for CommittedSeal, expected list but found %s", v.Type()) - } - - (*s) = make([][]byte, len(vals)) - - for indx, val := range vals { - if (*s)[indx], err = val.GetBytes((*s)[indx]); err != nil { - return err - } - } - - return nil -} diff --git a/consensus/ibft/signer/ecdsa_test.go b/consensus/ibft/signer/ecdsa_test.go deleted file mode 100644 index 6468fdfc7f..0000000000 --- a/consensus/ibft/signer/ecdsa_test.go +++ /dev/null @@ -1,554 +0,0 @@ -package signer - -import ( - "crypto/ecdsa" - "errors" - "testing" - - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/helper/hex" - testHelper "github.com/0xPolygon/polygon-edge/helper/tests" - "github.com/0xPolygon/polygon-edge/secrets" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/stretchr/testify/assert" -) - -func newTestECDSAKeyManager(t *testing.T) (KeyManager, *ecdsa.PrivateKey) { - t.Helper() - - testKey, _ := newTestECDSAKey(t) - - return NewECDSAKeyManagerFromKey(testKey), testKey -} - -func TestNewECDSAKeyManager(t *testing.T) { - t.Parallel() - - testKey, testKeyEncoded := newTestECDSAKey(t) - - testSecretName := func(name string) { - t.Helper() - - // make sure that the correct key is given - assert.Equal(t, secrets.ValidatorKey, name) - } - - //lint:ignore dupl - tests := []struct { - name string - mockSecretManager *MockSecretManager - expectedResult KeyManager - expectedErr error - }{ - { - name: "should initialize ECDSAKeyManager from the loaded ECDSA key", - mockSecretManager: &MockSecretManager{ - HasSecretFn: func(name string) bool { - testSecretName(name) - - return true - }, - GetSecretFn: func(name string) ([]byte, error) { - testSecretName(name) - - return testKeyEncoded, nil - }, - }, - expectedResult: &ECDSAKeyManager{ - key: testKey, - address: crypto.PubKeyToAddress(&testKey.PublicKey), - }, - expectedErr: nil, - }, - { - name: "should return error if getOrCreateECDSAKey returns error", - mockSecretManager: &MockSecretManager{ - HasSecretFn: func(name string) bool { - testSecretName(name) - - return true - }, - GetSecretFn: func(name string) ([]byte, error) { - testSecretName(name) - - return nil, errTest - }, - }, - expectedResult: nil, - expectedErr: errTest, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := NewECDSAKeyManager(test.mockSecretManager) - - assert.Equal(t, test.expectedResult, res) - assert.ErrorIs(t, test.expectedErr, err) - }) - } -} -func TestNewECDSAKeyManagerFromKey(t *testing.T) { - t.Parallel() - - testKey, _ := newTestECDSAKey(t) - - assert.Equal( - t, - &ECDSAKeyManager{ - key: testKey, - address: crypto.PubKeyToAddress(&testKey.PublicKey), - }, - NewECDSAKeyManagerFromKey(testKey), - ) -} - -func TestECDSAKeyManagerType(t *testing.T) { - t.Parallel() - - ecdsaKeyManager, _ := newTestECDSAKeyManager(t) - - assert.Equal( - t, - validators.ECDSAValidatorType, - ecdsaKeyManager.Type(), - ) -} - -func TestECDSAKeyManagerAddress(t *testing.T) { - t.Parallel() - - ecdsaKey, _ := newTestECDSAKey(t) - ecdsaKeyManager := NewECDSAKeyManagerFromKey(ecdsaKey) - - assert.Equal( - t, - crypto.PubKeyToAddress(&ecdsaKey.PublicKey), - ecdsaKeyManager.Address(), - ) -} - -func TestECDSAKeyManagerNewEmptyValidators(t *testing.T) { - t.Parallel() - - ecdsaKeyManager, _ := newTestECDSAKeyManager(t) - - assert.Equal( - 
t, - validators.NewECDSAValidatorSet(), - ecdsaKeyManager.NewEmptyValidators(), - ) -} - -func TestECDSAKeyManagerNewEmptyCommittedSeals(t *testing.T) { - t.Parallel() - - ecdsaKeyManager, _ := newTestECDSAKeyManager(t) - - assert.Equal( - t, - &SerializedSeal{}, - ecdsaKeyManager.NewEmptyCommittedSeals(), - ) -} - -func TestECDSAKeyManagerSignProposerSeal(t *testing.T) { - t.Parallel() - - ecdsaKeyManager, _ := newTestECDSAKeyManager(t) - msg := crypto.Keccak256( - hex.MustDecodeHex(testHeaderHashHex), - ) - - proposerSeal, err := ecdsaKeyManager.SignProposerSeal(msg) - assert.NoError(t, err) - - recoveredAddress, err := ecrecover(proposerSeal, msg) - assert.NoError(t, err) - - assert.Equal( - t, - ecdsaKeyManager.Address(), - recoveredAddress, - ) -} - -func TestECDSAKeyManagerSignCommittedSeal(t *testing.T) { - t.Parallel() - - ecdsaKeyManager, _ := newTestECDSAKeyManager(t) - msg := crypto.Keccak256( - wrapCommitHash( - hex.MustDecodeHex(testHeaderHashHex), - ), - ) - - proposerSeal, err := ecdsaKeyManager.SignCommittedSeal(msg) - assert.NoError(t, err) - - recoveredAddress, err := ecrecover(proposerSeal, msg) - assert.NoError(t, err) - - assert.Equal( - t, - ecdsaKeyManager.Address(), - recoveredAddress, - ) -} - -func TestECDSAKeyManagerVerifyCommittedSeal(t *testing.T) { - t.Parallel() - - ecdsaKeyManager1, _ := newTestECDSAKeyManager(t) - ecdsaKeyManager2, _ := newTestECDSAKeyManager(t) - - msg := crypto.Keccak256( - wrapCommitHash( - hex.MustDecodeHex(testHeaderHashHex), - ), - ) - - correctSignature, err := ecdsaKeyManager1.SignCommittedSeal(msg) - assert.NoError(t, err) - - wrongSignature, err := ecdsaKeyManager2.SignCommittedSeal(msg) - assert.NoError(t, err) - - tests := []struct { - name string - validators validators.Validators - address types.Address - signature []byte - message []byte - expectedErr error - }{ - { - name: "should return ErrInvalidValidators if validators is wrong type", - validators: validators.NewBLSValidatorSet(), - address: ecdsaKeyManager1.Address(), - signature: []byte{}, - message: []byte{}, - expectedErr: ErrInvalidValidators, - }, - { - name: "should return ErrInvalidSignature if ecrecover failed", - validators: validators.NewECDSAValidatorSet(), - address: ecdsaKeyManager1.Address(), - signature: []byte{}, - message: []byte{}, - expectedErr: ErrInvalidSignature, - }, - { - name: "should return ErrSignerMismatch if the signature is signed by different signer", - validators: validators.NewECDSAValidatorSet(), - address: ecdsaKeyManager1.Address(), - signature: wrongSignature, - message: msg, - expectedErr: ErrSignerMismatch, - }, - { - name: "should return ErrNonValidatorCommittedSeal if the signer is not in the validators", - validators: validators.NewECDSAValidatorSet(), - address: ecdsaKeyManager1.Address(), - signature: correctSignature, - message: msg, - expectedErr: ErrNonValidatorCommittedSeal, - }, - { - name: "should return nil if it's verified", - validators: validators.NewECDSAValidatorSet( - validators.NewECDSAValidator( - ecdsaKeyManager1.Address(), - ), - ), - address: ecdsaKeyManager1.Address(), - signature: correctSignature, - message: msg, - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.ErrorIs( - t, - test.expectedErr, - ecdsaKeyManager1.VerifyCommittedSeal( - test.validators, - test.address, - test.signature, - test.message, - ), - ) - }) - } -} - -func TestECDSAKeyManagerGenerateCommittedSeals(t *testing.T) { - t.Parallel() - - 
ecdsaKeyManager1, _ := newTestECDSAKeyManager(t) - - msg := crypto.Keccak256( - wrapCommitHash( - hex.MustDecodeHex(testHeaderHashHex), - ), - ) - - correctCommittedSeal, err := ecdsaKeyManager1.SignCommittedSeal(msg) - assert.NoError(t, err) - - wrongCommittedSeal := []byte("fake") - - tests := []struct { - name string - sealMap map[types.Address][]byte - expectedRes Seals - expectedErr error - }{ - { - name: "should return ErrInvalidCommittedSealLength if the size of committed seal doesn't equal to IstanbulExtraSeal", - sealMap: map[types.Address][]byte{ - ecdsaKeyManager1.Address(): wrongCommittedSeal, - }, - expectedRes: nil, - expectedErr: ErrInvalidCommittedSealLength, - }, - { - name: "should return SerializedSeal", - sealMap: map[types.Address][]byte{ - ecdsaKeyManager1.Address(): correctCommittedSeal, - }, - expectedRes: &SerializedSeal{ - correctCommittedSeal, - }, - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := ecdsaKeyManager1.GenerateCommittedSeals( - test.sealMap, - nil, - ) - - assert.Equal(t, test.expectedRes, res) - assert.ErrorIs(t, test.expectedErr, err) - }) - } -} - -func TestECDSAKeyManagerVerifyCommittedSeals(t *testing.T) { - t.Parallel() - - ecdsaKeyManager1, _ := newTestECDSAKeyManager(t) - - msg := crypto.Keccak256( - wrapCommitHash( - hex.MustDecodeHex(testHeaderHashHex), - ), - ) - - correctCommittedSeal, err := ecdsaKeyManager1.SignCommittedSeal(msg) - assert.NoError(t, err) - - tests := []struct { - name string - committedSeals Seals - digest []byte - rawSet validators.Validators - expectedRes int - expectedErr error - }{ - { - name: "should return ErrInvalidCommittedSealType if the Seals is not *SerializedSeal", - committedSeals: &AggregatedSeal{}, - digest: msg, - rawSet: nil, - expectedRes: 0, - expectedErr: ErrInvalidCommittedSealType, - }, - { - name: "should return ErrInvalidValidators if the rawSet is not *validators.ECDSAValidators", - committedSeals: &SerializedSeal{}, - digest: msg, - rawSet: validators.NewBLSValidatorSet(), - expectedRes: 0, - expectedErr: ErrInvalidValidators, - }, - { - name: "should return size of CommittedSeals if verification is successful", - committedSeals: &SerializedSeal{ - correctCommittedSeal, - }, - digest: msg, - rawSet: validators.NewECDSAValidatorSet( - validators.NewECDSAValidator( - ecdsaKeyManager1.Address(), - ), - ), - expectedRes: 1, - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := ecdsaKeyManager1.VerifyCommittedSeals( - test.committedSeals, - test.digest, - test.rawSet, - ) - - assert.Equal(t, test.expectedRes, res) - assert.ErrorIs(t, test.expectedErr, err) - }) - } -} - -func TestECDSAKeyManagerSignIBFTMessageAndEcrecover(t *testing.T) { - t.Parallel() - - ecdsaKeyManager, _ := newTestECDSAKeyManager(t) - msg := crypto.Keccak256([]byte("message")) - - proposerSeal, err := ecdsaKeyManager.SignIBFTMessage(msg) - assert.NoError(t, err) - - recoveredAddress, err := ecdsaKeyManager.Ecrecover(proposerSeal, msg) - assert.NoError(t, err) - - assert.Equal( - t, - ecdsaKeyManager.Address(), - recoveredAddress, - ) -} - -func TestECDSAKeyManager_verifyCommittedSealsImpl(t *testing.T) { - t.Parallel() - - ecdsaKeyManager1, _ := newTestECDSAKeyManager(t) - ecdsaKeyManager2, _ := newTestECDSAKeyManager(t) - - msg := crypto.Keccak256( - wrapCommitHash( - hex.MustDecodeHex(testHeaderHashHex), - ), - ) - - 
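// Editor's note: the table below exercises verifyCommittedSealsImpl, whose core is a
// duplicate check plus a validator-membership check over the recovered signer addresses.
// A stripped-down sketch of that loop with the ecrecover step stubbed out; the names and
// the recoverFn callback are illustrative, not the deleted implementation.
package main

import (
	"errors"
	"fmt"
)

var (
	errRepeatedSeal     = errors.New("repeated seal in committed seals")
	errNonValidatorSeal = errors.New("found committed seal signed by non validator")
)

// countValidSeals mirrors the shape of verifyCommittedSealsImpl: recover a signer per
// seal, reject repeats and non-validators, and return the number of accepted seals.
func countValidSeals(
	seals [][]byte,
	recoverFn func([]byte) string, // stand-in for Ecrecover
	validators map[string]bool,
) (int, error) {
	visited := map[string]bool{}

	for _, seal := range seals {
		addr := recoverFn(seal)

		if visited[addr] {
			return 0, errRepeatedSeal
		}

		if !validators[addr] {
			return 0, errNonValidatorSeal
		}

		visited[addr] = true
	}

	return len(seals), nil
}

func main() {
	recoverFn := func(seal []byte) string { return string(seal) }
	validators := map[string]bool{"A": true, "B": true}

	n, err := countValidSeals([][]byte{[]byte("A"), []byte("B")}, recoverFn, validators)
	fmt.Println(n, err) // 2 <nil>
}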
correctCommittedSeal, err := ecdsaKeyManager1.SignCommittedSeal(msg) - assert.NoError(t, err) - - nonValidatorsCommittedSeal, err := ecdsaKeyManager2.SignCommittedSeal(msg) - assert.NoError(t, err) - - wrongSignature := []byte("fake") - - tests := []struct { - name string - committedSeals *SerializedSeal - msg []byte - validators validators.Validators - expectedRes int - expectedErr error - }{ - { - name: "should return ErrInvalidCommittedSealType if the Seals is not *SerializedSeal", - committedSeals: &SerializedSeal{}, - msg: msg, - validators: validators.NewECDSAValidatorSet(), - expectedRes: 0, - expectedErr: ErrEmptyCommittedSeals, - }, - { - name: "should return error if Ecrecover failed", - committedSeals: &SerializedSeal{ - wrongSignature, - }, - msg: msg, - validators: validators.NewECDSAValidatorSet(), - expectedRes: 0, - expectedErr: errors.New("invalid compact signature size"), - }, - { - name: "should return error ErrRepeatedCommittedSeal if CommittedSeal", - committedSeals: &SerializedSeal{ - correctCommittedSeal, - correctCommittedSeal, - }, - msg: msg, - validators: validators.NewECDSAValidatorSet( - validators.NewECDSAValidator( - ecdsaKeyManager1.Address(), - ), - ), - expectedRes: 0, - expectedErr: ErrRepeatedCommittedSeal, - }, - { - name: "should return error ErrNonValidatorCommittedSeal if CommittedSeals has the signature by non-validator", - committedSeals: &SerializedSeal{ - correctCommittedSeal, - nonValidatorsCommittedSeal, - }, - msg: msg, - validators: validators.NewECDSAValidatorSet( - validators.NewECDSAValidator( - ecdsaKeyManager1.Address(), - ), - ), - expectedRes: 0, - expectedErr: ErrNonValidatorCommittedSeal, - }, - { - name: "should return the size of CommittedSeals if verification is successful", - committedSeals: &SerializedSeal{ - correctCommittedSeal, - }, - msg: msg, - validators: validators.NewECDSAValidatorSet( - validators.NewECDSAValidator( - ecdsaKeyManager1.Address(), - ), - ), - expectedRes: 1, - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := ecdsaKeyManager1.(*ECDSAKeyManager).verifyCommittedSealsImpl( - test.committedSeals, - test.msg, - test.validators, - ) - - assert.Equal(t, test.expectedRes, res) - testHelper.AssertErrorMessageContains(t, test.expectedErr, err) - }) - } -} diff --git a/consensus/ibft/signer/extra.go b/consensus/ibft/signer/extra.go deleted file mode 100644 index fe35ba512a..0000000000 --- a/consensus/ibft/signer/extra.go +++ /dev/null @@ -1,331 +0,0 @@ -package signer - -import ( - "encoding/binary" - "errors" - "fmt" - - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/umbracle/fastrlp" -) - -var ( - // IstanbulDigest represents a hash of "Istanbul practical byzantine fault tolerance" - // to identify whether the block is from Istanbul consensus engine - IstanbulDigest = types.StringToHash("0x63746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365") - - // IstanbulExtraVanity represents a fixed number of extra-data bytes reserved for proposer vanity - IstanbulExtraVanity = 32 - - // IstanbulExtraSeal represents the fixed number of extra-data bytes reserved for proposer seal - IstanbulExtraSeal = 65 - - zeroBytes = make([]byte, 32) - - errRoundNumberOverflow = errors.New("round number is out of range for 64bit") -) - -// IstanbulExtra defines the structure of the extra field for Istanbul -type IstanbulExtra struct { - Validators validators.Validators - 
ProposerSeal []byte - CommittedSeals Seals - ParentCommittedSeals Seals - RoundNumber *uint64 -} - -type Seals interface { - // Number of committed seals - Num() int - MarshalRLPWith(ar *fastrlp.Arena) *fastrlp.Value - UnmarshalRLPFrom(*fastrlp.Parser, *fastrlp.Value) error -} - -// parseRound parses RLP-encoded bytes into round -func parseRound(v *fastrlp.Value) (*uint64, error) { - roundBytes, err := v.Bytes() - if err != nil { - return nil, err - } - - if len(roundBytes) > 8 { - return nil, errRoundNumberOverflow - } - - if len(roundBytes) == 0 { - return nil, nil - } - - round := binary.BigEndian.Uint64(roundBytes) - - return &round, nil -} - -// toRoundBytes converts uint64 round to bytes -// Round begins with zero and it can be nil for backward compatibility. -// For that reason, Extra always has 8 bytes space for a round when the round has value. -func toRoundBytes(round uint64) []byte { - roundBytes := make([]byte, 8) - binary.BigEndian.PutUint64(roundBytes, round) - - return roundBytes -} - -// MarshalRLPTo defines the marshal function wrapper for IstanbulExtra -func (i *IstanbulExtra) MarshalRLPTo(dst []byte) []byte { - return types.MarshalRLPTo(i.MarshalRLPWith, dst) -} - -// MarshalRLPWith defines the marshal function implementation for IstanbulExtra -func (i *IstanbulExtra) MarshalRLPWith(ar *fastrlp.Arena) *fastrlp.Value { - vv := ar.NewArray() - - // Validators - vv.Set(i.Validators.MarshalRLPWith(ar)) - - // ProposerSeal - if len(i.ProposerSeal) == 0 { - vv.Set(ar.NewNull()) - } else { - vv.Set(ar.NewCopyBytes(i.ProposerSeal)) - } - - // CommittedSeal - vv.Set(i.CommittedSeals.MarshalRLPWith(ar)) - - // ParentCommittedSeal - if i.ParentCommittedSeals == nil { - vv.Set(ar.NewNullArray()) - } else { - vv.Set(i.ParentCommittedSeals.MarshalRLPWith(ar)) - } - - if i.RoundNumber == nil { - vv.Set(ar.NewNull()) - } else { - vv.Set(ar.NewBytes( - toRoundBytes(*i.RoundNumber), - )) - } - - return vv -} - -// UnmarshalRLP defines the unmarshal function wrapper for IstanbulExtra -func (i *IstanbulExtra) UnmarshalRLP(input []byte) error { - return types.UnmarshalRlp(i.UnmarshalRLPFrom, input) -} - -// UnmarshalRLPFrom defines the unmarshal implementation for IstanbulExtra -func (i *IstanbulExtra) UnmarshalRLPFrom(p *fastrlp.Parser, v *fastrlp.Value) error { - elems, err := v.GetElems() - if err != nil { - return err - } - - if len(elems) < 3 { - return fmt.Errorf("incorrect number of elements to decode istambul extra, expected 3 but found %d", len(elems)) - } - - // Validators - if err := i.Validators.UnmarshalRLPFrom(p, elems[0]); err != nil { - return err - } - - // ProposerSeal - if i.ProposerSeal, err = elems[1].GetBytes(i.ProposerSeal); err != nil { - return fmt.Errorf("failed to decode Seal: %w", err) - } - - // CommittedSeal - if err := i.CommittedSeals.UnmarshalRLPFrom(p, elems[2]); err != nil { - return err - } - - // ParentCommitted - if len(elems) >= 4 && i.ParentCommittedSeals != nil { - if err := i.ParentCommittedSeals.UnmarshalRLPFrom(p, elems[3]); err != nil { - return err - } - } - - // Round - if len(elems) >= 5 { - roundNumber, err := parseRound(elems[4]) - if err != nil { - return err - } - - i.RoundNumber = roundNumber - } - - return nil -} - -// UnmarshalRLPForParentCS defines the unmarshal function wrapper for IstanbulExtra -// that parses only Parent Committed Seals -func (i *IstanbulExtra) unmarshalRLPForParentCS(input []byte) error { - return types.UnmarshalRlp(i.unmarshalRLPFromForParentCS, input) -} - -// UnmarshalRLPFrom defines the unmarshal implementation 
for IstanbulExtra -// that parses only Parent Committed Seals -func (i *IstanbulExtra) unmarshalRLPFromForParentCS(p *fastrlp.Parser, v *fastrlp.Value) error { - elems, err := v.GetElems() - if err != nil { - return err - } - - // ParentCommitted - if len(elems) >= 4 { - if err := i.ParentCommittedSeals.UnmarshalRLPFrom(p, elems[3]); err != nil { - return err - } - } - - // Round - if len(elems) >= 5 { - roundNumber, err := parseRound(elems[4]) - if err != nil { - return err - } - - i.RoundNumber = roundNumber - } - - return nil -} - -// putIbftExtra sets the IBFT extra data field into the header -func putIbftExtra(h *types.Header, istanbulExtra *IstanbulExtra) { - // Pad zeros to the right up to istanbul vanity - extra := h.ExtraData - if len(extra) < IstanbulExtraVanity { - extra = append(extra, zeroBytes[:IstanbulExtraVanity-len(extra)]...) - } else { - extra = extra[:IstanbulExtraVanity] - } - - h.ExtraData = istanbulExtra.MarshalRLPTo(extra) -} - -// packFieldsIntoExtra is a helper function -// that injects a few fields into IBFT Extra -// without modifying other fields -// Validators, CommittedSeals, and ParentCommittedSeals have a few types -// and extra must have these instances before unmarshalling usually -// This function doesn't require the field instances that don't update -func packFieldsIntoExtra( - extraBytes []byte, - packFn func( - ar *fastrlp.Arena, - oldValues []*fastrlp.Value, - newArrayValue *fastrlp.Value, - ) error, -) []byte { - extraHeader := extraBytes[:IstanbulExtraVanity] - extraBody := extraBytes[IstanbulExtraVanity:] - - newExtraBody := types.MarshalRLPTo(func(ar *fastrlp.Arena) *fastrlp.Value { - vv := ar.NewArray() - - _ = types.UnmarshalRlp(func(p *fastrlp.Parser, v *fastrlp.Value) error { - elems, err := v.GetElems() - if err != nil { - return err - } - - if len(elems) < 3 { - return fmt.Errorf("incorrect number of elements to decode istambul extra, expected 3 but found %d", len(elems)) - } - - return packFn(ar, elems, vv) - }, extraBody) - - return vv - }, nil) - - return append( - extraHeader, - newExtraBody..., - ) -} - -// packProposerSealIntoExtra updates only Seal field in Extra -func packProposerSealIntoExtra( - extraBytes []byte, - proposerSeal []byte, -) []byte { - return packFieldsIntoExtra( - extraBytes, - func( - ar *fastrlp.Arena, - oldValues []*fastrlp.Value, - newArrayValue *fastrlp.Value, - ) error { - // Validators - newArrayValue.Set(oldValues[0]) - - // Seal - newArrayValue.Set(ar.NewBytes(proposerSeal)) - - // CommittedSeal - newArrayValue.Set(oldValues[2]) - - // ParentCommittedSeal - if len(oldValues) >= 4 { - newArrayValue.Set(oldValues[3]) - } - - // Round - if len(oldValues) >= 5 { - newArrayValue.Set(oldValues[4]) - } - - return nil - }, - ) -} - -// packCommittedSealsAndRoundNumberIntoExtra updates only CommittedSeal field in Extra -func packCommittedSealsAndRoundNumberIntoExtra( - extraBytes []byte, - committedSeal Seals, - roundNumber *uint64, -) []byte { - return packFieldsIntoExtra( - extraBytes, - func( - ar *fastrlp.Arena, - oldValues []*fastrlp.Value, - newArrayValue *fastrlp.Value, - ) error { - // Validators - newArrayValue.Set(oldValues[0]) - - // Seal - newArrayValue.Set(oldValues[1]) - - // CommittedSeal - newArrayValue.Set(committedSeal.MarshalRLPWith(ar)) - - // ParentCommittedSeal - if len(oldValues) >= 4 { - newArrayValue.Set(oldValues[3]) - } else { - newArrayValue.Set(ar.NewNullArray()) - } - - if roundNumber == nil { - newArrayValue.Set(ar.NewNull()) - } else { - newArrayValue.Set(ar.NewBytes( - 
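// Editor's note: packCommittedSealsAndRoundNumberIntoExtra (continued just below) stores
// the optional round as the 8-byte big-endian value produced by toRoundBytes, while
// parseRound treats an empty byte slice as "no round" and more than 8 bytes as an
// overflow. A self-contained sketch of that encoding using only encoding/binary; it
// assumes the input is either empty or a full 8-byte value, as toRoundBytes always
// writes 8 bytes.
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

var errRoundOverflow = errors.New("round number is out of range for 64bit")

// encodeRound mirrors toRoundBytes: a fixed 8-byte big-endian representation.
func encodeRound(round uint64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, round)

	return buf
}

// decodeRound mirrors parseRound: nil for empty input (backward compatibility),
// error for anything longer than 8 bytes.
func decodeRound(buf []byte) (*uint64, error) {
	if len(buf) > 8 {
		return nil, errRoundOverflow
	}

	if len(buf) == 0 {
		return nil, nil
	}

	round := binary.BigEndian.Uint64(buf)

	return &round, nil
}

func main() {
	b := encodeRound(5)
	r, err := decodeRound(b)
	fmt.Println(b, *r, err) // [0 0 0 0 0 0 0 5] 5 <nil>
}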
toRoundBytes(*roundNumber), - )) - } - - return nil - }, - ) -} diff --git a/consensus/ibft/signer/extra_test.go b/consensus/ibft/signer/extra_test.go deleted file mode 100644 index 55622d38e8..0000000000 --- a/consensus/ibft/signer/extra_test.go +++ /dev/null @@ -1,491 +0,0 @@ -package signer - -import ( - "encoding/json" - "math/big" - "testing" - - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/stretchr/testify/assert" -) - -func JSONMarshalHelper(t *testing.T, extra *IstanbulExtra) string { - t.Helper() - - res, err := json.Marshal(extra) - - assert.NoError(t, err) - - return string(res) -} - -func TestIstanbulExtraMarshalAndUnmarshal(t *testing.T) { - tests := []struct { - name string - extra *IstanbulExtra - }{ - { - name: "ECDSAExtra", - extra: &IstanbulExtra{ - Validators: validators.NewECDSAValidatorSet( - validators.NewECDSAValidator( - testAddr1, - ), - ), - ProposerSeal: testProposerSeal, - CommittedSeals: &SerializedSeal{ - []byte{0x1}, - []byte{0x2}, - }, - ParentCommittedSeals: &SerializedSeal{ - []byte{0x3}, - []byte{0x4}, - }, - }, - }, - { - name: "ECDSAExtra without ParentCommittedSeals", - extra: &IstanbulExtra{ - Validators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - ProposerSeal: testProposerSeal, - CommittedSeals: &SerializedSeal{ - []byte{0x1}, - []byte{0x2}, - }, - }, - }, - { - name: "BLSExtra", - extra: &IstanbulExtra{ - Validators: validators.NewBLSValidatorSet( - blsValidator1, - ), - ProposerSeal: testProposerSeal, - CommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0x8}), - Signature: []byte{0x1}, - }, - ParentCommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0x9}), - Signature: []byte{0x2}, - }, - }, - }, - { - name: "BLSExtra without ParentCommittedSeals", - extra: &IstanbulExtra{ - Validators: validators.NewBLSValidatorSet( - blsValidator1, - ), - ProposerSeal: testProposerSeal, - CommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0x8}), - Signature: []byte{0x1}, - }, - ParentCommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0x9}), - Signature: []byte{0x2}, - }, - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - // create original data - originalExtraJSON := JSONMarshalHelper(t, test.extra) - - bytesData := test.extra.MarshalRLPTo(nil) - err := test.extra.UnmarshalRLP(bytesData) - assert.NoError(t, err) - - // make sure all data is recovered - assert.Equal( - t, - originalExtraJSON, - JSONMarshalHelper(t, test.extra), - ) - }) - } -} - -func Test_packProposerSealIntoExtra(t *testing.T) { - newProposerSeal := []byte("new proposer seal") - - tests := []struct { - name string - extra *IstanbulExtra - }{ - { - name: "ECDSAExtra", - extra: &IstanbulExtra{ - Validators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - ProposerSeal: testProposerSeal, - CommittedSeals: &SerializedSeal{ - []byte{0x1}, - []byte{0x2}, - }, - ParentCommittedSeals: &SerializedSeal{ - []byte{0x3}, - []byte{0x4}, - }, - }, - }, - { - name: "ECDSAExtra without ParentCommittedSeals", - extra: &IstanbulExtra{ - Validators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - ProposerSeal: testProposerSeal, - CommittedSeals: &SerializedSeal{ - []byte{0x1}, - []byte{0x2}, - }, - }, - }, - { - name: "BLSExtra", - extra: &IstanbulExtra{ - Validators: validators.NewBLSValidatorSet( - blsValidator1, - ), - ProposerSeal: testProposerSeal, - CommittedSeals: 
&AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0x8}), - Signature: []byte{0x1}, - }, - ParentCommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0x9}), - Signature: []byte{0x2}, - }, - }, - }, - { - name: "BLSExtra without ParentCommittedSeals", - extra: &IstanbulExtra{ - Validators: validators.NewBLSValidatorSet( - blsValidator1, - ), - ProposerSeal: testProposerSeal, - CommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0x8}), - Signature: []byte{0x1}, - }, - ParentCommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0x9}), - Signature: []byte{0x2}, - }, - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - originalProposerSeal := test.extra.ProposerSeal - - // create expected data - test.extra.ProposerSeal = newProposerSeal - expectedJSON := JSONMarshalHelper(t, test.extra) - test.extra.ProposerSeal = originalProposerSeal - - newExtraBytes := packProposerSealIntoExtra( - // prepend IstanbulExtraHeader to parse - append( - make([]byte, IstanbulExtraVanity), - test.extra.MarshalRLPTo(nil)..., - ), - newProposerSeal, - ) - - assert.NoError( - t, - test.extra.UnmarshalRLP(newExtraBytes[IstanbulExtraVanity:]), - ) - - // check json of decoded data matches with the original data - jsonData := JSONMarshalHelper(t, test.extra) - - assert.Equal( - t, - expectedJSON, - jsonData, - ) - }) - } -} - -func Test_packCommittedSealsAndRoundNumberIntoExtra(t *testing.T) { - tests := []struct { - name string - extra *IstanbulExtra - newCommittedSeals Seals - roundNumber *uint64 - }{ - { - name: "ECDSAExtra", - extra: &IstanbulExtra{ - Validators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - ProposerSeal: testProposerSeal, - CommittedSeals: &SerializedSeal{ - []byte{0x1}, - []byte{0x2}, - }, - ParentCommittedSeals: &SerializedSeal{ - []byte{0x3}, - []byte{0x4}, - }, - }, - newCommittedSeals: &SerializedSeal{ - []byte{0x3}, - []byte{0x4}, - }, - roundNumber: nil, - }, - { - name: "ECDSAExtra without ParentCommittedSeals", - extra: &IstanbulExtra{ - Validators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - ProposerSeal: testProposerSeal, - CommittedSeals: &SerializedSeal{ - []byte{0x1}, - []byte{0x2}, - }, - }, - newCommittedSeals: &SerializedSeal{ - []byte{0x3}, - []byte{0x4}, - }, - roundNumber: nil, - }, - { - name: "BLSExtra", - extra: &IstanbulExtra{ - Validators: validators.NewBLSValidatorSet( - blsValidator1, - ), - ProposerSeal: testProposerSeal, - CommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0x8}), - Signature: []byte{0x1}, - }, - ParentCommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0x9}), - Signature: []byte{0x2}, - }, - }, - newCommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0xa}), - Signature: []byte{0x2}, - }, - roundNumber: nil, - }, - { - name: "BLSExtra without ParentCommittedSeals", - extra: &IstanbulExtra{ - Validators: validators.NewBLSValidatorSet( - blsValidator1, - ), - ProposerSeal: testProposerSeal, - CommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0x8}), - Signature: []byte{0x1}, - }, - ParentCommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0x9}), - Signature: []byte{0x2}, - }, - }, - newCommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0xa}), - Signature: []byte{0x2}, - }, - roundNumber: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - 
originalCommittedSeals := test.extra.CommittedSeals - - // create expected data - test.extra.CommittedSeals = test.newCommittedSeals - expectedJSON := JSONMarshalHelper(t, test.extra) - test.extra.CommittedSeals = originalCommittedSeals - - // update committed seals - newExtraBytes := packCommittedSealsAndRoundNumberIntoExtra( - // prepend IstanbulExtraHeader - append( - make([]byte, IstanbulExtraVanity), - test.extra.MarshalRLPTo(nil)..., - ), - test.newCommittedSeals, - test.roundNumber, - ) - - // decode RLP data - assert.NoError( - t, - test.extra.UnmarshalRLP(newExtraBytes[IstanbulExtraVanity:]), - ) - - // check json of decoded data matches with the original data - jsonData := JSONMarshalHelper(t, test.extra) - - assert.Equal( - t, - expectedJSON, - jsonData, - ) - }) - } -} - -func Test_unmarshalRLPForParentCS(t *testing.T) { - tests := []struct { - name string - extra *IstanbulExtra - targetExtra *IstanbulExtra - }{ - { - name: "ECDSAExtra", - extra: &IstanbulExtra{ - Validators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - ProposerSeal: testProposerSeal, - CommittedSeals: &SerializedSeal{ - []byte{0x1}, - []byte{0x2}, - }, - ParentCommittedSeals: &SerializedSeal{ - []byte{0x3}, - []byte{0x4}, - }, - }, - targetExtra: &IstanbulExtra{ - ParentCommittedSeals: &SerializedSeal{}, - }, - }, - { - name: "BLSExtra", - extra: &IstanbulExtra{ - Validators: validators.NewBLSValidatorSet( - blsValidator1, - ), - ProposerSeal: testProposerSeal, - CommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0x8}), - Signature: []byte{0x1}, - }, - ParentCommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0x9}), - Signature: []byte{0x2}, - }, - }, - targetExtra: &IstanbulExtra{ - ParentCommittedSeals: &AggregatedSeal{}, - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - bytesData := test.extra.MarshalRLPTo(nil) - - assert.NoError(t, test.targetExtra.unmarshalRLPForParentCS(bytesData)) - - // make sure all data is recovered - assert.Equal( - t, - test.extra.ParentCommittedSeals, - test.targetExtra.ParentCommittedSeals, - ) - }) - } -} - -func Test_putIbftExtra(t *testing.T) { - tests := []struct { - name string - header *types.Header - extra *IstanbulExtra - }{ - { - name: "ECDSAExtra", - header: &types.Header{ - ExtraData: []byte{}, - }, - extra: &IstanbulExtra{ - Validators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - ProposerSeal: testProposerSeal, - CommittedSeals: &SerializedSeal{ - []byte{0x1}, - []byte{0x2}, - }, - ParentCommittedSeals: &SerializedSeal{ - []byte{0x3}, - []byte{0x4}, - }, - }, - }, - { - name: "BLSExtra", - header: &types.Header{ - ExtraData: make([]byte, IstanbulExtraVanity+10), - }, - extra: &IstanbulExtra{ - Validators: validators.NewBLSValidatorSet( - blsValidator1, - ), - ProposerSeal: testProposerSeal, - CommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0x8}), - Signature: []byte{0x1}, - }, - ParentCommittedSeals: &AggregatedSeal{ - Bitmap: new(big.Int).SetBytes([]byte{0x9}), - Signature: []byte{0x2}, - }, - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - putIbftExtra(test.header, test.extra) - - expectedExtraHeader := make([]byte, IstanbulExtraVanity) - expectedExtraBody := test.extra.MarshalRLPTo(nil) - expectedExtra := append(expectedExtraHeader, expectedExtraBody...) 
//nolint:makezero - - assert.Equal( - t, - expectedExtra, - test.header.ExtraData, - ) - }) - } -} diff --git a/consensus/ibft/signer/helper.go b/consensus/ibft/signer/helper.go deleted file mode 100644 index 4b167e3a17..0000000000 --- a/consensus/ibft/signer/helper.go +++ /dev/null @@ -1,142 +0,0 @@ -package signer - -import ( - "crypto/ecdsa" - "fmt" - "testing" - - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/helper/keccak" - "github.com/0xPolygon/polygon-edge/secrets" - "github.com/0xPolygon/polygon-edge/secrets/helper" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/coinbase/kryptology/pkg/signatures/bls/bls_sig" - "github.com/umbracle/fastrlp" -) - -const ( - // legacyCommitCode is the value that is contained in - // legacy committed seals, so it needs to be preserved in order - // for new clients to read old committed seals - legacyCommitCode = 2 -) - -// wrapCommitHash calculates digest for CommittedSeal -func wrapCommitHash(data []byte) []byte { - return crypto.Keccak256(data, []byte{byte(legacyCommitCode)}) -} - -// getOrCreateECDSAKey loads ECDSA key or creates a new key -func getOrCreateECDSAKey(manager secrets.SecretsManager) (*ecdsa.PrivateKey, error) { - if !manager.HasSecret(secrets.ValidatorKey) { - if _, err := helper.InitECDSAValidatorKey(manager); err != nil { - return nil, err - } - } - - keyBytes, err := manager.GetSecret(secrets.ValidatorKey) - if err != nil { - return nil, err - } - - return crypto.BytesToECDSAPrivateKey(keyBytes) -} - -// getOrCreateECDSAKey loads BLS key or creates a new key -func getOrCreateBLSKey(manager secrets.SecretsManager) (*bls_sig.SecretKey, error) { - if !manager.HasSecret(secrets.ValidatorBLSKey) { - if _, err := helper.InitBLSValidatorKey(manager); err != nil { - return nil, err - } - } - - keyBytes, err := manager.GetSecret(secrets.ValidatorBLSKey) - if err != nil { - return nil, err - } - - return crypto.BytesToBLSSecretKey(keyBytes) -} - -// calculateHeaderHash is hash calculation of header for IBFT -func calculateHeaderHash(h *types.Header) types.Hash { - arena := fastrlp.DefaultArenaPool.Get() - defer fastrlp.DefaultArenaPool.Put(arena) - - vv := arena.NewArray() - vv.Set(arena.NewBytes(h.ParentHash.Bytes())) - vv.Set(arena.NewBytes(h.Sha3Uncles.Bytes())) - vv.Set(arena.NewCopyBytes(h.Miner)) - vv.Set(arena.NewBytes(h.StateRoot.Bytes())) - vv.Set(arena.NewBytes(h.TxRoot.Bytes())) - vv.Set(arena.NewBytes(h.ReceiptsRoot.Bytes())) - vv.Set(arena.NewBytes(h.LogsBloom[:])) - vv.Set(arena.NewUint(h.Difficulty)) - vv.Set(arena.NewUint(h.Number)) - vv.Set(arena.NewUint(h.GasLimit)) - vv.Set(arena.NewUint(h.GasUsed)) - vv.Set(arena.NewUint(h.Timestamp)) - vv.Set(arena.NewCopyBytes(h.ExtraData)) - - buf := keccak.Keccak256Rlp(nil, vv) - - return types.BytesToHash(buf) -} - -// ecrecover recovers signer address from the given digest and signature -func ecrecover(sig, msg []byte) (types.Address, error) { - pub, err := crypto.RecoverPubkey(sig, msg) - if err != nil { - return types.Address{}, err - } - - return crypto.PubKeyToAddress(pub), nil -} - -// NewKeyManagerFromType creates KeyManager based on the given type -func NewKeyManagerFromType( - secretManager secrets.SecretsManager, - validatorType validators.ValidatorType, -) (KeyManager, error) { - switch validatorType { - case validators.ECDSAValidatorType: - return NewECDSAKeyManager(secretManager) - case validators.BLSValidatorType: - return NewBLSKeyManager(secretManager) - default: - return 
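// Editor's note: wrapCommitHash in helper.go below is simply Keccak256(data || 0x02),
// 0x02 being legacyCommitCode, and Test_wrapCommitHash pins its output. A minimal sketch
// reproducing that digest with the repository's own crypto helper; the expected hex in
// the comment is the value asserted by the deleted test.
package main

import (
	"fmt"

	"github.com/0xPolygon/polygon-edge/crypto"
)

func main() {
	input := crypto.Keccak256([]byte{0x1})

	// wrapCommitHash(data) == Keccak256(data, legacyCommitCode)
	digest := crypto.Keccak256(input, []byte{0x2})

	fmt.Printf("0x%x\n", digest)
	// expected: 0x8a319084d2e52be9c9192645aa98900413ee2a7c93c2916ef99d62218207d1da
}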
nil, fmt.Errorf("unsupported validator type: %s", validatorType) - } -} - -// verifyIBFTExtraSize checks whether header.ExtraData has enough size for IBFT Extra -func verifyIBFTExtraSize(header *types.Header) error { - if len(header.ExtraData) < IstanbulExtraVanity { - return fmt.Errorf( - "wrong extra size, expected greater than or equal to %d but actual %d", - IstanbulExtraVanity, - len(header.ExtraData), - ) - } - - return nil -} - -// UseIstanbulHeaderHashInTest is a helper function for the test -func UseIstanbulHeaderHashInTest(t *testing.T, signer Signer) { - t.Helper() - - originalHashCalc := types.HeaderHash - types.HeaderHash = func(h *types.Header) types.Hash { - hash, err := signer.CalculateHeaderHash(h) - if err != nil { - return types.ZeroHash - } - - return hash - } - - t.Cleanup(func() { - types.HeaderHash = originalHashCalc - }) -} diff --git a/consensus/ibft/signer/helper_test.go b/consensus/ibft/signer/helper_test.go deleted file mode 100644 index 4386f63ee7..0000000000 --- a/consensus/ibft/signer/helper_test.go +++ /dev/null @@ -1,456 +0,0 @@ -package signer - -import ( - "crypto/ecdsa" - "errors" - "fmt" - "testing" - - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/helper/hex" - "github.com/0xPolygon/polygon-edge/secrets" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/coinbase/kryptology/pkg/signatures/bls/bls_sig" - "github.com/stretchr/testify/assert" -) - -var ( - testHeader = &types.Header{ - ParentHash: types.BytesToHash(crypto.Keccak256([]byte{0x1})), - Sha3Uncles: types.BytesToHash(crypto.Keccak256([]byte{0x2})), - Miner: crypto.Keccak256([]byte{0x3}), - StateRoot: types.BytesToHash(crypto.Keccak256([]byte{0x4})), - TxRoot: types.BytesToHash(crypto.Keccak256([]byte{0x5})), - ReceiptsRoot: types.BytesToHash(crypto.Keccak256([]byte{0x6})), - LogsBloom: types.Bloom{0x7}, - Difficulty: 8, - Number: 9, - GasLimit: 10, - GasUsed: 11, - Timestamp: 12, - ExtraData: crypto.Keccak256([]byte{0x13}), - } - - testHeaderHashHex = "0xd6701b3d601fd78734ce2f2542dc3d9cc1c75b1ed980c61c8d69cd2cb638f89c" -) - -func newTestECDSAKey(t *testing.T) (*ecdsa.PrivateKey, []byte) { - t.Helper() - - testKey, testKeyEncoded, err := crypto.GenerateAndEncodeECDSAPrivateKey() - assert.NoError(t, err, "failed to initialize ECDSA key") - - return testKey, testKeyEncoded -} - -func newTestBLSKey(t *testing.T) (*bls_sig.SecretKey, []byte) { - t.Helper() - - testKey, testKeyEncoded, err := crypto.GenerateAndEncodeBLSSecretKey() - - assert.NoError(t, err, "failed to initialize test ECDSA key") - - return testKey, testKeyEncoded -} - -// Make sure the target function always returns the same result -func Test_wrapCommitHash(t *testing.T) { - t.Parallel() - - var ( - input = crypto.Keccak256([]byte{0x1}) - expectedOutputHex = "0x8a319084d2e52be9c9192645aa98900413ee2a7c93c2916ef99d62218207d1da" - ) - - expectedOutput, err := hex.DecodeHex(expectedOutputHex) - if err != nil { - t.Fatalf("failed to parse expected output: %s, %v", expectedOutputHex, err) - } - - output := wrapCommitHash(input) - - assert.Equal(t, expectedOutput, output) -} - -//nolint -func Test_getOrCreateECDSAKey(t *testing.T) { - t.Parallel() - - testKey, testKeyEncoded := newTestECDSAKey(t) - - testSecretName := func(name string) { - t.Helper() - - // make sure that the correct key is given - assert.Equal(t, secrets.ValidatorKey, name) - } - - //lint:ignore dupl - tests := []struct { - name string - mockSecretManager *MockSecretManager - 
expectedResult *ecdsa.PrivateKey - expectedErr error - }{ - { - name: "should load ECDSA key from secret manager if the key exists", - mockSecretManager: &MockSecretManager{ - HasSecretFn: func(name string) bool { - testSecretName(name) - - return true - }, - GetSecretFn: func(name string) ([]byte, error) { - testSecretName(name) - - return testKeyEncoded, nil - }, - }, - expectedResult: testKey, - expectedErr: nil, - }, - { - name: "should create new ECDSA key if the key doesn't exist", - mockSecretManager: &MockSecretManager{ - HasSecretFn: func(name string) bool { - testSecretName(name) - - return false - }, - SetSecretFn: func(name string, key []byte) error { - testSecretName(name) - - assert.NotEqual(t, testKeyEncoded, key) - - return nil - }, - GetSecretFn: func(name string) ([]byte, error) { - testSecretName(name) - - return testKeyEncoded, nil - }, - }, - expectedResult: testKey, - expectedErr: nil, - }, - { - name: "should return error if secret manager returns error", - mockSecretManager: &MockSecretManager{ - HasSecretFn: func(name string) bool { - testSecretName(name) - - return true - }, - GetSecretFn: func(name string) ([]byte, error) { - testSecretName(name) - - return nil, errTest - }, - }, - expectedResult: nil, - expectedErr: errTest, - }, - { - name: "should return error if the key manager fails to generate new ECDSA key", - mockSecretManager: &MockSecretManager{ - HasSecretFn: func(name string) bool { - testSecretName(name) - - return false - }, - SetSecretFn: func(name string, key []byte) error { - testSecretName(name) - - return errTest - }, - }, - expectedResult: nil, - expectedErr: errTest, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := getOrCreateECDSAKey(test.mockSecretManager) - - assert.Equal(t, test.expectedResult, res) - assert.ErrorIs(t, test.expectedErr, err) - }) - } -} - -//nolint -func Test_getOrCreateBLSKey(t *testing.T) { - t.Parallel() - - testKey, testKeyEncoded := newTestBLSKey(t) - - testSecretName := func(name string) { - t.Helper() - - // make sure that the correct key is given - assert.Equal(t, secrets.ValidatorBLSKey, name) - } - - tests := []struct { - name string - mockSecretManager *MockSecretManager - expectedResult *bls_sig.SecretKey - expectedErr error - }{ - { - name: "should load BLS key from secret manager if the key exists", - mockSecretManager: &MockSecretManager{ - HasSecretFn: func(name string) bool { - testSecretName(name) - - return true - }, - GetSecretFn: func(name string) ([]byte, error) { - testSecretName(name) - - return testKeyEncoded, nil - }, - }, - expectedResult: testKey, - expectedErr: nil, - }, - { - name: "should create new BLS key if the key doesn't exist", - mockSecretManager: &MockSecretManager{ - HasSecretFn: func(name string) bool { - testSecretName(name) - - return false - }, - SetSecretFn: func(name string, key []byte) error { - testSecretName(name) - - assert.NotEqual(t, testKeyEncoded, key) - - return nil - }, - GetSecretFn: func(name string) ([]byte, error) { - testSecretName(name) - - return testKeyEncoded, nil - }, - }, - expectedResult: testKey, - expectedErr: nil, - }, - { - name: "should return error if secret manager returns error", - mockSecretManager: &MockSecretManager{ - HasSecretFn: func(name string) bool { - testSecretName(name) - - return true - }, - GetSecretFn: func(name string) ([]byte, error) { - testSecretName(name) - - return nil, errTest - }, - }, - expectedResult: nil, - expectedErr: errTest, - }, - { - 
name: "should return error if the key manager fails to generate new BLS key", - mockSecretManager: &MockSecretManager{ - HasSecretFn: func(name string) bool { - testSecretName(name) - - return false - }, - SetSecretFn: func(name string, key []byte) error { - testSecretName(name) - - return errTest - }, - }, - expectedResult: nil, - expectedErr: errTest, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := getOrCreateBLSKey(test.mockSecretManager) - - assert.Equal(t, test.expectedResult, res) - assert.ErrorIs(t, test.expectedErr, err) - }) - } -} - -// make sure that header hash calculation returns the same hash -func Test_calculateHeaderHash(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - types.StringToHash(testHeaderHashHex), - calculateHeaderHash(testHeader), - ) -} - -func Test_ecrecover(t *testing.T) { - t.Parallel() - - testKey, _ := newTestECDSAKey(t) - signerAddress := crypto.PubKeyToAddress(&testKey.PublicKey) - - rawMessage := crypto.Keccak256([]byte{0x1}) - - signature, err := crypto.Sign( - testKey, - rawMessage, - ) - assert.NoError(t, err) - - recoveredAddress, err := ecrecover(signature, rawMessage) - assert.NoError(t, err) - - assert.Equal( - t, - signerAddress, - recoveredAddress, - ) -} - -func TestNewKeyManagerFromType(t *testing.T) { - t.Parallel() - - testECDSAKey, testECDSAKeyEncoded := newTestECDSAKey(t) - testBLSKey, testBLSKeyEncoded := newTestBLSKey(t) - - tests := []struct { - name string - validatorType validators.ValidatorType - mockSecretManager *MockSecretManager - expectedRes KeyManager - expectedErr error - }{ - { - name: "ECDSAValidatorType", - validatorType: validators.ECDSAValidatorType, - mockSecretManager: &MockSecretManager{ - HasSecretFn: func(name string) bool { - return true - }, - GetSecretFn: func(name string) ([]byte, error) { - return testECDSAKeyEncoded, nil - }, - }, - expectedRes: NewECDSAKeyManagerFromKey(testECDSAKey), - expectedErr: nil, - }, - { - name: "BLSValidatorType", - validatorType: validators.BLSValidatorType, - mockSecretManager: &MockSecretManager{ - HasSecretFn: func(name string) bool { - return true - }, - GetSecretFn: func(name string) ([]byte, error) { - switch name { - case secrets.ValidatorKey: - return testECDSAKeyEncoded, nil - case secrets.ValidatorBLSKey: - return testBLSKeyEncoded, nil - } - - return nil, fmt.Errorf("unexpected key name: %s", name) - }, - }, - expectedRes: NewBLSKeyManagerFromKeys(testECDSAKey, testBLSKey), - }, - { - name: "unsupported type", - validatorType: validators.ValidatorType("fake"), - expectedRes: nil, - expectedErr: errors.New("unsupported validator type: fake"), - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := NewKeyManagerFromType(test.mockSecretManager, test.validatorType) - - assert.Equal(t, test.expectedRes, res) - - if test.expectedErr == nil { - assert.NoError(t, err) - } else { - assert.Error(t, err) - assert.ErrorContains(t, err, test.expectedErr.Error()) - } - }) - } -} - -func Test_verifyIBFTExtraSize(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - extraData []byte - isError bool - }{ - { - name: "should return error if ExtraData size is 0", - extraData: make([]byte, 0), - isError: true, - }, - { - name: "should return error if ExtraData size is less than IstanbulExtraVanity", - extraData: make([]byte, IstanbulExtraVanity-1), - isError: true, - }, - { - name: "should return nil if ExtraData 
size matches with IstanbulExtraVanity", - extraData: make([]byte, IstanbulExtraVanity), - isError: false, - }, - { - name: "should return nil if ExtraData size is greater than IstanbulExtraVanity", - extraData: make([]byte, IstanbulExtraVanity+1), - isError: false, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - header := &types.Header{ - ExtraData: test.extraData, - } - - err := verifyIBFTExtraSize(header) - - if test.isError { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - }) - } -} diff --git a/consensus/ibft/signer/key_manager.go b/consensus/ibft/signer/key_manager.go deleted file mode 100644 index f5937aca11..0000000000 --- a/consensus/ibft/signer/key_manager.go +++ /dev/null @@ -1,32 +0,0 @@ -package signer - -import ( - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" -) - -// KeyManager is a delegated module that signs data -type KeyManager interface { - // Type returns Validator type signer supports - Type() validators.ValidatorType - // Address returns an address of signer - Address() types.Address - // NewEmptyValidators creates empty validator collection the Signer expects - NewEmptyValidators() validators.Validators - // NewEmptyCommittedSeals creates empty committed seals the Signer expects - NewEmptyCommittedSeals() Seals - // SignProposerSeal creates a signature for ProposerSeal - SignProposerSeal(hash []byte) ([]byte, error) - // SignCommittedSeal creates a signature for committed seal - SignCommittedSeal(hash []byte) ([]byte, error) - // VerifyCommittedSeal verifies a committed seal - VerifyCommittedSeal(vals validators.Validators, signer types.Address, sig, hash []byte) error - // GenerateCommittedSeals creates CommittedSeals from committed seals - GenerateCommittedSeals(sealsByValidator map[types.Address][]byte, vals validators.Validators) (Seals, error) - // VerifyCommittedSeals verifies CommittedSeals - VerifyCommittedSeals(seals Seals, hash []byte, vals validators.Validators) (int, error) - // SignIBFTMessage signs for arbitrary bytes message - SignIBFTMessage(msg []byte) ([]byte, error) - // Ecrecover recovers address from signature and message - Ecrecover(sig []byte, msg []byte) (types.Address, error) -} diff --git a/consensus/ibft/signer/mock_test.go b/consensus/ibft/signer/mock_test.go deleted file mode 100644 index e9866c4c3d..0000000000 --- a/consensus/ibft/signer/mock_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package signer - -import ( - "github.com/0xPolygon/polygon-edge/secrets" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" -) - -type MockSecretManager struct { - // skip implementing the methods not to be used - secrets.SecretsManager - - HasSecretFn func(string) bool - GetSecretFn func(string) ([]byte, error) - SetSecretFn func(string, []byte) error -} - -func (m *MockSecretManager) HasSecret(name string) bool { - return m.HasSecretFn(name) -} - -func (m *MockSecretManager) GetSecret(name string) ([]byte, error) { - return m.GetSecretFn(name) -} - -func (m *MockSecretManager) SetSecret(name string, key []byte) error { - return m.SetSecretFn(name, key) -} - -type MockKeyManager struct { - TypeFunc func() validators.ValidatorType - AddressFunc func() types.Address - NewEmptyValidatorsFunc func() validators.Validators - NewEmptyCommittedSealsFunc func() Seals - SignProposerSealFunc func([]byte) ([]byte, error) - SignCommittedSealFunc func([]byte) ([]byte, error) - VerifyCommittedSealFunc 
func(validators.Validators, types.Address, []byte, []byte) error - GenerateCommittedSealsFunc func(map[types.Address][]byte, validators.Validators) (Seals, error) - VerifyCommittedSealsFunc func(Seals, []byte, validators.Validators) (int, error) - SignIBFTMessageFunc func([]byte) ([]byte, error) - EcrecoverFunc func([]byte, []byte) (types.Address, error) -} - -func (m *MockKeyManager) Type() validators.ValidatorType { - return m.TypeFunc() -} - -func (m *MockKeyManager) Address() types.Address { - return m.AddressFunc() -} - -func (m *MockKeyManager) NewEmptyValidators() validators.Validators { - return m.NewEmptyValidatorsFunc() -} - -func (m *MockKeyManager) NewEmptyCommittedSeals() Seals { - return m.NewEmptyCommittedSealsFunc() -} - -func (m *MockKeyManager) SignProposerSeal(hash []byte) ([]byte, error) { - return m.SignProposerSealFunc(hash) -} - -func (m *MockKeyManager) SignCommittedSeal(hash []byte) ([]byte, error) { - return m.SignCommittedSealFunc(hash) -} - -func (m *MockKeyManager) VerifyCommittedSeal(vals validators.Validators, signer types.Address, sig, hash []byte) error { - return m.VerifyCommittedSealFunc(vals, signer, sig, hash) -} - -func (m *MockKeyManager) GenerateCommittedSeals( - sealsByValidator map[types.Address][]byte, - vals validators.Validators, -) (Seals, error) { - return m.GenerateCommittedSealsFunc(sealsByValidator, vals) -} - -func (m *MockKeyManager) VerifyCommittedSeals(seals Seals, hash []byte, vals validators.Validators) (int, error) { - return m.VerifyCommittedSealsFunc(seals, hash, vals) -} - -func (m *MockKeyManager) SignIBFTMessage(msg []byte) ([]byte, error) { - return m.SignIBFTMessageFunc(msg) -} - -func (m *MockKeyManager) Ecrecover(sig []byte, msg []byte) (types.Address, error) { - return m.EcrecoverFunc(sig, msg) -} diff --git a/consensus/ibft/signer/signer.go b/consensus/ibft/signer/signer.go deleted file mode 100644 index c26b3b048e..0000000000 --- a/consensus/ibft/signer/signer.go +++ /dev/null @@ -1,384 +0,0 @@ -package signer - -import ( - "errors" - - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" -) - -var ( - ErrEmptyCommittedSeals = errors.New("empty committed seals") - ErrEmptyParentCommittedSeals = errors.New("empty parent committed seals") - ErrInvalidCommittedSealLength = errors.New("invalid committed seal length") - ErrInvalidCommittedSealType = errors.New("invalid committed seal type") - ErrRepeatedCommittedSeal = errors.New("repeated seal in committed seals") - ErrNonValidatorCommittedSeal = errors.New("found committed seal signed by non validator") - ErrNotEnoughCommittedSeals = errors.New("not enough seals to seal block") - ErrSignerMismatch = errors.New("mismatch address between signer and message sender") - ErrValidatorNotFound = errors.New("validator not found in validator set") - ErrInvalidValidators = errors.New("invalid validators type") - ErrInvalidValidator = errors.New("invalid validator type") - ErrInvalidSignature = errors.New("invalid signature") -) - -// Signer is responsible for signing for blocks and messages in IBFT -type Signer interface { - Type() validators.ValidatorType - Address() types.Address - - // IBFT Extra - InitIBFTExtra(*types.Header, validators.Validators, Seals) - GetIBFTExtra(*types.Header) (*IstanbulExtra, error) - GetValidators(*types.Header) (validators.Validators, error) - - // ProposerSeal - WriteProposerSeal(*types.Header) (*types.Header, error) - EcrecoverFromHeader(*types.Header) (types.Address, 
error) - - // CommittedSeal - CreateCommittedSeal([]byte) ([]byte, error) - VerifyCommittedSeal(validators.Validators, types.Address, []byte, []byte) error - - // CommittedSeals - WriteCommittedSeals( - header *types.Header, - roundNumber uint64, - sealMap map[types.Address][]byte, - ) (*types.Header, error) - VerifyCommittedSeals( - hash types.Hash, - committedSeals Seals, - validators validators.Validators, - quorumSize int, - ) error - - // ParentCommittedSeals - VerifyParentCommittedSeals( - parentHash types.Hash, - header *types.Header, - parentValidators validators.Validators, - quorum int, - mustExist bool, - ) error - - // IBFTMessage - SignIBFTMessage([]byte) ([]byte, error) - EcrecoverFromIBFTMessage([]byte, []byte) (types.Address, error) - - // Hash of Header - CalculateHeaderHash(*types.Header) (types.Hash, error) - FilterHeaderForHash(*types.Header) (*types.Header, error) -} - -// SignerImpl is an implementation that meets Signer -type SignerImpl struct { - keyManager KeyManager - parentKeyManager KeyManager -} - -// NewSigner is a constructor of SignerImpl -func NewSigner( - keyManager KeyManager, - parentKeyManager KeyManager, -) *SignerImpl { - return &SignerImpl{ - keyManager: keyManager, - parentKeyManager: parentKeyManager, - } -} - -// Type returns that validator type the signer expects -func (s *SignerImpl) Type() validators.ValidatorType { - return s.keyManager.Type() -} - -// Address returns the signer's address -func (s *SignerImpl) Address() types.Address { - return s.keyManager.Address() -} - -// InitIBFTExtra initializes the extra field in the given header -// based on given validators and parent committed seals -func (s *SignerImpl) InitIBFTExtra( - header *types.Header, - validators validators.Validators, - parentCommittedSeals Seals, -) { - s.initIbftExtra( - header, - validators, - parentCommittedSeals, - ) -} - -// GetIBFTExtra extracts IBFT Extra from the given header -func (s *SignerImpl) GetIBFTExtra(header *types.Header) (*IstanbulExtra, error) { - if err := verifyIBFTExtraSize(header); err != nil { - return nil, err - } - - data := header.ExtraData[IstanbulExtraVanity:] - extra := &IstanbulExtra{ - Validators: s.keyManager.NewEmptyValidators(), - ProposerSeal: []byte{}, - CommittedSeals: s.keyManager.NewEmptyCommittedSeals(), - } - - if header.Number > 1 { - extra.ParentCommittedSeals = s.parentKeyManager.NewEmptyCommittedSeals() - } - - if err := extra.UnmarshalRLP(data); err != nil { - return nil, err - } - - return extra, nil -} - -// WriteProposerSeal signs and set ProposerSeal into IBFT Extra of the header -func (s *SignerImpl) WriteProposerSeal(header *types.Header) (*types.Header, error) { - hash, err := s.CalculateHeaderHash(header) - if err != nil { - return nil, err - } - - seal, err := s.keyManager.SignProposerSeal( - crypto.Keccak256(hash.Bytes()), - ) - if err != nil { - return nil, err - } - - header.ExtraData = packProposerSealIntoExtra( - header.ExtraData, - seal, - ) - - return header, nil -} - -// EcrecoverFromIBFTMessage recovers signer address from given signature and header hash -func (s *SignerImpl) EcrecoverFromHeader(header *types.Header) (types.Address, error) { - extra, err := s.GetIBFTExtra(header) - if err != nil { - return types.Address{}, err - } - - return s.keyManager.Ecrecover(extra.ProposerSeal, crypto.Keccak256(header.Hash.Bytes())) -} - -// CreateCommittedSeal returns CommittedSeal from given hash -func (s *SignerImpl) CreateCommittedSeal(hash []byte) ([]byte, error) { - return s.keyManager.SignCommittedSeal( - // Of 
course, this keccaking of an extended array is not according to the IBFT 2.0 spec, - // but almost nothing in this legacy signing package is. This is kept - // in order to preserve the running chains that used these - // old (and very, very incorrect) signing schemes - crypto.Keccak256( - wrapCommitHash(hash[:]), - ), - ) -} - -// CreateCommittedSeal verifies a CommittedSeal -func (s *SignerImpl) VerifyCommittedSeal( - validators validators.Validators, - signer types.Address, - signature, hash []byte, -) error { - return s.keyManager.VerifyCommittedSeal( - validators, - signer, - signature, - crypto.Keccak256( - wrapCommitHash(hash[:]), - ), - ) -} - -// WriteCommittedSeals builds and writes CommittedSeals into IBFT Extra of the header -func (s *SignerImpl) WriteCommittedSeals( - header *types.Header, - roundNumber uint64, - sealMap map[types.Address][]byte, -) (*types.Header, error) { - if len(sealMap) == 0 { - return nil, ErrEmptyCommittedSeals - } - - validators, err := s.GetValidators(header) - if err != nil { - return nil, err - } - - committedSeal, err := s.keyManager.GenerateCommittedSeals(sealMap, validators) - if err != nil { - return nil, err - } - - header.ExtraData = packCommittedSealsAndRoundNumberIntoExtra( - header.ExtraData, - committedSeal, - &roundNumber, - ) - - return header, nil -} - -// VerifyCommittedSeals verifies CommittedSeals in IBFT Extra of the header -func (s *SignerImpl) VerifyCommittedSeals( - hash types.Hash, - committedSeals Seals, - validators validators.Validators, - quorumSize int, -) error { - rawMsg := crypto.Keccak256( - wrapCommitHash(hash.Bytes()), - ) - - numSeals, err := s.keyManager.VerifyCommittedSeals( - committedSeals, - rawMsg, - validators, - ) - if err != nil { - return err - } - - if numSeals < quorumSize { - return ErrNotEnoughCommittedSeals - } - - return nil -} - -// VerifyParentCommittedSeals verifies ParentCommittedSeals in IBFT Extra of the header -func (s *SignerImpl) VerifyParentCommittedSeals( - parentHash types.Hash, - header *types.Header, - parentValidators validators.Validators, - quorum int, - mustExist bool, -) error { - parentCommittedSeals, err := s.GetParentCommittedSeals(header) - if err != nil { - return err - } - - if parentCommittedSeals == nil || parentCommittedSeals.Num() == 0 { - // Throw error for the proposed header - if mustExist { - return ErrEmptyParentCommittedSeals - } - - // Don't throw if the flag is unset for backward compatibility - // (for the past headers) - return nil - } - - rawMsg := crypto.Keccak256( - wrapCommitHash(parentHash[:]), - ) - - numSeals, err := s.keyManager.VerifyCommittedSeals( - parentCommittedSeals, - rawMsg, - parentValidators, - ) - if err != nil { - return err - } - - if numSeals < quorum { - return ErrNotEnoughCommittedSeals - } - - return nil -} - -// SignIBFTMessage signs arbitrary message -func (s *SignerImpl) SignIBFTMessage(msg []byte) ([]byte, error) { - return s.keyManager.SignIBFTMessage(crypto.Keccak256(msg)) -} - -// EcrecoverFromIBFTMessage recovers signer address from given signature and digest -func (s *SignerImpl) EcrecoverFromIBFTMessage(signature, digest []byte) (types.Address, error) { - return s.keyManager.Ecrecover(signature, crypto.Keccak256(digest)) -} - -// InitIBFTExtra initializes the extra field -func (s *SignerImpl) initIbftExtra( - header *types.Header, - validators validators.Validators, - parentCommittedSeal Seals, -) { - putIbftExtra(header, &IstanbulExtra{ - Validators: validators, - ProposerSeal: []byte{}, - CommittedSeals: 
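The committed-seal verification removed above reduces to two steps: the key manager reports how many seals verify against the keccak of the wrapped commit hash, and the signer compares that count to the quorum size (with a backward-compatibility carve-out when parent seals are absent). A minimal standalone sketch of just that comparison, using hypothetical error values in place of the package's ErrNotEnoughCommittedSeals / ErrEmptyParentCommittedSeals:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the sentinel errors referenced in the diff.
var (
	errNotEnoughCommittedSeals   = errors.New("not enough committed seals")
	errEmptyParentCommittedSeals = errors.New("empty parent committed seals")
)

// checkSealQuorum mirrors the shape of the removed VerifyCommittedSeals check:
// the caller already knows how many seals verified and only compares that
// count against the quorum size.
func checkSealQuorum(numValidSeals, quorumSize int) error {
	if numValidSeals < quorumSize {
		return errNotEnoughCommittedSeals
	}

	return nil
}

// checkParentSeals adds the rule from VerifyParentCommittedSeals: missing
// parent seals are only an error for a freshly proposed header (mustExist),
// never for historical headers.
func checkParentSeals(numParentSeals, quorum int, mustExist bool) error {
	if numParentSeals == 0 {
		if mustExist {
			return errEmptyParentCommittedSeals
		}

		return nil // tolerated for past headers
	}

	return checkSealQuorum(numParentSeals, quorum)
}

func main() {
	fmt.Println(checkSealQuorum(3, 5))         // not enough committed seals
	fmt.Println(checkParentSeals(0, 5, false)) // <nil>
}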
s.keyManager.NewEmptyCommittedSeals(), - ParentCommittedSeals: parentCommittedSeal, - }) -} - -// CalculateHeaderHash calculates header hash for IBFT Extra -func (s *SignerImpl) CalculateHeaderHash(header *types.Header) (types.Hash, error) { - filteredHeader, err := s.FilterHeaderForHash(header) - if err != nil { - return types.ZeroHash, err - } - - return calculateHeaderHash(filteredHeader), nil -} - -func (s *SignerImpl) GetValidators(header *types.Header) (validators.Validators, error) { - extra, err := s.GetIBFTExtra(header) - if err != nil { - return nil, err - } - - return extra.Validators, nil -} - -// GetParentCommittedSeals extracts Parent Committed Seals from IBFT Extra in Header -func (s *SignerImpl) GetParentCommittedSeals(header *types.Header) (Seals, error) { - if err := verifyIBFTExtraSize(header); err != nil { - return nil, err - } - - data := header.ExtraData[IstanbulExtraVanity:] - extra := &IstanbulExtra{ - ParentCommittedSeals: s.keyManager.NewEmptyCommittedSeals(), - } - - if err := extra.unmarshalRLPForParentCS(data); err != nil { - return nil, err - } - - return extra.ParentCommittedSeals, nil -} - -// filterHeaderForHash removes unnecessary fields from IBFT Extra of the header -// for hash calculation -func (s *SignerImpl) FilterHeaderForHash(header *types.Header) (*types.Header, error) { - clone := header.Copy() - - extra, err := s.GetIBFTExtra(header) - if err != nil { - return nil, err - } - - parentCommittedSeals := extra.ParentCommittedSeals - if parentCommittedSeals != nil && parentCommittedSeals.Num() == 0 { - // avoid to set ParentCommittedSeals in extra for hash calculation - // in case of empty ParentCommittedSeals for backward compatibility - parentCommittedSeals = nil - } - - // This will effectively remove the Seal and CommittedSeals from the IBFT Extra of header, - // while keeping proposer vanity, validator set, and ParentCommittedSeals - s.initIbftExtra(clone, extra.Validators, parentCommittedSeals) - - return clone, nil -} diff --git a/consensus/ibft/signer/signer_test.go b/consensus/ibft/signer/signer_test.go deleted file mode 100644 index 36d89bba3d..0000000000 --- a/consensus/ibft/signer/signer_test.go +++ /dev/null @@ -1,1269 +0,0 @@ -package signer - -import ( - "errors" - "fmt" - "math/big" - "testing" - - "github.com/0xPolygon/polygon-edge/crypto" - testHelper "github.com/0xPolygon/polygon-edge/helper/tests" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/stretchr/testify/assert" -) - -var ( - errTest = errors.New("test err") - - testAddr1 = types.StringToAddress("1") - testAddr2 = types.StringToAddress("1") - - testBLSPubKey1 = newTestBLSKeyBytes() - testBLSPubKey2 = newTestBLSKeyBytes() - - ecdsaValidator1 = validators.NewECDSAValidator( - testAddr1, - ) - ecdsaValidator2 = validators.NewECDSAValidator( - testAddr2, - ) - - blsValidator1 = validators.NewBLSValidator(testAddr1, testBLSPubKey1) - blsValidator2 = validators.NewBLSValidator(testAddr2, testBLSPubKey2) - - ecdsaValidators = validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ) - blsValidators = validators.NewBLSValidatorSet( - blsValidator1, - blsValidator2, - ) - - testProposerSeal = crypto.Keccak256([]byte{0x1}) - testSerializedSeals1 = &SerializedSeal{[]byte{0x1}, []byte{0x2}} - testSerializedSeals2 = &SerializedSeal{[]byte{0x3}, []byte{0x4}} - testAggregatedSeals1 = newTestAggregatedSeals([]int{0, 1}, []byte{0x12}) - testAggregatedSeals2 = newTestAggregatedSeals([]int{2, 3}, []byte{0x23}) -) - 
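FilterHeaderForHash above strips the proposer seal and committed seals before hashing, because both are produced only after the hash is fixed, while the validator set and any non-empty parent committed seals stay in the preimage. A minimal sketch of that filtering rule, using a hypothetical trimmed-down struct rather than the real IstanbulExtra:

package main

import "fmt"

// minimalExtra is a hypothetical stand-in for IstanbulExtra, reduced to the
// fields that matter for the hash-filtering rule.
type minimalExtra struct {
	Validators           []string
	ProposerSeal         []byte
	CommittedSeals       [][]byte
	ParentCommittedSeals [][]byte
}

// filterForHash blanks the seal fields and, mirroring the backward-compatibility
// quirk in the removed code, drops an empty parent-seal list entirely.
func filterForHash(e minimalExtra) minimalExtra {
	filtered := minimalExtra{
		Validators:           e.Validators,
		ProposerSeal:         []byte{},
		CommittedSeals:       [][]byte{},
		ParentCommittedSeals: e.ParentCommittedSeals,
	}

	if len(filtered.ParentCommittedSeals) == 0 {
		filtered.ParentCommittedSeals = nil
	}

	return filtered
}

func main() {
	extra := minimalExtra{
		Validators:     []string{"0xA", "0xB"},
		ProposerSeal:   []byte{0x01},
		CommittedSeals: [][]byte{{0x02}},
	}

	fmt.Printf("%+v\n", filterForHash(extra))
}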
-func newTestAggregatedSeals(bitFlags []int, signature []byte) *AggregatedSeal { - bitMap := new(big.Int) - for _, idx := range bitFlags { - bitMap = bitMap.SetBit(bitMap, idx, 1) - } - - return &AggregatedSeal{ - Bitmap: bitMap, - Signature: signature, - } -} - -func newTestBLSKeyBytes() validators.BLSValidatorPublicKey { - key, err := crypto.GenerateBLSKey() - if err != nil { - return nil - } - - pubKey, err := key.GetPublicKey() - if err != nil { - return nil - } - - buf, err := pubKey.MarshalBinary() - if err != nil { - return nil - } - - return buf -} - -func newTestSingleKeyManagerSigner(km KeyManager) *SignerImpl { - return &SignerImpl{ - keyManager: km, - parentKeyManager: km, - } -} - -func getTestExtraBytes( - validators validators.Validators, - proposerSeal []byte, - committedSeals Seals, - parentCommittedSeals Seals, - roundNumber *uint64, -) []byte { - extra := &IstanbulExtra{ - Validators: validators, - ProposerSeal: proposerSeal, - CommittedSeals: committedSeals, - ParentCommittedSeals: parentCommittedSeals, - RoundNumber: roundNumber, - } - - return append( - make([]byte, IstanbulExtraVanity), - extra.MarshalRLPTo(nil)..., - ) -} - -func TestNewKeyManager(t *testing.T) { - t.Parallel() - - keyManager := &MockKeyManager{} - parentKeyManager := &MockKeyManager{} - - signer := NewSigner(keyManager, parentKeyManager) - - assert.Same( - t, - keyManager, - signer.keyManager, - ) - - assert.Same( - t, - parentKeyManager, - signer.parentKeyManager, - ) -} - -func TestSignerType(t *testing.T) { - t.Parallel() - - validatorType := validators.ECDSAValidatorType - signer := newTestSingleKeyManagerSigner( - &MockKeyManager{ - TypeFunc: func() validators.ValidatorType { - return validatorType - }, - }, - ) - - assert.Equal( - t, - validatorType, - signer.Type(), - ) -} - -func TestSignerAddress(t *testing.T) { - t.Parallel() - - addr := testAddr1 - signer := newTestSingleKeyManagerSigner( - &MockKeyManager{ - AddressFunc: func() types.Address { - return addr - }, - }, - ) - - assert.Equal( - t, - addr, - signer.Address(), - ) -} - -func TestSignerInitIBFTExtra(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - validators validators.Validators - committedSeals Seals - parentCommittedSeals Seals - }{ - { - name: "ECDSA Serialized Seals", - validators: ecdsaValidators, - committedSeals: &SerializedSeal{}, - parentCommittedSeals: testSerializedSeals1, - }, - { - name: "BLS Aggregated Seals", - validators: blsValidators, - committedSeals: &AggregatedSeal{}, - parentCommittedSeals: testAggregatedSeals1, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - header := &types.Header{} - - signer := newTestSingleKeyManagerSigner( - &MockKeyManager{ - NewEmptyCommittedSealsFunc: func() Seals { - return test.committedSeals - }, - }, - ) - - signer.InitIBFTExtra( - header, - test.validators, - test.parentCommittedSeals, - ) - - expectedExtraBytes := getTestExtraBytes( - test.validators, - []byte{}, - test.committedSeals, - test.parentCommittedSeals, - nil, - ) - - assert.Equal( - t, - expectedExtraBytes, - header.ExtraData, - ) - }) - } -} - -func TestSignerGetIBFTExtra(t *testing.T) { - tests := []struct { - name string - header *types.Header - signer *SignerImpl - expectedExtra *IstanbulExtra - expectedErr error - }{ - { - name: "should return error if the size of header.ExtraData is less than IstanbulExtraVanity", - header: &types.Header{ - ExtraData: []byte{}, - }, - signer: NewSigner(nil, nil), - expectedExtra: 
nil, - expectedErr: fmt.Errorf( - "wrong extra size, expected greater than or equal to %d but actual %d", - IstanbulExtraVanity, - 0, - ), - }, - { - name: "should return IstanbulExtra for the header at 1 (ECDSA Serialized Seal)", - header: &types.Header{ - Number: 1, - ExtraData: getTestExtraBytes( - ecdsaValidators, - testProposerSeal, - testSerializedSeals1, - nil, - nil, - ), - }, - signer: NewSigner( - &MockKeyManager{ - NewEmptyValidatorsFunc: func() validators.Validators { - return ecdsaValidators - }, - NewEmptyCommittedSealsFunc: func() Seals { - return &SerializedSeal{} - }, - }, - nil, - ), - expectedExtra: &IstanbulExtra{ - Validators: ecdsaValidators, - ProposerSeal: testProposerSeal, - CommittedSeals: testSerializedSeals1, - ParentCommittedSeals: nil, - }, - expectedErr: nil, - }, - { - name: "should return IstanbulExtra for the header at 1 (BLS Aggregated Seals)", - header: &types.Header{ - Number: 1, - ExtraData: getTestExtraBytes( - blsValidators, - testProposerSeal, - testAggregatedSeals1, - nil, - nil, - ), - }, - signer: NewSigner( - &MockKeyManager{ - NewEmptyValidatorsFunc: func() validators.Validators { - return blsValidators - }, - NewEmptyCommittedSealsFunc: func() Seals { - return &AggregatedSeal{} - }, - }, - nil, - ), - expectedExtra: &IstanbulExtra{ - Validators: blsValidators, - ProposerSeal: testProposerSeal, - CommittedSeals: testAggregatedSeals1, - ParentCommittedSeals: nil, - }, - expectedErr: nil, - }, - { - name: "should return IstanbulExtra with ParentCommittedSeals for the header at 2 (ECDSA Serialized Seal)", - header: &types.Header{ - Number: 2, - ExtraData: getTestExtraBytes( - ecdsaValidators, - testProposerSeal, - testSerializedSeals1, - testSerializedSeals2, - nil, - ), - }, - signer: NewSigner( - &MockKeyManager{ - NewEmptyValidatorsFunc: func() validators.Validators { - return ecdsaValidators - }, - NewEmptyCommittedSealsFunc: func() Seals { - return &SerializedSeal{} - }, - }, - &MockKeyManager{ - NewEmptyCommittedSealsFunc: func() Seals { - return &SerializedSeal{} - }, - }, - ), - expectedExtra: &IstanbulExtra{ - Validators: ecdsaValidators, - ProposerSeal: testProposerSeal, - CommittedSeals: testSerializedSeals1, - ParentCommittedSeals: testSerializedSeals2, - }, - expectedErr: nil, - }, - { - name: "should return IstanbulExtra with ParentCommittedSeals for the header at 2 (BLS Aggregated Seal)", - header: &types.Header{ - Number: 2, - ExtraData: getTestExtraBytes( - blsValidators, - testProposerSeal, - testAggregatedSeals1, - testAggregatedSeals2, - nil, - ), - }, - signer: NewSigner( - &MockKeyManager{ - NewEmptyValidatorsFunc: func() validators.Validators { - return blsValidators - }, - NewEmptyCommittedSealsFunc: func() Seals { - return &AggregatedSeal{} - }, - }, - &MockKeyManager{ - NewEmptyCommittedSealsFunc: func() Seals { - return &AggregatedSeal{} - }, - }, - ), - expectedExtra: &IstanbulExtra{ - Validators: blsValidators, - ProposerSeal: testProposerSeal, - CommittedSeals: testAggregatedSeals1, - ParentCommittedSeals: testAggregatedSeals2, - }, - expectedErr: nil, - }, - { - name: "should return IstanbulExtra for BLS even if parent committed seals is created by ECDSA", - header: &types.Header{ - Number: 3, - ExtraData: getTestExtraBytes( - blsValidators, - testProposerSeal, - testAggregatedSeals1, - testSerializedSeals1, - nil, - ), - }, - signer: NewSigner( - &MockKeyManager{ - NewEmptyValidatorsFunc: func() validators.Validators { - return blsValidators - }, - NewEmptyCommittedSealsFunc: func() Seals { - return 
&AggregatedSeal{} - }, - }, - &MockKeyManager{ - NewEmptyCommittedSealsFunc: func() Seals { - return &SerializedSeal{} - }, - }, - ), - expectedExtra: &IstanbulExtra{ - Validators: blsValidators, - ProposerSeal: testProposerSeal, - CommittedSeals: testAggregatedSeals1, - ParentCommittedSeals: testSerializedSeals1, - }, - expectedErr: nil, - }, - { - name: "should return IstanbulExtra for ECDSA even if parent committed seals is created by BLS", - header: &types.Header{ - Number: 3, - ExtraData: getTestExtraBytes( - ecdsaValidators, - testProposerSeal, - testSerializedSeals1, - testAggregatedSeals1, - nil, - ), - }, - signer: NewSigner( - &MockKeyManager{ - NewEmptyValidatorsFunc: func() validators.Validators { - return ecdsaValidators - }, - NewEmptyCommittedSealsFunc: func() Seals { - return &SerializedSeal{} - }, - }, - &MockKeyManager{ - NewEmptyCommittedSealsFunc: func() Seals { - return &AggregatedSeal{} - }, - }, - ), - expectedExtra: &IstanbulExtra{ - Validators: ecdsaValidators, - ProposerSeal: testProposerSeal, - CommittedSeals: testSerializedSeals1, - ParentCommittedSeals: testAggregatedSeals1, - }, - expectedErr: nil, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - extra, err := test.signer.GetIBFTExtra(test.header) - - assert.Equal( - t, - test.expectedExtra, - extra, - ) - - testHelper.AssertErrorMessageContains( - t, - test.expectedErr, - err, - ) - }) - } -} - -func TestSignerWriteProposerSeal(t *testing.T) { - tests := []struct { - name string - header *types.Header - signer *SignerImpl - expectedHeader *types.Header - expectedErr error - }{ - { - name: "should return error if GetIBFTExtra fails", - header: &types.Header{ - ExtraData: []byte{}, - }, - signer: NewSigner( - &MockKeyManager{ - NewEmptyValidatorsFunc: func() validators.Validators { - return ecdsaValidators - }, - NewEmptyCommittedSealsFunc: func() Seals { - return &SerializedSeal{} - }, - }, - nil, - ), - expectedHeader: nil, - expectedErr: fmt.Errorf( - "wrong extra size, expected greater than or equal to %d but actual %d", - IstanbulExtraVanity, - 0, - ), - }, - { - name: "should return error if SignProposerSeal returns error", - header: &types.Header{ - Number: 1, - ExtraData: getTestExtraBytes( - ecdsaValidators, - testProposerSeal, - testSerializedSeals1, - nil, - nil, - ), - }, - signer: NewSigner( - &MockKeyManager{ - NewEmptyValidatorsFunc: func() validators.Validators { - return ecdsaValidators - }, - NewEmptyCommittedSealsFunc: func() Seals { - return &SerializedSeal{} - }, - SignProposerSealFunc: func(b []byte) ([]byte, error) { - return nil, errTest - }, - }, - nil, - ), - expectedHeader: nil, - expectedErr: errTest, - }, - { - name: "should set ProposerSeal into Header", - header: &types.Header{ - Number: 1, - ExtraData: getTestExtraBytes( - ecdsaValidators, - nil, - testSerializedSeals1, - nil, - nil, - ), - }, - signer: NewSigner( - &MockKeyManager{ - NewEmptyValidatorsFunc: func() validators.Validators { - return ecdsaValidators - }, - NewEmptyCommittedSealsFunc: func() Seals { - return &SerializedSeal{} - }, - SignProposerSealFunc: func(b []byte) ([]byte, error) { - return testProposerSeal, nil - }, - }, - nil, - ), - expectedHeader: &types.Header{ - Number: 1, - ExtraData: getTestExtraBytes( - ecdsaValidators, - testProposerSeal, - testSerializedSeals1, - nil, - nil, - ), - }, - expectedErr: nil, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - UseIstanbulHeaderHashInTest(t, test.signer) - - header, err := 
test.signer.WriteProposerSeal(test.header) - - assert.Equal( - t, - test.expectedHeader, - header, - ) - - testHelper.AssertErrorMessageContains( - t, - test.expectedErr, - err, - ) - }) - } -} - -func TestSignerEcrecoverFromHeader(t *testing.T) { - tests := []struct { - name string - header *types.Header - signer *SignerImpl - expectedAddr types.Address - expectedErr error - }{ - { - name: "should return error if GetIBFTExtra fails", - header: &types.Header{ - Number: 0, - ExtraData: []byte{}, - }, - signer: NewSigner( - &MockKeyManager{ - NewEmptyValidatorsFunc: func() validators.Validators { - return ecdsaValidators - }, - NewEmptyCommittedSealsFunc: func() Seals { - return &SerializedSeal{} - }, - }, - nil, - ), - expectedAddr: types.ZeroAddress, - expectedErr: fmt.Errorf( - "wrong extra size, expected greater than or equal to %d but actual %d", - IstanbulExtraVanity, - 0, - ), - }, - { - name: "should return address", - header: &types.Header{ - Number: 1, - ExtraData: getTestExtraBytes( - ecdsaValidators, - testProposerSeal, - testSerializedSeals1, - nil, - nil, - ), - }, - signer: NewSigner( - &MockKeyManager{ - NewEmptyValidatorsFunc: func() validators.Validators { - return ecdsaValidators - }, - NewEmptyCommittedSealsFunc: func() Seals { - return &SerializedSeal{} - }, - EcrecoverFunc: func(b1, b2 []byte) (types.Address, error) { - return ecdsaValidator1.Address, nil - }, - }, - nil, - ), - expectedAddr: ecdsaValidator1.Address, - expectedErr: nil, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - UseIstanbulHeaderHashInTest(t, test.signer) - - addr, err := test.signer.EcrecoverFromHeader(test.header) - - assert.Equal( - t, - test.expectedAddr, - addr, - ) - - testHelper.AssertErrorMessageContains( - t, - test.expectedErr, - err, - ) - }) - } -} - -func TestSignerCreateCommittedSeal(t *testing.T) { - t.Parallel() - - hash := crypto.Keccak256([]byte{0x1}) - sig := crypto.Keccak256([]byte{0x2}) - - signer := newTestSingleKeyManagerSigner( - &MockKeyManager{ - SignCommittedSealFunc: func(b []byte) ([]byte, error) { - assert.Equal( - t, - crypto.Keccak256(wrapCommitHash(hash)), - b, - ) - - return sig, nil - }, - }, - ) - - res, err := signer.CreateCommittedSeal(hash) - - assert.Equal(t, sig, res) - assert.NoError(t, err) -} - -func TestVerifyCommittedSeal(t *testing.T) { - t.Parallel() - - hash := crypto.Keccak256([]byte{0x1}) - sig := crypto.Keccak256([]byte{0x2}) - - signer := newTestSingleKeyManagerSigner( - &MockKeyManager{ - VerifyCommittedSealFunc: func(vals validators.Validators, author types.Address, s, h []byte) error { - assert.Equal(t, ecdsaValidators, vals) - assert.Equal(t, testAddr1, author) - assert.Equal(t, sig, s) - assert.Equal(t, crypto.Keccak256( - wrapCommitHash(hash[:]), - ), h) - - return errTest - }, - }, - ) - - assert.Equal( - t, - errTest, - signer.VerifyCommittedSeal( - ecdsaValidators, - testAddr1, - sig, - hash, - ), - ) -} - -func TestSignerWriteCommittedSeals(t *testing.T) { - var round0 uint64 = 0 - - tests := []struct { - name string - header *types.Header - roundNumber uint64 - sealMap map[types.Address][]byte - keyManager *MockKeyManager - expectedHeader *types.Header - expectedErr error - }{ - { - name: "should return ErrEmptyCommittedSeals if sealMap is empty", - header: &types.Header{}, - roundNumber: 0, - sealMap: map[types.Address][]byte{}, - keyManager: nil, - expectedHeader: nil, - expectedErr: ErrEmptyCommittedSeals, - }, - { - name: "should return error if GetValidators fails", - header: &types.Header{}, - 
roundNumber: 0, - sealMap: map[types.Address][]byte{ - testAddr1: []byte("test"), - }, - keyManager: nil, - expectedHeader: nil, - expectedErr: fmt.Errorf( - "wrong extra size, expected greater than or equal to %d but actual %d", - IstanbulExtraVanity, - 0, - ), - }, - { - name: "should return error if GenerateCommittedSeals fails", - header: &types.Header{ - Number: 1, - ExtraData: getTestExtraBytes( - ecdsaValidators, - testProposerSeal, - &SerializedSeal{}, - nil, - nil, - ), - }, - roundNumber: 0, - sealMap: map[types.Address][]byte{ - testAddr1: []byte("test"), - }, - keyManager: &MockKeyManager{ - NewEmptyValidatorsFunc: func() validators.Validators { - return ecdsaValidators - }, - NewEmptyCommittedSealsFunc: func() Seals { - return &SerializedSeal{} - }, - GenerateCommittedSealsFunc: func(m map[types.Address][]byte, v validators.Validators) (Seals, error) { - return nil, errTest - }, - }, - expectedHeader: nil, - expectedErr: errTest, - }, - { - name: "should set CommittedSeals into IBFTExtra", - header: &types.Header{ - Number: 1, - ExtraData: getTestExtraBytes( - ecdsaValidators, - testProposerSeal, - &SerializedSeal{}, - nil, - nil, - ), - }, - roundNumber: 0, - sealMap: map[types.Address][]byte{ - testAddr1: []byte("test"), - }, - keyManager: &MockKeyManager{ - NewEmptyValidatorsFunc: func() validators.Validators { - return ecdsaValidators - }, - NewEmptyCommittedSealsFunc: func() Seals { - return &SerializedSeal{} - }, - GenerateCommittedSealsFunc: func(m map[types.Address][]byte, v validators.Validators) (Seals, error) { - return testSerializedSeals1, nil - }, - }, - expectedHeader: &types.Header{ - Number: 1, - ExtraData: getTestExtraBytes( - ecdsaValidators, - testProposerSeal, - testSerializedSeals1, - nil, - &round0, - ), - }, - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - signer := newTestSingleKeyManagerSigner(test.keyManager) - - header, err := signer.WriteCommittedSeals(test.header, test.roundNumber, test.sealMap) - - testHelper.AssertErrorMessageContains( - t, - test.expectedErr, - err, - ) - assert.Equal( - t, - test.expectedHeader, - header, - ) - }) - } -} - -func TestSignerVerifyCommittedSeals(t *testing.T) { - committedSeals := &SerializedSeal{} - - tests := []struct { - name string - header *types.Header - validators validators.Validators - quorumSize int - verifyCommittedSealsRes int - verifyCommittedSealsErr error - expectedErr error - }{ - { - name: "should return error if VerifyCommittedSeals fails", - header: &types.Header{ - Number: 1, - ExtraData: getTestExtraBytes( - ecdsaValidators, - testProposerSeal, - testSerializedSeals1, - nil, - nil, - ), - }, - validators: ecdsaValidators, - quorumSize: 0, - verifyCommittedSealsRes: 0, - verifyCommittedSealsErr: errTest, - expectedErr: errTest, - }, - { - name: "should return error if number of signers is less than quorumSize", - header: &types.Header{ - Number: 1, - ExtraData: getTestExtraBytes( - ecdsaValidators, - testProposerSeal, - testSerializedSeals1, - nil, - nil, - ), - }, - validators: ecdsaValidators, - quorumSize: 5, - verifyCommittedSealsRes: 3, - verifyCommittedSealsErr: nil, - expectedErr: ErrNotEnoughCommittedSeals, - }, - { - name: "should succeed", - header: &types.Header{ - Number: 1, - ExtraData: getTestExtraBytes( - ecdsaValidators, - testProposerSeal, - testSerializedSeals1, - nil, - nil, - ), - }, - validators: ecdsaValidators, - quorumSize: 5, - verifyCommittedSealsRes: 6, - verifyCommittedSealsErr: nil, - 
expectedErr: nil, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var expectedSig []byte - - signer := newTestSingleKeyManagerSigner(&MockKeyManager{ - NewEmptyValidatorsFunc: func() validators.Validators { - return test.validators - }, - NewEmptyCommittedSealsFunc: func() Seals { - return committedSeals - }, - VerifyCommittedSealsFunc: func(s Seals, b []byte, v validators.Validators) (int, error) { - assert.Equal(t, testSerializedSeals1, s) - assert.Equal(t, ecdsaValidators, v) - assert.Equal(t, expectedSig, b) - - return test.verifyCommittedSealsRes, test.verifyCommittedSealsErr - }, - }) - - UseIstanbulHeaderHashInTest(t, signer) - - expectedSig = crypto.Keccak256( - wrapCommitHash( - test.header.ComputeHash().Hash.Bytes(), - ), - ) - - testHelper.AssertErrorMessageContains( - t, - test.expectedErr, - signer.VerifyCommittedSeals( - test.header.Hash, - committedSeals, - test.validators, - test.quorumSize, - ), - ) - }) - } -} - -func TestSignerVerifyParentCommittedSeals(t *testing.T) { - t.Parallel() - - parentHeaderHash := crypto.Keccak256(types.ZeroAddress.Bytes()) - - tests := []struct { - name string - parentHeader *types.Header - header *types.Header - parentValidators validators.Validators - quorumSize int - mustExist bool - verifyCommittedSealsRes int - verifyCommittedSealsErr error - expectedErr error - }{ - { - name: "should return error if GetIBFTExtra fails", - parentHeader: &types.Header{ - Hash: types.BytesToHash(parentHeaderHash), - }, - header: &types.Header{ - ExtraData: []byte{}, - }, - parentValidators: ecdsaValidators, - quorumSize: 0, - mustExist: true, - verifyCommittedSealsRes: 0, - verifyCommittedSealsErr: nil, - expectedErr: fmt.Errorf( - "wrong extra size, expected greater than or equal to %d but actual %d", - IstanbulExtraVanity, - 0, - ), - }, - { - name: "should return error if header doesn't have ParentCommittedSeals and must exist is true", - parentHeader: &types.Header{ - Hash: types.BytesToHash(parentHeaderHash), - }, - header: &types.Header{ - ExtraData: getTestExtraBytes( - ecdsaValidators, - testProposerSeal, - testSerializedSeals1, - nil, - nil, - ), - }, - parentValidators: ecdsaValidators, - quorumSize: 0, - mustExist: true, - verifyCommittedSealsRes: 0, - verifyCommittedSealsErr: nil, - expectedErr: ErrEmptyParentCommittedSeals, - }, - { - name: "should succeed if header doesn't have ParentCommittedSeals and must exist is false", - parentHeader: &types.Header{ - Hash: types.BytesToHash(parentHeaderHash), - }, - header: &types.Header{ - ExtraData: getTestExtraBytes( - ecdsaValidators, - testProposerSeal, - testSerializedSeals1, - nil, - nil, - ), - }, - parentValidators: ecdsaValidators, - quorumSize: 0, - mustExist: false, - verifyCommittedSealsRes: 0, - verifyCommittedSealsErr: nil, - expectedErr: nil, - }, - { - name: "should return error if VerifyCommittedSeals fails", - parentHeader: &types.Header{ - Hash: types.BytesToHash(parentHeaderHash), - }, - header: &types.Header{ - ExtraData: getTestExtraBytes( - ecdsaValidators, - testProposerSeal, - testSerializedSeals1, - testSerializedSeals2, - nil, - ), - }, - parentValidators: ecdsaValidators, - quorumSize: 0, - mustExist: false, - verifyCommittedSealsRes: 0, - verifyCommittedSealsErr: errTest, - expectedErr: errTest, - }, - { - name: "should return ErrNotEnoughCommittedSeals if the number of signers is less than quorum", - parentHeader: &types.Header{ - Hash: types.BytesToHash(parentHeaderHash), - }, - header: &types.Header{ - ExtraData: getTestExtraBytes( - 
ecdsaValidators, - testProposerSeal, - testSerializedSeals1, - testSerializedSeals2, - nil, - ), - }, - parentValidators: ecdsaValidators, - quorumSize: 5, - mustExist: false, - verifyCommittedSealsRes: 2, - verifyCommittedSealsErr: nil, - expectedErr: ErrNotEnoughCommittedSeals, - }, - { - name: "should succeed", - parentHeader: &types.Header{ - Hash: types.BytesToHash(parentHeaderHash), - }, - header: &types.Header{ - ExtraData: getTestExtraBytes( - ecdsaValidators, - testProposerSeal, - testSerializedSeals1, - testSerializedSeals2, - nil, - ), - }, - parentValidators: ecdsaValidators, - quorumSize: 5, - mustExist: false, - verifyCommittedSealsRes: 6, - verifyCommittedSealsErr: nil, - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - expectedSig := crypto.Keccak256( - wrapCommitHash( - test.parentHeader.Hash.Bytes(), - ), - ) - - signer := newTestSingleKeyManagerSigner(&MockKeyManager{ - NewEmptyValidatorsFunc: func() validators.Validators { - return ecdsaValidators - }, - NewEmptyCommittedSealsFunc: func() Seals { - return &SerializedSeal{} - }, - VerifyCommittedSealsFunc: func(s Seals, b []byte, v validators.Validators) (int, error) { - assert.Equal(t, testSerializedSeals2, s) - assert.Equal(t, ecdsaValidators, v) - assert.Equal(t, expectedSig, b) - - return test.verifyCommittedSealsRes, test.verifyCommittedSealsErr - }, - }) - - testHelper.AssertErrorMessageContains( - t, - test.expectedErr, - signer.VerifyParentCommittedSeals( - test.parentHeader.Hash, - test.header, - test.parentValidators, - test.quorumSize, - test.mustExist, - ), - ) - }) - } -} - -func TestSignerSignIBFTMessage(t *testing.T) { - t.Parallel() - - msg := []byte("test") - sig := []byte("signature") - - signer := &SignerImpl{ - keyManager: &MockKeyManager{ - SignIBFTMessageFunc: func(data []byte) ([]byte, error) { - assert.Equal(t, crypto.Keccak256(msg), data) - - return sig, errTest - }, - }, - } - - res, err := signer.SignIBFTMessage(msg) - - assert.Equal( - t, - sig, - res, - ) - - assert.Equal( - t, - errTest, - err, - ) -} - -func TestEcrecoverFromIBFTMessage(t *testing.T) { - t.Parallel() - - msg := []byte("test") - sig := []byte("signature") - - signer := &SignerImpl{ - keyManager: &MockKeyManager{ - EcrecoverFunc: func(b1, b2 []byte) (types.Address, error) { - assert.Equal(t, sig, b1) - assert.Equal(t, crypto.Keccak256(msg), b2) - - return testAddr1, errTest - }, - }, - } - - res, err := signer.EcrecoverFromIBFTMessage(sig, msg) - - assert.Equal( - t, - testAddr1, - res, - ) - - assert.Equal( - t, - errTest, - err, - ) -} - -func TestSignerSignIBFTMessageAndEcrecoverFromIBFTMessage(t *testing.T) { - t.Parallel() - - msg := []byte("message") - - ecdsaKeyManager, _ := newTestECDSAKeyManager(t) - blsKeyManager, _, _ := newTestBLSKeyManager(t) - - tests := []struct { - name string - keyManager KeyManager - }{ - { - name: "ECDSA Signer", - keyManager: ecdsaKeyManager, - }, - { - name: "BLS Signer", - keyManager: blsKeyManager, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - signer := newTestSingleKeyManagerSigner(test.keyManager) - - sig, err := signer.SignIBFTMessage(msg) - assert.NoError(t, err) - - recovered, err := signer.EcrecoverFromIBFTMessage(sig, msg) - assert.NoError(t, err) - - assert.Equal( - t, - signer.Address(), - recovered, - ) - }) - } -} diff --git a/consensus/ibft/state_test.go b/consensus/ibft/state_test.go deleted file mode 100644 index 
db374e0466..0000000000 --- a/consensus/ibft/state_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package ibft - -import ( - "strconv" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestState_FaultyNodes(t *testing.T) { - cases := []struct { - Network, Faulty uint64 - }{ - {1, 0}, - {2, 0}, - {3, 0}, - {4, 1}, - {5, 1}, - {6, 1}, - {7, 2}, - {8, 2}, - {9, 2}, - } - for _, c := range cases { - pool := newTesterAccountPool(t, int(c.Network)) - vals := pool.ValidatorSet() - assert.Equal(t, CalcMaxFaultyNodes(vals), int(c.Faulty)) - } -} - -// TestNumValid checks if the quorum size is calculated -// correctly based on number of validators (network size). -func TestNumValid(t *testing.T) { - cases := []struct { - Network, Quorum uint64 - }{ - {1, 1}, - {2, 2}, - {3, 3}, - {4, 3}, - {5, 4}, - {6, 4}, - {7, 5}, - {8, 6}, - {9, 6}, - } - - addAccounts := func( - pool *testerAccountPool, - numAccounts int, - ) { - // add accounts - for i := 0; i < numAccounts; i++ { - pool.add(strconv.Itoa(i)) - } - } - - for _, c := range cases { - pool := newTesterAccountPool(t, int(c.Network)) - addAccounts(pool, int(c.Network)) - - assert.Equal(t, - int(c.Quorum), - OptimalQuorumSize(pool.ValidatorSet()), - ) - } -} diff --git a/consensus/ibft/transport.go b/consensus/ibft/transport.go deleted file mode 100644 index c52e201ec7..0000000000 --- a/consensus/ibft/transport.go +++ /dev/null @@ -1,67 +0,0 @@ -package ibft - -import ( - "github.com/0xPolygon/go-ibft/messages/proto" - "github.com/0xPolygon/polygon-edge/network" - "github.com/0xPolygon/polygon-edge/types" - "github.com/libp2p/go-libp2p/core/peer" -) - -type transport interface { - Multicast(msg *proto.Message) error -} - -type gossipTransport struct { - topic *network.Topic -} - -func (g *gossipTransport) Multicast(msg *proto.Message) error { - return g.topic.Publish(msg) -} - -func (i *backendIBFT) Multicast(msg *proto.Message) { - if err := i.transport.Multicast(msg); err != nil { - i.logger.Error("fail to gossip", "err", err) - } -} - -// setupTransport sets up the gossip transport protocol -func (i *backendIBFT) setupTransport() error { - // Define a new topic - topic, err := i.network.NewTopic(ibftProto, &proto.Message{}) - if err != nil { - return err - } - - // Subscribe to the newly created topic - if err := topic.Subscribe( - func(obj interface{}, _ peer.ID) { - if !i.isActiveValidator() { - return - } - - msg, ok := obj.(*proto.Message) - if !ok { - i.logger.Error("invalid type assertion for message request") - - return - } - - i.consensus.AddMessage(msg) - - i.logger.Debug( - "validator message received", - "type", msg.Type.String(), - "height", msg.GetView().Height, - "round", msg.GetView().Round, - "addr", types.BytesToAddress(msg.From).String(), - ) - }, - ); err != nil { - return err - } - - i.transport = &gossipTransport{topic: topic} - - return nil -} diff --git a/consensus/ibft/validators.go b/consensus/ibft/validators.go deleted file mode 100644 index 9c1a59a2f8..0000000000 --- a/consensus/ibft/validators.go +++ /dev/null @@ -1,73 +0,0 @@ -package ibft - -import ( - "math" - - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" -) - -func CalcMaxFaultyNodes(s validators.Validators) int { - // N -> number of nodes in IBFT - // F -> number of faulty nodes - // - // N = 3F + 1 - // => F = (N - 1) / 3 - // - // IBFT tolerates 1 failure with 4 nodes - // 4 = 3 * 1 + 1 - // To tolerate 2 failures, IBFT requires 7 nodes - // 7 = 3 * 2 + 1 - // It should always take the floor of the result - 
return (s.Len() - 1) / 3 -} - -type QuorumImplementation func(validators.Validators) int - -// LegacyQuorumSize returns the legacy quorum size for the given validator set -func LegacyQuorumSize(set validators.Validators) int { - // According to the IBFT spec, the number of valid messages - // needs to be 2F + 1 - return 2*CalcMaxFaultyNodes(set) + 1 -} - -// OptimalQuorumSize returns the optimal quorum size for the given validator set -func OptimalQuorumSize(set validators.Validators) int { - // if the number of validators is less than 4, - // then the entire set is required - if CalcMaxFaultyNodes(set) == 0 { - /* - N: 1 -> Q: 1 - N: 2 -> Q: 2 - N: 3 -> Q: 3 - */ - return set.Len() - } - - // (quorum optimal) Q = ceil(2/3 * N) - return int(math.Ceil(2 * float64(set.Len()) / 3)) -} - -func CalcProposer( - validators validators.Validators, - round uint64, - lastProposer types.Address, -) validators.Validator { - var seed uint64 - - if lastProposer == types.ZeroAddress { - seed = round - } else { - offset := int64(0) - - if index := validators.Index(lastProposer); index != -1 { - offset = index - } - - seed = uint64(offset) + round + 1 - } - - pick := seed % uint64(validators.Len()) - - return validators.At(pick) -} diff --git a/consensus/ibft/verifier.go b/consensus/ibft/verifier.go deleted file mode 100644 index 6918a8662c..0000000000 --- a/consensus/ibft/verifier.go +++ /dev/null @@ -1,230 +0,0 @@ -package ibft - -import ( - "bytes" - "encoding/binary" - "encoding/hex" - - "github.com/0xPolygon/go-ibft/messages" - protoIBFT "github.com/0xPolygon/go-ibft/messages/proto" - "github.com/0xPolygon/polygon-edge/consensus/ibft/signer" - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/types" -) - -// Verifier impl for go-ibft -// calculateProposalHashFromBlockBytes is a helper method to marshal ethereum block in bytes -// and pass to calculateProposalHash -func (i *backendIBFT) calculateProposalHashFromBlockBytes( - proposal []byte, - round *uint64, -) (types.Hash, error) { - block := &types.Block{} - if err := block.UnmarshalRLP(proposal); err != nil { - return types.ZeroHash, err - } - - signer, err := i.forkManager.GetSigner(block.Number()) - if err != nil { - return types.ZeroHash, err - } - - return i.calculateProposalHash( - signer, - block.Header, - round, - ) -} - -// calculateProposalHash is new hash calculation for proposal in go-ibft, -// which includes round number block is finalized at -func (i *backendIBFT) calculateProposalHash( - signer signer.Signer, - header *types.Header, - round *uint64, -) (types.Hash, error) { - if round == nil { - // legacy hash calculation - return header.Hash, nil - } - - roundBytes := make([]byte, 8) - binary.BigEndian.PutUint64(roundBytes, *round) - - return types.BytesToHash( - crypto.Keccak256( - header.Hash.Bytes(), - roundBytes, - ), - ), nil -} - -func (i *backendIBFT) IsValidProposal(rawProposal []byte) bool { - var ( - latestHeader = i.blockchain.Header() - latestBlockNumber = latestHeader.Number - newBlock = &types.Block{} - ) - - // retrieve the newBlock proposal - if err := newBlock.UnmarshalRLP(rawProposal); err != nil { - i.logger.Error("IsValidProposal: fail to unmarshal block", "err", err) - - return false - } - - if latestBlockNumber+1 != newBlock.Number() { - i.logger.Error( - "sequence not correct", - "block", newBlock.Number, - "sequence", latestBlockNumber+1, - ) - - return false - } - - if err := i.verifyHeaderImpl( - latestHeader, - newBlock.Header, - i.currentSigner, - i.currentValidators, - 
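The deleted validators.go and state_test.go encode the standard IBFT arithmetic: F = (N - 1) / 3 faulty nodes are tolerated, the legacy quorum is 2F + 1, and the optimal quorum is ceil(2N / 3), or the whole set when F is 0. A standalone sketch reproducing those formulas and the values from the deleted test tables:

package main

import (
	"fmt"
	"math"
)

// maxFaulty returns F for N validators, taking the floor of (N-1)/3.
func maxFaulty(n int) int { return (n - 1) / 3 }

// legacyQuorum is the 2F+1 quorum used by the legacy code path.
func legacyQuorum(n int) int { return 2*maxFaulty(n) + 1 }

// optimalQuorum is ceil(2N/3), except that sets too small to tolerate any
// fault (N of 1, 2, or 3) require every validator.
func optimalQuorum(n int) int {
	if maxFaulty(n) == 0 {
		return n
	}

	return int(math.Ceil(2 * float64(n) / 3))
}

func main() {
	// Matches the deleted test tables: N=4 -> F=1, Q=3; N=7 -> F=2, Q=5;
	// N=9 -> F=2, Q=6.
	for _, n := range []int{1, 4, 5, 7, 9} {
		fmt.Printf("N=%d F=%d legacyQ=%d optimalQ=%d\n",
			n, maxFaulty(n), legacyQuorum(n), optimalQuorum(n))
	}
}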
i.currentHooks, - true, - ); err != nil { - i.logger.Error("block header verification failed", "err", err) - - return false - } - - if err := i.blockchain.VerifyPotentialBlock(newBlock); err != nil { - i.logger.Error("block verification failed", "err", err) - - return false - } - - if err := i.currentHooks.VerifyBlock(newBlock); err != nil { - i.logger.Error("additional block verification failed", "err", err) - - return false - } - - return true -} - -func (i *backendIBFT) IsValidValidator(msg *protoIBFT.Message) bool { - msgNoSig, err := msg.PayloadNoSig() - if err != nil { - return false - } - - signerAddress, err := i.currentSigner.EcrecoverFromIBFTMessage( - msg.Signature, - msgNoSig, - ) - - if err != nil { - i.logger.Error("failed to ecrecover message", "err", err) - - return false - } - - // verify the signature came from the sender - if !bytes.Equal(msg.From, signerAddress.Bytes()) { - i.logger.Error( - "signer address doesn't match with From", - "from", hex.EncodeToString(msg.From), - "signer", signerAddress, - "err", err, - ) - - return false - } - - validators, err := i.forkManager.GetValidators(msg.View.Height) - if err != nil { - return false - } - - // verify the sender is in the active validator set - if !validators.Includes(signerAddress) { - i.logger.Error( - "signer address doesn't included in validators", - "signer", signerAddress, - ) - - return false - } - - return true -} - -func (i *backendIBFT) IsProposer(id []byte, height, round uint64) bool { - previousHeader, exists := i.blockchain.GetHeaderByNumber(height - 1) - if !exists { - i.logger.Error("header not found", "height", height-1) - - return false - } - - previousProposer, err := i.extractProposer(previousHeader) - if err != nil { - i.logger.Error("failed to extract the last proposer", "height", height-1, "err", err) - - return false - } - - nextProposer := CalcProposer( - i.currentValidators, - round, - previousProposer, - ) - - return types.BytesToAddress(id) == nextProposer.Addr() -} - -func (i *backendIBFT) IsValidProposalHash(proposal *protoIBFT.Proposal, hash []byte) bool { - proposalHash, err := i.calculateProposalHashFromBlockBytes(proposal.RawProposal, &proposal.Round) - if err != nil { - return false - } - - return bytes.Equal(proposalHash.Bytes(), hash) -} - -func (i *backendIBFT) IsValidCommittedSeal( - proposalHash []byte, - committedSeal *messages.CommittedSeal, -) bool { - err := i.currentSigner.VerifyCommittedSeal( - i.currentValidators, - types.BytesToAddress(committedSeal.Signer), - committedSeal.Signature, - proposalHash, - ) - - if err != nil { - i.logger.Error("IsValidCommittedSeal: failed to verify committed seal", "err", err) - - return false - } - - return true -} - -func (i *backendIBFT) extractProposer(header *types.Header) (types.Address, error) { - if header.Number == 0 { - return types.ZeroAddress, nil - } - - signer, err := i.forkManager.GetSigner(header.Number) - if err != nil { - return types.ZeroAddress, err - } - - proposer, err := signer.EcrecoverFromHeader(header) - if err != nil { - return types.ZeroAddress, err - } - - return proposer, nil -} diff --git a/contracts/abis/abis.go b/contracts/abis/abis.go deleted file mode 100644 index dfc285c8b1..0000000000 --- a/contracts/abis/abis.go +++ /dev/null @@ -1,13 +0,0 @@ -package abis - -import ( - "github.com/umbracle/ethgo/abi" -) - -var ( - // ABI for Staking Contract - StakingABI = abi.MustNewABI(StakingJSONABI) - - // ABI for Contract used in e2e stress test - StressTestABI = abi.MustNewABI(StressTestJSONABI) -) diff --git 
a/contracts/abis/json.go b/contracts/abis/json.go deleted file mode 100644 index 88cb4b0d9d..0000000000 --- a/contracts/abis/json.go +++ /dev/null @@ -1,386 +0,0 @@ -package abis - -const StakingJSONABI = `[ - { - "inputs": [ - { - "internalType": "uint256", - "name": "minNumValidators", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "maxNumValidators", - "type": "uint256" - } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "account", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "amount", - "type": "uint256" - } - ], - "name": "Staked", - "type": "event" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "account", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "amount", - "type": "uint256" - } - ], - "name": "Unstaked", - "type": "event" - }, - { - "inputs": [], - "name": "VALIDATOR_THRESHOLD", - "outputs": [ - { - "internalType": "uint128", - "name": "", - "type": "uint128" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "name": "_addressToBLSPublicKey", - "outputs": [ - { - "internalType": "bytes", - "name": "", - "type": "bytes" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "name": "_addressToIsValidator", - "outputs": [ - { - "internalType": "bool", - "name": "", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "name": "_addressToStakedAmount", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "name": "_addressToValidatorIndex", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "_maximumNumValidators", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "_minimumNumValidators", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "_stakedAmount", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "name": "_validators", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "addr", - "type": "address" - } - ], - "name": "accountStake", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "address", - "name": "addr", - "type": "address" - } 
- ], - "name": "isValidator", - "outputs": [ - { - "internalType": "bool", - "name": "", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "maximumNumValidators", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "minimumNumValidators", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes", - "name": "blsPubKey", - "type": "bytes" - } - ], - "name": "registerBLSPublicKey", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "stake", - "outputs": [], - "stateMutability": "payable", - "type": "function" - }, - { - "inputs": [], - "name": "stakedAmount", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "unstake", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "validatorBLSPublicKeys", - "outputs": [ - { - "internalType": "bytes[]", - "name": "", - "type": "bytes[]" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "validators", - "outputs": [ - { - "internalType": "address[]", - "name": "", - "type": "address[]" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "stateMutability": "payable", - "type": "receive" - } -]` - -const StressTestJSONABI = `[ - { - "inputs": [], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "uint256", - "name": "number", - "type": "uint256" - } - ], - "name": "txnDone", - "type": "event" - }, - { - "inputs": [], - "name": "getCount", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "string", - "name": "sName", - "type": "string" - } - ], - "name": "setName", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - } - ]` diff --git a/contracts/staking/query.go b/contracts/staking/query.go deleted file mode 100644 index 441a1f8213..0000000000 --- a/contracts/staking/query.go +++ /dev/null @@ -1,168 +0,0 @@ -package staking - -import ( - "errors" - "math/big" - - "github.com/umbracle/ethgo" - - "github.com/0xPolygon/polygon-edge/contracts/abis" - "github.com/0xPolygon/polygon-edge/state/runtime" - "github.com/0xPolygon/polygon-edge/types" - "github.com/umbracle/ethgo/abi" -) - -const ( - methodValidators = "validators" - methodValidatorBLSPublicKeys = "validatorBLSPublicKeys" -) - -var ( - // staking contract address - AddrStakingContract = types.StringToAddress("1001") - - // Gas limit used when querying the validator set - queryGasLimit uint64 = 1000000 - - ErrMethodNotFoundInABI = errors.New("method not found in ABI") - ErrFailedTypeAssertion = errors.New("failed type assertion") -) - -// TxQueryHandler is a interface to call view method in the contract -type TxQueryHandler interface { - Apply(*types.Transaction) (*runtime.ExecutionResult, error) - GetNonce(types.Address) uint64 - SetNonPayable(nonPayable bool) -} - -// decodeWeb3ArrayOfBytes is a helper function to parse the data -// 
representing array of bytes in contract result -func decodeWeb3ArrayOfBytes( - result interface{}, -) ([][]byte, error) { - mapResult, ok := result.(map[string]interface{}) - if !ok { - return nil, ErrFailedTypeAssertion - } - - bytesArray, ok := mapResult["0"].([][]byte) - if !ok { - return nil, ErrFailedTypeAssertion - } - - return bytesArray, nil -} - -// createCallViewTx is a helper function to create a transaction to call view method -func createCallViewTx( - from types.Address, - contractAddress types.Address, - methodID []byte, - nonce uint64, -) *types.Transaction { - return &types.Transaction{ - From: from, - To: &contractAddress, - Input: methodID, - Nonce: nonce, - Gas: queryGasLimit, - Value: big.NewInt(0), - GasPrice: big.NewInt(0), - } -} - -// DecodeValidators parses contract call result and returns array of address -func DecodeValidators(method *abi.Method, returnValue []byte) ([]types.Address, error) { - decodedResults, err := method.Outputs.Decode(returnValue) - if err != nil { - return nil, err - } - - results, ok := decodedResults.(map[string]interface{}) - if !ok { - return nil, errors.New("failed type assertion from decodedResults to map") - } - - web3Addresses, ok := results["0"].([]ethgo.Address) - - if !ok { - return nil, errors.New("failed type assertion from results[0] to []ethgo.Address") - } - - addresses := make([]types.Address, len(web3Addresses)) - for idx, waddr := range web3Addresses { - addresses[idx] = types.Address(waddr) - } - - return addresses, nil -} - -// QueryValidators is a helper function to get validator addresses from contract -func QueryValidators(t TxQueryHandler, from types.Address) ([]types.Address, error) { - method, ok := abis.StakingABI.Methods[methodValidators] - if !ok { - return nil, ErrMethodNotFoundInABI - } - - t.SetNonPayable(true) - res, err := t.Apply(createCallViewTx( - from, - AddrStakingContract, - method.ID(), - t.GetNonce(from), - )) - - if err != nil { - return nil, err - } - - if res.Failed() { - return nil, res.Err - } - - return DecodeValidators(method, res.ReturnValue) -} - -// decodeBLSPublicKeys parses contract call result and returns array of bytes -func decodeBLSPublicKeys( - method *abi.Method, - returnValue []byte, -) ([][]byte, error) { - decodedResults, err := method.Outputs.Decode(returnValue) - if err != nil { - return nil, err - } - - blsPublicKeys, err := decodeWeb3ArrayOfBytes(decodedResults) - if err != nil { - return nil, err - } - - return blsPublicKeys, nil -} - -// QueryBLSPublicKeys is a helper function to get BLS Public Keys from contract -func QueryBLSPublicKeys(t TxQueryHandler, from types.Address) ([][]byte, error) { - method, ok := abis.StakingABI.Methods[methodValidatorBLSPublicKeys] - if !ok { - return nil, ErrMethodNotFoundInABI - } - - t.SetNonPayable(true) - res, err := t.Apply(createCallViewTx( - from, - AddrStakingContract, - method.ID(), - t.GetNonce(from), - )) - - if err != nil { - return nil, err - } - - if res.Failed() { - return nil, res.Err - } - - return decodeBLSPublicKeys(method, res.ReturnValue) -} diff --git a/contracts/staking/query_test.go b/contracts/staking/query_test.go deleted file mode 100644 index 12ce47247a..0000000000 --- a/contracts/staking/query_test.go +++ /dev/null @@ -1,227 +0,0 @@ -package staking - -import ( - "errors" - "math/big" - "testing" - - "github.com/0xPolygon/polygon-edge/contracts/abis" - "github.com/0xPolygon/polygon-edge/state/runtime" - "github.com/0xPolygon/polygon-edge/types" - "github.com/stretchr/testify/assert" -) - -var ( - addr1 = 
types.StringToAddress("1") - addr2 = types.StringToAddress("2") -) - -func leftPad(buf []byte, n int) []byte { - l := len(buf) - if l > n { - return buf - } - - tmp := make([]byte, n) - copy(tmp[n-l:], buf) - - return tmp -} - -func appendAll(bytesArrays ...[]byte) []byte { - var res []byte - - for idx := range bytesArrays { - res = append(res, bytesArrays[idx]...) - } - - return res -} - -type TxMock struct { - hashToRes map[types.Hash]*runtime.ExecutionResult - nonce map[types.Address]uint64 - nonPayable bool -} - -func (m *TxMock) Apply(tx *types.Transaction) (*runtime.ExecutionResult, error) { - if m.hashToRes == nil { - return nil, nil - } - - tx.ComputeHash(1) - - res, ok := m.hashToRes[tx.Hash] - if ok { - return res, nil - } - - return nil, errors.New("not found") -} - -func (m *TxMock) GetNonce(addr types.Address) uint64 { - if m.nonce != nil { - return m.nonce[addr] - } - - return 0 -} - -func (m *TxMock) SetNonPayable(nonPayable bool) { - m.nonPayable = nonPayable -} - -func Test_decodeValidators(t *testing.T) { - tests := []struct { - name string - value []byte - succeed bool - expected []types.Address - }{ - { - name: "should fail to parse", - value: appendAll( - leftPad([]byte{0x20}, 32), // Offset of the beginning of array - leftPad([]byte{0x01}, 32), // Number of addresses - ), - succeed: false, - }, - { - name: "should succeed", - value: appendAll( - leftPad([]byte{0x20}, 32), // Offset of the beginning of array - leftPad([]byte{0x02}, 32), // Number of addresses - leftPad(addr1[:], 32), // Address 1 - leftPad(addr2[:], 32), // Address 2 - ), - succeed: true, - expected: []types.Address{ - addr1, - addr2, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - method := abis.StakingABI.Methods["validators"] - assert.NotNil(t, method) - - res, err := DecodeValidators(method, tt.value) - if tt.succeed { - assert.NoError(t, err) - } else { - assert.Error(t, err) - } - assert.Equal(t, tt.expected, res) - }) - } -} - -func TestQueryValidators(t *testing.T) { - method := abis.StakingABI.Methods["validators"] - if method == nil { - t.Fail() - } - - type MockArgs struct { - addr types.Address - tx *types.Transaction - } - - type MockReturns struct { - nonce uint64 - res *runtime.ExecutionResult - err error - } - - tests := []struct { - name string - from types.Address - mockArgs *MockArgs - mockReturns *MockReturns - succeed bool - expected []types.Address - err error - }{ - { - name: "should failed", - from: addr1, - mockArgs: &MockArgs{ - addr: addr1, - tx: &types.Transaction{ - From: addr1, - To: &AddrStakingContract, - Value: big.NewInt(0), - Input: method.ID(), - GasPrice: big.NewInt(0), - Gas: 100000000, - Nonce: 10, - }, - }, - mockReturns: &MockReturns{ - nonce: 10, - res: &runtime.ExecutionResult{ - Err: runtime.ErrExecutionReverted, - }, - err: nil, - }, - succeed: false, - expected: nil, - err: runtime.ErrExecutionReverted, - }, - { - name: "should succeed", - from: addr1, - mockArgs: &MockArgs{ - addr: addr1, - tx: &types.Transaction{ - From: addr1, - To: &AddrStakingContract, - Value: big.NewInt(0), - Input: method.ID(), - GasPrice: big.NewInt(0), - Gas: queryGasLimit, - Nonce: 10, - }, - }, - mockReturns: &MockReturns{ - nonce: 10, - res: &runtime.ExecutionResult{ - ReturnValue: appendAll( - leftPad([]byte{0x20}, 32), // Offset of the beginning of array - leftPad([]byte{0x00}, 32), // Number of addresses - ), - }, - err: nil, - }, - succeed: true, - expected: []types.Address{}, - err: nil, - }, - } - - for _, tt := range tests { - 
t.Run(tt.name, func(t *testing.T) { - method := abis.StakingABI.Methods["validators"] - assert.NotNil(t, method) - - mock := &TxMock{ - hashToRes: map[types.Hash]*runtime.ExecutionResult{ - tt.mockArgs.tx.ComputeHash(1).Hash: tt.mockReturns.res, - }, - nonce: map[types.Address]uint64{ - tt.mockArgs.addr: tt.mockReturns.nonce, - }, - } - - res, err := QueryValidators(mock, tt.from) - if tt.succeed { - assert.NoError(t, err) - } else { - assert.Error(t, err) - } - assert.Equal(t, tt.expected, res) - }) - } -} diff --git a/e2e/const.go b/e2e/const.go index 651c127bcb..a7b6664aa7 100644 --- a/e2e/const.go +++ b/e2e/const.go @@ -3,7 +3,6 @@ package e2e // Data for mock contract const sampleByteCode = `608060405234801561001057600080fd5b50610286806100206000396000f3fe6080604052600436106100345760003560e01c8063498e6857146100395780636e7e996e14610043578063dd13c6171461004d575b600080fd5b610041610069565b005b61004b6100da565b005b610067600480360361006291908101906101be565b61014b565b005b600073ffffffffffffffffffffffffffffffffffffffff1673010000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff167fec89f5028137f6e2218f9178e2ddfa454a509f4778d9cf323e96f42a902d307f60405160405180910390a3565b73010000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16600073ffffffffffffffffffffffffffffffffffffffff167fec89f5028137f6e2218f9178e2ddfa454a509f4778d9cf323e96f42a902d307f60405160405180910390a3565b8073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff167fec89f5028137f6e2218f9178e2ddfa454a509f4778d9cf323e96f42a902d307f60405160405180910390a35050565b6000813590506101b88161022c565b92915050565b600080604083850312156101d157600080fd5b60006101df858286016101a9565b92505060206101f0858286016101a9565b9150509250929050565b60006102058261020c565b9050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b610235816101fa565b811461024057600080fd5b5056fea365627a7a72315820874667b781b97cc25ada056a6b1d529cadb5d6e734754990b055e2d072d506d16c6578706572696d656e74616cf564736f6c63430005110040` -const stressTestBytecode = 
`608060405234801561001057600080fd5b50600060018190555061044f806100286000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063a87d942c1461003b578063c47f002714610059575b600080fd5b610043610075565b6040516100509190610245565b60405180910390f35b610073600480360381019061006e91906101f5565b61007f565b005b6000600154905090565b6001600081548092919061009290610332565b919050555080600090805190602001906100ad9291906100ea565b507fd3c4663647064fe58be7148d6b8ed9f8478094055b18a5816ce5bd82897009136001546040516100df9190610245565b60405180910390a150565b8280546100f6906102cf565b90600052602060002090601f016020900481019282610118576000855561015f565b82601f1061013157805160ff191683800117855561015f565b8280016001018555821561015f579182015b8281111561015e578251825591602001919060010190610143565b5b50905061016c9190610170565b5090565b5b80821115610189576000816000905550600101610171565b5090565b60006101a061019b84610285565b610260565b9050828152602081018484840111156101b857600080fd5b6101c38482856102c0565b509392505050565b600082601f8301126101dc57600080fd5b81356101ec84826020860161018d565b91505092915050565b60006020828403121561020757600080fd5b600082013567ffffffffffffffff81111561022157600080fd5b61022d848285016101cb565b91505092915050565b61023f816102b6565b82525050565b600060208201905061025a6000830184610236565b92915050565b600061026a61027b565b90506102768282610301565b919050565b6000604051905090565b600067ffffffffffffffff8211156102a05761029f6103d9565b5b6102a982610408565b9050602081019050919050565b6000819050919050565b82818337600083830152505050565b600060028204905060018216806102e757607f821691505b602082108114156102fb576102fa6103aa565b5b50919050565b61030a82610408565b810181811067ffffffffffffffff82111715610329576103286103d9565b5b80604052505050565b600061033d826102b6565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8214156103705761036f61037b565b5b600182019050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000601f19601f830116905091905056fea26469706673582212203d39571e4f57977f6bfdebc3fb2a07c4f3d483749642a991e34438f2b41a722b64736f6c63430008040033` const bloomFilterTestBytecode = `608060405234801561001057600080fd5b5060fa8061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806362b9ab5b14602d575b600080fd5b60336035565b005b3373ffffffffffffffffffffffffffffffffffffffff167fdf50c7bb3b25f812aedef81bc334454040e7b27e27de95a79451d663013b7e17602a604051607a91906091565b60405180910390a2565b608b8160b4565b82525050565b600060208201905060a460008301846084565b92915050565b6000819050919050565b600060bd8260aa565b905091905056fea2646970667358221220052488d0b0bc3f50f2d498d982ba7d31d724617d6055258426a7b819be800bf864736f6c63430008000033` // Default settings for IBFT nodes diff --git a/e2e/framework/config.go b/e2e/framework/config.go index 5758c6b542..9af187d929 100644 --- a/e2e/framework/config.go +++ b/e2e/framework/config.go @@ -1,21 +1,16 @@ package framework import ( - "crypto/ecdsa" "math/big" - "path/filepath" - "github.com/0xPolygon/polygon-edge/consensus/ibft" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" ) type ConsensusType int const ( - ConsensusIBFT ConsensusType = iota - ConsensusDev + ConsensusDev ConsensusType = iota ConsensusDummy ) @@ -33,34 +28,32 @@ type PredeployParams struct { // 
TestServerConfig for the test server type TestServerConfig struct { ReservedPorts []ReservedPort - JSONRPCPort int // The JSON RPC endpoint port - GRPCPort int // The GRPC endpoint port - LibP2PPort int // The Libp2p endpoint port - RootDir string // The root directory for test environment - IBFTDirPrefix string // The prefix of data directory for IBFT - IBFTDir string // The name of data directory for IBFT - PremineAccts []*SrvAccount // Accounts with existing balances (genesis accounts) - GenesisValidatorBalance *big.Int // Genesis the balance for the validators - DevStakers []types.Address // List of initial staking addresses for the staking SC - Consensus ConsensusType // Consensus MechanismType - ValidatorType validators.ValidatorType // Validator Type - Bootnodes []string // Bootnode Addresses - PriceLimit *uint64 // Minimum gas price limit to enforce for acceptance into the pool - DevInterval int // Dev consensus update interval [s] - EpochSize uint64 // The epoch size in blocks for the IBFT layer - BlockGasLimit uint64 // Block gas limit - BlockGasTarget uint64 // Gas target for new blocks - BaseFee uint64 // Initial base fee - ShowsLog bool // Flag specifying if logs are shown - Name string // Name of the server - SaveLogs bool // Flag specifying if logs are saved - LogsDir string // Directory where logs are saved - IsPos bool // Specifies the mechanism used for IBFT (PoA / PoS) - Signer crypto.TxSigner // Signer used for transactions - MinValidatorCount uint64 // Min validator count - MaxValidatorCount uint64 // Max validator count - BlockTime uint64 // Minimum block generation time (in s) - IBFTBaseTimeout uint64 // Base Timeout in seconds for IBFT + JSONRPCPort int // The JSON RPC endpoint port + GRPCPort int // The GRPC endpoint port + LibP2PPort int // The Libp2p endpoint port + RootDir string // The root directory for test environment + IBFTDirPrefix string // The prefix of data directory for IBFT + IBFTDir string // The name of data directory for IBFT + PremineAccts []*SrvAccount // Accounts with existing balances (genesis accounts) + GenesisValidatorBalance *big.Int // Genesis the balance for the validators + DevStakers []types.Address // List of initial staking addresses for the staking SC + Consensus ConsensusType // Consensus MechanismType + Bootnodes []string // Bootnode Addresses + PriceLimit *uint64 // Minimum gas price limit to enforce for acceptance into the pool + DevInterval int // Dev consensus update interval [s] + EpochSize uint64 // The epoch size in blocks for the IBFT layer + BlockGasLimit uint64 // Block gas limit + BlockGasTarget uint64 // Gas target for new blocks + BaseFee uint64 // Initial base fee + ShowsLog bool // Flag specifying if logs are shown + Name string // Name of the server + SaveLogs bool // Flag specifying if logs are saved + LogsDir string // Directory where logs are saved + IsPos bool // Specifies the mechanism used for IBFT (PoA / PoS) + Signer crypto.TxSigner // Signer used for transactions + MinValidatorCount uint64 // Min validator count + MaxValidatorCount uint64 // Max validator count + BlockTime uint64 // Minimum block generation time (in s) PredeployParams *PredeployParams BurnContracts map[uint64]types.Address } @@ -71,12 +64,7 @@ func (t *TestServerConfig) SetPredeployParams(params *PredeployParams) { // DataDir returns path of data directory server uses func (t *TestServerConfig) DataDir() string { - switch t.Consensus { - case ConsensusIBFT: - return filepath.Join(t.RootDir, t.IBFTDir) - default: - return t.RootDir - } + 
return t.RootDir } func (t *TestServerConfig) SetSigner(signer crypto.TxSigner) { @@ -87,15 +75,6 @@ func (t *TestServerConfig) SetBlockTime(blockTime uint64) { t.BlockTime = blockTime } -func (t *TestServerConfig) SetIBFTBaseTimeout(baseTimeout uint64) { - t.IBFTBaseTimeout = baseTimeout -} - -// PrivateKey returns a private key in data directory -func (t *TestServerConfig) PrivateKey() (*ecdsa.PrivateKey, error) { - return crypto.GenerateOrReadPrivateKey(filepath.Join(t.DataDir(), "consensus", ibft.IbftKeyName)) -} - // CALLBACKS // // Premine callback specifies an account with a balance (in WEI) @@ -134,11 +113,6 @@ func (t *TestServerConfig) SetConsensus(c ConsensusType) { t.Consensus = c } -// SetValidatorType callback sets validator type -func (t *TestServerConfig) SetValidatorType(vt validators.ValidatorType) { - t.ValidatorType = vt -} - // SetDevInterval sets the update interval for the dev consensus func (t *TestServerConfig) SetDevInterval(interval int) { t.DevInterval = interval diff --git a/e2e/framework/helper.go b/e2e/framework/helper.go index 5c68a6efdf..bcd8b25ace 100644 --- a/e2e/framework/helper.go +++ b/e2e/framework/helper.go @@ -2,22 +2,18 @@ package framework import ( "context" - "crypto/ecdsa" "errors" "fmt" "math/big" "net" "os" + "path" "strings" "sync" "testing" "time" - "github.com/0xPolygon/polygon-edge/contracts/abis" - "github.com/0xPolygon/polygon-edge/contracts/staking" - "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/helper/common" - "github.com/0xPolygon/polygon-edge/helper/hex" "github.com/0xPolygon/polygon-edge/helper/tests" "github.com/0xPolygon/polygon-edge/server/proto" txpoolProto "github.com/0xPolygon/polygon-edge/txpool/proto" @@ -82,136 +78,6 @@ func GetAccountBalance(t *testing.T, address types.Address, rpcClient *jsonrpc.C return accountBalance } -// GetValidatorSet returns the validator set from the SC -func GetValidatorSet(from types.Address, rpcClient *jsonrpc.Client) ([]types.Address, error) { - validatorsMethod, ok := abis.StakingABI.Methods["validators"] - if !ok { - return nil, errors.New("validators method doesn't exist in Staking contract ABI") - } - - toAddress := ethgo.Address(staking.AddrStakingContract) - selector := validatorsMethod.ID() - response, err := rpcClient.Eth().Call( - ðgo.CallMsg{ - From: ethgo.Address(from), - To: &toAddress, - Data: selector, - GasPrice: 1000000000, - Value: big.NewInt(0), - }, - ethgo.Latest, - ) - - if err != nil { - return nil, fmt.Errorf("unable to call Staking contract method validators, %w", err) - } - - byteResponse, decodeError := hex.DecodeHex(response) - if decodeError != nil { - return nil, fmt.Errorf("unable to decode hex response, %w", decodeError) - } - - return staking.DecodeValidators(validatorsMethod, byteResponse) -} - -// StakeAmount is a helper function for staking an amount on the Staking SC -func StakeAmount( - from types.Address, - senderKey *ecdsa.PrivateKey, - amount *big.Int, - srv *TestServer, -) error { - // Stake Balance - txn := &PreparedTransaction{ - From: from, - To: &staking.AddrStakingContract, - GasPrice: ethgo.Gwei(1), - Gas: 1000000, - Value: amount, - Input: MethodSig("stake"), - } - - ctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout) - defer cancel() - - _, err := srv.SendRawTx(ctx, txn, senderKey) - - if err != nil { - return fmt.Errorf("unable to call Staking contract method stake, %w", err) - } - - return nil -} - -// UnstakeAmount is a helper function for unstaking the entire amount on the Staking SC 
-func UnstakeAmount( - from types.Address, - senderKey *ecdsa.PrivateKey, - srv *TestServer, -) (*ethgo.Receipt, error) { - // Stake Balance - txn := &PreparedTransaction{ - From: from, - To: &staking.AddrStakingContract, - GasPrice: ethgo.Gwei(1), - Gas: DefaultGasLimit, - Value: big.NewInt(0), - Input: MethodSig("unstake"), - } - - ctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout) - defer cancel() - - receipt, err := srv.SendRawTx(ctx, txn, senderKey) - - if err != nil { - return nil, fmt.Errorf("unable to call Staking contract method unstake, %w", err) - } - - return receipt, nil -} - -// GetStakedAmount is a helper function for getting the staked amount on the Staking SC -func GetStakedAmount(from types.Address, rpcClient *jsonrpc.Client) (*big.Int, error) { - stakedAmountMethod, ok := abis.StakingABI.Methods["stakedAmount"] - if !ok { - return nil, errors.New("stakedAmount method doesn't exist in Staking contract ABI") - } - - toAddress := ethgo.Address(staking.AddrStakingContract) - selector := stakedAmountMethod.ID() - response, err := rpcClient.Eth().Call( - ðgo.CallMsg{ - From: ethgo.Address(from), - To: &toAddress, - Data: selector, - GasPrice: 1000000000, - Value: big.NewInt(0), - }, - ethgo.Latest, - ) - - if err != nil { - return nil, fmt.Errorf("unable to call Staking contract method stakedAmount, %w", err) - } - - bigResponse, decodeErr := common.ParseUint256orHex(&response) - if decodeErr != nil { - return nil, fmt.Errorf("unable to decode hex response") - } - - return bigResponse, nil -} - -func EcrecoverFromBlockhash(hash types.Hash, signature []byte) (types.Address, error) { - pubKey, err := crypto.RecoverPubkey(signature, crypto.Keccak256(hash.Bytes())) - if err != nil { - return types.Address{}, err - } - - return crypto.PubKeyToAddress(pubKey), nil -} - func MultiJoinSerial(t *testing.T, srvs []*TestServer) { t.Helper() @@ -560,3 +426,14 @@ func WaitForServersToSeal(servers []*TestServer, desiredHeight uint64) []error { return waitErrors } + +func initLogsDir(t *testing.T) (string, error) { + t.Helper() + logsDir := path.Join("..", fmt.Sprintf("e2e-logs-%d", time.Now().UTC().Unix()), t.Name()) + + if err := common.CreateDirSafe(logsDir, 0755); err != nil { + return "", err + } + + return logsDir, nil +} diff --git a/e2e/framework/ibft.go b/e2e/framework/ibft.go deleted file mode 100644 index 32d3b490a5..0000000000 --- a/e2e/framework/ibft.go +++ /dev/null @@ -1,145 +0,0 @@ -package framework - -import ( - "context" - "fmt" - "os" - "path" - "testing" - "time" - - "github.com/0xPolygon/polygon-edge/helper/common" - "github.com/0xPolygon/polygon-edge/types" -) - -type IBFTServersManager struct { - t *testing.T - servers []*TestServer -} - -type IBFTServerConfigCallback func(index int, config *TestServerConfig) - -var startTime int64 - -func init() { - startTime = time.Now().UTC().UnixMilli() -} - -func NewIBFTServersManager( - t *testing.T, - numNodes int, - ibftDirPrefix string, - callback IBFTServerConfigCallback, -) *IBFTServersManager { - t.Helper() - - dataDir, err := tempDir() - if err != nil { - t.Fatal(err) - } - - srvs := make([]*TestServer, 0, numNodes) - - t.Cleanup(func() { - for _, s := range srvs { - s.Stop() - } - if err := os.RemoveAll(dataDir); err != nil { - t.Log(err) - } - }) - - bootnodes := make([]string, 0, numNodes) - genesisValidators := make([]string, 0, numNodes) - - logsDir, err := initLogsDir(t) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < numNodes; i++ { - srv := NewTestServer(t, dataDir, func(config 
*TestServerConfig) { - config.SetConsensus(ConsensusIBFT) - config.SetIBFTDirPrefix(ibftDirPrefix) - config.SetIBFTDir(fmt.Sprintf("%s%d", ibftDirPrefix, i)) - config.SetLogsDir(logsDir) - config.SetSaveLogs(true) - config.SetName(fmt.Sprintf("node-%d", i)) - callback(i, config) - }) - res, err := srv.SecretsInit() - - if err != nil { - t.Fatal(err) - } - - libp2pAddr := ToLocalIPv4LibP2pAddr(srv.Config.LibP2PPort, res.NodeID) - - srvs = append(srvs, srv) - bootnodes = append(bootnodes, libp2pAddr) - genesisValidators = append(genesisValidators, res.Address) - } - - srv := srvs[0] - srv.Config.SetBootnodes(bootnodes) - // Set genesis staking balance for genesis validators - for i, v := range genesisValidators { - addr := types.StringToAddress(v) - conf := srvs[i].Config - - if conf.GenesisValidatorBalance != nil { - srv.Config.Premine(addr, conf.GenesisValidatorBalance) - } - } - - if err := srv.GenerateGenesis(); err != nil { - t.Fatal(err) - } - - if err := srv.GenesisPredeploy(); err != nil { - t.Fatal(err) - } - - return &IBFTServersManager{t, srvs} -} - -func (m *IBFTServersManager) StartServers(ctx context.Context) { - for idx, srv := range m.servers { - if err := srv.Start(ctx); err != nil { - m.t.Logf("server %d failed to start: %+v", idx, err) - m.t.Fatal(err) - } - } - - for idx, srv := range m.servers { - if err := srv.WaitForReady(ctx); err != nil { - m.t.Logf("server %d couldn't advance block: %+v", idx, err) - m.t.Fatal(err) - } - } -} - -func (m *IBFTServersManager) StopServers() { - for _, srv := range m.servers { - srv.Stop() - } -} - -func (m *IBFTServersManager) GetServer(i int) *TestServer { - if i >= len(m.servers) { - return nil - } - - return m.servers[i] -} - -func initLogsDir(t *testing.T) (string, error) { - t.Helper() - logsDir := path.Join("..", fmt.Sprintf("e2e-logs-%d", startTime), t.Name()) - - if err := common.CreateDirSafe(logsDir, 0755); err != nil { - return "", err - } - - return logsDir, nil -} diff --git a/e2e/framework/testserver.go b/e2e/framework/testserver.go index 13cff2db5c..ff89d2da91 100644 --- a/e2e/framework/testserver.go +++ b/e2e/framework/testserver.go @@ -21,8 +21,6 @@ import ( "testing" "time" - "github.com/hashicorp/go-hclog" - "github.com/libp2p/go-libp2p/core/peer" "github.com/umbracle/ethgo" "github.com/umbracle/ethgo/jsonrpc" "github.com/umbracle/ethgo/wallet" @@ -34,22 +32,13 @@ import ( "github.com/0xPolygon/polygon-edge/command" "github.com/0xPolygon/polygon-edge/command/genesis" "github.com/0xPolygon/polygon-edge/command/genesis/predeploy" - ibftSwitch "github.com/0xPolygon/polygon-edge/command/ibft/switch" - initCmd "github.com/0xPolygon/polygon-edge/command/secrets/init" "github.com/0xPolygon/polygon-edge/command/server" - "github.com/0xPolygon/polygon-edge/consensus/ibft/fork" - ibftOp "github.com/0xPolygon/polygon-edge/consensus/ibft/proto" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/helper/common" - stakingHelper "github.com/0xPolygon/polygon-edge/helper/staking" "github.com/0xPolygon/polygon-edge/helper/tests" - "github.com/0xPolygon/polygon-edge/network" - "github.com/0xPolygon/polygon-edge/secrets" - "github.com/0xPolygon/polygon-edge/secrets/local" "github.com/0xPolygon/polygon-edge/server/proto" txpoolProto "github.com/0xPolygon/polygon-edge/txpool/proto" "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" ) type TestServerConfigCallback func(*TestServerConfig) @@ -85,7 +74,6 @@ func NewTestServer(t *testing.T, rootDir string, callback 
TestServerConfigCallba JSONRPCPort: ports[2].Port(), RootDir: rootDir, Signer: crypto.NewSigner(chain.AllForksEnabled.At(0), 100), - ValidatorType: validators.ECDSAValidatorType, } if callback != nil { @@ -149,17 +137,6 @@ func (t *TestServer) TxnPoolOperator() txpoolProto.TxnPoolOperatorClient { return txpoolProto.NewTxnPoolOperatorClient(conn) } -func (t *TestServer) IBFTOperator() ibftOp.IbftOperatorClient { - conn, err := grpc.Dial( - t.GrpcAddr(), - grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - t.t.Fatal(err) - } - - return ibftOp.NewIbftOperatorClient(conn) -} - func (t *TestServer) ReleaseReservedPorts() { for _, p := range t.Config.ReservedPorts { if err := p.Close(); err != nil { @@ -189,84 +166,6 @@ type InitIBFTResult struct { NodeID string } -func (t *TestServer) SecretsInit() (*InitIBFTResult, error) { - secretsInitCmd := initCmd.GetCommand() - - var args []string - - commandSlice := strings.Split(fmt.Sprintf("secrets %s", secretsInitCmd.Use), " ") - args = append(args, commandSlice...) - args = append(args, "--data-dir", filepath.Join(t.Config.IBFTDir, "tmp")) - args = append(args, "--insecure") - - cmd := exec.Command(resolveBinary(), args...) //nolint:gosec - cmd.Dir = t.Config.RootDir - - if _, err := cmd.Output(); err != nil { - return nil, err - } - - res := &InitIBFTResult{} - - localSecretsManager, factoryErr := local.SecretsManagerFactory( - nil, - &secrets.SecretsManagerParams{ - Logger: hclog.NewNullLogger(), - Extra: map[string]interface{}{ - secrets.Path: filepath.Join(cmd.Dir, t.Config.IBFTDir), - }, - }) - if factoryErr != nil { - return nil, factoryErr - } - - // Generate the IBFT validator private key - validatorKey, validatorKeyEncoded, keyErr := crypto.GenerateAndEncodeECDSAPrivateKey() - if keyErr != nil { - return nil, keyErr - } - - // Write the validator private key to the secrets manager storage - if setErr := localSecretsManager.SetSecret(secrets.ValidatorKey, validatorKeyEncoded); setErr != nil { - return nil, setErr - } - - // Generate the libp2p private key - libp2pKey, libp2pKeyEncoded, keyErr := network.GenerateAndEncodeLibp2pKey() - if keyErr != nil { - return nil, keyErr - } - - // Write the networking private key to the secrets manager storage - if setErr := localSecretsManager.SetSecret(secrets.NetworkKey, libp2pKeyEncoded); setErr != nil { - return nil, setErr - } - - if t.Config.ValidatorType == validators.BLSValidatorType { - // Generate the BLS Key - _, blsKeyEncoded, keyErr := crypto.GenerateAndEncodeBLSSecretKey() - if keyErr != nil { - return nil, keyErr - } - - // Write the networking private key to the secrets manager storage - if setErr := localSecretsManager.SetSecret(secrets.ValidatorBLSKey, blsKeyEncoded); setErr != nil { - return nil, setErr - } - } - - // Get the node ID from the private key - nodeID, err := peer.IDFromPrivateKey(libp2pKey) - if err != nil { - return nil, err - } - - res.Address = crypto.PubKeyToAddress(&validatorKey.PublicKey).String() - res.NodeID = nodeID.String() - - return res, nil -} - func (t *TestServer) GenerateGenesis() error { genesisCmd := genesis.GetCommand() args := []string{ @@ -287,28 +186,10 @@ func (t *TestServer) GenerateGenesis() error { // add consensus flags switch t.Config.Consensus { - case ConsensusIBFT: - args = append( - args, - "--consensus", "ibft", - "--ibft-validator-type", string(t.Config.ValidatorType), - ) - - if t.Config.IBFTDirPrefix == "" { - return errors.New("prefix of IBFT directory is not set") - } - - args = append(args, 
"--validators-prefix", t.Config.IBFTDirPrefix) - - if t.Config.EpochSize != 0 { - args = append(args, "--epoch-size", strconv.FormatUint(t.Config.EpochSize, 10)) - } - case ConsensusDev: args = append( args, "--consensus", "dev", - "--ibft-validator-type", string(t.Config.ValidatorType), ) // Set up any initial staker addresses for the predeployed Staking SC @@ -328,11 +209,11 @@ func (t *TestServer) GenerateGenesis() error { args = append(args, "--pos") if t.Config.MinValidatorCount == 0 { - t.Config.MinValidatorCount = stakingHelper.MinValidatorCount + t.Config.MinValidatorCount = command.MinValidatorCount } if t.Config.MaxValidatorCount == 0 { - t.Config.MaxValidatorCount = stakingHelper.MaxValidatorCount + t.Config.MaxValidatorCount = command.MaxValidatorCount } args = append(args, "--min-validator-count", strconv.FormatUint(t.Config.MinValidatorCount, 10)) @@ -426,8 +307,6 @@ func (t *TestServer) Start(ctx context.Context) error { } switch t.Config.Consensus { - case ConsensusIBFT: - args = append(args, "--data-dir", filepath.Join(t.Config.RootDir, t.Config.IBFTDir)) case ConsensusDev: args = append(args, "--data-dir", t.Config.RootDir) args = append(args, "--dev") @@ -452,10 +331,6 @@ func (t *TestServer) Start(ctx context.Context) error { args = append(args, "--block-gas-target", *common.EncodeUint64(t.Config.BlockGasTarget)) } - if t.Config.IBFTBaseTimeout != 0 { - args = append(args, "--ibft-base-timeout", strconv.FormatUint(t.Config.IBFTBaseTimeout, 10)) - } - t.ReleaseReservedPorts() // Start the server @@ -495,44 +370,6 @@ func (t *TestServer) Start(ctx context.Context) error { return nil } -func (t *TestServer) SwitchIBFTType(typ fork.IBFTType, from uint64, to, deployment *uint64) error { - t.t.Helper() - - ibftSwitchCmd := ibftSwitch.GetCommand() - args := make([]string, 0) - - commandSlice := strings.Split(fmt.Sprintf("ibft %s", ibftSwitchCmd.Use), " ") - - args = append(args, commandSlice...) - args = append(args, - // add custom chain - "--chain", filepath.Join(t.Config.RootDir, "genesis.json"), - "--type", string(typ), - "--from", strconv.FormatUint(from, 10), - ) - - // Default ibft validator type for e2e tests is ECDSA - args = append(args, "--ibft-validator-type", string(validators.ECDSAValidatorType)) - - if to != nil { - args = append(args, "--to", strconv.FormatUint(*to, 10)) - } - - if deployment != nil { - args = append(args, "--deployment", strconv.FormatUint(*deployment, 10)) - } - - // Start the server - t.cmd = exec.Command(resolveBinary(), args...) 
//nolint:gosec - t.cmd.Dir = t.Config.RootDir - - stdout := t.GetStdout() - t.cmd.Stdout = stdout - t.cmd.Stderr = stdout - - return t.cmd.Run() -} - // SignTx is a helper method for signing transactions func (t *TestServer) SignTx( transaction *types.Transaction, diff --git a/e2e/genesis_test.go b/e2e/genesis_test.go deleted file mode 100644 index 3d0eaa3701..0000000000 --- a/e2e/genesis_test.go +++ /dev/null @@ -1,426 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "math/big" - "os" - "path/filepath" - "regexp" - "strings" - "testing" - "time" - - "github.com/0xPolygon/polygon-edge/command" - "github.com/0xPolygon/polygon-edge/helper/hex" - "github.com/0xPolygon/polygon-edge/types" - "github.com/umbracle/ethgo" - "github.com/umbracle/ethgo/abi" - - "github.com/0xPolygon/polygon-edge/e2e/framework" - "github.com/0xPolygon/polygon-edge/helper/tests" - "github.com/stretchr/testify/assert" -) - -// TestGenesisBlockGasLimit tests the genesis block limit setting -func TestGenesisBlockGasLimit(t *testing.T) { - testTable := []struct { - name string - blockGasLimit uint64 - expectedBlockGasLimit uint64 - }{ - { - "Custom block gas limit", - 5000000000, - 5000000000, - }, - { - "Default block gas limit", - 0, - command.DefaultGenesisGasLimit, - }, - } - - for _, testCase := range testTable { - t.Run(testCase.name, func(t *testing.T) { - _, addr := tests.GenerateKeyAndAddr(t) - - ibftManager := framework.NewIBFTServersManager( - t, - 1, - IBFTDirPrefix, - func(i int, config *framework.TestServerConfig) { - config.Premine(addr, framework.EthToWei(10)) - config.SetBlockTime(1) - - if testCase.blockGasLimit != 0 { - config.SetBlockLimit(testCase.blockGasLimit) - } - }, - ) - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - ibftManager.StartServers(ctx) - srv := ibftManager.GetServer(0) - - client := srv.JSONRPC() - - block, err := client.Eth().GetBlockByNumber(0, true) - if err != nil { - t.Fatalf("failed to retrieve block: %v", err) - } - - assert.Equal(t, testCase.expectedBlockGasLimit, block.GasLimit) - }) - } -} - -func extractABIFromJSONBody(body string) string { - r, _ := regexp.Compile("\"abi\": \\[(?s).*\\]") - - match := r.FindString(body) - - return match[strings.Index(match, "["):] -} - -// ArgGroup is the type of the third argument of HRM contract -type ArgGroup struct { - name string - number uint - flag bool - people ArgHumans -} - -// String returns text for predeploy flag -func (g *ArgGroup) String() string { - return fmt.Sprintf( - "[%s, %d, %t, %s]", - g.name, - g.number, - g.flag, - g.people.String(), - ) -} - -// ArgGroups is a collection of ArgGroup -type ArgGroups []ArgGroup - -// String returns the text for predeploy flag -func (gs *ArgGroups) String() string { - groupStrs := make([]string, len(*gs)) - - for i, group := range *gs { - groupStrs[i] = group.String() - } - - return fmt.Sprintf("[%s]", strings.Join(groupStrs, ",")) -} - -// ArgGroup is the type of the third argument of HRM contract -type ArgHuman struct { - addr string - name string - number int -} - -// String returns the text for predeploy flag -func (h *ArgHuman) String() string { - return fmt.Sprintf("[%s, %s, %d]", h.addr, h.name, h.number) -} - -// ArgHumans is a collection of ArgHuman -type ArgHumans []ArgHuman - -// String returns the text for predeploy flag -func (hs *ArgHumans) String() string { - humanStrs := make([]string, len(*hs)) - - for i, human := range *hs { - humanStrs[i] = human.String() - } - - return fmt.Sprintf("[%s]", strings.Join(humanStrs, 
",")) -} - -func TestGenesis_Predeployment(t *testing.T) { - t.Parallel() - - var ( - artifactPath = "./metadata/predeploy_abi.json" - - _, senderAddr = tests.GenerateKeyAndAddr(t) - contractAddr = types.StringToAddress("1200") - - // predeploy arguments - id = 1000000 - name = "TestContract" - groups = ArgGroups{ - { - name: "group1", - number: 1, - flag: true, - people: ArgHumans{ - { - addr: types.StringToAddress("1").String(), - name: "A", - number: 11, - }, - { - addr: types.StringToAddress("2").String(), - name: "B", - number: 12, - }, - }, - }, - { - name: "group2", - number: 2, - flag: false, - people: ArgHumans{ - { - addr: types.StringToAddress("3").String(), - name: "C", - number: 21, - }, - { - addr: types.StringToAddress("4").String(), - name: "D", - number: 22, - }, - }, - }, - } - ) - - artifactsPath, err := filepath.Abs(artifactPath) - if err != nil { - t.Fatalf("unable to get working directory, %v", err) - } - - ibftManager := framework.NewIBFTServersManager( - t, - 1, - IBFTDirPrefix, - func(_ int, config *framework.TestServerConfig) { - config.Premine(senderAddr, framework.EthToWei(10)) - config.SetPredeployParams(&framework.PredeployParams{ - ArtifactsPath: artifactsPath, - PredeployAddress: contractAddr.String(), - ConstructorArgs: []string{ - fmt.Sprintf("%d", id), - name, - groups.String(), - }, - }) - }, - ) - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - - t.Cleanup(func() { - cancel() - }) - - ibftManager.StartServers(ctx) - - srv := ibftManager.GetServer(0) - clt := srv.JSONRPC() - - // Extract the contract ABI from the metadata test file - content, err := os.ReadFile(artifactPath) - if err != nil { - t.Fatalf("unable to open JSON file, %v", err) - } - - predeployABI := abi.MustNewABI( - extractABIFromJSONBody(string(content)), - ) - - callSCMethod := func(t *testing.T, methodName string, args ...interface{}) interface{} { - t.Helper() - - method, ok := predeployABI.Methods[methodName] - assert.Truef(t, ok, "%s method not present in SC", methodName) - - data := method.ID() - - if len(args) > 0 { - input, err := method.Inputs.Encode(args) - assert.NoError(t, err) - - data = append(data, input...) 
- } - - toAddress := ethgo.Address(contractAddr) - - response, err := clt.Eth().Call( - ðgo.CallMsg{ - From: ethgo.Address(senderAddr), - To: &toAddress, - GasPrice: 100000000, - Value: big.NewInt(0), - Data: data, - }, - ethgo.BlockNumber(1), - ) - assert.NoError(t, err, "failed to call SC method") - - byteResponse, decodeError := hex.DecodeHex(response) - if decodeError != nil { - t.Fatalf("unable to decode hex response, %v", decodeError) - } - - decodedResults, err := method.Outputs.Decode(byteResponse) - if err != nil { - t.Fatalf("unable to decode response, %v", err) - } - - results, ok := decodedResults.(map[string]interface{}) - if !ok { - t.Fatal("failed type assertion from decodedResults to map") - } - - return results["0"] - } - - t.Run("id is set correctly", func(t *testing.T) { - t.Parallel() - - rawResID := callSCMethod(t, "getID") - resID, ok := rawResID.(*big.Int) - - assert.Truef(t, ok, "failed to cast the result to *big.Int, actual %T", rawResID) - assert.Zero( - t, - big.NewInt(int64(id)).Cmp(resID), - ) - }) - - t.Run("name is set correctly", func(t *testing.T) { - t.Parallel() - - rawResName := callSCMethod(t, "getName") - resName, ok := rawResName.(string) - - assert.Truef(t, ok, "failed to cast the result to string, actual %T", rawResName) - assert.Equal( - t, - name, - resName, - ) - }) - - testHuman := func(t *testing.T, groupIndex int, humanIndex int) { - t.Helper() - - human := groups[groupIndex].people[humanIndex] - - t.Run("human addr is set correctly", func(t *testing.T) { - t.Parallel() - - rawResAddr := callSCMethod(t, "getHumanAddr", groupIndex, humanIndex) - resAddr, ok := rawResAddr.(ethgo.Address) - - assert.Truef(t, ok, "failed to cast the result to ethgo.Address, actual %T", rawResAddr) - assert.Equal( - t, - human.addr, - resAddr.String(), - ) - }) - - t.Run("human name is set correctly", func(t *testing.T) { - t.Parallel() - - rawResName := callSCMethod(t, "getHumanName", groupIndex, humanIndex) - resName, ok := rawResName.(string) - - assert.Truef(t, ok, "failed to cast the result to string, actual %T", rawResName) - assert.Equal( - t, - human.name, - resName, - ) - }) - - t.Run("human number is set correctly", func(t *testing.T) { - t.Parallel() - - rawResNumber := callSCMethod(t, "getHumanNumber", groupIndex, humanIndex) - resNumber, ok := rawResNumber.(*big.Int) - - assert.Truef(t, ok, "failed to cast the result to *big.Int, actual %T", resNumber) - assert.Zero( - t, - big.NewInt(int64(human.number)).Cmp(resNumber), - ) - }) - } - - testGroup := func(t *testing.T, groupIndex int) { - t.Helper() - - group := groups[groupIndex] - - t.Run("group name is set correctly", func(t *testing.T) { - t.Parallel() - - rawResName := callSCMethod(t, "getGroupName", groupIndex) - resName, ok := rawResName.(string) - - assert.Truef(t, ok, "failed to cast the result to string, actual %T", rawResName) - assert.Equal( - t, - group.name, - resName, - ) - }) - - t.Run("group number is set correctly", func(t *testing.T) { - t.Parallel() - - rawResNumber := callSCMethod(t, "getGroupNumber", groupIndex) - resNumber, ok := rawResNumber.(*big.Int) - - assert.Truef(t, ok, "failed to cast the result to int, actual %T", rawResNumber) - assert.Zero( - t, - big.NewInt(int64(group.number)).Cmp(resNumber), - ) - }) - - t.Run("group flag is set correctly", func(t *testing.T) { - t.Parallel() - - rawResFlag := callSCMethod(t, "getGroupFlag", groupIndex) - resFlag, ok := rawResFlag.(bool) - - assert.Truef(t, ok, "failed to cast the result to bool, actual %T", rawResFlag) - 
assert.Equal( - t, - group.flag, - resFlag, - ) - }) - - for humanIndex := range group.people { - humanIndex := humanIndex - t.Run(fmt.Sprintf("groups[%d].people[%d] is set correctly", groupIndex, humanIndex), func(t *testing.T) { - t.Parallel() - - testHuman(t, groupIndex, humanIndex) - }) - } - } - - for idx := range groups { - idx := idx - t.Run(fmt.Sprintf("groups[%d] is set correctly", idx), func(t *testing.T) { - t.Parallel() - - testGroup(t, idx) - }) - } -} diff --git a/e2e/ibft_test.go b/e2e/ibft_test.go deleted file mode 100644 index ef2645d07b..0000000000 --- a/e2e/ibft_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package e2e - -import ( - "context" - "math/big" - "testing" - "time" - - ibftSigner "github.com/0xPolygon/polygon-edge/consensus/ibft/signer" - "github.com/0xPolygon/polygon-edge/e2e/framework" - "github.com/0xPolygon/polygon-edge/helper/tests" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/stretchr/testify/assert" - "github.com/umbracle/ethgo" -) - -// TestIbft_Transfer sends a transfer transaction (EOA -> EOA) -// and verifies it was mined -func TestIbft_Transfer(t *testing.T) { - const defaultBlockTime uint64 = 2 - - testCases := []struct { - name string - blockTime uint64 - ibftBaseTimeout uint64 - validatorType validators.ValidatorType - }{ - { - name: "default block time", - blockTime: defaultBlockTime, - ibftBaseTimeout: 0, // use default value - validatorType: validators.ECDSAValidatorType, - }, - { - name: "longer block time", - blockTime: 10, - ibftBaseTimeout: 20, - validatorType: validators.ECDSAValidatorType, - }, - { - name: "with BLS", - blockTime: defaultBlockTime, - ibftBaseTimeout: 0, // use default value - validatorType: validators.BLSValidatorType, - }, - } - - var ( - senderKey, senderAddr = tests.GenerateKeyAndAddr(t) - _, receiverAddr = tests.GenerateKeyAndAddr(t) - ) - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - ibftManager := framework.NewIBFTServersManager(t, - IBFTMinNodes, - IBFTDirPrefix, - func(i int, config *framework.TestServerConfig) { - config.Premine(senderAddr, framework.EthToWei(10)) - config.SetBlockTime(tc.blockTime) - config.SetIBFTBaseTimeout(tc.ibftBaseTimeout) - config.SetValidatorType(tc.validatorType) - }, - ) - - var ( - startTimeout = time.Duration(tc.ibftBaseTimeout+60) * time.Second - txTimeout = time.Duration(tc.ibftBaseTimeout+20) * time.Second - ) - - ctxForStart, cancelStart := context.WithTimeout(context.Background(), startTimeout) - defer cancelStart() - - ibftManager.StartServers(ctxForStart) - - txn := &framework.PreparedTransaction{ - From: senderAddr, - To: &receiverAddr, - GasPrice: ethgo.Gwei(2), - Gas: 1000000, - Value: framework.EthToWei(1), - } - - ctxForTx, cancelTx := context.WithTimeout(context.Background(), txTimeout) - defer cancelTx() - - // send tx and wait for receipt - receipt, err := ibftManager. - GetServer(0). 
- SendRawTx(ctxForTx, txn, senderKey) - - assert.NoError(t, err) - if receipt == nil { - t.Fatalf("receipt not received") - } - - assert.NotNil(t, receipt.TransactionHash) - }) - } -} - -func TestIbft_TransactionFeeRecipient(t *testing.T) { - testCases := []struct { - name string - contractCall bool - txAmount *big.Int - }{ - { - name: "transfer transaction", - contractCall: false, - txAmount: framework.EthToWei(1), - }, - { - name: "contract function execution", - contractCall: true, - txAmount: big.NewInt(0), - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - senderKey, senderAddr := tests.GenerateKeyAndAddr(t) - _, receiverAddr := tests.GenerateKeyAndAddr(t) - - ibftManager := framework.NewIBFTServersManager( - t, - IBFTMinNodes, - IBFTDirPrefix, - func(i int, config *framework.TestServerConfig) { - config.Premine(senderAddr, framework.EthToWei(10)) - }) - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - ibftManager.StartServers(ctx) - - srv := ibftManager.GetServer(0) - clt := srv.JSONRPC() - - txn := &framework.PreparedTransaction{ - From: senderAddr, - To: &receiverAddr, - GasPrice: ethgo.Gwei(1), - Gas: 1000000, - Value: tc.txAmount, - } - - if tc.contractCall { - // Deploy contract - deployTx := &framework.PreparedTransaction{ - From: senderAddr, - GasPrice: ethgo.Gwei(1), // fees should be greater than base fee - Gas: 1000000, - Value: big.NewInt(0), - Input: framework.MethodSig("setA1"), - } - ctx, cancel := context.WithTimeout(context.Background(), framework.DefaultTimeout) - defer cancel() - receipt, err := srv.SendRawTx(ctx, deployTx, senderKey) - assert.NoError(t, err) - assert.NotNil(t, receipt) - - contractAddr := types.Address(receipt.ContractAddress) - txn.To = &contractAddr - txn.Input = framework.MethodSig("setA1") - } - - ctx1, cancel1 := context.WithTimeout(context.Background(), framework.DefaultTimeout) - defer cancel1() - receipt, err := srv.SendRawTx(ctx1, txn, senderKey) - assert.NoError(t, err) - - if receipt == nil { - t.Fatalf("receipt not received") - } - - // Get the block proposer from the extra data seal - assert.NotNil(t, receipt.BlockHash) - block, err := clt.Eth().GetBlockByHash(receipt.BlockHash, false) - assert.NoError(t, err) - extraData := &ibftSigner.IstanbulExtra{ - Validators: validators.NewECDSAValidatorSet(), - CommittedSeals: &ibftSigner.SerializedSeal{}, - ParentCommittedSeals: &ibftSigner.SerializedSeal{}, - } - extraDataWithoutVanity := block.ExtraData[ibftSigner.IstanbulExtraVanity:] - - err = extraData.UnmarshalRLP(extraDataWithoutVanity) - assert.NoError(t, err) - - proposerAddr, err := framework.EcrecoverFromBlockhash(types.Hash(block.Hash), extraData.ProposerSeal) - assert.NoError(t, err) - - // Given that this is the first transaction on the blockchain, proposer's balance should be equal to the tx fee - balanceProposer, err := clt.Eth().GetBalance(ethgo.Address(proposerAddr), ethgo.Latest) - assert.NoError(t, err) - - txFee := new(big.Int).Mul(new(big.Int).SetUint64(receipt.GasUsed), txn.GasPrice) - assert.Equalf(t, txFee, balanceProposer, "Proposer didn't get appropriate transaction fee") - }) - } -} diff --git a/e2e/jsonrpc_test.go b/e2e/jsonrpc_test.go deleted file mode 100644 index f74eddd52a..0000000000 --- a/e2e/jsonrpc_test.go +++ /dev/null @@ -1,279 +0,0 @@ -package e2e - -import ( - "context" - "encoding/hex" - - "math/big" - "testing" - - "github.com/0xPolygon/polygon-edge/e2e/framework" - "github.com/0xPolygon/polygon-edge/types" - 
"github.com/stretchr/testify/require" - "github.com/umbracle/ethgo" - "github.com/umbracle/ethgo/wallet" -) - -var ( - one = big.NewInt(1) -) - -func TestJsonRPC(t *testing.T) { - fund, err := wallet.GenerateKey() - require.NoError(t, err) - - bytecode, err := hex.DecodeString(sampleByteCode) - require.NoError(t, err) - - ibftManager := framework.NewIBFTServersManager( - t, - 1, - IBFTDirPrefix, - func(i int, config *framework.TestServerConfig) { - config.Premine(types.Address(fund.Address()), ethgo.Ether(10)) - config.SetBlockTime(1) - }, - ) - - ibftManager.StartServers(context.Background()) - defer ibftManager.StopServers() - - srv := ibftManager.GetServer(0) - - client := srv.JSONRPC().Eth() - - t.Run("eth_getBalance", func(t *testing.T) { - key1, err := wallet.GenerateKey() - require.NoError(t, err) - - // Test. return zero if the account does not exists - balance1, err := client.GetBalance(key1.Address(), ethgo.Latest) - require.NoError(t, err) - require.Equal(t, balance1, big.NewInt(0)) - - // Test. return the balance of an account - newBalance := ethgo.Ether(1) - txn, err := srv.Txn(fund).Transfer(key1.Address(), newBalance).Send() - require.NoError(t, err) - txn.NoFail(t) - - balance1, err = client.GetBalance(key1.Address(), ethgo.Latest) - require.NoError(t, err) - require.Equal(t, balance1, newBalance) - - // Test. return 0 if the balance of an existing account is empty - gasPrice, err := client.GasPrice() - require.NoError(t, err) - - toAddr := key1.Address() - msg := ðgo.CallMsg{ - From: fund.Address(), - To: &toAddr, - Value: newBalance, - GasPrice: gasPrice, - } - - estimatedGas, err := client.EstimateGas(msg) - require.NoError(t, err) - txPrice := gasPrice * estimatedGas - // subtract gasPrice * estimatedGas from the balance and transfer the rest to the other account - // in order to leave no funds on the account - amountToSend := new(big.Int).Sub(newBalance, big.NewInt(int64(txPrice))) - txn, err = srv.Txn(key1).Transfer(fund.Address(), amountToSend). - GasLimit(estimatedGas). - Send() - require.NoError(t, err) - txn.NoFail(t) - - balance1, err = client.GetBalance(key1.Address(), ethgo.Latest) - require.NoError(t, err) - require.Equal(t, big.NewInt(0), balance1) - }) - - t.Run("eth_getTransactionCount", func(t *testing.T) { - key1, err := wallet.GenerateKey() - require.NoError(t, err) - - nonce, err := client.GetNonce(key1.Address(), ethgo.Latest) - require.Equal(t, uint64(0), nonce) - require.NoError(t, err) - - txn, err := srv.Txn(fund).Transfer(key1.Address(), big.NewInt(10000000000000000)).Send() - require.NoError(t, err) - txn.NoFail(t) - - // Test. increase the nonce with new transactions - txn = srv.Txn(key1) - txn, err = txn.Send() - require.NoError(t, err) - txn.NoFail(t) - - nonce1, err := client.GetNonce(key1.Address(), ethgo.Latest) - require.NoError(t, err) - require.Equal(t, nonce1, uint64(1)) - - // Test. 
you can query the nonce at any block number in time - nonce1, err = client.GetNonce(key1.Address(), ethgo.BlockNumber(txn.Receipt().BlockNumber-1)) - require.NoError(t, err) - require.Equal(t, nonce1, uint64(0)) - - block, err := client.GetBlockByNumber(ethgo.BlockNumber(txn.Receipt().BlockNumber)-1, false) - require.NoError(t, err) - - _, err = client.GetNonce(key1.Address(), ethgo.BlockNumber(block.Number)) - require.NoError(t, err) - }) - - t.Run("eth_getStorage", func(t *testing.T) { - key1, err := wallet.GenerateKey() - require.NoError(t, err) - - txn := srv.Txn(fund) - txn, err = txn.Transfer(key1.Address(), one).Send() - require.NoError(t, err) - txn.NoFail(t) - - txn = srv.Txn(fund) - txn, err = txn.Deploy(bytecode).Send() - require.NoError(t, err) - txn.NoFail(t) - - resp, err := client.GetStorageAt(txn.Receipt().ContractAddress, ethgo.Hash{}, ethgo.Latest) - require.NoError(t, err) - require.Equal(t, "0x0000000000000000000000000000000000000000000000000000000000000000", resp.String()) - }) - - t.Run("eth_getCode", func(t *testing.T) { - // we use a predefined private key so that the deployed contract address is deterministic. - // Note that in order to work, this private key should only be used for this test. - priv, err := hex.DecodeString("2c15bd0dc992a47ca660983ae4b611f4ffb6178e14e04e2b34d153f3a74ce741") - require.NoError(t, err) - key1, err := wallet.NewWalletFromPrivKey(priv) - require.NoError(t, err) - - // fund the account so that it can deploy a contract - txn, err := srv.Txn(fund).Transfer(key1.Address(), big.NewInt(10000000000000000)).Send() - require.NoError(t, err) - txn.NoFail(t) - - codeAddr := ethgo.HexToAddress("0xDBfca0c43cA12759256a7Dd587Dc4c6EEC1D89A5") - - // Test. We get empty code from an empty contract - code, err := client.GetCode(codeAddr, ethgo.Latest) - require.NoError(t, err) - require.Equal(t, code, "0x") - - txn = srv.Txn(key1) - txn, err = txn.Deploy(bytecode).Send() - require.NoError(t, err) - txn.NoFail(t) - - receipt := txn.Receipt() - - // Test. The deployed address is the one expected - require.Equal(t, codeAddr, receipt.ContractAddress) - - // Test. We can retrieve the code by address on latest, block number and block hash - cases := []ethgo.BlockNumberOrHash{ - ethgo.Latest, - ethgo.BlockNumber(receipt.BlockNumber), - } - for _, c := range cases { - code, err = client.GetCode(codeAddr, c) - require.NoError(t, err) - require.NotEqual(t, code, "0x") - } - - // Test. We can query in past state (when the code was empty) - code, err = client.GetCode(codeAddr, ethgo.BlockNumber(receipt.BlockNumber-1)) - require.NoError(t, err) - require.Equal(t, code, "0x") - - // Test. 
Query by pending should default to latest - code, err = client.GetCode(codeAddr, ethgo.Pending) - require.NoError(t, err) - require.NotEqual(t, code, "0x") - }) - - t.Run("eth_getBlockByHash", func(t *testing.T) { - key1, err := wallet.GenerateKey() - require.NoError(t, err) - txn := srv.Txn(fund) - txn, err = txn.Transfer(key1.Address(), one).Send() - require.NoError(t, err) - txn.NoFail(t) - txReceipt := txn.Receipt() - - block, err := client.GetBlockByHash(txReceipt.BlockHash, false) - require.NoError(t, err) - require.Equal(t, txReceipt.BlockNumber, block.Number) - require.Equal(t, txReceipt.BlockHash, block.Hash) - }) - - t.Run("eth_getBlockByNumber", func(t *testing.T) { - key1, err := wallet.GenerateKey() - require.NoError(t, err) - txn := srv.Txn(fund) - txn, err = txn.Transfer(key1.Address(), one).Send() - require.NoError(t, err) - txn.NoFail(t) - txReceipt := txn.Receipt() - - block, err := client.GetBlockByNumber(ethgo.BlockNumber(txReceipt.BlockNumber), false) - require.NoError(t, err) - require.Equal(t, txReceipt.BlockNumber, block.Number) - require.Equal(t, txReceipt.BlockHash, block.Hash) - }) - - t.Run("eth_getTransactionReceipt", func(t *testing.T) { - key1, err := wallet.GenerateKey() - require.NoError(t, err) - - txn := srv.Txn(fund) - txn, err = txn.Transfer(key1.Address(), one).Send() - require.NoError(t, err) - txn.NoFail(t) - - // Test. We cannot retrieve a receipt of an empty hash - emptyReceipt, err := client.GetTransactionReceipt(ethgo.ZeroHash) - require.NoError(t, err) // Note that ethgo does not return an error when the item does not exists - require.Nil(t, emptyReceipt) - - // Test. We can retrieve the receipt by the hash - receipt, err := client.GetTransactionReceipt(txn.Receipt().TransactionHash) - require.NoError(t, err) - - // Test. The populated fields match with the block - block, err := client.GetBlockByHash(receipt.BlockHash, false) - require.NoError(t, err) - - require.Equal(t, receipt.TransactionHash, txn.Receipt().TransactionHash) - require.Equal(t, receipt.BlockNumber, block.Number) - require.Equal(t, receipt.BlockHash, block.Hash) - - // Test. The receipt of a deployed contract has the 'ContractAddress' field. - txn = srv.Txn(fund) - txn, err = txn.Deploy(bytecode).Send() - require.NoError(t, err) - txn.NoFail(t) - - require.NotEqual(t, txn.Receipt().ContractAddress, ethgo.ZeroAddress) - }) - - t.Run("eth_getTransactionByHash", func(t *testing.T) { - key1, err := wallet.GenerateKey() - require.NoError(t, err) - - // Test. We should be able to query the transaction by its hash - txn := srv.Txn(fund) - txn, err = txn.Transfer(key1.Address(), one).Send() - require.NoError(t, err) - txn.NoFail(t) - - ethTxn, err := client.GetTransactionByHash(txn.Receipt().TransactionHash) - require.NoError(t, err) - - // Test. 
The dynamic 'from' field is populated - require.NotEqual(t, ethTxn.From, ethgo.ZeroAddress) - }) -} diff --git a/e2e/logs_test.go b/e2e/logs_test.go deleted file mode 100644 index 157500c583..0000000000 --- a/e2e/logs_test.go +++ /dev/null @@ -1,286 +0,0 @@ -package e2e - -import ( - "context" - "math/big" - "testing" - "time" - - "github.com/umbracle/ethgo" - - "github.com/0xPolygon/polygon-edge/e2e/framework" - "github.com/0xPolygon/polygon-edge/helper/hex" - "github.com/0xPolygon/polygon-edge/helper/tests" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - - "github.com/stretchr/testify/assert" - "golang.org/x/crypto/sha3" -) - -func TestNewFilter_Logs(t *testing.T) { - runTest := func(t *testing.T, validatorType validators.ValidatorType) { - t.Helper() - - key, addr := tests.GenerateKeyAndAddr(t) - - ibftManager := framework.NewIBFTServersManager( - t, - 1, - IBFTDirPrefix, - func(i int, config *framework.TestServerConfig) { - config.SetValidatorType(validatorType) - config.Premine(addr, framework.EthToWei(10)) - config.SetBlockTime(1) - }, - ) - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - ibftManager.StartServers(ctx) - srv := ibftManager.GetServer(0) - - ctx1, cancel1 := context.WithTimeout(context.Background(), framework.DefaultTimeout) - defer cancel1() - - contractAddr, err := srv.DeployContract(ctx1, sampleByteCode, key) - castedContractAddr := types.Address(contractAddr) - - if err != nil { - t.Fatal(err) - } - - txpoolClient := srv.TxnPoolOperator() - jsonRPCClient := srv.JSONRPC() - - id, err := jsonRPCClient.Eth().NewFilter(ðgo.LogFilter{}) - assert.NoError(t, err) - - txn, err := tests.GenerateAddTxnReq(tests.GenerateTxReqParams{ - Nonce: 1, // The first transaction was a contract deployment - ReferenceAddr: addr, - ReferenceKey: key, - ToAddress: castedContractAddr, - GasPrice: big.NewInt(framework.DefaultGasPrice), - Input: framework.MethodSig("setA1"), - }) - if err != nil { - return - } - - addTxnContext, addTxnCancelFn := context.WithTimeout(context.Background(), framework.DefaultTimeout) - defer addTxnCancelFn() - - addResp, addErr := txpoolClient.AddTxn(addTxnContext, txn) - if addErr != nil { - t.Fatalf("Unable to add transaction, %v", addErr) - } - - receiptContext, cancelFn := context.WithTimeout(context.Background(), framework.DefaultTimeout) - defer cancelFn() - - txHash := ethgo.Hash(types.StringToHash(addResp.TxHash)) - if _, receiptErr := srv.WaitForReceipt(receiptContext, txHash); receiptErr != nil { - t.Fatalf("Unable to wait for receipt, %v", receiptErr) - } - - res, err := jsonRPCClient.Eth().GetFilterChanges(id) - - assert.NoError(t, err) - assert.Equal(t, 1, len(res)) - } - - t.Run("ECDSA", func(t *testing.T) { - runTest(t, validators.ECDSAValidatorType) - }) - - t.Run("BLS", func(t *testing.T) { - runTest(t, validators.BLSValidatorType) - }) -} - -func TestNewFilter_Block(t *testing.T) { - runTest := func(t *testing.T, validatorType validators.ValidatorType) { - t.Helper() - - fromKey, from := tests.GenerateKeyAndAddr(t) - _, to := tests.GenerateKeyAndAddr(t) - - ibftManager := framework.NewIBFTServersManager( - t, - 1, - IBFTDirPrefix, - func(i int, config *framework.TestServerConfig) { - config.SetValidatorType(validatorType) - config.Premine(from, framework.EthToWei(10)) - config.SetBlockTime(1) - }, - ) - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - ibftManager.StartServers(ctx) - srv := 
ibftManager.GetServer(0) - - client := srv.JSONRPC() - - id, err := client.Eth().NewBlockFilter() - assert.NoError(t, err) - - ctx, cancelFn := context.WithTimeout(context.Background(), framework.DefaultTimeout) - defer cancelFn() - - if _, sendErr := srv.SendRawTx(ctx, &framework.PreparedTransaction{ - From: from, - To: &to, - GasPrice: ethgo.Gwei(1), - Gas: 1000000, - Value: big.NewInt(10000), - }, fromKey); err != nil { - t.Fatalf("Unable to send transaction %v", sendErr) - } - - assert.NoError(t, err) - - // verify filter picked up block changes - blocks, err := client.Eth().GetFilterChangesBlock(id) - assert.NoError(t, err) - assert.Greater(t, len(blocks), 0) - } - - t.Run("ECDSA", func(t *testing.T) { - runTest(t, validators.ECDSAValidatorType) - }) - - t.Run("BLS", func(t *testing.T) { - runTest(t, validators.BLSValidatorType) - }) -} - -func TestFilterValue(t *testing.T) { - // Scenario : - // - // 1. Deploy a smart contract which is able to emit an event when calling a method. - // The event will contain a data, the number 42. - // - // 1a. Create a filter which will only register a specific event ( - // MyEvent) emitted by the previously deployed contract. - // - // 2. Call the smart contract method and wait for the block. - // - // 3. Query the block's bloom filter to make sure the data has been properly inserted. - // - runTest := func(t *testing.T, validatorType validators.ValidatorType) { - t.Helper() - - key, addr := tests.GenerateKeyAndAddr(t) - - ibftManager := framework.NewIBFTServersManager( - t, - 1, - IBFTDirPrefix, - func(i int, config *framework.TestServerConfig) { - config.SetValidatorType(validatorType) - config.Premine(addr, framework.EthToWei(10)) - config.SetBlockTime(1) - }, - ) - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - ibftManager.StartServers(ctx) - srv := ibftManager.GetServer(0) - - deployCtx, deployCancel := context.WithTimeout(context.Background(), time.Minute) - defer deployCancel() - - contractAddr, err := srv.DeployContract(deployCtx, bloomFilterTestBytecode, key) - - if err != nil { - t.Fatal(err) - } - - txpoolClient := srv.TxnPoolOperator() - jsonRPCClient := srv.JSONRPC() - - // Encode event signature - hash := sha3.NewLegacyKeccak256() - decodeString := []byte("MyEvent(address,uint256)") - hash.Write(decodeString) - - buf := hash.Sum(nil) - - // Convert to right format - var ( - placeholderWrapper []*ethgo.Hash - placeholder ethgo.Hash - filterEventHashes [][]*ethgo.Hash - filterAddresses []ethgo.Address - ) - - copy(placeholder[:], buf) - placeholderWrapper = append(placeholderWrapper, &placeholder) - - filterEventHashes = append(filterEventHashes, placeholderWrapper) - filterAddresses = append(filterAddresses, contractAddr) - - filterID, err := jsonRPCClient.Eth().NewFilter(ðgo.LogFilter{ - Address: filterAddresses, - Topics: filterEventHashes, - }) - - assert.NoError(t, err) - - castedContractAddr := types.Address(contractAddr) - - if err != nil { - t.Fatal(err) - } - - txn, err := tests.GenerateAddTxnReq(tests.GenerateTxReqParams{ - Nonce: 1, - ReferenceAddr: addr, - ReferenceKey: key, - ToAddress: castedContractAddr, - GasPrice: big.NewInt(framework.DefaultGasPrice), - Input: framework.MethodSig("TriggerMyEvent"), - }) - - if err != nil { - return - } - - addTxnContext, cancelFn := context.WithTimeout(context.Background(), framework.DefaultTimeout) - defer cancelFn() - - addResp, addErr := txpoolClient.AddTxn(addTxnContext, txn) - if addErr != nil { - return - } - - receiptContext, cancelFn 
:= context.WithTimeout(context.Background(), framework.DefaultTimeout) - defer cancelFn() - - txHash := ethgo.Hash(types.StringToHash(addResp.TxHash)) - if _, receiptErr := srv.WaitForReceipt(receiptContext, txHash); receiptErr != nil { - return - } - - res, err := jsonRPCClient.Eth().GetFilterChanges(filterID) - - assert.NoError(t, err) - assert.Len(t, res, 1) - assert.Equal(t, "0x000000000000000000000000000000000000000000000000000000000000002a", hex.EncodeToHex(res[0].Data)) - } - - t.Run("ECDSA", func(t *testing.T) { - runTest(t, validators.ECDSAValidatorType) - }) - - t.Run("BLS", func(t *testing.T) { - runTest(t, validators.BLSValidatorType) - }) -} diff --git a/e2e/metadata/StressTest.sol b/e2e/metadata/StressTest.sol deleted file mode 100644 index 40e1cba2f9..0000000000 --- a/e2e/metadata/StressTest.sol +++ /dev/null @@ -1,22 +0,0 @@ -pragma solidity ^0.8.0; - -contract StressTest { - string private name; - uint256 private num; - - constructor (){ - num = 0; - } - - event txnDone(uint number); - - function setName(string memory sName) external { - num++; - name = sName; - emit txnDone(num); - } - - function getCount() view external returns (uint){ - return num; - } -} diff --git a/e2e/metadata/predeploy.sol b/e2e/metadata/predeploy.sol deleted file mode 100644 index 49a729e280..0000000000 --- a/e2e/metadata/predeploy.sol +++ /dev/null @@ -1,100 +0,0 @@ -pragma solidity ^0.8.7; - -// A test contract for predeploy testing -contract HRM { - struct Group { - string name; - uint128 number; - bool flag; - } - - struct Human { - address addr; - string name; - int128 number; - } - - int256 private id; - string private name; - - Group[] private groups; - mapping(uint256 => mapping(uint256 => Human)) people; - - struct ArgGroup { - string name; - uint128 number; - bool flag; - Human[] people; - } - - constructor( - int256 _id, - string memory _name, - ArgGroup[] memory _groups - ) { - id = _id; - name = _name; - - for (uint256 idx = 0; idx < _groups.length; idx++) { - groups.push( - Group({ - name: _groups[idx].name, - number: _groups[idx].number, - flag: _groups[idx].flag - }) - ); - - for (uint256 jdx = 0; jdx < _groups[idx].people.length; jdx++) { - people[idx][jdx] = Human({ - addr: _groups[idx].people[jdx].addr, - name: _groups[idx].people[jdx].name, - number: _groups[idx].people[jdx].number - }); - } - } - } - - function getID() public view returns (int256) { - return id; - } - - function getName() public view returns (string memory) { - return name; - } - - function getGroupName(uint256 index) public view returns (string memory) { - return groups[index].name; - } - - function getGroupNumber(uint256 index) public view returns (uint128) { - return groups[index].number; - } - - function getGroupFlag(uint256 index) public view returns (bool) { - return groups[index].flag; - } - - function getHumanAddr(uint256 groupIndex, uint256 humanIndex) - public - view - returns (address) - { - return people[groupIndex][humanIndex].addr; - } - - function getHumanName(uint256 groupIndex, uint256 humanIndex) - public - view - returns (string memory) - { - return people[groupIndex][humanIndex].name; - } - - function getHumanNumber(uint256 groupIndex, uint256 humanIndex) - public - view - returns (int128) - { - return people[groupIndex][humanIndex].number; - } -} diff --git a/e2e/metadata/predeploy_abi.json b/e2e/metadata/predeploy_abi.json deleted file mode 100644 index b370e0fbff..0000000000 --- a/e2e/metadata/predeploy_abi.json +++ /dev/null @@ -1,226 +0,0 @@ -{ - "_format": "hh-sol-artifact-1", - 
"contractName": "HRM", - "sourceName": "contracts/Gretter.sol", - "abi": [ - { - "inputs": [ - { - "internalType": "int256", - "name": "_id", - "type": "int256" - }, - { - "internalType": "string", - "name": "_name", - "type": "string" - }, - { - "components": [ - { - "internalType": "string", - "name": "name", - "type": "string" - }, - { - "internalType": "uint128", - "name": "number", - "type": "uint128" - }, - { - "internalType": "bool", - "name": "flag", - "type": "bool" - }, - { - "components": [ - { - "internalType": "address", - "name": "addr", - "type": "address" - }, - { - "internalType": "string", - "name": "name", - "type": "string" - }, - { - "internalType": "int128", - "name": "number", - "type": "int128" - } - ], - "internalType": "struct HRM.Human[]", - "name": "people", - "type": "tuple[]" - } - ], - "internalType": "struct HRM.ArgGroup[]", - "name": "_groups", - "type": "tuple[]" - } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "index", - "type": "uint256" - } - ], - "name": "getGroupFlag", - "outputs": [ - { - "internalType": "bool", - "name": "", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "index", - "type": "uint256" - } - ], - "name": "getGroupName", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "index", - "type": "uint256" - } - ], - "name": "getGroupNumber", - "outputs": [ - { - "internalType": "uint128", - "name": "", - "type": "uint128" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "groupIndex", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "humanIndex", - "type": "uint256" - } - ], - "name": "getHumanAddr", - "outputs": [ - { - "internalType": "address", - "name": "", - "type": "address" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "groupIndex", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "humanIndex", - "type": "uint256" - } - ], - "name": "getHumanName", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "groupIndex", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "humanIndex", - "type": "uint256" - } - ], - "name": "getHumanNumber", - "outputs": [ - { - "internalType": "int128", - "name": "", - "type": "int128" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "getID", - "outputs": [ - { - "internalType": "int256", - "name": "", - "type": "int256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "getName", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "view", - "type": "function" - } - ], - "bytecode": 
"0x60806040523480156200001157600080fd5b506040516200151e3803806200151e83398181016040528101906200003791906200085f565b82600081905550816001908051906020019062000056929190620003cf565b5060005b8151811015620003c5576002604051806060016040528084848151811062000087576200008662000b81565b5b6020026020010151600001518152602001848481518110620000ae57620000ad62000b81565b5b6020026020010151602001516fffffffffffffffffffffffffffffffff168152602001848481518110620000e757620000e662000b81565b5b6020026020010151604001511515815250908060018154018082558091505060019003906000526020600020906002020160009091909190915060008201518160000190805190602001906200013f929190620003cf565b5060208201518160010160006101000a8154816fffffffffffffffffffffffffffffffff02191690836fffffffffffffffffffffffffffffffff16021790555060408201518160010160106101000a81548160ff021916908315150217905550505060005b828281518110620001ba57620001b962000b81565b5b60200260200101516060015151811015620003ae576040518060600160405280848481518110620001f057620001ef62000b81565b5b602002602001015160600151838151811062000211576200021062000b81565b5b60200260200101516000015173ffffffffffffffffffffffffffffffffffffffff1681526020018484815181106200024e576200024d62000b81565b5b60200260200101516060015183815181106200026f576200026e62000b81565b5b602002602001015160200151815260200184848151811062000296576200029562000b81565b5b6020026020010151606001518381518110620002b757620002b662000b81565b5b602002602001015160400151600f0b81525060036000848152602001908152602001600020600083815260200190815260200160002060008201518160000160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550602082015181600101908051906020019062000352929190620003cf565b5060408201518160020160006101000a8154816fffffffffffffffffffffffffffffffff0219169083600f0b6fffffffffffffffffffffffffffffffff1602179055509050508080620003a59062000ad5565b915050620001a4565b508080620003bc9062000ad5565b9150506200005a565b5050505062000c95565b828054620003dd9062000a69565b90600052602060002090601f0160209004810192826200040157600085556200044d565b82601f106200041c57805160ff19168380011785556200044d565b828001600101855582156200044d579182015b828111156200044c5782518255916020019190600101906200042f565b5b5090506200045c919062000460565b5090565b5b808211156200047b57600081600090555060010162000461565b5090565b600062000496620004908462000922565b620008f9565b90508083825260208201905082856020860282011115620004bc57620004bb62000bee565b5b60005b858110156200051157815167ffffffffffffffff811115620004e657620004e562000bdf565b5b808601620004f58982620006f7565b85526020850194506020840193505050600181019050620004bf565b5050509392505050565b6000620005326200052c8462000951565b620008f9565b9050808382526020820190508285602086028201111562000558576200055762000bee565b5b60005b85811015620005ad57815167ffffffffffffffff81111562000582576200058162000bdf565b5b808601620005918982620007ba565b855260208501945060208401935050506001810190506200055b565b5050509392505050565b6000620005ce620005c88462000980565b620008f9565b905082815260208101848484011115620005ed57620005ec62000bf3565b5b620005fa84828562000a33565b509392505050565b600081519050620006138162000c13565b92915050565b600082601f83011262000631576200063062000bdf565b5b8151620006438482602086016200047f565b91505092915050565b600082601f83011262000664576200066362000bdf565b5b8151620006768482602086016200051b565b91505092915050565b600081519050620006908162000c2d565b92915050565b600081519050620006a78162000c47565b92915050565b600081519050620006be8162000c61565b92915050565b600082601f830112620006dc57620006db62000bdf565b5b8151620006ee848260208601620005b7565b91
505092915050565b60006080828403121562000710576200070f62000be4565b5b6200071c6080620008f9565b9050600082015167ffffffffffffffff8111156200073f576200073e62000be9565b5b6200074d84828501620006c4565b6000830152506020620007638482850162000848565b602083015250604062000779848285016200067f565b604083015250606082015167ffffffffffffffff811115620007a0576200079f62000be9565b5b620007ae848285016200064c565b60608301525092915050565b600060608284031215620007d357620007d262000be4565b5b620007df6060620008f9565b90506000620007f18482850162000602565b600083015250602082015167ffffffffffffffff81111562000818576200081762000be9565b5b6200082684828501620006c4565b60208301525060406200083c8482850162000696565b60408301525092915050565b600081519050620008598162000c7b565b92915050565b6000806000606084860312156200087b576200087a62000bfd565b5b60006200088b86828701620006ad565b935050602084015167ffffffffffffffff811115620008af57620008ae62000bf8565b5b620008bd86828701620006c4565b925050604084015167ffffffffffffffff811115620008e157620008e062000bf8565b5b620008ef8682870162000619565b9150509250925092565b60006200090562000918565b905062000913828262000a9f565b919050565b6000604051905090565b600067ffffffffffffffff82111562000940576200093f62000bb0565b5b602082029050602081019050919050565b600067ffffffffffffffff8211156200096f576200096e62000bb0565b5b602082029050602081019050919050565b600067ffffffffffffffff8211156200099e576200099d62000bb0565b5b620009a98262000c02565b9050602081019050919050565b6000620009c38262000a09565b9050919050565b60008115159050919050565b600081600f0b9050919050565b6000819050919050565b60006fffffffffffffffffffffffffffffffff82169050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b60005b8381101562000a5357808201518184015260208101905062000a36565b8381111562000a63576000848401525b50505050565b6000600282049050600182168062000a8257607f821691505b6020821081141562000a995762000a9862000b52565b5b50919050565b62000aaa8262000c02565b810181811067ffffffffffffffff8211171562000acc5762000acb62000bb0565b5b80604052505050565b600062000ae28262000a29565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82141562000b185762000b1762000b23565b5b600182019050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600080fd5b600080fd5b600080fd5b600080fd5b600080fd5b600080fd5b600080fd5b6000601f19601f8301169050919050565b62000c1e81620009b6565b811462000c2a57600080fd5b50565b62000c3881620009ca565b811462000c4457600080fd5b50565b62000c5281620009d6565b811462000c5e57600080fd5b50565b62000c6c81620009e3565b811462000c7857600080fd5b50565b62000c8681620009ed565b811462000c9257600080fd5b50565b6108798062000ca56000396000f3fe608060405234801561001057600080fd5b50600436106100885760003560e01c8063a39e22e91161005b578063a39e22e91461013b578063ab9dbd071461016b578063e852ac3d14610189578063f82f3cd3146101b957610088565b806317d7de7c1461008d57806329a6ca32146100ab5780632c40a7e5146100db578063813198411461010b575b600080fd5b6100956101e9565b6040516100a2919061067f565b60405180910390f35b6100c560048036038101906100c09190610522565b61027b565b6040516100d2919061067f565b60405180910390f35b6100f560048036038101906100f0919061054f565b610332565b604051610102919061067f565b60405180910390f35b6101256004803603810190610120919061054f565b6103ec565b6040516101329190610649565b60405180910390f35b6
1015560048036038101906101509190610522565b61042b565b60405161016291906106a1565b60405180910390f35b610173610476565b6040516101809190610664565b60405180910390f35b6101a3600480360381019061019e9190610522565b61047f565b6040516101b0919061062e565b60405180910390f35b6101d360048036038101906101ce919061054f565b6104bb565b6040516101e09190610613565b60405180910390f35b6060600180546101f890610786565b80601f016020809104026020016040519081016040528092919081815260200182805461022490610786565b80156102715780601f1061024657610100808354040283529160200191610271565b820191906000526020600020905b81548152906001019060200180831161025457829003601f168201915b5050505050905090565b606060028281548110610291576102906107e7565b5b906000526020600020906002020160000180546102ad90610786565b80601f01602080910402602001604051908101604052809291908181526020018280546102d990610786565b80156103265780601f106102fb57610100808354040283529160200191610326565b820191906000526020600020905b81548152906001019060200180831161030957829003601f168201915b50505050509050919050565b6060600360008481526020019081526020016000206000838152602001908152602001600020600101805461036690610786565b80601f016020809104026020016040519081016040528092919081815260200182805461039290610786565b80156103df5780601f106103b4576101008083540402835291602001916103df565b820191906000526020600020905b8154815290600101906020018083116103c257829003601f168201915b5050505050905092915050565b600060036000848152602001908152602001600020600083815260200190815260200160002060020160009054906101000a9004600f0b905092915050565b600060028281548110610441576104406107e7565b5b906000526020600020906002020160010160009054906101000a90046fffffffffffffffffffffffffffffffff169050919050565b60008054905090565b600060028281548110610495576104946107e7565b5b906000526020600020906002020160010160109054906101000a900460ff169050919050565b600060036000848152602001908152602001600020600083815260200190815260200160002060000160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905092915050565b60008135905061051c8161082c565b92915050565b60006020828403121561053857610537610816565b5b60006105468482850161050d565b91505092915050565b6000806040838503121561056657610565610816565b5b60006105748582860161050d565b92505060206105858582860161050d565b9150509250929050565b610598816106d8565b82525050565b6105a7816106ea565b82525050565b6105b6816106f6565b82525050565b6105c581610703565b82525050565b60006105d6826106bc565b6105e081856106c7565b93506105f0818560208601610753565b6105f98161081b565b840191505092915050565b61060d8161070d565b82525050565b6000602082019050610628600083018461058f565b92915050565b6000602082019050610643600083018461059e565b92915050565b600060208201905061065e60008301846105ad565b92915050565b600060208201905061067960008301846105bc565b92915050565b6000602082019050818103600083015261069981846105cb565b905092915050565b60006020820190506106b66000830184610604565b92915050565b600081519050919050565b600082825260208201905092915050565b60006106e382610729565b9050919050565b60008115159050919050565b600081600f0b9050919050565b6000819050919050565b60006fffffffffffffffffffffffffffffffff82169050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b60005b83811015610771578082015181840152602081019050610756565b83811115610780576000848401525b50505050565b6000600282049050600182168061079e57607f821691505b602082108114156107b2576107b16107b8565b5b50919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600080fd5b6000601f19601f8301169050919050565b61
083581610749565b811461084057600080fd5b5056fea26469706673582212204a8c3c14501d9a77ba559944f69f49472c6d1fa537b901e772919f21111559c364736f6c63430008070033", - "deployedBytecode": "0x608060405234801561001057600080fd5b50600436106100885760003560e01c8063a39e22e91161005b578063a39e22e91461013b578063ab9dbd071461016b578063e852ac3d14610189578063f82f3cd3146101b957610088565b806317d7de7c1461008d57806329a6ca32146100ab5780632c40a7e5146100db578063813198411461010b575b600080fd5b6100956101e9565b6040516100a2919061067f565b60405180910390f35b6100c560048036038101906100c09190610522565b61027b565b6040516100d2919061067f565b60405180910390f35b6100f560048036038101906100f0919061054f565b610332565b604051610102919061067f565b60405180910390f35b6101256004803603810190610120919061054f565b6103ec565b6040516101329190610649565b60405180910390f35b61015560048036038101906101509190610522565b61042b565b60405161016291906106a1565b60405180910390f35b610173610476565b6040516101809190610664565b60405180910390f35b6101a3600480360381019061019e9190610522565b61047f565b6040516101b0919061062e565b60405180910390f35b6101d360048036038101906101ce919061054f565b6104bb565b6040516101e09190610613565b60405180910390f35b6060600180546101f890610786565b80601f016020809104026020016040519081016040528092919081815260200182805461022490610786565b80156102715780601f1061024657610100808354040283529160200191610271565b820191906000526020600020905b81548152906001019060200180831161025457829003601f168201915b5050505050905090565b606060028281548110610291576102906107e7565b5b906000526020600020906002020160000180546102ad90610786565b80601f01602080910402602001604051908101604052809291908181526020018280546102d990610786565b80156103265780601f106102fb57610100808354040283529160200191610326565b820191906000526020600020905b81548152906001019060200180831161030957829003601f168201915b50505050509050919050565b6060600360008481526020019081526020016000206000838152602001908152602001600020600101805461036690610786565b80601f016020809104026020016040519081016040528092919081815260200182805461039290610786565b80156103df5780601f106103b4576101008083540402835291602001916103df565b820191906000526020600020905b8154815290600101906020018083116103c257829003601f168201915b5050505050905092915050565b600060036000848152602001908152602001600020600083815260200190815260200160002060020160009054906101000a9004600f0b905092915050565b600060028281548110610441576104406107e7565b5b906000526020600020906002020160010160009054906101000a90046fffffffffffffffffffffffffffffffff169050919050565b60008054905090565b600060028281548110610495576104946107e7565b5b906000526020600020906002020160010160109054906101000a900460ff169050919050565b600060036000848152602001908152602001600020600083815260200190815260200160002060000160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905092915050565b60008135905061051c8161082c565b92915050565b60006020828403121561053857610537610816565b5b60006105468482850161050d565b91505092915050565b6000806040838503121561056657610565610816565b5b60006105748582860161050d565b92505060206105858582860161050d565b9150509250929050565b610598816106d8565b82525050565b6105a7816106ea565b82525050565b6105b6816106f6565b82525050565b6105c581610703565b82525050565b60006105d6826106bc565b6105e081856106c7565b93506105f0818560208601610753565b6105f98161081b565b840191505092915050565b61060d8161070d565b82525050565b6000602082019050610628600083018461058f565b92915050565b6000602082019050610643600083018461059e565b92915050565b600060208201905061065e60008301846105ad565b92915050565b600060208201905061067960008301846105bc565b92915050565b6000602082019050818103600083015261069981846105cb5
65b905092915050565b60006020820190506106b66000830184610604565b92915050565b600081519050919050565b600082825260208201905092915050565b60006106e382610729565b9050919050565b60008115159050919050565b600081600f0b9050919050565b6000819050919050565b60006fffffffffffffffffffffffffffffffff82169050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b60005b83811015610771578082015181840152602081019050610756565b83811115610780576000848401525b50505050565b6000600282049050600182168061079e57607f821691505b602082108114156107b2576107b16107b8565b5b50919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600080fd5b6000601f19601f8301169050919050565b61083581610749565b811461084057600080fd5b5056fea26469706673582212204a8c3c14501d9a77ba559944f69f49472c6d1fa537b901e772919f21111559c364736f6c63430008070033", - "linkReferences": {}, - "deployedLinkReferences": {} -} \ No newline at end of file diff --git a/e2e/metadata/sample.sol b/e2e/metadata/sample.sol deleted file mode 100644 index 705a7463c9..0000000000 --- a/e2e/metadata/sample.sol +++ /dev/null @@ -1,24 +0,0 @@ -pragma solidity ^0.5.5; -pragma experimental ABIEncoderV2; - -contract Sample { - event A(address indexed val_0, address indexed val_1); - - function setterA(address val_0, address val_1) public payable { - emit A(val_0, val_1); - } - - function setA1() public payable { - emit A( - 0x0000000000000000000000000000000000000000, - 0x0100000000000000000000000000000000000000 - ); - } - - function setA2() public payable { - emit A( - 0x0100000000000000000000000000000000000000, - 0x0000000000000000000000000000000000000000 - ); - } -} diff --git a/e2e/pos_poa_switch_test.go b/e2e/pos_poa_switch_test.go deleted file mode 100644 index e80d0568a3..0000000000 --- a/e2e/pos_poa_switch_test.go +++ /dev/null @@ -1,167 +0,0 @@ -package e2e - -import ( - "context" - "crypto/ecdsa" - "sync" - "testing" - "time" - - "github.com/0xPolygon/polygon-edge/consensus/ibft/fork" - ibftOp "github.com/0xPolygon/polygon-edge/consensus/ibft/proto" - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/e2e/framework" - "github.com/0xPolygon/polygon-edge/types" - "github.com/stretchr/testify/assert" -) - -// Test scenario: -// PoA [0, 29] -// - Check validator set in each validator equals to genesis validator -// - 3 validators stake to the validator contract after contract deploys at block #10 -// PoS [30, ] -// - Check validator set in each validator has only 3 validators -func TestPoAPoSSwitch(t *testing.T) { - var ( - // switch configuration - posDeployContractAt = uint64(10) - posStartAt = uint64(30) - - defaultBalance = framework.EthToWei(1000) - stakeAmount = framework.EthToWei(5) - ) - - ibftManager := framework.NewIBFTServersManager( - t, - IBFTMinNodes, - IBFTDirPrefix, - func(i int, config *framework.TestServerConfig) { - config.PremineValidatorBalance(defaultBalance) - }) - - // Set switch configuration into genesis.json - err := ibftManager.GetServer(0).SwitchIBFTType(fork.PoS, posStartAt, nil, &posDeployContractAt) - assert.NoError(t, err) - - // Get server slice - servers := make([]*framework.TestServer, 0) - for i := 0; i < IBFTMinNodes; i++ { - servers = append(servers, ibftManager.GetServer(i)) - } - - // Get genesis validators - genesisValidatorKeys := make([]*ecdsa.PrivateKey, IBFTMinNodes) - genesisValidatorAddrs := make([]types.Address, IBFTMinNodes) - - for idx := 0; 
idx < IBFTMinNodes; idx++ { - validatorKey, err := ibftManager.GetServer(idx).Config.PrivateKey() - assert.NoError(t, err) - - validatorAddr := crypto.PubKeyToAddress(&validatorKey.PublicKey) - genesisValidatorKeys[idx] = validatorKey - genesisValidatorAddrs[idx] = validatorAddr - } - - // Start servers - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - ibftManager.StartServers(ctx) - - // Test in PoA - validateSnapshot := func( - ctx context.Context, - srv *framework.TestServer, - height uint64, - expectedValidators []types.Address, - ) { - res, err := srv.IBFTOperator().GetSnapshot(ctx, &ibftOp.SnapshotReq{ - Number: height, - }) - assert.NoError(t, err) - - snapshotValidators := make([]types.Address, len(res.Validators)) - for idx, v := range res.Validators { - snapshotValidators[idx] = types.BytesToAddress(v.Data) - } - - assert.ElementsMatch(t, expectedValidators, snapshotValidators) - } - - // Check validator set in each validator - var wg sync.WaitGroup - for i := 0; i < IBFTMinNodes; i++ { - wg.Add(1) - - srv := ibftManager.GetServer(i) - - go func(srv *framework.TestServer) { - defer wg.Done() - - ctx, cancel := context.WithTimeout(context.Background(), framework.DefaultTimeout) - defer cancel() - - // every validator should have 4 validators in validator set - validateSnapshot(ctx, srv, 1, genesisValidatorAddrs) - }(srv) - } - wg.Wait() - - // Wait until the staking contract is deployed - waitErrors := framework.WaitForServersToSeal(servers, posDeployContractAt) - if len(waitErrors) != 0 { - t.Fatalf("Unable to wait for all nodes to seal blocks, %v", waitErrors) - } - - // Stake balance - // 4 genesis validators will stake but 1 gensis validator won't - numStakedValidators := 4 - wg = sync.WaitGroup{} - - for idx := 0; idx < numStakedValidators; idx++ { - wg.Add(1) - - srv := ibftManager.GetServer(idx) - key, addr := genesisValidatorKeys[idx], genesisValidatorAddrs[idx] - - go func(srv *framework.TestServer, key *ecdsa.PrivateKey, addr types.Address) { - defer wg.Done() - - err := framework.StakeAmount( - addr, - key, - stakeAmount, - srv, - ) - assert.NoError(t, err) - }(srv, key, addr) - } - wg.Wait() - - // Wait until PoS begins - waitErrors = framework.WaitForServersToSeal(servers, posStartAt) - if len(waitErrors) != 0 { - t.Fatalf("Unable to wait for all nodes to seal blocks, %v", waitErrors) - } - - expectedPoSValidators := genesisValidatorAddrs[:4] - - // Test in PoS - wg = sync.WaitGroup{} - - for i := 0; i < IBFTMinNodes; i++ { - wg.Add(1) - - srv := ibftManager.GetServer(i) - - go func(srv *framework.TestServer) { - defer wg.Done() - - ctx, cancel := context.WithTimeout(context.Background(), framework.DefaultTimeout) - defer cancel() - - // every validator should have only 4 validators in validator set - validateSnapshot(ctx, srv, posStartAt, expectedPoSValidators) - }(srv) - } - wg.Wait() -} diff --git a/e2e/pos_test.go b/e2e/pos_test.go deleted file mode 100644 index 5a06fef4d3..0000000000 --- a/e2e/pos_test.go +++ /dev/null @@ -1,914 +0,0 @@ -package e2e - -import ( - "context" - "crypto/ecdsa" - "fmt" - "math/big" - "strconv" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/umbracle/ethgo" - "github.com/umbracle/ethgo/jsonrpc" - - "github.com/0xPolygon/polygon-edge/chain" - ibftOp "github.com/0xPolygon/polygon-edge/consensus/ibft/proto" - "github.com/0xPolygon/polygon-edge/contracts/staking" - "github.com/0xPolygon/polygon-edge/crypto" - 
"github.com/0xPolygon/polygon-edge/e2e/framework" - "github.com/0xPolygon/polygon-edge/helper/common" - stakingHelper "github.com/0xPolygon/polygon-edge/helper/staking" - "github.com/0xPolygon/polygon-edge/helper/tests" - txpoolOp "github.com/0xPolygon/polygon-edge/txpool/proto" - "github.com/0xPolygon/polygon-edge/types" - "github.com/golang/protobuf/ptypes/any" -) - -// foundInValidatorSet is a helper function for searching through the passed in set for a specific -// address -func foundInValidatorSet(validatorSet []types.Address, searchValidator types.Address) bool { - searchStr := searchValidator.String() - for _, validator := range validatorSet { - if validator.String() == searchStr { - return true - } - } - - return false -} - -// getBigDefaultStakedBalance returns the default staked balance as a *big.Int -func getBigDefaultStakedBalance(t *testing.T) *big.Int { - t.Helper() - - val := stakingHelper.DefaultStakedBalance - bigDefaultStakedBalance, err := common.ParseUint256orHex(&val) - - if err != nil { - t.Fatalf("unable to parse DefaultStakedBalance, %v", err) - } - - return bigDefaultStakedBalance -} - -// validateValidatorSet makes sure that the address is present / not present in the -// validator set, as well as if the validator set is of a certain size -func validateValidatorSet( - t *testing.T, - address types.Address, - client *jsonrpc.Client, - expectedExistence bool, - expectedSize int, -) { - t.Helper() - - validatorSet, validatorSetErr := framework.GetValidatorSet(address, client) - if validatorSetErr != nil { - t.Fatalf("Unable to fetch validator set, %v", validatorSetErr) - } - - assert.NotNil(t, validatorSet) - assert.Len(t, validatorSet, expectedSize) - - if expectedExistence { - assert.Truef( - t, - foundInValidatorSet(validatorSet, address), - "expected address to be present in the validator set", - ) - } else { - assert.Falsef(t, - foundInValidatorSet(validatorSet, address), - "expected address to not be present in the validator set", - ) - } -} - -func TestPoS_ValidatorBoundaries(t *testing.T) { - accounts := []struct { - key *ecdsa.PrivateKey - address types.Address - }{} - stakeAmount := framework.EthToWei(1) - numGenesisValidators := IBFTMinNodes - minValidatorCount := uint64(1) - maxValidatorCount := uint64(numGenesisValidators + 1) - numNewStakers := 2 - - for i := 0; i < numNewStakers; i++ { - k, a := tests.GenerateKeyAndAddr(t) - - accounts = append(accounts, struct { - key *ecdsa.PrivateKey - address types.Address - }{ - key: k, - address: a, - }) - } - - defaultBalance := framework.EthToWei(100) - ibftManager := framework.NewIBFTServersManager( - t, - numGenesisValidators, - IBFTDirPrefix, - func(i int, config *framework.TestServerConfig) { - config.SetEpochSize(2) - config.PremineValidatorBalance(defaultBalance) - for j := 0; j < numNewStakers; j++ { - config.Premine(accounts[j].address, defaultBalance) - } - config.SetIBFTPoS(true) - config.SetMinValidatorCount(minValidatorCount) - config.SetMaxValidatorCount(maxValidatorCount) - }) - - t.Cleanup(func() { - ibftManager.StopServers() - }) - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - ibftManager.StartServers(ctx) - - srv := ibftManager.GetServer(0) - - client := srv.JSONRPC() - - testCases := []struct { - name string - address types.Address - key *ecdsa.PrivateKey - expectedExistence bool - expectedSize int - }{ - { - name: "Can add a 5th validator", - address: accounts[0].address, - key: accounts[0].key, - expectedExistence: true, - expectedSize: 
numGenesisValidators + 1, - }, - { - name: "Can not add a 6th validator", - address: accounts[1].address, - key: accounts[1].key, - expectedExistence: false, - expectedSize: numGenesisValidators + 1, - }, - } - - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - err := framework.StakeAmount(tt.address, tt.key, stakeAmount, srv) - assert.NoError(t, err) - validateValidatorSet(t, tt.address, client, tt.expectedExistence, tt.expectedSize) - }) - } -} - -func TestPoS_Stake(t *testing.T) { - stakerKey, stakerAddr := tests.GenerateKeyAndAddr(t) - defaultBalance := framework.EthToWei(100) - stakeAmount := framework.EthToWei(5) - - numGenesisValidators := IBFTMinNodes - ibftManager := framework.NewIBFTServersManager( - t, - numGenesisValidators, - IBFTDirPrefix, - func(i int, config *framework.TestServerConfig) { - config.SetEpochSize(2) // Need to leave room for the endblock - config.PremineValidatorBalance(defaultBalance) - config.Premine(stakerAddr, defaultBalance) - config.SetIBFTPoS(true) - }) - - t.Cleanup(func() { - ibftManager.StopServers() - }) - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - ibftManager.StartServers(ctx) - - srv := ibftManager.GetServer(0) - - client := srv.JSONRPC() - - // Stake Balance - stakeError := framework.StakeAmount( - stakerAddr, - stakerKey, - stakeAmount, - srv, - ) - if stakeError != nil { - t.Fatalf("Unable to stake amount, %v", stakeError) - } - - // Check validator set - validateValidatorSet(t, stakerAddr, client, true, numGenesisValidators+1) - - // Check the SC balance - bigDefaultStakedBalance := getBigDefaultStakedBalance(t) - - scBalance := framework.GetAccountBalance(t, staking.AddrStakingContract, client) - expectedBalance := big.NewInt(0).Mul( - bigDefaultStakedBalance, - big.NewInt(int64(numGenesisValidators)), - ) - expectedBalance.Add(expectedBalance, stakeAmount) - - assert.Equal(t, expectedBalance.String(), scBalance.String()) - - stakedAmount, stakedAmountErr := framework.GetStakedAmount(stakerAddr, client) - if stakedAmountErr != nil { - t.Fatalf("Unable to get staked amount, %v", stakedAmountErr) - } - - assert.Equal(t, expectedBalance.String(), stakedAmount.String()) -} - -func TestPoS_Unstake(t *testing.T) { - stakingContractAddr := staking.AddrStakingContract - defaultBalance := framework.EthToWei(100) - - // The last genesis validator will leave from validator set by unstaking - numGenesisValidators := IBFTMinNodes + 1 - ibftManager := framework.NewIBFTServersManager( - t, - numGenesisValidators, - IBFTDirPrefix, - func(_ int, config *framework.TestServerConfig) { - // Premine to send unstake transaction - config.SetEpochSize(2) // Need to leave room for the endblock - config.PremineValidatorBalance(defaultBalance) - config.SetIBFTPoS(true) - }) - - t.Cleanup(func() { - ibftManager.StopServers() - }) - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - ibftManager.StartServers(ctx) - srv := ibftManager.GetServer(0) - - // Get key of last node - unstakerSrv := ibftManager.GetServer(IBFTMinNodes) - unstakerKey, err := unstakerSrv.Config.PrivateKey() - assert.NoError(t, err) - - unstakerAddr := crypto.PubKeyToAddress(&unstakerKey.PublicKey) - - client := srv.JSONRPC() - - // Check the validator is in validator set - validateValidatorSet(t, unstakerAddr, client, true, numGenesisValidators) - - // Send transaction to unstake - receipt, unstakeError := framework.UnstakeAmount( - unstakerAddr, - unstakerKey, - srv, - ) - if unstakeError != 
nil { - t.Fatalf("Unable to unstake amount, %v", unstakeError) - } - - // Check validator set - validateValidatorSet(t, unstakerAddr, client, false, numGenesisValidators-1) - - // Check the SC balance - bigDefaultStakedBalance := getBigDefaultStakedBalance(t) - - scBalance := framework.GetAccountBalance(t, staking.AddrStakingContract, client) - expectedBalance := big.NewInt(0).Mul( - bigDefaultStakedBalance, - big.NewInt(int64(numGenesisValidators)), - ) - expectedBalance.Sub(expectedBalance, bigDefaultStakedBalance) - - assert.Equal(t, expectedBalance.String(), scBalance.String()) - - stakedAmount, stakedAmountErr := framework.GetStakedAmount(stakingContractAddr, client) - if stakedAmountErr != nil { - t.Fatalf("Unable to get staked amount, %v", stakedAmountErr) - } - - assert.Equal(t, expectedBalance.String(), stakedAmount.String()) - - // Check the address balance - fee := new(big.Int).Mul( - big.NewInt(int64(receipt.GasUsed)), - ethgo.Gwei(1), - ) - - accountBalance := framework.GetAccountBalance(t, unstakerAddr, client) - expectedAccountBalance := big.NewInt(0).Add(defaultBalance, bigDefaultStakedBalance) - expectedAccountBalance.Sub(expectedAccountBalance, fee) - - assert.Equal(t, expectedAccountBalance.String(), accountBalance.String()) -} - -// Test scenario: -// User has 10 ETH staked and a balance of 10 ETH -// Unstake -> Unstake -> Unstake -> Unstake... -// The code below tests numTransactions cycles of Unstake -// Expected result for tests: Staked: 0 ETH; Balance: ~20 ETH -func TestPoS_UnstakeExploit(t *testing.T) { - // Predefined values - stakingContractAddr := staking.AddrStakingContract - - senderKey, senderAddr := tests.GenerateKeyAndAddr(t) - bigDefaultStakedBalance := getBigDefaultStakedBalance(t) - defaultBalance := framework.EthToWei(100) - bigGasPrice := big.NewInt(1000000000) - - devInterval := 5 // s - numDummyValidators := 5 - - // Set up the test server - srvs := framework.NewTestServers(t, 1, func(config *framework.TestServerConfig) { - config.SetConsensus(framework.ConsensusDev) - config.SetDevInterval(devInterval) - config.Premine(senderAddr, defaultBalance) - config.SetDevStakingAddresses(append(generateStakingAddresses(numDummyValidators), senderAddr)) - config.SetIBFTPoS(true) - config.SetBlockLimit(5000000000) - }) - srv := srvs[0] - client := srv.JSONRPC() - - previousAccountBalance := framework.GetAccountBalance(t, senderAddr, client) - - // Check if the stake is present on the SC - actualStakingSCBalance, fetchError := framework.GetStakedAmount(senderAddr, client) - if fetchError != nil { - t.Fatalf("Unable to fetch staking SC balance, %v", fetchError) - } - - assert.Equalf(t, - big.NewInt(0).Mul(bigDefaultStakedBalance, big.NewInt(int64(numDummyValidators+1))).String(), - actualStakingSCBalance.String(), - "Staked address balance mismatch before unstake exploit", - ) - - // Required default values - numTransactions := 5 - signer := crypto.NewSigner(chain.AllForksEnabled.At(0), 100) - currentNonce := 0 - - // TxPool client - clt := srv.TxnPoolOperator() - - generateTx := func(i int) *types.Transaction { - unsignedTx := &types.Transaction{ - Nonce: uint64(currentNonce), - From: types.ZeroAddress, - To: &stakingContractAddr, - Gas: framework.DefaultGasLimit, - Value: big.NewInt(0), - V: big.NewInt(1), // it is necessary to encode in rlp, - Input: framework.MethodSig("unstake"), - } - - // Just make very second transaction with dynamic gas fee - if i%2 == 0 { - unsignedTx.Type = types.DynamicFeeTx - unsignedTx.GasFeeCap = bigGasPrice - unsignedTx.GasTipCap 
= bigGasPrice - } else { - unsignedTx.Type = types.LegacyTx - unsignedTx.GasPrice = bigGasPrice - } - - signedTx, err := signer.SignTx(unsignedTx, senderKey) - require.NoError(t, err, "Unable to sign transaction") - - currentNonce++ - - return signedTx - } - - txHashes := make([]ethgo.Hash, 0) - - for i := 0; i < numTransactions; i++ { - var msg *txpoolOp.AddTxnReq - - unstakeTxn := generateTx(i) - - msg = &txpoolOp.AddTxnReq{ - Raw: &any.Any{ - Value: unstakeTxn.MarshalRLP(), - }, - From: types.ZeroAddress.String(), - } - - addCtx, addCtxCn := context.WithTimeout(context.Background(), framework.DefaultTimeout) - - addResp, addErr := clt.AddTxn(addCtx, msg) - if addErr != nil { - t.Fatalf("Unable to add txn, %v", addErr) - } - - txHashes = append(txHashes, ethgo.HexToHash(addResp.TxHash)) - - addCtxCn() - } - - // Wait for the transactions to go through - totalGasUsed := srv.GetGasTotal(txHashes) - - // Find how much the address paid for all the transactions in this block - paidFee := big.NewInt(0).Mul(bigGasPrice, big.NewInt(int64(totalGasUsed))) - - // Check the balances - actualAccountBalance := framework.GetAccountBalance(t, senderAddr, client) - actualStakingSCBalance, fetchError = framework.GetStakedAmount(senderAddr, client) - - if fetchError != nil { - t.Fatalf("Unable to fetch staking SC balance, %v", fetchError) - } - - // Make sure the balances match up - - // expBalance = previousAccountBalance + stakeRefund - block fees - expBalance := big.NewInt(0).Sub(big.NewInt(0).Add(previousAccountBalance, bigDefaultStakedBalance), paidFee) - - assert.Equalf(t, - expBalance.String(), - actualAccountBalance.String(), - "Account balance mismatch after unstake exploit", - ) - - assert.Equalf(t, - big.NewInt(0).Mul(bigDefaultStakedBalance, big.NewInt(int64(numDummyValidators))).String(), - actualStakingSCBalance.String(), - "Staked address balance mismatch after unstake exploit", - ) -} - -// generateStakingAddresses is a helper method for generating dummy staking addresses -func generateStakingAddresses(numAddresses int) []types.Address { - result := make([]types.Address, numAddresses) - - for i := 0; i < numAddresses; i++ { - result[i] = types.StringToAddress(strconv.Itoa(i + 100)) - } - - return result -} - -// Test scenario: -// User has 10 ETH staked and a balance of 100 ETH -// Unstake -> Stake 1 ETH -> Unstake -> Stake 1 ETH... -// The code below tests (numTransactions / 2) cycles of Unstake -> Stake 1 ETH -// Expected result for tests: Staked: 1 ETH; Balance: ~119 ETH -func TestPoS_StakeUnstakeExploit(t *testing.T) { - // Predefined values - var blockGasLimit uint64 = 5000000000 - - stakingContractAddr := staking.AddrStakingContract - bigDefaultStakedBalance := getBigDefaultStakedBalance(t) - defaultBalance := framework.EthToWei(100) - bigGasPrice := big.NewInt(1000000000) - - senderKey, senderAddr := tests.GenerateKeyAndAddr(t) - numDummyStakers := 100 - - devInterval := 5 // s - - // Set up the test server - srvs := framework.NewTestServers(t, 1, func(config *framework.TestServerConfig) { - config.SetConsensus(framework.ConsensusDev) - config.SetDevInterval(devInterval) - config.Premine(senderAddr, defaultBalance) - config.SetBlockLimit(blockGasLimit) - config.SetIBFTPoS(true) - // This call will add numDummyStakers + 1 staking address to the staking SC. 
- // This is done in order to pump the stakedAmount value on the staking SC - config.SetDevStakingAddresses(append(generateStakingAddresses(numDummyStakers), senderAddr)) - }) - srv := srvs[0] - client := srv.JSONRPC() - - initialStakingSCBalance, fetchError := framework.GetStakedAmount(senderAddr, client) - if fetchError != nil { - t.Fatalf("Unable to fetch staking SC balance, %v", fetchError) - } - - assert.Equalf(t, - (big.NewInt(0).Mul(big.NewInt(int64(numDummyStakers+1)), bigDefaultStakedBalance)).String(), - initialStakingSCBalance.String(), - "Staked address balance mismatch before stake / unstake exploit", - ) - - // Required default values - numTransactions := 6 - signer := crypto.NewSigner(chain.AllForksEnabled.At(0), 100) - currentNonce := 0 - - // TxPool client - txpoolClient := srv.TxnPoolOperator() - - generateTx := func(i int, value *big.Int, methodName string) *types.Transaction { - unsignedTx := &types.Transaction{ - Nonce: uint64(currentNonce), - From: types.ZeroAddress, - To: &stakingContractAddr, - Gas: framework.DefaultGasLimit, - Value: value, - V: big.NewInt(1), // it is necessary to encode in rlp - Input: framework.MethodSig(methodName), - } - - // Just make very second transaction with dynamic gas fee - if i%2 == 0 { - unsignedTx.Type = types.DynamicFeeTx - unsignedTx.GasFeeCap = bigGasPrice - unsignedTx.GasTipCap = bigGasPrice - } else { - unsignedTx.Type = types.LegacyTx - unsignedTx.GasPrice = bigGasPrice - } - - signedTx, err := signer.SignTx(unsignedTx, senderKey) - require.NoError(t, err, "Unable to sign transaction") - - currentNonce++ - - return signedTx - } - - oneEth := framework.EthToWei(1) - zeroEth := framework.EthToWei(0) - txHashes := make([]ethgo.Hash, 0) - - for i := 0; i < numTransactions; i++ { - var msg *txpoolOp.AddTxnReq - - if i%2 == 0 { - unstakeTxn := generateTx(i, zeroEth, "unstake") - msg = &txpoolOp.AddTxnReq{ - Raw: &any.Any{ - Value: unstakeTxn.MarshalRLP(), - }, - From: types.ZeroAddress.String(), - } - } else { - stakeTxn := generateTx(i, oneEth, "stake") - msg = &txpoolOp.AddTxnReq{ - Raw: &any.Any{ - Value: stakeTxn.MarshalRLP(), - }, - From: types.ZeroAddress.String(), - } - } - - addResp, addErr := txpoolClient.AddTxn(context.Background(), msg) - if addErr != nil { - t.Fatalf("Unable to add txn, %v", addErr) - } - - txHashes = append(txHashes, ethgo.HexToHash(addResp.TxHash)) - } - - // Set up the blockchain listener to catch the added block event - totalGasUsed := srv.GetGasTotal(txHashes) - - // Find how much the address paid for all the transactions in this block - paidFee := big.NewInt(0).Mul(bigGasPrice, big.NewInt(int64(totalGasUsed))) - - // Check the balances - actualAccountBalance := framework.GetAccountBalance(t, senderAddr, client) - actualStakingSCBalance, fetchError := framework.GetStakedAmount(senderAddr, client) - - if fetchError != nil { - t.Fatalf("Unable to fetch staking SC balance, %v", fetchError) - } - - expStake := big.NewInt(0).Mul(big.NewInt(int64(numDummyStakers)), bigDefaultStakedBalance) - expStake.Add(expStake, oneEth) - - assert.Equalf(t, - expStake.String(), - actualStakingSCBalance.String(), - "Staked address balance mismatch after stake / unstake exploit", - ) - - // Make sure the address balances match up - - // expBalance = previousAccountBalance + stakeRefund - 1 ETH - block fees - expBalance := big.NewInt(0).Sub(big.NewInt(0).Add(defaultBalance, bigDefaultStakedBalance), oneEth) - expBalance = big.NewInt(0).Sub(expBalance, paidFee) - - assert.Equalf(t, - expBalance.String(), - 
actualAccountBalance.String(), - "Account balance mismatch after stake / unstake exploit", - ) -} - -// Test scenario: -// User has 0 ETH staked and a balance of 100 ETH -// Stake 2 ETH -> Unstake -// Expected result for tests: Staked: 0 ETH; Balance: ~100 ETH; not a validator -func TestPoS_StakeUnstakeWithinSameBlock(t *testing.T) { - // Predefined values - var blockGasLimit uint64 = 5000000000 - - stakingContractAddr := staking.AddrStakingContract - defaultBalance := framework.EthToWei(100) - bigGasPrice := big.NewInt(framework.DefaultGasPrice) - - senderKey, senderAddr := tests.GenerateKeyAndAddr(t) - numDummyStakers := 10 - - devInterval := 5 // s - - // Set up the test server - srvs := framework.NewTestServers(t, 1, func(config *framework.TestServerConfig) { - config.SetConsensus(framework.ConsensusDev) - config.SetDevInterval(devInterval) - config.Premine(senderAddr, defaultBalance) - config.SetBlockLimit(blockGasLimit) - config.SetDevStakingAddresses(generateStakingAddresses(numDummyStakers)) - config.SetIBFTPoS(true) - }) - srv := srvs[0] - client := srv.JSONRPC() - - initialStakingSCBalance, fetchError := framework.GetStakedAmount(senderAddr, client) - if fetchError != nil { - t.Fatalf("Unable to fetch staking SC balance, %v", fetchError) - } - - // Required default values - signer := crypto.NewSigner(chain.AllForksEnabled.At(0), 100) - currentNonce := 0 - - // TxPool client - txpoolClient := srv.TxnPoolOperator() - - generateTx := func(dynamicTx bool, value *big.Int, methodName string) *types.Transaction { - unsignedTx := &types.Transaction{ - Nonce: uint64(currentNonce), - From: types.ZeroAddress, - To: &stakingContractAddr, - Gas: framework.DefaultGasLimit, - Value: value, - V: big.NewInt(1), // it is necessary to encode in rlp - Input: framework.MethodSig(methodName), - } - - if dynamicTx { - unsignedTx.Type = types.DynamicFeeTx - unsignedTx.GasFeeCap = bigGasPrice - unsignedTx.GasTipCap = bigGasPrice - } else { - unsignedTx.Type = types.LegacyTx - unsignedTx.GasPrice = bigGasPrice - } - - signedTx, err := signer.SignTx(unsignedTx, senderKey) - require.NoError(t, err, "Unable to signatransaction") - - currentNonce++ - - return signedTx - } - - zeroEth := framework.EthToWei(0) - txHashes := make([]ethgo.Hash, 0) - - // addTxn is a helper method for generating and adding a transaction - // through the operator command - addTxn := func(dynamicTx bool, value *big.Int, methodName string) { - txn := generateTx(dynamicTx, value, methodName) - txnMsg := &txpoolOp.AddTxnReq{ - Raw: &any.Any{ - Value: txn.MarshalRLP(), - }, - From: types.ZeroAddress.String(), - } - - addResp, addErr := txpoolClient.AddTxn(context.Background(), txnMsg) - if addErr != nil { - t.Fatalf("Unable to add txn, %v", addErr) - } - - txHashes = append(txHashes, ethgo.HexToHash(addResp.TxHash)) - } - - // Stake transaction - addTxn(false, oneEth, "stake") - - // Unstake transaction - addTxn(true, zeroEth, "unstake") - - // Wait for the transactions to go through - totalGasUsed := srv.GetGasTotal(txHashes) - - // Find how much the address paid for all the transactions in this block - paidFee := big.NewInt(0).Mul(bigGasPrice, big.NewInt(int64(totalGasUsed))) - - // Check the balances - actualAccountBalance := framework.GetAccountBalance(t, senderAddr, client) - actualStakingSCBalance, fetchError := framework.GetStakedAmount(senderAddr, client) - - if fetchError != nil { - t.Fatalf("Unable to fetch staking SC balance, %v", fetchError) - } - - assert.Equalf(t, - initialStakingSCBalance.String(), - 
actualStakingSCBalance.String(), - "Staked address balance mismatch after stake / unstake events", - ) - - // Make sure the address balances match up - - // expBalance = previousAccountBalance - block fees - expBalance := big.NewInt(0).Sub(defaultBalance, paidFee) - - assert.Equalf(t, - expBalance.String(), - actualAccountBalance.String(), - "Account balance mismatch after stake / unstake events", - ) - - validateValidatorSet(t, senderAddr, client, false, numDummyStakers) -} - -func getSnapshot( - client ibftOp.IbftOperatorClient, - blockNum uint64, - ctx context.Context, -) (*ibftOp.Snapshot, error) { - snapshot, snapshotErr := client.GetSnapshot(ctx, &ibftOp.SnapshotReq{ - Latest: false, - Number: blockNum, - }) - - return snapshot, snapshotErr -} - -func getNextEpochBlock(blockNum uint64, epochSize uint64) uint64 { - if epochSize > blockNum { - return epochSize - } - - return epochSize*(blockNum/epochSize) + epochSize -} - -func TestSnapshotUpdating(t *testing.T) { - faucetKey, faucetAddr := tests.GenerateKeyAndAddr(t) - - defaultBalance := framework.EthToWei(1000) - stakeAmount := framework.EthToWei(5) - epochSize := uint64(5) - - numGenesisValidators := IBFTMinNodes - numNonValidators := 2 - totalServers := numGenesisValidators + numNonValidators - - ibftManager := framework.NewIBFTServersManager( - t, - totalServers, - IBFTDirPrefix, - func(i int, config *framework.TestServerConfig) { - if i < numGenesisValidators { - // Only IBFTMinNodes should be validators - config.PremineValidatorBalance(defaultBalance) - } else { - // Other nodes should not be in the validator set - dirPrefix := "polygon-edge-non-validator-" - config.SetIBFTDirPrefix(dirPrefix) - config.SetIBFTDir(fmt.Sprintf("%s%d", dirPrefix, i)) - } - - config.SetEpochSize(epochSize) - config.Premine(faucetAddr, defaultBalance) - config.SetIBFTPoS(true) - }) - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - ibftManager.StartServers(ctx) - firstValidator := ibftManager.GetServer(0) - - // Make sure the non-validator has funds before staking - firstNonValidator := ibftManager.GetServer(IBFTMinNodes) - firstNonValidatorKey, err := firstNonValidator.Config.PrivateKey() - assert.NoError(t, err) - - firstNonValidatorAddr := crypto.PubKeyToAddress(&firstNonValidatorKey.PublicKey) - - sendCtx, sendWaitFn := context.WithTimeout(context.Background(), time.Second*30) - defer sendWaitFn() - - receipt, transferErr := firstValidator.SendRawTx( - sendCtx, - &framework.PreparedTransaction{ - From: faucetAddr, - To: &firstNonValidatorAddr, - GasPrice: ethgo.Gwei(1), - Gas: 1000000, - Value: framework.EthToWei(300), - }, faucetKey) - if transferErr != nil { - t.Fatalf("Unable to transfer funds, %v", transferErr) - } - - // Now that the non-validator has funds, they can stake - stakeError := framework.StakeAmount( - firstNonValidatorAddr, - firstNonValidatorKey, - stakeAmount, - firstValidator, - ) - - if stakeError != nil { - t.Fatalf("Unable to stake amount, %v", stakeError) - } - - // Check validator set on the Staking Smart Contract - validateValidatorSet(t, firstNonValidatorAddr, firstValidator.JSONRPC(), true, numGenesisValidators+1) - - // Find the nearest next epoch block - nextEpoch := getNextEpochBlock(receipt.BlockNumber, epochSize) + epochSize - - servers := make([]*framework.TestServer, 0) - for i := 0; i < totalServers; i++ { - servers = append(servers, ibftManager.GetServer(i)) - } - - // Wait for all the nodes to reach the epoch block - waitErrors := 
framework.WaitForServersToSeal(servers, nextEpoch+1) - - if len(waitErrors) != 0 { - t.Fatalf("Unable to wait for all nodes to seal blocks, %v", waitErrors) - } - - // Grab all the operators - serverOperators := make([]ibftOp.IbftOperatorClient, totalServers) - for i := 0; i < totalServers; i++ { - serverOperators[i] = ibftManager.GetServer(i).IBFTOperator() - } - - // isValidatorInSnapshot checks if a certain reference address - // is among the validators for the specific snapshot - isValidatorInSnapshot := func( - client ibftOp.IbftOperatorClient, - blockNumber uint64, - referenceAddr types.Address, - ) bool { - snapshotCtx, ctxCancelFn := context.WithTimeout(context.Background(), time.Second*5) - snapshot, snapshotErr := getSnapshot(client, blockNumber, snapshotCtx) - - if snapshotErr != nil { - t.Fatalf("Unable to fetch snapshot, %v", snapshotErr) - } - - ctxCancelFn() - - for _, validator := range snapshot.Validators { - if types.BytesToAddress(validator.Data) == referenceAddr { - return true - } - } - - return false - } - - // Make sure every node in the network has good snapshot upkeep - for i := 0; i < totalServers; i++ { - // Check the snapshot before the node became a validator - assert.Falsef( - t, - isValidatorInSnapshot(serverOperators[i], nextEpoch-1, firstNonValidatorAddr), - fmt.Sprintf( - "Validator [%s] is in the snapshot validator list for block %d", - firstNonValidatorAddr, - nextEpoch-1, - ), - ) - - // Check the snapshot after the node became a validator - assert.Truef( - t, - isValidatorInSnapshot(serverOperators[i], nextEpoch+1, firstNonValidatorAddr), - fmt.Sprintf( - "Validator [%s] is not in the snapshot validator list for block %d", - firstNonValidatorAddr, - nextEpoch+1, - ), - ) - } -} diff --git a/e2e/syncer_test.go b/e2e/syncer_test.go deleted file mode 100644 index 05e72adb4b..0000000000 --- a/e2e/syncer_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/0xPolygon/polygon-edge/e2e/framework" - "github.com/0xPolygon/polygon-edge/validators" -) - -func TestClusterBlockSync(t *testing.T) { - const ( - numNonValidators = 2 - desiredHeight = 10 - ) - - runTest := func(t *testing.T, validatorType validators.ValidatorType) { - t.Helper() - - // Start IBFT cluster (4 Validator + 2 Non-Validator) - ibftManager := framework.NewIBFTServersManager( - t, - IBFTMinNodes+numNonValidators, - IBFTDirPrefix, func(i int, config *framework.TestServerConfig) { - config.SetValidatorType(validatorType) - - if i >= IBFTMinNodes { - // Other nodes should not be in the validator set - dirPrefix := "polygon-edge-non-validator-" - config.SetIBFTDirPrefix(dirPrefix) - config.SetIBFTDir(fmt.Sprintf("%s%d", dirPrefix, i)) - } - }) - - startContext, startCancelFn := context.WithTimeout(context.Background(), time.Minute) - defer startCancelFn() - ibftManager.StartServers(startContext) - - servers := make([]*framework.TestServer, 0) - for i := 0; i < IBFTMinNodes+numNonValidators; i++ { - servers = append(servers, ibftManager.GetServer(i)) - } - // All nodes should have mined the same block eventually - waitErrors := framework.WaitForServersToSeal(servers, desiredHeight) - - if len(waitErrors) != 0 { - t.Fatalf("Unable to wait for all nodes to seal blocks, %v", waitErrors) - } - } - - t.Run("ECDSA", func(t *testing.T) { - runTest(t, validators.ECDSAValidatorType) - }) - - t.Run("BLS", func(t *testing.T) { - runTest(t, validators.BLSValidatorType) - }) -} diff --git a/e2e/transaction_test.go b/e2e/transaction_test.go index 
71dede8b38..937b799fe1 100644 --- a/e2e/transaction_test.go +++ b/e2e/transaction_test.go @@ -3,28 +3,15 @@ package e2e import ( "context" "crypto/ecdsa" - "encoding/hex" - "errors" - "fmt" "math/big" - "strconv" - "sync" "testing" - "time" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/umbracle/ethgo" - "github.com/umbracle/ethgo/jsonrpc" - "github.com/0xPolygon/polygon-edge/chain" - "github.com/0xPolygon/polygon-edge/contracts/abis" - "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/e2e/framework" - "github.com/0xPolygon/polygon-edge/helper/common" "github.com/0xPolygon/polygon-edge/helper/tests" "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" ) func TestPreminedBalance(t *testing.T) { @@ -228,265 +215,3 @@ func TestEthTransfer(t *testing.T) { }) } } - -// getCount is a helper function for the stress test SC -func getCount( - from types.Address, - contractAddress ethgo.Address, - rpcClient *jsonrpc.Client, -) (*big.Int, error) { - stressTestMethod, ok := abis.StressTestABI.Methods["getCount"] - if !ok { - return nil, errors.New("getCount method doesn't exist in StessTest contract ABI") - } - - selector := stressTestMethod.ID() - response, err := rpcClient.Eth().Call( - ðgo.CallMsg{ - From: ethgo.Address(from), - To: &contractAddress, - Data: selector, - GasPrice: 100000000, - Value: big.NewInt(0), - }, - ethgo.Latest, - ) - - if err != nil { - return nil, fmt.Errorf("unable to call StressTest contract method, %w", err) - } - - if response == "0x" { - response = "0x0" - } - - bigResponse, decodeErr := common.ParseUint256orHex(&response) - - if decodeErr != nil { - return nil, fmt.Errorf("wnable to decode hex response, %w", decodeErr) - } - - return bigResponse, nil -} - -// generateStressTestTx generates a transaction for the -// IBFT_Loop and Dev_Loop stress tests -func generateStressTestTx( - t *testing.T, - txNum int, - currentNonce uint64, - contractAddr types.Address, - senderKey *ecdsa.PrivateKey, -) *types.Transaction { - t.Helper() - - bigGasPrice := big.NewInt(framework.DefaultGasPrice) - signer := crypto.NewSigner(chain.AllForksEnabled.At(0), 100) - - setNameMethod, ok := abis.StressTestABI.Methods["setName"] - if !ok { - t.Fatalf("Unable to get setName method") - } - - encodedInput, encodeErr := setNameMethod.Inputs.Encode( - map[string]interface{}{ - "sName": fmt.Sprintf("Name #%d", currentNonce), - }, - ) - if encodeErr != nil { - t.Fatalf("Unable to encode inputs, %v", encodeErr) - } - - unsignedTx := &types.Transaction{ - Nonce: currentNonce, - From: types.ZeroAddress, - To: &contractAddr, - Gas: framework.DefaultGasLimit, - Value: big.NewInt(0), - V: big.NewInt(1), // it is necessary to encode in rlp, - Input: append(setNameMethod.ID(), encodedInput...), - } - - if txNum%2 == 0 { - unsignedTx.Type = types.DynamicFeeTx - unsignedTx.GasFeeCap = bigGasPrice - unsignedTx.GasTipCap = bigGasPrice - } else { - unsignedTx.Type = types.LegacyTx - unsignedTx.GasPrice = bigGasPrice - } - - signedTx, err := signer.SignTx(unsignedTx, senderKey) - require.NoError(t, err, "Unable to sign transaction") - - return signedTx -} - -// addStressTxnsWithHashes adds numTransactions that call the -// passed in StressTest smart contract method, but saves their transaction -// hashes -func addStressTxnsWithHashes( - t *testing.T, - srv *framework.TestServer, - numTransactions int, - contractAddr types.Address, - senderKey *ecdsa.PrivateKey, -) []ethgo.Hash { - t.Helper() - - 
currentNonce := 1 // 1 because the first transaction was deployment - - txHashes := make([]ethgo.Hash, 0) - - for i := 0; i < numTransactions; i++ { - setNameTxn := generateStressTestTx( - t, - i, - uint64(currentNonce), - contractAddr, - senderKey, - ) - currentNonce++ - - if txHash, err := srv.JSONRPC().Eth().SendRawTransaction(setNameTxn.MarshalRLP()); err == nil { - txHashes = append(txHashes, txHash) - } - } - - return txHashes -} - -// Test scenario (IBFT): -// Deploy the StressTest smart contract and send ~50 transactions -// that modify it's state, and make sure that all -// transactions were correctly executed -func Test_TransactionIBFTLoop(t *testing.T) { - runTest := func(t *testing.T, validatorType validators.ValidatorType) { - t.Helper() - - senderKey, sender := tests.GenerateKeyAndAddr(t) - defaultBalance := framework.EthToWei(100) - - // Set up the test server - ibftManager := framework.NewIBFTServersManager( - t, - IBFTMinNodes, - IBFTDirPrefix, - func(i int, config *framework.TestServerConfig) { - config.SetValidatorType(validatorType) - config.Premine(sender, defaultBalance) - config.SetBlockLimit(20000000) - }) - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - ibftManager.StartServers(ctx) - - srv := ibftManager.GetServer(0) - client := srv.JSONRPC() - - // Deploy the stress test contract - deployCtx, deployCancel := context.WithTimeout(context.Background(), framework.DefaultTimeout) - defer deployCancel() - - buf, err := hex.DecodeString(stressTestBytecode) - if err != nil { - t.Fatalf("Unable to decode bytecode, %v", err) - } - - deployTx := &framework.PreparedTransaction{ - From: sender, - GasPrice: ethgo.Gwei(1), - Gas: framework.DefaultGasLimit, - Value: big.NewInt(0), - Input: buf, - } - receipt, err := srv.SendRawTx(deployCtx, deployTx, senderKey) - - if err != nil { - t.Fatalf("Unable to send transaction, %v", err) - } - - assert.NotNil(t, receipt) - - contractAddr := receipt.ContractAddress - - if err != nil { - t.Fatalf("Unable to send transaction, %v", err) - } - - count, countErr := getCount(sender, contractAddr, client) - if countErr != nil { - t.Fatalf("Unable to call count method, %v", countErr) - } - - // Check that the count is 0 before running the test - assert.Equalf(t, "0", count.String(), "Count doesn't match") - - // Send ~50 transactions - numTransactions := 50 - - var wg sync.WaitGroup - - wg.Add(numTransactions) - - // Add stress test transactions - txHashes := addStressTxnsWithHashes( - t, - srv, - numTransactions, - types.StringToAddress(contractAddr.String()), - senderKey, - ) - if len(txHashes) != numTransactions { - t.Fatalf( - "Invalid number of txns sent [sent %d, expected %d]", - len(txHashes), - numTransactions, - ) - } - - // For each transaction hash, wait for it to get included into a block - for index, txHash := range txHashes { - waitCtx, waitCancel := context.WithTimeout(context.Background(), time.Minute*3) - - receipt, receiptErr := tests.WaitForReceipt(waitCtx, client.Eth(), txHash) - if receipt == nil { - t.Fatalf("Unable to get receipt for hash index [%d]", index) - } else if receiptErr != nil { - t.Fatalf("Unable to get receipt for hash index [%d], %v", index, receiptErr) - } - - waitCancel() - wg.Done() - } - - wg.Wait() - - statusCtx, statusCancel := context.WithTimeout(context.Background(), time.Second*30) - defer statusCancel() - - resp, err := tests.WaitUntilTxPoolEmpty(statusCtx, srv.TxnPoolOperator()) - if err != nil { - t.Fatalf("Unable to get txpool status, %v", err) - } - - 
assert.Equal(t, 0, int(resp.Length)) - - count, countErr = getCount(sender, contractAddr, client) - if countErr != nil { - t.Fatalf("Unable to call count method, %v", countErr) - } - - // Check that the count is correct - assert.Equalf(t, strconv.Itoa(numTransactions), count.String(), "Count doesn't match") - } - - t.Run("ECDSA", func(t *testing.T) { - runTest(t, validators.ECDSAValidatorType) - }) - - t.Run("BLS", func(t *testing.T) { - runTest(t, validators.BLSValidatorType) - }) -} diff --git a/e2e/txpool_test.go b/e2e/txpool_test.go index d18c1b5598..7e6cfd02bd 100644 --- a/e2e/txpool_test.go +++ b/e2e/txpool_test.go @@ -200,165 +200,6 @@ func TestTxPool_ErrorCodes(t *testing.T) { } } -func TestTxPool_TransactionCoalescing(t *testing.T) { - // Test scenario: - // Add tx with nonce 0 - // -> Check if tx has been parsed - // Add tx with nonce 2 - // -> tx shouldn't be executed, but shelved for later - // Add tx with nonce 1 - // -> check if both tx with nonce 1 and tx with nonce 2 are parsed - // Predefined values - gasPrice := big.NewInt(1000000000) - - referenceKey, referenceAddr := tests.GenerateKeyAndAddr(t) - defaultBalance := framework.EthToWei(10) - - // Set up the test server - ibftManager := framework.NewIBFTServersManager( - t, - 1, - IBFTDirPrefix, - func(i int, config *framework.TestServerConfig) { - config.Premine(referenceAddr, defaultBalance) - config.SetBlockTime(1) - }, - ) - - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - ibftManager.StartServers(ctx) - - srv := ibftManager.GetServer(0) - client := srv.JSONRPC() - - // Required default values - signer := crypto.NewEIP155Signer(100, true) - - // TxPool client - clt := srv.TxnPoolOperator() - toAddress := types.StringToAddress("1") - oneEth := framework.EthToWei(1) - - generateTx := func(nonce uint64) *types.Transaction { - signedTx, signErr := signer.SignTx(&types.Transaction{ - Nonce: nonce, - From: referenceAddr, - To: &toAddress, - GasPrice: gasPrice, - Gas: 1000000, - Value: oneEth, - V: big.NewInt(1), // it is necessary to encode in rlp - }, referenceKey) - - if signErr != nil { - t.Fatalf("Unable to sign transaction, %v", signErr) - } - - return signedTx - } - - generateReq := func(nonce uint64) *txpoolOp.AddTxnReq { - msg := &txpoolOp.AddTxnReq{ - Raw: &any.Any{ - Value: generateTx(nonce).MarshalRLP(), - }, - From: types.ZeroAddress.String(), - } - - return msg - } - - // testTransaction is a helper structure for - // keeping track of test transaction execution - type testTransaction struct { - txHash ethgo.Hash // the transaction hash - block *uint64 // the block the transaction was included in - } - - testTransactions := make([]*testTransaction, 0) - - // Add the transactions with the following nonce order - nonces := []uint64{0, 2} - for i := 0; i < len(nonces); i++ { - addReq := generateReq(nonces[i]) - - addCtx, addCtxCn := context.WithTimeout(context.Background(), framework.DefaultTimeout) - - addResp, addErr := clt.AddTxn(addCtx, addReq) - if addErr != nil { - t.Fatalf("Unable to add txn, %v", addErr) - } - - testTransactions = append(testTransactions, &testTransaction{ - txHash: ethgo.HexToHash(addResp.TxHash), - }) - - addCtxCn() - } - - // Wait for the first transaction to go through - ctx, cancelFn := context.WithTimeout(context.Background(), framework.DefaultTimeout) - defer cancelFn() - - receipt, receiptErr := tests.WaitForReceipt(ctx, client.Eth(), testTransactions[0].txHash) - if receiptErr != nil { - t.Fatalf("unable to wait for receipt, %v", receiptErr) 
- } - - testTransactions[0].block = &receipt.BlockNumber - - // Get to account balance - // Only the first tx should've gone through - toAccountBalance := framework.GetAccountBalance(t, toAddress, client) - assert.Equalf(t, - oneEth.String(), - toAccountBalance.String(), - "To address balance mismatch after series of transactions", - ) - - // Add the transaction with the gap nonce value - addReq := generateReq(1) - - addCtx, addCtxCn := context.WithTimeout(context.Background(), framework.DefaultTimeout) - defer addCtxCn() - - addResp, addErr := clt.AddTxn(addCtx, addReq) - if addErr != nil { - t.Fatalf("Unable to add txn, %v", addErr) - } - - testTransactions = append(testTransactions, &testTransaction{ - txHash: ethgo.HexToHash(addResp.TxHash), - }) - - // Start from 1 since there was previously a txn with nonce 0 - for i := 1; i < len(testTransactions); i++ { - // Wait for the first transaction to go through - ctx, cancelFn := context.WithTimeout(context.Background(), framework.DefaultTimeout) - - receipt, receiptErr := tests.WaitForReceipt(ctx, client.Eth(), testTransactions[i].txHash) - if receiptErr != nil { - t.Fatalf("unable to wait for receipt, %v", receiptErr) - } - - testTransactions[i].block = &receipt.BlockNumber - - cancelFn() - } - - // Now both the added tx and the shelved tx should've gone through - toAccountBalance = framework.GetAccountBalance(t, toAddress, client) - assert.Equalf(t, - framework.EthToWei(3).String(), - toAccountBalance.String(), - "To address balance mismatch after gap transaction", - ) - - // Make sure the first transaction and the last transaction didn't get included in the same block - assert.NotEqual(t, *(testTransactions[0].block), *(testTransactions[2].block)) -} - type testAccount struct { key *ecdsa.PrivateKey address types.Address diff --git a/helper/staking/staking.go b/helper/staking/staking.go deleted file mode 100644 index 223d9f4ffc..0000000000 --- a/helper/staking/staking.go +++ /dev/null @@ -1,257 +0,0 @@ -package staking - -import ( - "fmt" - "math/big" - - "github.com/0xPolygon/polygon-edge/chain" - "github.com/0xPolygon/polygon-edge/helper/common" - "github.com/0xPolygon/polygon-edge/helper/hex" - "github.com/0xPolygon/polygon-edge/helper/keccak" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" -) - -var ( - MinValidatorCount = uint64(1) - MaxValidatorCount = common.MaxSafeJSInt -) - -// getAddressMapping returns the key for the SC storage mapping (address => something) -// -// More information: -// https://docs.soliditylang.org/en/latest/internals/layout_in_storage.html -func getAddressMapping(address types.Address, slot int64) []byte { - bigSlot := big.NewInt(slot) - - finalSlice := append( - common.PadLeftOrTrim(address.Bytes(), 32), - common.PadLeftOrTrim(bigSlot.Bytes(), 32)..., - ) - - return keccak.Keccak256(nil, finalSlice) -} - -// getIndexWithOffset is a helper method for adding an offset to the already found keccak hash -func getIndexWithOffset(keccakHash []byte, offset uint64) []byte { - bigOffset := big.NewInt(int64(offset)) - bigKeccak := big.NewInt(0).SetBytes(keccakHash) - - bigKeccak.Add(bigKeccak, bigOffset) - - return bigKeccak.Bytes() -} - -// getStorageIndexes is a helper function for getting the correct indexes -// of the storage slots which need to be modified during bootstrap. 
-// -// It is SC dependant, and based on the SC located at: -// https://github.com/0xPolygon/staking-contracts/ -func getStorageIndexes(validator validators.Validator, index int) *StorageIndexes { - storageIndexes := &StorageIndexes{} - address := validator.Addr() - - // Get the indexes for the mappings - // The index for the mapping is retrieved with: - // keccak(address . slot) - // . stands for concatenation (basically appending the bytes) - storageIndexes.AddressToIsValidatorIndex = getAddressMapping( - address, - addressToIsValidatorSlot, - ) - - storageIndexes.AddressToStakedAmountIndex = getAddressMapping( - address, - addressToStakedAmountSlot, - ) - - storageIndexes.AddressToValidatorIndexIndex = getAddressMapping( - address, - addressToValidatorIndexSlot, - ) - - storageIndexes.ValidatorBLSPublicKeyIndex = getAddressMapping( - address, - addressToBLSPublicKeySlot, - ) - - // Index for array types is calculated as keccak(slot) + index - // The slot for the dynamic arrays that's put in the keccak needs to be in hex form (padded 64 chars) - storageIndexes.ValidatorsIndex = getIndexWithOffset( - keccak.Keccak256(nil, common.PadLeftOrTrim(big.NewInt(validatorsSlot).Bytes(), 32)), - uint64(index), - ) - - return storageIndexes -} - -// setBytesToStorage sets bytes data into storage map from specified base index -func setBytesToStorage( - storageMap map[types.Hash]types.Hash, - baseIndexBytes []byte, - data []byte, -) { - dataLen := len(data) - baseIndex := types.BytesToHash(baseIndexBytes) - - if dataLen <= 31 { - bytes := types.Hash{} - - copy(bytes[:len(data)], data) - - // Set 2*Size at the first byte - bytes[len(bytes)-1] = byte(dataLen * 2) - - storageMap[baseIndex] = bytes - - return - } - - // Set size at the base index - baseSlot := types.Hash{} - baseSlot[31] = byte(2*dataLen + 1) - storageMap[baseIndex] = baseSlot - - zeroIndex := keccak.Keccak256(nil, baseIndexBytes) - numBytesInSlot := 256 / 8 - - for i := 0; i < dataLen; i++ { - offset := i / numBytesInSlot - - slotIndex := types.BytesToHash(getIndexWithOffset(zeroIndex, uint64(offset))) - byteIndex := i % numBytesInSlot - - slot := storageMap[slotIndex] - slot[byteIndex] = data[i] - - storageMap[slotIndex] = slot - } -} - -// PredeployParams contains the values used to predeploy the PoS staking contract -type PredeployParams struct { - MinValidatorCount uint64 - MaxValidatorCount uint64 -} - -// StorageIndexes is a wrapper for different storage indexes that -// need to be modified -type StorageIndexes struct { - ValidatorsIndex []byte // []address - ValidatorBLSPublicKeyIndex []byte // mapping(address => byte[]) - AddressToIsValidatorIndex []byte // mapping(address => bool) - AddressToStakedAmountIndex []byte // mapping(address => uint256) - AddressToValidatorIndexIndex []byte // mapping(address => uint256) -} - -// Slot definitions for SC storage -var ( - validatorsSlot = int64(0) // Slot 0 - addressToIsValidatorSlot = int64(1) // Slot 1 - addressToStakedAmountSlot = int64(2) // Slot 2 - addressToValidatorIndexSlot = int64(3) // Slot 3 - stakedAmountSlot = int64(4) // Slot 4 - minNumValidatorSlot = int64(5) // Slot 5 - maxNumValidatorSlot = int64(6) // Slot 6 - addressToBLSPublicKeySlot = int64(7) // Slot 7 -) - -const ( - DefaultStakedBalance = "0x8AC7230489E80000" // 10 ETH - //nolint: lll - StakingSCBytecode = 
"0x6080604052600436106101185760003560e01c80637a6eea37116100a0578063d94c111b11610064578063d94c111b1461040a578063e387a7ed14610433578063e804fbf61461045e578063f90ecacc14610489578063facd743b146104c657610186565b80637a6eea37146103215780637dceceb81461034c578063af6da36e14610389578063c795c077146103b4578063ca1e7819146103df57610186565b8063373d6132116100e7578063373d6132146102595780633a4b66f1146102845780633c561f041461028e57806351a9ab32146102b9578063714ff425146102f657610186565b806302b751991461018b578063065ae171146101c85780632367f6b5146102055780632def66201461024257610186565b366101865761013c3373ffffffffffffffffffffffffffffffffffffffff16610503565b1561017c576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016101739061178a565b60405180910390fd5b610184610516565b005b600080fd5b34801561019757600080fd5b506101b260048036038101906101ad9190611380565b6105ed565b6040516101bf91906117e5565b60405180910390f35b3480156101d457600080fd5b506101ef60048036038101906101ea9190611380565b610605565b6040516101fc91906116ed565b60405180910390f35b34801561021157600080fd5b5061022c60048036038101906102279190611380565b610625565b60405161023991906117e5565b60405180910390f35b34801561024e57600080fd5b5061025761066e565b005b34801561026557600080fd5b5061026e610759565b60405161027b91906117e5565b60405180910390f35b61028c610763565b005b34801561029a57600080fd5b506102a36107cc565b6040516102b091906116cb565b60405180910390f35b3480156102c557600080fd5b506102e060048036038101906102db9190611380565b610972565b6040516102ed9190611708565b60405180910390f35b34801561030257600080fd5b5061030b610a12565b60405161031891906117e5565b60405180910390f35b34801561032d57600080fd5b50610336610a1c565b60405161034391906117ca565b60405180910390f35b34801561035857600080fd5b50610373600480360381019061036e9190611380565b610a28565b60405161038091906117e5565b60405180910390f35b34801561039557600080fd5b5061039e610a40565b6040516103ab91906117e5565b60405180910390f35b3480156103c057600080fd5b506103c9610a46565b6040516103d691906117e5565b60405180910390f35b3480156103eb57600080fd5b506103f4610a4c565b60405161040191906116a9565b60405180910390f35b34801561041657600080fd5b50610431600480360381019061042c91906113ad565b610ada565b005b34801561043f57600080fd5b50610448610b31565b60405161045591906117e5565b60405180910390f35b34801561046a57600080fd5b50610473610b37565b60405161048091906117e5565b60405180910390f35b34801561049557600080fd5b506104b060048036038101906104ab91906113f6565b610b41565b6040516104bd919061168e565b60405180910390f35b3480156104d257600080fd5b506104ed60048036038101906104e89190611380565b610b80565b6040516104fa91906116ed565b60405180910390f35b600080823b905060008111915050919050565b34600460008282546105289190611906565b9250508190555034600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825461057e9190611906565b9250508190555061058e33610bd6565b1561059d5761059c33610c4e565b5b3373ffffffffffffffffffffffffffffffffffffffff167f9e71bc8eea02a63969f509818f2dafb9254532904319f9dbda79b67bd34a5f3d346040516105e391906117e5565b60405180910390a2565b60036020528060005260406000206000915090505481565b60016020528060005260406000206000915054906101000a900460ff1681565b6000600260008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020549050919050565b61068d3373ffffffffffffffffffffffffffffffffffffffff16610503565b156106cd576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016106c49061178a565b60405180910390fd5b6000600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffff
ffffffffffffffffffffffffffffffff168152602001908152602001600020541161074f576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016107469061172a565b60405180910390fd5b610757610d9d565b565b6000600454905090565b6107823373ffffffffffffffffffffffffffffffffffffffff16610503565b156107c2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016107b99061178a565b60405180910390fd5b6107ca610516565b565b60606000808054905067ffffffffffffffff8111156107ee576107ed611b9e565b5b60405190808252806020026020018201604052801561082157816020015b606081526020019060019003908161080c5790505b50905060005b60008054905081101561096a576007600080838154811061084b5761084a611b6f565b5b9060005260206000200160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002080546108bb90611a36565b80601f01602080910402602001604051908101604052809291908181526020018280546108e790611a36565b80156109345780601f1061090957610100808354040283529160200191610934565b820191906000526020600020905b81548152906001019060200180831161091757829003601f168201915b505050505082828151811061094c5761094b611b6f565b5b6020026020010181905250808061096290611a99565b915050610827565b508091505090565b6007602052806000526040600020600091509050805461099190611a36565b80601f01602080910402602001604051908101604052809291908181526020018280546109bd90611a36565b8015610a0a5780601f106109df57610100808354040283529160200191610a0a565b820191906000526020600020905b8154815290600101906020018083116109ed57829003601f168201915b505050505081565b6000600554905090565b670de0b6b3a764000081565b60026020528060005260406000206000915090505481565b60065481565b60055481565b60606000805480602002602001604051908101604052809291908181526020018280548015610ad057602002820191906000526020600020905b8160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019060010190808311610a86575b5050505050905090565b80600760003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000209080519060200190610b2d929190611243565b5050565b60045481565b6000600654905090565b60008181548110610b5157600080fd5b906000526020600020016000915054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff169050919050565b6000610be182610eef565b158015610c475750670de0b6b3a76400006fffffffffffffffffffffffffffffffff16600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410155b9050919050565b60065460008054905010610c97576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c8e9061174a565b60405180910390fd5b60018060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff021916908315150217905550600080549050600360008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055506000819080600181540180825580915050600190039060005260206000200160009091909190916101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b6000600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905
06000600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508060046000828254610e38919061195c565b92505081905550610e4833610eef565b15610e5757610e5633610f45565b5b3373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015610e9d573d6000803e3d6000fd5b503373ffffffffffffffffffffffffffffffffffffffff167f0f5bb82176feb1b5e747e28471aa92156a04d9f3ab9f45f28e2d704232b93f7582604051610ee491906117e5565b60405180910390a250565b6000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff169050919050565b60055460008054905011610f8e576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610f85906117aa565b60405180910390fd5b600080549050600360008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410611014576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161100b9061176a565b60405180910390fd5b6000600360008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205490506000600160008054905061106c919061195c565b905080821461115a57600080828154811061108a57611089611b6f565b5b9060005260206000200160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905080600084815481106110cc576110cb611b6f565b5b9060005260206000200160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555082600360008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550505b6000600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff0219169083151502179055506000600360008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550600080548061120957611208611b40565b5b6001900381819060005260206000200160006101000a81549073ffffffffffffffffffffffffffffffffffffffff02191690559055505050565b82805461124f90611a36565b90600052602060002090601f01602090048101928261127157600085556112b8565b82601f1061128a57805160ff19168380011785556112b8565b828001600101855582156112b8579182015b828111156112b757825182559160200191906001019061129c565b5b5090506112c591906112c9565b5090565b5b808211156112e25760008160009055506001016112ca565b5090565b60006112f96112f484611825565b611800565b90508281526020810184848401111561131557611314611bd2565b5b6113208482856119f4565b509392505050565b60008135905061133781611d0b565b92915050565b600082601f83011261135257611351611bcd565b5b81356113628482602086016112e6565b91505092915050565b60008135905061137a81611d22565b92915050565b60006020828403121561139657611395611bdc565b5b60006113a484828501611328565b91505092915050565b6000602082840312156113c3576113c2611bdc565b5b600082013567ffffffffffffffff8111156113e1576113e0611bd7565b5b6113ed8482850161133d565b91505092915050565b60006020828403121561140c5761140b611bdc565b5b600061141a8482850161136b565b91505092915050565b600061142f838361144f565b60208301905092915050565b6000611447838361154f565b905092915050565b61145881611990565b82525050565b61146781611990565b82525050565b600061147882611876565b61148281856118b1565b935061148d83611856565b8060005b838110156114be5781516114a58882611423565b97506114b083611897565b925050600181019050611491565b5085935050505092915050565b60006114d68261
1881565b6114e081856118c2565b9350836020820285016114f285611866565b8060005b8581101561152e578484038952815161150f858261143b565b945061151a836118a4565b925060208a019950506001810190506114f6565b50829750879550505050505092915050565b611549816119a2565b82525050565b600061155a8261188c565b61156481856118d3565b9350611574818560208601611a03565b61157d81611be1565b840191505092915050565b60006115938261188c565b61159d81856118e4565b93506115ad818560208601611a03565b6115b681611be1565b840191505092915050565b60006115ce601d836118f5565b91506115d982611bf2565b602082019050919050565b60006115f16027836118f5565b91506115fc82611c1b565b604082019050919050565b60006116146012836118f5565b915061161f82611c6a565b602082019050919050565b6000611637601a836118f5565b915061164282611c93565b602082019050919050565b600061165a6040836118f5565b915061166582611cbc565b604082019050919050565b611679816119ae565b82525050565b611688816119ea565b82525050565b60006020820190506116a3600083018461145e565b92915050565b600060208201905081810360008301526116c3818461146d565b905092915050565b600060208201905081810360008301526116e581846114cb565b905092915050565b60006020820190506117026000830184611540565b92915050565b600060208201905081810360008301526117228184611588565b905092915050565b60006020820190508181036000830152611743816115c1565b9050919050565b60006020820190508181036000830152611763816115e4565b9050919050565b6000602082019050818103600083015261178381611607565b9050919050565b600060208201905081810360008301526117a38161162a565b9050919050565b600060208201905081810360008301526117c38161164d565b9050919050565b60006020820190506117df6000830184611670565b92915050565b60006020820190506117fa600083018461167f565b92915050565b600061180a61181b565b90506118168282611a68565b919050565b6000604051905090565b600067ffffffffffffffff8211156118405761183f611b9e565b5b61184982611be1565b9050602081019050919050565b6000819050602082019050919050565b6000819050602082019050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b600082825260208201905092915050565b600082825260208201905092915050565b600082825260208201905092915050565b600082825260208201905092915050565b600082825260208201905092915050565b6000611911826119ea565b915061191c836119ea565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0382111561195157611950611ae2565b5b828201905092915050565b6000611967826119ea565b9150611972836119ea565b92508282101561198557611984611ae2565b5b828203905092915050565b600061199b826119ca565b9050919050565b60008115159050919050565b60006fffffffffffffffffffffffffffffffff82169050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b82818337600083830152505050565b60005b83811015611a21578082015181840152602081019050611a06565b83811115611a30576000848401525b50505050565b60006002820490506001821680611a4e57607f821691505b60208210811415611a6257611a61611b11565b5b50919050565b611a7182611be1565b810181811067ffffffffffffffff82111715611a9057611a8f611b9e565b5b80604052505050565b6000611aa4826119ea565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff821415611ad757611ad6611ae2565b5b600182019050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604
160045260246000fd5b600080fd5b600080fd5b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4f6e6c79207374616b65722063616e2063616c6c2066756e6374696f6e000000600082015250565b7f56616c696461746f72207365742068617320726561636865642066756c6c206360008201527f6170616369747900000000000000000000000000000000000000000000000000602082015250565b7f696e646578206f7574206f662072616e67650000000000000000000000000000600082015250565b7f4f6e6c7920454f412063616e2063616c6c2066756e6374696f6e000000000000600082015250565b7f56616c696461746f72732063616e2774206265206c657373207468616e20746860008201527f65206d696e696d756d2072657175697265642076616c696461746f72206e756d602082015250565b611d1481611990565b8114611d1f57600080fd5b50565b611d2b816119ea565b8114611d3657600080fd5b5056fea26469706673582212201556e5927c99f1e21e8ae2bbc55b0b507bc60d9732fc9a5e25a0708b409c8c8064736f6c63430008070033" -) - -// PredeployStakingSC is a helper method for setting up the staking smart contract account, -// using the passed in validators as pre-staked validators -func PredeployStakingSC( - vals validators.Validators, - params PredeployParams, -) (*chain.GenesisAccount, error) { - // Set the code for the staking smart contract - // Code retrieved from https://github.com/0xPolygon/staking-contracts - scHex, _ := hex.DecodeHex(StakingSCBytecode) - stakingAccount := &chain.GenesisAccount{ - Code: scHex, - } - - // Parse the default staked balance value into *big.Int - val := DefaultStakedBalance - bigDefaultStakedBalance, err := common.ParseUint256orHex(&val) - - if err != nil { - return nil, fmt.Errorf("unable to generate DefaultStatkedBalance, %w", err) - } - - // Generate the empty account storage map - storageMap := make(map[types.Hash]types.Hash) - bigTrueValue := big.NewInt(1) - stakedAmount := big.NewInt(0) - bigMinNumValidators := big.NewInt(int64(params.MinValidatorCount)) - bigMaxNumValidators := big.NewInt(int64(params.MaxValidatorCount)) - valsLen := big.NewInt(0) - - if vals != nil { - valsLen = big.NewInt(int64(vals.Len())) - - for idx := 0; idx < vals.Len(); idx++ { - validator := vals.At(uint64(idx)) - - // Update the total staked amount - stakedAmount = stakedAmount.Add(stakedAmount, bigDefaultStakedBalance) - - // Get the storage indexes - storageIndexes := getStorageIndexes(validator, idx) - - // Set the value for the validators array - storageMap[types.BytesToHash(storageIndexes.ValidatorsIndex)] = - types.BytesToHash( - validator.Addr().Bytes(), - ) - - if blsValidator, ok := validator.(*validators.BLSValidator); ok { - setBytesToStorage( - storageMap, - storageIndexes.ValidatorBLSPublicKeyIndex, - blsValidator.BLSPublicKey, - ) - } - - // Set the value for the address -> validator array index mapping - storageMap[types.BytesToHash(storageIndexes.AddressToIsValidatorIndex)] = - types.BytesToHash(bigTrueValue.Bytes()) - - // Set the value for the address -> staked amount mapping - storageMap[types.BytesToHash(storageIndexes.AddressToStakedAmountIndex)] = - types.StringToHash(hex.EncodeBig(bigDefaultStakedBalance)) - - // Set the value for the address -> validator index mapping - storageMap[types.BytesToHash(storageIndexes.AddressToValidatorIndexIndex)] = - types.StringToHash(hex.EncodeUint64(uint64(idx))) - } - } - - // Set the value for the total staked amount - storageMap[types.BytesToHash(big.NewInt(stakedAmountSlot).Bytes())] = - types.BytesToHash(stakedAmount.Bytes()) - - // Set the value for the size of the validators array - storageMap[types.BytesToHash(big.NewInt(validatorsSlot).Bytes())] = - types.BytesToHash(valsLen.Bytes()) - - 
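// Note on the slot arithmetic above: the storage writes rely on the standard
// Solidity layout rules, where the value of mapping[key] declared at slot p
// lives at keccak256(pad32(key) ++ pad32(p)), and element i of a dynamic
// array declared at slot p lives at keccak256(pad32(p)) + i. The standalone
// sketch below illustrates those two rules with golang.org/x/crypto/sha3
// instead of the project's keccak helper; it mirrors getAddressMapping and
// getIndexWithOffset above but is not code taken from this repository, and
// the example address is purely hypothetical.
package main

import (
	"encoding/hex"
	"fmt"
	"math/big"

	"golang.org/x/crypto/sha3"
)

// pad32 left-pads b to 32 bytes, matching Solidity's storage word size.
func pad32(b []byte) []byte {
	out := make([]byte, 32)
	copy(out[32-len(b):], b)

	return out
}

// mappingSlot returns the storage slot holding mapping[key] for a mapping
// declared at the given slot: keccak256(pad32(key) ++ pad32(slot)).
func mappingSlot(key []byte, slot int64) []byte {
	h := sha3.NewLegacyKeccak256()
	h.Write(pad32(key))
	h.Write(pad32(big.NewInt(slot).Bytes()))

	return h.Sum(nil)
}

// arrayElemSlot returns the storage slot of element index of a dynamic array
// declared at the given slot: keccak256(pad32(slot)) + index.
func arrayElemSlot(slot int64, index uint64) *big.Int {
	h := sha3.NewLegacyKeccak256()
	h.Write(pad32(big.NewInt(slot).Bytes()))

	base := new(big.Int).SetBytes(h.Sum(nil))

	return base.Add(base, new(big.Int).SetUint64(index))
}

func main() {
	// Hypothetical validator address; slot 1 corresponds to the
	// addressToIsValidator mapping in the layout above.
	addr, _ := hex.DecodeString("0000000000000000000000000000000000000001")

	fmt.Println("mapping slot:     ", hex.EncodeToString(mappingSlot(addr, 1)))
	fmt.Println("validators[0] slot:", arrayElemSlot(0, 0).Text(16))
}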
// Set the value for the minimum number of validators - storageMap[types.BytesToHash(big.NewInt(minNumValidatorSlot).Bytes())] = - types.BytesToHash(bigMinNumValidators.Bytes()) - - // Set the value for the maximum number of validators - storageMap[types.BytesToHash(big.NewInt(maxNumValidatorSlot).Bytes())] = - types.BytesToHash(bigMaxNumValidators.Bytes()) - - // Save the storage map - stakingAccount.Storage = storageMap - - // Set the Staking SC balance to numValidators * defaultStakedBalance - stakingAccount.Balance = stakedAmount - - return stakingAccount, nil -} diff --git a/server/builtin.go b/server/builtin.go index 81ca49e887..a145c8ce80 100644 --- a/server/builtin.go +++ b/server/builtin.go @@ -5,7 +5,6 @@ import ( "github.com/0xPolygon/polygon-edge/consensus" consensusDev "github.com/0xPolygon/polygon-edge/consensus/dev" consensusDummy "github.com/0xPolygon/polygon-edge/consensus/dummy" - consensusIBFT "github.com/0xPolygon/polygon-edge/consensus/ibft" consensusPolyBFT "github.com/0xPolygon/polygon-edge/consensus/polybft" "github.com/0xPolygon/polygon-edge/forkmanager" "github.com/0xPolygon/polygon-edge/secrets" @@ -26,14 +25,12 @@ type ForkManagerInitialParamsFactory func(config *chain.Chain) (*forkmanager.For const ( DevConsensus ConsensusType = "dev" - IBFTConsensus ConsensusType = "ibft" PolyBFTConsensus ConsensusType = consensusPolyBFT.ConsensusName DummyConsensus ConsensusType = "dummy" ) var consensusBackends = map[ConsensusType]consensus.Factory{ DevConsensus: consensusDev.Factory, - IBFTConsensus: consensusIBFT.Factory, PolyBFTConsensus: consensusPolyBFT.Factory, DummyConsensus: consensusDummy.Factory, } diff --git a/server/server.go b/server/server.go index 8688105ee2..e88e3f27ca 100644 --- a/server/server.go +++ b/server/server.go @@ -146,10 +146,6 @@ func NewServer(config *Config) (*Server, error) { restoreProgression: progress.NewProgressionWrapper(progress.ChainSyncRestore), } - if config.Chain.Params.GetEngine() == string(IBFTConsensus) { - m.logger.Info(common.IBFTImportantNotice) - } - m.logger.Info("Data dir", "path", config.DataDir) var dirPaths = []string{ diff --git a/validators/bls.go b/validators/bls.go deleted file mode 100644 index 827f7613e1..0000000000 --- a/validators/bls.go +++ /dev/null @@ -1,136 +0,0 @@ -package validators - -import ( - "bytes" - "errors" - "fmt" - - "github.com/0xPolygon/polygon-edge/helper/hex" - "github.com/0xPolygon/polygon-edge/types" - "github.com/umbracle/fastrlp" -) - -var ( - ErrInvalidTypeAssert = errors.New("invalid type assert") -) - -type BLSValidatorPublicKey []byte - -// String returns a public key in hex -func (k BLSValidatorPublicKey) String() string { - return hex.EncodeToHex(k[:]) -} - -// MarshalText implements encoding.TextMarshaler -func (k BLSValidatorPublicKey) MarshalText() ([]byte, error) { - return []byte(k.String()), nil -} - -// UnmarshalText parses an BLS Public Key in hex -func (k *BLSValidatorPublicKey) UnmarshalText(input []byte) error { - kk, err := hex.DecodeHex(string(input)) - if err != nil { - return err - } - - *k = kk - - return nil -} - -// BLSValidator is a validator using BLS signing algorithm -type BLSValidator struct { - Address types.Address - BLSPublicKey BLSValidatorPublicKey -} - -// NewBLSValidator is a constructor of BLSValidator -func NewBLSValidator(addr types.Address, blsPubkey []byte) *BLSValidator { - return &BLSValidator{ - Address: addr, - BLSPublicKey: blsPubkey, - } -} - -// Type returns the ValidatorType of BLSValidator -func (v *BLSValidator) Type() ValidatorType { - 
return BLSValidatorType -} - -// String returns string representation of BLSValidator -// Format => [Address]:[BLSPublicKey] -func (v *BLSValidator) String() string { - return fmt.Sprintf( - "%s:%s", - v.Address.String(), - hex.EncodeToHex(v.BLSPublicKey), - ) -} - -// Addr returns the validator address -func (v *BLSValidator) Addr() types.Address { - return v.Address -} - -// Copy returns copy of BLS Validator -func (v *BLSValidator) Copy() Validator { - pubkey := make([]byte, len(v.BLSPublicKey)) - copy(pubkey, v.BLSPublicKey) - - return &BLSValidator{ - Address: v.Address, - BLSPublicKey: pubkey, - } -} - -// Equal checks the given validator matches with its data -func (v *BLSValidator) Equal(vr Validator) bool { - vv, ok := vr.(*BLSValidator) - if !ok { - return false - } - - return v.Address == vv.Address && bytes.Equal(v.BLSPublicKey, vv.BLSPublicKey) -} - -// MarshalRLPWith is a RLP Marshaller -func (v *BLSValidator) MarshalRLPWith(arena *fastrlp.Arena) *fastrlp.Value { - vv := arena.NewArray() - - vv.Set(arena.NewBytes(v.Address.Bytes())) - vv.Set(arena.NewCopyBytes(v.BLSPublicKey)) - - return vv -} - -// UnmarshalRLPFrom is a RLP Unmarshaller -func (v *BLSValidator) UnmarshalRLPFrom(p *fastrlp.Parser, val *fastrlp.Value) error { - elems, err := val.GetElems() - if err != nil { - return err - } - - if len(elems) < 2 { - return fmt.Errorf("incorrect number of elements to decode BLSValidator, expected 2 but found %d", len(elems)) - } - - if err := elems[0].GetAddr(v.Address[:]); err != nil { - return fmt.Errorf("failed to decode Address: %w", err) - } - - if v.BLSPublicKey, err = elems[1].GetBytes(v.BLSPublicKey); err != nil { - return fmt.Errorf("failed to decode BLSPublicKey: %w", err) - } - - return nil -} - -// Bytes returns bytes of BLSValidator in RLP encode -func (v *BLSValidator) Bytes() []byte { - return types.MarshalRLPTo(v.MarshalRLPWith, nil) -} - -// SetFromBytes parses given bytes in RLP encode and map to its fields -func (v *BLSValidator) SetFromBytes(input []byte) error { - return types.UnmarshalRlp(v.UnmarshalRLPFrom, input) -} diff --git a/validators/bls_test.go b/validators/bls_test.go deleted file mode 100644 index 6abbb3f14a..0000000000 --- a/validators/bls_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package validators - -import ( - "encoding/json" - "fmt" - "strings" - "testing" - - "github.com/0xPolygon/polygon-edge/helper/hex" - "github.com/0xPolygon/polygon-edge/types" - "github.com/stretchr/testify/assert" -) - -func TestBLSValidatorPublicKeyString(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - hex.EncodeToHex([]byte(testBLSPubKey1)), - testBLSPubKey1.String(), - ) -} - -func TestBLSValidatorPublicKeyMarshal(t *testing.T) { - t.Parallel() - - res, err := json.Marshal(testBLSPubKey1) - - assert.NoError(t, err) - assert.Equal( - t, - hex.EncodeToHex([]byte(testBLSPubKey1)), - strings.Trim( - // remove double quotes in json - string(res), - "\"", - ), - ) -} - -func TestBLSValidatorPublicKeyUnmarshal(t *testing.T) { - t.Parallel() - - key := BLSValidatorPublicKey{} - - err := json.Unmarshal( - []byte( - fmt.Sprintf("\"%s\"", hex.EncodeToHex(testBLSPubKey2)), - ), - &key, - ) - - assert.NoError(t, err) - assert.Equal( - t, - testBLSPubKey2, - key, - ) -} - -func TestNewBLSValidator(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - &BLSValidator{addr1, testBLSPubKey1}, - NewBLSValidator(addr1, testBLSPubKey1), - ) -} - -func TestBLSValidatorType(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - BLSValidatorType, - NewBLSValidator(addr1, 
testBLSPubKey1).Type(), - ) -} - -func TestBLSValidatorString(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - fmt.Sprintf( - "%s:%s", - addr1.String(), - "0x"+hex.EncodeToString(testBLSPubKey1), - ), - NewBLSValidator(addr1, testBLSPubKey1).String(), - ) -} - -func TestBLSValidatorAddr(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - addr1, - NewBLSValidator(addr1, testBLSPubKey1).Addr(), - ) -} - -func TestBLSValidatorCopy(t *testing.T) { - t.Parallel() - - v1 := NewBLSValidator(addr1, testBLSPubKey1) - v2 := v1.Copy() - - assert.Equal(t, v1, v2) - - // check the addresses are different - typedV2, ok := v2.(*BLSValidator) - - assert.True(t, ok) - assert.NotSame(t, v1.Address, typedV2.Address) - assert.NotSame(t, v1.BLSPublicKey, typedV2.BLSPublicKey) -} - -func TestBLSValidatorEqual(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - val1 *BLSValidator - val2 *BLSValidator - expected bool - }{ - { - name: "equal", - val1: NewBLSValidator(addr1, testBLSPubKey1), - val2: NewBLSValidator(addr1, testBLSPubKey1), - expected: true, - }, - { - name: "addr does not equal", - val1: NewBLSValidator(addr1, testBLSPubKey1), - val2: NewBLSValidator(addr2, testBLSPubKey1), - expected: false, - }, - { - name: "public key does not equal", - val1: NewBLSValidator(addr1, testBLSPubKey1), - val2: NewBLSValidator(addr1, testBLSPubKey2), - expected: false, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.expected, - test.val1.Equal(test.val2), - ) - }) - } -} - -func TestBLSValidatorMarshalAndUnmarshal(t *testing.T) { - t.Parallel() - - val1 := NewBLSValidator(addr1, testBLSPubKey1) - - marshalRes := types.MarshalRLPTo(val1.MarshalRLPWith, nil) - - val2 := new(BLSValidator) - - assert.NoError( - t, - types.UnmarshalRlp(val2.UnmarshalRLPFrom, marshalRes), - ) - - assert.Equal(t, val1, val2) -} - -func TestBLSValidatorBytes(t *testing.T) { - t.Parallel() - - val := NewBLSValidator(addr1, testBLSPubKey1) - - // result of Bytes() equals the data encoded in RLP - assert.Equal( - t, - types.MarshalRLPTo(val.MarshalRLPWith, nil), - val.Bytes(), - ) -} - -func TestBLSValidatorFromBytes(t *testing.T) { - t.Parallel() - - val1 := NewBLSValidator(addr1, testBLSPubKey1) - marshalledData := types.MarshalRLPTo(val1.MarshalRLPWith, nil) - - val2 := new(BLSValidator) - - // SetFromBytes reads RLP encoded data - assert.NoError(t, val2.SetFromBytes(marshalledData)) - - assert.Equal( - t, - val1, - val2, - ) -} diff --git a/validators/ecdsa.go b/validators/ecdsa.go deleted file mode 100644 index 1ec1a5b8bd..0000000000 --- a/validators/ecdsa.go +++ /dev/null @@ -1,72 +0,0 @@ -package validators - -import ( - "github.com/0xPolygon/polygon-edge/types" - "github.com/umbracle/fastrlp" -) - -// BLSValidator is a validator using ECDSA signing algorithm -type ECDSAValidator struct { - Address types.Address -} - -// NewECDSAValidator is a constructor of ECDSAValidator -func NewECDSAValidator(addr types.Address) *ECDSAValidator { - return &ECDSAValidator{ - Address: addr, - } -} - -// Type returns the ValidatorType of ECDSAValidator -func (v *ECDSAValidator) Type() ValidatorType { - return ECDSAValidatorType -} - -// String returns string representation of ECDSAValidator -func (v *ECDSAValidator) String() string { - return v.Address.String() -} - -// Addr returns the validator address -func (v *ECDSAValidator) Addr() types.Address { - return v.Address -} - -// Copy returns copy of ECDSAValidator -func (v 
*ECDSAValidator) Copy() Validator { - return &ECDSAValidator{ - Address: v.Address, - } -} - -// Equal checks the given validator matches with its data -func (v *ECDSAValidator) Equal(vr Validator) bool { - vv, ok := vr.(*ECDSAValidator) - if !ok { - return false - } - - return v.Address == vv.Address -} - -// MarshalRLPWith is a RLP Marshaller -func (v *ECDSAValidator) MarshalRLPWith(arena *fastrlp.Arena) *fastrlp.Value { - return arena.NewBytes(v.Address.Bytes()) -} - -// UnmarshalRLPFrom is a RLP Unmarshaller -func (v *ECDSAValidator) UnmarshalRLPFrom(p *fastrlp.Parser, val *fastrlp.Value) error { - return val.GetAddr(v.Address[:]) -} - -// Bytes returns bytes of ECDSAValidator -func (v *ECDSAValidator) Bytes() []byte { - return v.Address.Bytes() -} - -// SetFromBytes parses given bytes -func (v *ECDSAValidator) SetFromBytes(input []byte) error { - v.Address = types.BytesToAddress(input) - - return nil -} diff --git a/validators/ecdsa_test.go b/validators/ecdsa_test.go deleted file mode 100644 index 399c8c6e69..0000000000 --- a/validators/ecdsa_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package validators - -import ( - "testing" - - "github.com/0xPolygon/polygon-edge/types" - "github.com/stretchr/testify/assert" -) - -func TestNewECDSAValidator(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - &ECDSAValidator{addr1}, - NewECDSAValidator(addr1), - ) -} - -func TestECDSAValidatorType(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - ECDSAValidatorType, - NewECDSAValidator(addr1).Type(), - ) -} - -func TestECDSAValidatorString(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - addr1.String(), - NewECDSAValidator(addr1).String(), - ) -} - -func TestECDSAValidatorAddr(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - addr1, - NewECDSAValidator(addr1).Addr(), - ) -} - -func TestECDSAValidatorCopy(t *testing.T) { - t.Parallel() - - v1 := NewECDSAValidator(addr1) - - v2 := v1.Copy() - - assert.Equal(t, v1, v2) - - // check the addresses are different - typedV2, ok := v2.(*ECDSAValidator) - - assert.True(t, ok) - assert.NotSame(t, v1.Address, typedV2.Address) -} - -func TestECDSAValidatorEqual(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - val1 *ECDSAValidator - val2 *ECDSAValidator - expected bool - }{ - { - name: "equal", - val1: NewECDSAValidator(addr1), - val2: NewECDSAValidator(addr1), - expected: true, - }, - { - name: "not equal", - val1: NewECDSAValidator(addr1), - val2: NewECDSAValidator(addr2), - expected: false, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.expected, - test.val1.Equal(test.val2), - ) - }) - } -} - -func TestECDSAValidatorMarshalAndUnmarshal(t *testing.T) { - t.Parallel() - - val1 := NewECDSAValidator(addr1) - - marshalRes := types.MarshalRLPTo(val1.MarshalRLPWith, nil) - - val2 := new(ECDSAValidator) - - assert.NoError( - t, - types.UnmarshalRlp(val2.UnmarshalRLPFrom, marshalRes), - ) - - assert.Equal(t, val1, val2) -} - -func TestECDSAValidatorBytes(t *testing.T) { - t.Parallel() - - val := NewECDSAValidator(addr1) - - // result of Bytes() equals the data encoded in RLP - assert.Equal( - t, - val.Address.Bytes(), - val.Bytes(), - ) -} - -func TestECDSAValidatorFromBytes(t *testing.T) { - t.Parallel() - - val1 := NewECDSAValidator(addr1) - marshalledData := types.MarshalRLPTo(val1.MarshalRLPWith, nil) - - val2 := new(ECDSAValidator) - - // SetFromBytes reads RLP encoded data - assert.NoError(t, 
val2.SetFromBytes(marshalledData)) - - assert.Equal( - t, - val1, - val2, - ) -} diff --git a/validators/helper.go b/validators/helper.go deleted file mode 100644 index e7a60713bc..0000000000 --- a/validators/helper.go +++ /dev/null @@ -1,131 +0,0 @@ -package validators - -import ( - "encoding/hex" - "errors" - "fmt" - "strings" - - "github.com/0xPolygon/polygon-edge/types" -) - -var ( - ErrInvalidBLSValidatorFormat = errors.New("invalid validator format, expected [Validator Address]:[BLS Public Key]") -) - -// NewValidatorFromType instantiates a validator by specified type -func NewValidatorFromType(t ValidatorType) (Validator, error) { - switch t { - case ECDSAValidatorType: - return new(ECDSAValidator), nil - case BLSValidatorType: - return new(BLSValidator), nil - } - - return nil, ErrInvalidValidatorType -} - -// NewValidatorSetFromType instantiates a validators by specified type -func NewValidatorSetFromType(t ValidatorType) Validators { - switch t { - case ECDSAValidatorType: - return NewECDSAValidatorSet() - case BLSValidatorType: - return NewBLSValidatorSet() - } - - return nil -} - -// NewECDSAValidatorSet creates Validator Set for ECDSAValidator with initialized validators -func NewECDSAValidatorSet(ecdsaValidators ...*ECDSAValidator) Validators { - validators := make([]Validator, len(ecdsaValidators)) - - for idx, val := range ecdsaValidators { - validators[idx] = Validator(val) - } - - return &Set{ - ValidatorType: ECDSAValidatorType, - Validators: validators, - } -} - -// NewBLSValidatorSet creates Validator Set for BLSValidator with initialized validators -func NewBLSValidatorSet(blsValidators ...*BLSValidator) Validators { - validators := make([]Validator, len(blsValidators)) - - for idx, val := range blsValidators { - validators[idx] = Validator(val) - } - - return &Set{ - ValidatorType: BLSValidatorType, - Validators: validators, - } -} - -// ParseValidator parses a validator represented in string -func ParseValidator(validatorType ValidatorType, validator string) (Validator, error) { - switch validatorType { - case ECDSAValidatorType: - return ParseECDSAValidator(validator), nil - case BLSValidatorType: - return ParseBLSValidator(validator) - default: - // shouldn't reach here - return nil, fmt.Errorf("invalid validator type: %s", validatorType) - } -} - -// ParseValidators parses an array of validator represented in string -func ParseValidators(validatorType ValidatorType, rawValidators []string) (Validators, error) { - set := NewValidatorSetFromType(validatorType) - if set == nil { - return nil, fmt.Errorf("invalid validator type: %s", validatorType) - } - - for _, s := range rawValidators { - validator, err := ParseValidator(validatorType, s) - if err != nil { - return nil, err - } - - if err := set.Add(validator); err != nil { - return nil, err - } - } - - return set, nil -} - -// ParseBLSValidator parses ECDSAValidator represented in string -func ParseECDSAValidator(validator string) *ECDSAValidator { - return &ECDSAValidator{ - Address: types.StringToAddress(validator), - } -} - -// ParseBLSValidator parses BLSValidator represented in string -func ParseBLSValidator(validator string) (*BLSValidator, error) { - subValues := strings.Split(validator, ":") - - if len(subValues) != 2 { - return nil, ErrInvalidBLSValidatorFormat - } - - addrBytes, err := hex.DecodeString(strings.TrimPrefix(subValues[0], "0x")) - if err != nil { - return nil, fmt.Errorf("failed to parse address: %w", err) - } - - pubKeyBytes, err := hex.DecodeString(strings.TrimPrefix(subValues[1], "0x")) 
- if err != nil { - return nil, fmt.Errorf("failed to parse BLS Public Key: %w", err) - } - - return &BLSValidator{ - Address: types.BytesToAddress(addrBytes), - BLSPublicKey: pubKeyBytes, - }, nil -} diff --git a/validators/helper_test.go b/validators/helper_test.go deleted file mode 100644 index 341368da48..0000000000 --- a/validators/helper_test.go +++ /dev/null @@ -1,319 +0,0 @@ -package validators - -import ( - "encoding/hex" - "errors" - "fmt" - "testing" - - testHelper "github.com/0xPolygon/polygon-edge/helper/tests" - "github.com/0xPolygon/polygon-edge/types" - "github.com/stretchr/testify/assert" -) - -var ( - addr1 = types.StringToAddress("1") - addr2 = types.StringToAddress("2") - testBLSPubKey1 = BLSValidatorPublicKey([]byte("bls_pubkey1")) - testBLSPubKey2 = BLSValidatorPublicKey([]byte("bls_pubkey2")) - - ecdsaValidator1 = NewECDSAValidator(addr1) - ecdsaValidator2 = NewECDSAValidator(addr2) - blsValidator1 = NewBLSValidator(addr1, testBLSPubKey1) - blsValidator2 = NewBLSValidator(addr2, testBLSPubKey2) - - fakeValidatorType = ValidatorType("fake") -) - -func createTestBLSValidatorString( - addr types.Address, - blsPubKey []byte, -) string { - return fmt.Sprintf( - "%s:%s", - addr.String(), - "0x"+hex.EncodeToString(blsPubKey), - ) -} - -func TestNewValidatorFromType(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - validatorType ValidatorType - expected Validator - err error - }{ - { - name: "ECDSAValidator", - validatorType: ECDSAValidatorType, - expected: new(ECDSAValidator), - err: nil, - }, - { - name: "BLSValidator", - validatorType: BLSValidatorType, - expected: new(BLSValidator), - err: nil, - }, - { - name: "undefined type", - validatorType: fakeValidatorType, - expected: nil, - err: ErrInvalidValidatorType, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := NewValidatorFromType(test.validatorType) - - assert.Equal( - t, - test.expected, - res, - ) - - assert.ErrorIs( - t, - test.err, - err, - ) - }) - } -} - -func TestNewValidatorSetFromType(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - validatorType ValidatorType - expected Validators - }{ - { - name: "ECDSAValidators", - validatorType: ECDSAValidatorType, - expected: &Set{ - ValidatorType: ECDSAValidatorType, - Validators: []Validator{}, - }, - }, - { - name: "BLSValidators", - validatorType: BLSValidatorType, - expected: &Set{ - ValidatorType: BLSValidatorType, - Validators: []Validator{}, - }, - }, - { - name: "undefined type", - validatorType: fakeValidatorType, - expected: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.expected, - NewValidatorSetFromType(test.validatorType), - ) - }) - } -} - -func TestParseValidator(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - // inputs - validatorType ValidatorType - validatorStr string - // outputs - expectedValidator Validator - expectedErr error - }{ - { - name: "ECDSAValidator", - validatorType: ECDSAValidatorType, - validatorStr: addr1.String(), - expectedValidator: ecdsaValidator1, - expectedErr: nil, - }, - { - name: "BLSValidator", - validatorType: BLSValidatorType, - validatorStr: createTestBLSValidatorString(addr1, testBLSPubKey1), - expectedValidator: blsValidator1, - expectedErr: nil, - }, - { - name: "undefined type", - validatorType: fakeValidatorType, - validatorStr: addr1.String(), - expectedValidator: nil, - 
expectedErr: fmt.Errorf("invalid validator type: %s", fakeValidatorType), - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - val, err := ParseValidator( - test.validatorType, - test.validatorStr, - ) - - assert.Equal(t, test.expectedValidator, val) - - assert.Equal(t, test.expectedErr, err) - }) - } -} - -func TestParseValidators(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - // inputs - validatorType ValidatorType - validatorStrs []string - // outputs - expectedValidators Validators - expectedErr error - }{ - { - name: "ECDSAValidator", - validatorType: ECDSAValidatorType, - validatorStrs: []string{ - addr1.String(), - addr2.String(), - }, - expectedValidators: NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - expectedErr: nil, - }, - { - name: "BLSValidator", - validatorType: BLSValidatorType, - validatorStrs: []string{ - createTestBLSValidatorString(addr1, testBLSPubKey1), - createTestBLSValidatorString(addr2, testBLSPubKey2), - }, - expectedValidators: NewBLSValidatorSet( - blsValidator1, - blsValidator2, - ), - expectedErr: nil, - }, - { - name: "undefined type", - validatorType: fakeValidatorType, - validatorStrs: []string{ - createTestBLSValidatorString(addr1, testBLSPubKey1), - createTestBLSValidatorString(addr2, testBLSPubKey2), - }, - expectedValidators: nil, - expectedErr: fmt.Errorf("invalid validator type: %s", fakeValidatorType), - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - vals, err := ParseValidators( - test.validatorType, - test.validatorStrs, - ) - - assert.Equal(t, test.expectedValidators, vals) - - assert.Equal(t, test.expectedErr, err) - }) - } -} - -func TestParseECDSAValidator(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - ecdsaValidator1, - ParseECDSAValidator(addr1.String()), - ) -} - -func TestParseBLSValidator(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - // inputs - validatorStr string - // outputs - expectedValidator *BLSValidator - expectedErr error - }{ - { - name: "should parse correctly", - validatorStr: createTestBLSValidatorString(addr1, testBLSPubKey1), - expectedValidator: blsValidator1, - expectedErr: nil, - }, - { - name: "should return error for incorrect format", - validatorStr: addr1.String(), - expectedValidator: nil, - expectedErr: ErrInvalidBLSValidatorFormat, - }, - { - name: "should return error for incorrect Address format", - validatorStr: fmt.Sprintf("%s:%s", "aaaaa", testBLSPubKey1.String()), - expectedValidator: nil, - expectedErr: errors.New("failed to parse address:"), - }, - { - name: "should return for incorrect BLS Public Key format", - validatorStr: fmt.Sprintf("%s:%s", addr1.String(), "bbbbb"), - expectedValidator: nil, - expectedErr: errors.New("failed to parse BLS Public Key:"), - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - val, err := ParseBLSValidator( - test.validatorStr, - ) - - assert.Equal(t, test.expectedValidator, val) - testHelper.AssertErrorMessageContains(t, test.expectedErr, err) - }) - } -} diff --git a/validators/json_test.go b/validators/json_test.go deleted file mode 100644 index a7d0ccfa7c..0000000000 --- a/validators/json_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package validators - -import ( - "encoding/json" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestECDSAValidatorsMarshalJSON(t *testing.T) { - 
t.Parallel() - - validators := &Set{ - ValidatorType: ECDSAValidatorType, - Validators: []Validator{ - &ECDSAValidator{addr1}, - &ECDSAValidator{addr2}, - }, - } - - res, err := json.Marshal(validators) - - assert.NoError(t, err) - - assert.JSONEq( - t, - fmt.Sprintf( - `[ - { - "Address": "%s" - }, - { - "Address": "%s" - } - ]`, - addr1.String(), - addr2.String(), - ), - string(res), - ) -} - -func TestECDSAValidatorsUnmarshalJSON(t *testing.T) { - t.Parallel() - - inputStr := fmt.Sprintf( - `[ - { - "Address": "%s" - }, - { - "Address": "%s" - } - ]`, - addr1.String(), - addr2.String(), - ) - - validators := NewECDSAValidatorSet() - - assert.NoError( - t, - json.Unmarshal([]byte(inputStr), validators), - ) - - assert.Equal( - t, - &Set{ - ValidatorType: ECDSAValidatorType, - Validators: []Validator{ - &ECDSAValidator{addr1}, - &ECDSAValidator{addr2}, - }, - }, - validators, - ) -} - -func TestBLSValidatorsMarshalJSON(t *testing.T) { - t.Parallel() - - validators := &Set{ - ValidatorType: BLSValidatorType, - Validators: []Validator{ - &BLSValidator{addr1, testBLSPubKey1}, - &BLSValidator{addr2, testBLSPubKey2}, - }, - } - - res, err := json.Marshal(validators) - - assert.NoError(t, err) - - assert.JSONEq( - t, - fmt.Sprintf( - `[ - { - "Address": "%s", - "BLSPublicKey": "%s" - }, - { - "Address": "%s", - "BLSPublicKey": "%s" - } - ]`, - addr1, - testBLSPubKey1, - addr2, - testBLSPubKey2, - ), - string(res), - ) -} - -func TestBLSValidatorsUnmarshalJSON(t *testing.T) { - t.Parallel() - - inputStr := fmt.Sprintf( - `[ - { - "Address": "%s", - "BLSPublicKey": "%s" - }, - { - "Address": "%s", - "BLSPublicKey": "%s" - } - ]`, - addr1, - testBLSPubKey1, - addr2, - testBLSPubKey2, - ) - - validators := NewBLSValidatorSet() - - assert.NoError( - t, - json.Unmarshal([]byte(inputStr), validators), - ) - - assert.Equal( - t, - &Set{ - ValidatorType: BLSValidatorType, - Validators: []Validator{ - &BLSValidator{addr1, testBLSPubKey1}, - &BLSValidator{addr2, testBLSPubKey2}, - }, - }, - validators, - ) -} diff --git a/validators/set.go b/validators/set.go deleted file mode 100644 index c7cc99f57e..0000000000 --- a/validators/set.go +++ /dev/null @@ -1,200 +0,0 @@ -package validators - -import ( - "encoding/json" - - "github.com/0xPolygon/polygon-edge/types" - "github.com/umbracle/fastrlp" -) - -type Set struct { - ValidatorType ValidatorType - Validators []Validator -} - -// Type returns the type of validator -func (s *Set) Type() ValidatorType { - return s.ValidatorType -} - -// Len returns the size of its collection -func (s *Set) Len() int { - return len(s.Validators) -} - -// Equal checks the given validators matches with its data -func (s *Set) Equal(ss Validators) bool { - if s.ValidatorType != ss.Type() { - return false - } - - if s.Len() != ss.Len() { - return false - } - - for idx := 0; idx < s.Len(); idx++ { - val1 := s.At(uint64(idx)) - val2 := ss.At(uint64(idx)) - - if !val1.Equal(val2) { - return false - } - } - - return true -} - -// Copy returns a copy of BLSValidators -func (s *Set) Copy() Validators { - cloneValidators := make([]Validator, len(s.Validators)) - - for idx, val := range s.Validators { - cloneValidators[idx] = val.Copy() - } - - return &Set{ - ValidatorType: s.ValidatorType, - Validators: cloneValidators, - } -} - -// At returns a validator at specified index in the collection -func (s *Set) At(index uint64) Validator { - return s.Validators[index] -} - -// Index returns the index of the validator whose address matches with the given address -func (s *Set) Index(addr 
types.Address) int64 { - for i, val := range s.Validators { - if val.Addr() == addr { - return int64(i) - } - } - - return -1 -} - -// Includes return the bool indicating whether the validator -// whose address matches with the given address exists or not -func (s *Set) Includes(addr types.Address) bool { - return s.Index(addr) != -1 -} - -// Add adds a validator into the collection -func (s *Set) Add(val Validator) error { - if s.ValidatorType != val.Type() { - return ErrMismatchValidatorType - } - - if s.Includes(val.Addr()) { - return ErrValidatorAlreadyExists - } - - s.Validators = append(s.Validators, val) - - return nil -} - -// Del removes a validator from the collection -func (s *Set) Del(val Validator) error { - if s.ValidatorType != val.Type() { - return ErrMismatchValidatorType - } - - index := s.Index(val.Addr()) - - if index == -1 { - return ErrValidatorNotFound - } - - s.Validators = append(s.Validators[:index], s.Validators[index+1:]...) - - return nil -} - -// Merge introduces the given collection into its collection -func (s *Set) Merge(ss Validators) error { - if s.ValidatorType != ss.Type() { - return ErrMismatchValidatorsType - } - - for idx := 0; idx < ss.Len(); idx++ { - newVal := ss.At(uint64(idx)) - - if s.Includes(newVal.Addr()) { - continue - } - - if err := s.Add(newVal); err != nil { - return err - } - } - - return nil -} - -// MarshalRLPWith is a RLP Marshaller -func (s *Set) MarshalRLPWith(arena *fastrlp.Arena) *fastrlp.Value { - vv := arena.NewArray() - - for _, v := range s.Validators { - vv.Set(v.MarshalRLPWith(arena)) - } - - return vv -} - -// UnmarshalRLPFrom is a RLP Unmarshaller -func (s *Set) UnmarshalRLPFrom(p *fastrlp.Parser, val *fastrlp.Value) error { - elems, err := val.GetElems() - if err != nil { - return err - } - - s.Validators = make([]Validator, len(elems)) - - for idx, e := range elems { - if s.Validators[idx], err = NewValidatorFromType(s.ValidatorType); err != nil { - return err - } - - if err := s.Validators[idx].UnmarshalRLPFrom(p, e); err != nil { - return err - } - } - - return nil -} - -// Marshal implements json marshal function -func (s *Set) MarshalJSON() ([]byte, error) { - return json.Marshal(s.Validators) -} - -// UnmarshalJSON implements json unmarshal function -func (s *Set) UnmarshalJSON(data []byte) error { - var ( - rawValidators = []json.RawMessage{} - err error - ) - - if err = json.Unmarshal(data, &rawValidators); err != nil { - return err - } - - validators := make([]Validator, len(rawValidators)) - - for idx := range validators { - if validators[idx], err = NewValidatorFromType(s.ValidatorType); err != nil { - return err - } - - if err := json.Unmarshal(rawValidators[idx], validators[idx]); err != nil { - return err - } - } - - s.Validators = validators - - return nil -} diff --git a/validators/set_test.go b/validators/set_test.go deleted file mode 100644 index c8b0a0ed0d..0000000000 --- a/validators/set_test.go +++ /dev/null @@ -1,617 +0,0 @@ -package validators - -import ( - "testing" - - "github.com/0xPolygon/polygon-edge/types" - "github.com/stretchr/testify/assert" -) - -func TestSetType(t *testing.T) { - t.Parallel() - - t.Run("ECDSAValidators", func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - ECDSAValidatorType, - NewECDSAValidatorSet().Type(), - ) - }) - - t.Run("BLSValidators", func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - BLSValidatorType, - NewBLSValidatorSet().Type(), - ) - }) -} - -func TestSetLen(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - 2, - 
NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ).Len(), - ) -} - -func TestSetEqual(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - vals1 Validators - vals2 Validators - expected bool - }{ - { - name: "types are not equal", - vals1: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - vals2: NewBLSValidatorSet( - NewBLSValidator(addr1, testBLSPubKey1), - NewBLSValidator(addr2, testBLSPubKey2), - ), - expected: false, - }, - { - name: "sizes are not equal", - vals1: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - vals2: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - ), - expected: false, - }, - { - name: "equal (ECDSAValidators)", - vals1: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - vals2: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - expected: true, - }, - { - name: "not equal (ECDSAValidators)", - vals1: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - vals2: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr1), - ), - expected: false, - }, - { - name: "equal (BLSValidators)", - vals1: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - vals2: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - expected: true, - }, - { - name: "not equal (BLSValidators)", - vals1: NewBLSValidatorSet( - NewBLSValidator(addr1, testBLSPubKey1), - NewBLSValidator(addr2, testBLSPubKey2), - ), - vals2: NewBLSValidatorSet( - NewBLSValidator(addr1, testBLSPubKey1), - NewBLSValidator(addr1, testBLSPubKey1), - ), - expected: false, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.expected, - test.vals1.Equal(test.vals2), - ) - }) - } -} - -func TestSetCopy(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - validators Validators - }{ - { - name: "ECDSAValidators", - validators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - }, - { - name: "BLSValidators", - validators: NewBLSValidatorSet( - NewBLSValidator(addr1, testBLSPubKey1), - NewBLSValidator(addr1, testBLSPubKey1), - ), - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - copied := test.validators.Copy() - - assert.Equal(t, test.validators, copied) - - // check the addresses are different - for i := 0; i < test.validators.Len(); i++ { - assert.NotSame( - t, - test.validators.At(uint64(i)), - copied.At(uint64(i)), - ) - } - }) - } -} - -func TestSetAt(t *testing.T) { - t.Parallel() - - validators := NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ) - - set, ok := validators.(*Set) - assert.True(t, ok) - - for idx, val := range set.Validators { - assert.Equal( - t, - val, - set.At(uint64(idx)), - ) - - // check the addresses are same - assert.Same( - t, - val, - set.At(uint64(idx)), - ) - } -} - -func TestSetIndex(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - validators Validators - addr types.Address - expected int64 - }{ - { - name: "ECDSAValidators", - validators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - addr: addr1, - expected: 0, - }, - { - name: "BLSValidators", - validators: NewBLSValidatorSet( - 
NewBLSValidator(addr1, testBLSPubKey1), - NewBLSValidator(addr2, testBLSPubKey2), - ), - addr: addr2, - expected: 1, - }, - { - name: "not found", - validators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - addr: types.StringToAddress("fake"), - expected: -1, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.expected, - test.validators.Index(test.addr), - ) - }) - } -} - -func TestSetIncludes(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - validators Validators - addr types.Address - expected bool - }{ - { - name: "ECDSAValidators", - validators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - addr: addr1, - expected: true, - }, - { - name: "BLSValidators", - validators: NewBLSValidatorSet( - NewBLSValidator(addr1, testBLSPubKey1), - NewBLSValidator(addr2, testBLSPubKey2), - ), - addr: addr2, - expected: true, - }, - { - name: "not found", - validators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - addr: types.StringToAddress("fake"), - expected: false, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.expected, - test.validators.Includes(test.addr), - ) - }) - } -} - -func TestSetAdd(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - validators Validators - newValidator Validator - expectedErr error - expectedValidators Validators - }{ - { - name: "should return error in case of type mismatch", - validators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - ), - newValidator: NewBLSValidator(addr2, testBLSPubKey2), - expectedErr: ErrMismatchValidatorType, - expectedValidators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - ), - }, - { - name: "should return error in case of duplicated validator", - validators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - ), - newValidator: NewECDSAValidator(addr1), - expectedErr: ErrValidatorAlreadyExists, - expectedValidators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - ), - }, - { - name: "should add ECDSA Validator", - validators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - ), - newValidator: NewECDSAValidator(addr2), - expectedErr: nil, - expectedValidators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - }, - { - name: "should add BLS Validator", - validators: NewBLSValidatorSet( - NewBLSValidator(addr1, testBLSPubKey1), - ), - newValidator: NewBLSValidator(addr2, testBLSPubKey2), - expectedErr: nil, - expectedValidators: NewBLSValidatorSet( - NewBLSValidator(addr1, testBLSPubKey1), - NewBLSValidator(addr2, testBLSPubKey2), - ), - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.ErrorIs( - t, - test.expectedErr, - test.validators.Add(test.newValidator), - ) - - assert.Equal( - t, - test.expectedValidators, - test.validators, - ) - }) - } -} - -func TestSetDel(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - validators Validators - removeValidator Validator - expectedErr error - expectedValidators Validators - }{ - { - name: "should return error in case of type mismatch", - validators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - ), - removeValidator: NewBLSValidator(addr2, testBLSPubKey2), - expectedErr: ErrMismatchValidatorType, - 
expectedValidators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - ), - }, - { - name: "should return error in case of non-existing validator", - validators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - ), - removeValidator: NewECDSAValidator(addr2), - expectedErr: ErrValidatorNotFound, - expectedValidators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - ), - }, - { - name: "should remove ECDSA Validator", - validators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - removeValidator: NewECDSAValidator(addr1), - expectedErr: nil, - expectedValidators: NewECDSAValidatorSet( - NewECDSAValidator(addr2), - ), - }, - { - name: "should remove BLS Validator", - validators: NewBLSValidatorSet( - NewBLSValidator(addr1, testBLSPubKey1), - NewBLSValidator(addr2, testBLSPubKey2), - ), - removeValidator: NewBLSValidator(addr2, testBLSPubKey2), - expectedErr: nil, - expectedValidators: NewBLSValidatorSet( - NewBLSValidator(addr1, testBLSPubKey1), - ), - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.ErrorIs( - t, - test.expectedErr, - test.validators.Del(test.removeValidator), - ) - - assert.Equal( - t, - test.expectedValidators, - test.validators, - ) - }) - } -} - -func TestSetMerge(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - validators1 Validators - validators2 Validators - expectedErr error - expectedValidators Validators - }{ - { - name: "should return error in case of type mismatch", - validators1: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - ), - validators2: NewBLSValidatorSet( - NewBLSValidator(addr1, testBLSPubKey1), - ), - expectedErr: ErrMismatchValidatorsType, - expectedValidators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - ), - }, - { - name: "should merge 2 ECDSAValidators", - validators1: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - ), - validators2: NewECDSAValidatorSet( - NewECDSAValidator(addr2), - ), - expectedErr: nil, - expectedValidators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - }, - { - name: "should merge BLS Validator", - validators1: NewBLSValidatorSet( - NewBLSValidator(addr1, testBLSPubKey1), - ), - validators2: NewBLSValidatorSet( - NewBLSValidator(addr2, testBLSPubKey2), - ), - expectedErr: nil, - expectedValidators: NewBLSValidatorSet( - NewBLSValidator(addr1, testBLSPubKey1), - NewBLSValidator(addr2, testBLSPubKey2), - ), - }, - { - name: "should merge 2 ECDSAValidators but ignore the validators that already exists in set1", - validators1: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - ), - validators2: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - expectedErr: nil, - expectedValidators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.ErrorIs( - t, - test.expectedErr, - test.validators1.Merge(test.validators2), - ) - - assert.Equal( - t, - test.expectedValidators, - test.validators1, - ) - }) - } -} - -func TestSetMarshalAndUnmarshal(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - validators Validators - }{ - { - name: "ECDSAValidators", - validators: NewECDSAValidatorSet( - NewECDSAValidator(addr1), - NewECDSAValidator(addr2), - ), - }, - { - name: "BLSValidators", - validators: NewBLSValidatorSet( - NewBLSValidator(addr1, 
testBLSPubKey1), - NewBLSValidator(addr2, testBLSPubKey2), - ), - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - encoded := types.MarshalRLPTo(test.validators.MarshalRLPWith, nil) - - validator2 := &Set{ - ValidatorType: test.validators.Type(), - Validators: []Validator{}, - } - - assert.NoError( - t, - types.UnmarshalRlp(validator2.UnmarshalRLPFrom, encoded), - ) - - assert.Equal( - t, - test.validators, - validator2, - ) - }) - } -} diff --git a/validators/store/contract/contract.go b/validators/store/contract/contract.go deleted file mode 100644 index dbfa7cfa63..0000000000 --- a/validators/store/contract/contract.go +++ /dev/null @@ -1,127 +0,0 @@ -package contract - -import ( - "errors" - "fmt" - - "github.com/0xPolygon/polygon-edge/state" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/0xPolygon/polygon-edge/validators/store" - "github.com/hashicorp/go-hclog" - lru "github.com/hashicorp/golang-lru" -) - -const ( - // How many validator sets are stored in the cache - // Cache 3 validator sets for 3 epochs - DefaultValidatorSetCacheSize = 3 -) - -var ( - ErrSignerNotFound = errors.New("signer not found") - ErrInvalidValidatorsTypeAssertion = errors.New("invalid type assertion for Validators") -) - -type ContractValidatorStore struct { - logger hclog.Logger - blockchain store.HeaderGetter - executor Executor - - // LRU cache for the validators - validatorSetCache *lru.Cache -} - -type Executor interface { - BeginTxn(types.Hash, *types.Header, types.Address) (*state.Transition, error) -} - -func NewContractValidatorStore( - logger hclog.Logger, - blockchain store.HeaderGetter, - executor Executor, - validatorSetCacheSize int, -) (*ContractValidatorStore, error) { - var ( - validatorsCache *lru.Cache - err error - ) - - if validatorSetCacheSize > 0 { - if validatorsCache, err = lru.New(validatorSetCacheSize); err != nil { - return nil, fmt.Errorf("unable to create validator set cache, %w", err) - } - } - - return &ContractValidatorStore{ - logger: logger, - blockchain: blockchain, - executor: executor, - validatorSetCache: validatorsCache, - }, nil -} - -func (s *ContractValidatorStore) SourceType() store.SourceType { - return store.Contract -} - -func (s *ContractValidatorStore) GetValidatorsByHeight( - validatorType validators.ValidatorType, - height uint64, -) (validators.Validators, error) { - cachedValidators, err := s.loadCachedValidatorSet(height) - if err != nil { - return nil, err - } - - if cachedValidators != nil { - return cachedValidators, nil - } - - transition, err := s.getTransitionForQuery(height) - if err != nil { - return nil, err - } - - fetchedValidators, err := FetchValidators(validatorType, transition, types.ZeroAddress) - if err != nil { - return nil, err - } - - s.saveToValidatorSetCache(height, fetchedValidators) - - return fetchedValidators, nil -} - -func (s *ContractValidatorStore) getTransitionForQuery(height uint64) (*state.Transition, error) { - header, ok := s.blockchain.GetHeaderByNumber(height) - if !ok { - return nil, fmt.Errorf("header not found at %d", height) - } - - return s.executor.BeginTxn(header.StateRoot, header, types.ZeroAddress) -} - -// loadCachedValidatorSet loads validators from validatorSetCache -func (s *ContractValidatorStore) loadCachedValidatorSet(height uint64) (validators.Validators, error) { - cachedRawValidators, ok := s.validatorSetCache.Get(height) - if !ok { - return nil, nil - } - - validators, ok := 
cachedRawValidators.(validators.Validators) - if !ok { - return nil, ErrInvalidValidatorsTypeAssertion - } - - return validators, nil -} - -// saveToValidatorSetCache saves validators to validatorSetCache -func (s *ContractValidatorStore) saveToValidatorSetCache(height uint64, validators validators.Validators) bool { - if s.validatorSetCache == nil { - return false - } - - return s.validatorSetCache.Add(height, validators) -} diff --git a/validators/store/contract/contract_test.go b/validators/store/contract/contract_test.go deleted file mode 100644 index d0a7f72e50..0000000000 --- a/validators/store/contract/contract_test.go +++ /dev/null @@ -1,563 +0,0 @@ -package contract - -import ( - "errors" - "testing" - - "github.com/hashicorp/go-hclog" - lru "github.com/hashicorp/golang-lru" - "github.com/stretchr/testify/assert" - - "github.com/0xPolygon/polygon-edge/chain" - "github.com/0xPolygon/polygon-edge/contracts/staking" - "github.com/0xPolygon/polygon-edge/crypto" - stakingHelper "github.com/0xPolygon/polygon-edge/helper/staking" - testHelper "github.com/0xPolygon/polygon-edge/helper/tests" - "github.com/0xPolygon/polygon-edge/state" - itrie "github.com/0xPolygon/polygon-edge/state/immutable-trie" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/0xPolygon/polygon-edge/validators/store" -) - -var ( - addr1 = types.StringToAddress("1") - addr2 = types.StringToAddress("2") - - testBLSPubKey1 = newTestBLSKeyBytes() - testBLSPubKey2 = newTestBLSKeyBytes() - - testPredeployParams = stakingHelper.PredeployParams{ - MinValidatorCount: 0, - MaxValidatorCount: 10, - } - testBlockGasLimit uint64 = 10000000 -) - -func newTestBLSKeyBytes() validators.BLSValidatorPublicKey { - key, err := crypto.GenerateBLSKey() - if err != nil { - return nil - } - - pubKey, err := key.GetPublicKey() - if err != nil { - return nil - } - - buf, err := pubKey.MarshalBinary() - if err != nil { - return nil - } - - return buf -} - -func newTestCache(t *testing.T, size int) *lru.Cache { - t.Helper() - - cache, err := lru.New(size) - assert.NoError(t, err) - - return cache -} - -type mockExecutor struct { - BeginTxnFn func(types.Hash, *types.Header, types.Address) (*state.Transition, error) -} - -func (m *mockExecutor) BeginTxn( - hash types.Hash, - header *types.Header, - address types.Address, -) (*state.Transition, error) { - return m.BeginTxnFn(hash, header, address) -} - -func newTestTransition( - t *testing.T, -) *state.Transition { - t.Helper() - - st := itrie.NewState(itrie.NewMemoryStorage()) - - ex := state.NewExecutor(&chain.Params{ - Forks: chain.AllForksEnabled, - BurnContract: map[uint64]types.Address{ - 0: types.ZeroAddress, - }, - }, st, hclog.NewNullLogger()) - - rootHash, err := ex.WriteGenesis(nil, types.Hash{}) - assert.NoError(t, err) - - ex.GetHash = func(h *types.Header) state.GetHashByNumber { - return func(i uint64) types.Hash { - return rootHash - } - } - - transition, err := ex.BeginTxn( - rootHash, - &types.Header{ - // Set enough block gas limit for query - GasLimit: testBlockGasLimit, - }, - types.ZeroAddress, - ) - assert.NoError(t, err) - - return transition -} - -func newTestTransitionWithPredeployedStakingContract( - t *testing.T, - validators validators.Validators, -) *state.Transition { - t.Helper() - - transition := newTestTransition(t) - - contractState, err := stakingHelper.PredeployStakingSC( - validators, - testPredeployParams, - ) - - assert.NoError(t, err) - - assert.NoError( - t, - 
transition.SetAccountDirectly(staking.AddrStakingContract, contractState), - ) - - return transition -} - -func newTestContractValidatorStore( - t *testing.T, - blockchain store.HeaderGetter, - executor Executor, - cacheSize int, -) *ContractValidatorStore { - t.Helper() - - var cache *lru.Cache - if cacheSize > 0 { - cache = newTestCache(t, cacheSize) - } - - return &ContractValidatorStore{ - logger: hclog.NewNullLogger(), - blockchain: blockchain, - executor: executor, - validatorSetCache: cache, - } -} - -func TestNewContractValidatorStore(t *testing.T) { - t.Parallel() - - var ( - logger = hclog.NewNullLogger() - blockchain = store.HeaderGetter( - &store.MockBlockchain{}, - ) - executor = Executor( - &mockExecutor{}, - ) - ) - - tests := []struct { - name string - cacheSize int - expectedRes *ContractValidatorStore - expectedErr error - }{ - { - name: "should return store", - cacheSize: 1, - expectedRes: &ContractValidatorStore{ - logger: logger, - blockchain: blockchain, - executor: executor, - validatorSetCache: newTestCache(t, 1), - }, - expectedErr: nil, - }, - { - name: "should return store without cache if cache size is zero", - cacheSize: 0, - expectedRes: &ContractValidatorStore{ - logger: logger, - blockchain: blockchain, - executor: executor, - }, - expectedErr: nil, - }, - { - name: "should return store without cache if cache size is negative", - cacheSize: -1, - expectedRes: &ContractValidatorStore{ - logger: logger, - blockchain: blockchain, - executor: executor, - }, - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := NewContractValidatorStore( - logger, - blockchain, - executor, - test.cacheSize, - ) - - assert.Equal(t, test.expectedRes, res) - testHelper.AssertErrorMessageContains(t, test.expectedErr, err) - }) - } -} - -func TestContractValidatorStoreSourceType(t *testing.T) { - t.Parallel() - - s := &ContractValidatorStore{} - - assert.Equal(t, store.Contract, s.SourceType()) -} - -func TestContractValidatorStoreGetValidators(t *testing.T) { - t.Parallel() - - var ( - stateRoot = types.StringToHash("1") - header = &types.Header{ - StateRoot: stateRoot, - } - - ecdsaValidators = validators.NewECDSAValidatorSet( - validators.NewECDSAValidator(addr1), - validators.NewECDSAValidator(addr2), - ) - - blsValidators = validators.NewBLSValidatorSet( - validators.NewBLSValidator(addr1, testBLSPubKey1), - validators.NewBLSValidator(addr2, testBLSPubKey2), - ) - - transitionForECDSAValidators = newTestTransitionWithPredeployedStakingContract( - t, - ecdsaValidators, - ) - - transitionForBLSValidators = newTestTransitionWithPredeployedStakingContract( - t, - blsValidators, - ) - ) - - tests := []struct { - name string - blockchain store.HeaderGetter - executor Executor - cacheSize int - initialCaches map[uint64]interface{} - - // input - validatorType validators.ValidatorType - height uint64 - - // output - expectedRes validators.Validators - expectedErr error - // caches after calling GetValidators - finalCaches map[uint64]interface{} - }{ - { - name: "should return error when loadCachedValidatorSet failed", - blockchain: nil, - executor: nil, - cacheSize: 1, - initialCaches: map[uint64]interface{}{ - 0: string("fake"), - }, - height: 0, - expectedRes: nil, - expectedErr: ErrInvalidValidatorsTypeAssertion, - finalCaches: map[uint64]interface{}{ - 0: string("fake"), - }, - }, - { - name: "should return validators if cache exists", - blockchain: nil, - executor: nil, - cacheSize: 1, - 
initialCaches: map[uint64]interface{}{ - 0: validators.NewECDSAValidatorSet( - validators.NewECDSAValidator(addr1), - ), - }, - height: 0, - expectedRes: validators.NewECDSAValidatorSet( - validators.NewECDSAValidator(addr1), - ), - expectedErr: nil, - finalCaches: map[uint64]interface{}{ - 0: validators.NewECDSAValidatorSet( - validators.NewECDSAValidator(addr1), - ), - }, - }, - { - name: "should return error if header not found", - blockchain: &store.MockBlockchain{ - GetHeaderByNumberFn: func(height uint64) (*types.Header, bool) { - assert.Equal(t, uint64(1), height) - - return nil, false - }, - }, - executor: nil, - cacheSize: 1, - initialCaches: map[uint64]interface{}{}, - height: 1, - expectedRes: nil, - expectedErr: errors.New("header not found at 1"), - finalCaches: map[uint64]interface{}{}, - }, - { - name: "should return error if FetchValidators failed", - blockchain: &store.MockBlockchain{ - GetHeaderByNumberFn: func(height uint64) (*types.Header, bool) { - assert.Equal(t, uint64(1), height) - - return header, true - }, - }, - executor: &mockExecutor{ - BeginTxnFn: func(hash types.Hash, head *types.Header, addr types.Address) (*state.Transition, error) { - assert.Equal(t, stateRoot, hash) - assert.Equal(t, header, head) - assert.Equal(t, types.ZeroAddress, addr) - - return transitionForECDSAValidators, nil - }, - }, - cacheSize: 1, - initialCaches: map[uint64]interface{}{}, - validatorType: validators.ValidatorType("fake"), - height: 1, - expectedRes: nil, - expectedErr: errors.New("unsupported validator type: fake"), - finalCaches: map[uint64]interface{}{}, - }, - { - name: "should return fetched ECDSA validators", - blockchain: &store.MockBlockchain{ - GetHeaderByNumberFn: func(height uint64) (*types.Header, bool) { - assert.Equal(t, uint64(1), height) - - return header, true - }, - }, - executor: &mockExecutor{ - BeginTxnFn: func(hash types.Hash, head *types.Header, addr types.Address) (*state.Transition, error) { - assert.Equal(t, stateRoot, hash) - assert.Equal(t, header, head) - assert.Equal(t, types.ZeroAddress, addr) - - return transitionForECDSAValidators, nil - }, - }, - cacheSize: 1, - initialCaches: map[uint64]interface{}{}, - validatorType: validators.ECDSAValidatorType, - height: 1, - expectedRes: ecdsaValidators, - expectedErr: nil, - finalCaches: map[uint64]interface{}{ - 1: ecdsaValidators, - }, - }, - { - name: "should return fetched BLS validators", - blockchain: &store.MockBlockchain{ - GetHeaderByNumberFn: func(height uint64) (*types.Header, bool) { - assert.Equal(t, uint64(1), height) - - return header, true - }, - }, - executor: &mockExecutor{ - BeginTxnFn: func(hash types.Hash, head *types.Header, addr types.Address) (*state.Transition, error) { - assert.Equal(t, stateRoot, hash) - assert.Equal(t, header, head) - assert.Equal(t, types.ZeroAddress, addr) - - return transitionForBLSValidators, nil - }, - }, - cacheSize: 1, - initialCaches: map[uint64]interface{}{}, - validatorType: validators.BLSValidatorType, - height: 1, - expectedRes: blsValidators, - expectedErr: nil, - finalCaches: map[uint64]interface{}{ - 1: blsValidators, - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - store := newTestContractValidatorStore( - t, - test.blockchain, - test.executor, - test.cacheSize, - ) - - for height, data := range test.initialCaches { - store.validatorSetCache.Add(height, data) - } - - res, err := store.GetValidatorsByHeight(test.validatorType, test.height) - - assert.Equal(t, 
test.expectedRes, res) - testHelper.AssertErrorMessageContains(t, test.expectedErr, err) - - // check cache - assert.Equal(t, len(test.finalCaches), store.validatorSetCache.Len()) - - for height, expected := range test.finalCaches { - cache, ok := store.validatorSetCache.Get(height) - - assert.True(t, ok) - assert.Equal(t, expected, cache) - } - }) - } -} - -func TestContractValidatorStore_CacheChange(t *testing.T) { - var ( - cacheSize = 2 - - store = newTestContractValidatorStore( - t, - nil, - nil, - cacheSize, - ) - - ecdsaValidators1 = validators.NewECDSAValidatorSet( - validators.NewECDSAValidator(addr1), - ) - - ecdsaValidators2 = validators.NewECDSAValidatorSet( - validators.NewECDSAValidator(addr1), - validators.NewECDSAValidator(addr2), - ) - - blsValidators = validators.NewBLSValidatorSet( - validators.NewBLSValidator(addr1, testBLSPubKey1), - validators.NewBLSValidator(addr2, testBLSPubKey2), - ) - ) - - type testCase struct { - height uint64 - expected validators.Validators - } - - testCache := func(t *testing.T, testCases ...testCase) { - t.Helper() - - assert.Equal(t, len(testCases), store.validatorSetCache.Len()) - - for _, testCase := range testCases { - cache, ok := store.validatorSetCache.Get(testCase.height) - - assert.Truef(t, ok, "validators at %d must exist, but not found", testCase.height) - assert.Equal(t, testCase.expected, cache) - } - } - - // initial cache is empty - testCache(t) - - // overflow doesn't occur - assert.False( - t, - store.saveToValidatorSetCache(0, ecdsaValidators1), - ) - - testCache( - t, - testCase{height: 0, expected: ecdsaValidators1}, - ) - - assert.False( - t, - store.saveToValidatorSetCache(1, ecdsaValidators2), - ) - - testCache( - t, - testCase{height: 0, expected: ecdsaValidators1}, - testCase{height: 1, expected: ecdsaValidators2}, - ) - - // make sure ecdsaValidators2 is loaded at the end for LRU cache - store.validatorSetCache.Get(1) - - // overflow occurs and one validator set is removed - assert.True( - t, - store.saveToValidatorSetCache(2, blsValidators), - ) - - testCache( - t, - testCase{height: 1, expected: ecdsaValidators2}, - testCase{height: 2, expected: blsValidators}, - ) -} - -func TestContractValidatorStore_NoCache(t *testing.T) { - t.Parallel() - - var ( - store = newTestContractValidatorStore( - t, - nil, - nil, - 0, - ) - - ecdsaValidators1 = validators.NewECDSAValidatorSet( - validators.NewECDSAValidator(addr1), - ) - ) - - // nothing happens because cache is nil - assert.False( - t, - store.saveToValidatorSetCache(0, ecdsaValidators1), - ) - - assert.Nil(t, store.validatorSetCache) -} diff --git a/validators/store/contract/fetcher.go b/validators/store/contract/fetcher.go deleted file mode 100644 index 2582cc5a2a..0000000000 --- a/validators/store/contract/fetcher.go +++ /dev/null @@ -1,83 +0,0 @@ -package contract - -import ( - "fmt" - - "github.com/0xPolygon/polygon-edge/contracts/staking" - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/state" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" -) - -// FetchValidators fetches validators from a contract switched by validator type -func FetchValidators( - validatorType validators.ValidatorType, - transition *state.Transition, - from types.Address, -) (validators.Validators, error) { - switch validatorType { - case validators.ECDSAValidatorType: - return FetchECDSAValidators(transition, from) - case validators.BLSValidatorType: - return FetchBLSValidators(transition, from) - } - - return 
nil, fmt.Errorf("unsupported validator type: %s", validatorType) -} - -// FetchECDSAValidators queries a contract for validator addresses and returns ECDSAValidators -func FetchECDSAValidators( - transition *state.Transition, - from types.Address, -) (validators.Validators, error) { - valAddrs, err := staking.QueryValidators(transition, from) - if err != nil { - return nil, err - } - - ecdsaValidators := validators.NewECDSAValidatorSet() - for _, addr := range valAddrs { - if err := ecdsaValidators.Add(validators.NewECDSAValidator(addr)); err != nil { - return nil, err - } - } - - return ecdsaValidators, nil -} - -// FetchBLSValidators queries a contract for validator addresses & BLS Public Keys and returns ECDSAValidators -func FetchBLSValidators( - transition *state.Transition, - from types.Address, -) (validators.Validators, error) { - valAddrs, err := staking.QueryValidators(transition, from) - if err != nil { - return nil, err - } - - blsPublicKeys, err := staking.QueryBLSPublicKeys(transition, from) - if err != nil { - return nil, err - } - - blsValidators := validators.NewBLSValidatorSet() - - for idx := range valAddrs { - // ignore the validator whose BLS Key is not set - // because BLS validator needs to have both Address and BLS Public Key set - // in the contract - if _, err := crypto.UnmarshalBLSPublicKey(blsPublicKeys[idx]); err != nil { - continue - } - - if err := blsValidators.Add(validators.NewBLSValidator( - valAddrs[idx], - blsPublicKeys[idx], - )); err != nil { - return nil, err - } - } - - return blsValidators, nil -} diff --git a/validators/store/contract/fetcher_test.go b/validators/store/contract/fetcher_test.go deleted file mode 100644 index 97e4524065..0000000000 --- a/validators/store/contract/fetcher_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package contract - -import ( - "errors" - "fmt" - "testing" - - testHelper "github.com/0xPolygon/polygon-edge/helper/tests" - "github.com/0xPolygon/polygon-edge/state" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/stretchr/testify/assert" -) - -func TestFetchValidators(t *testing.T) { - t.Parallel() - - // only check error handling because of the duplicated tests below - fakeValidatorType := validators.ValidatorType("fake") - res, err := FetchValidators( - fakeValidatorType, - nil, - types.ZeroAddress, - ) - - assert.Nil(t, res) - assert.ErrorContains(t, err, fmt.Sprintf("unsupported validator type: %s", fakeValidatorType)) -} - -func TestFetchECDSAValidators(t *testing.T) { - t.Parallel() - - var ( - ecdsaValidators = validators.NewECDSAValidatorSet( - validators.NewECDSAValidator(addr1), - validators.NewECDSAValidator(addr2), - ) - ) - - tests := []struct { - name string - transition *state.Transition - from types.Address - expectedRes validators.Validators - expectedErr error - }{ - { - name: "should return error if QueryValidators failed", - transition: newTestTransition( - t, - ), - from: types.ZeroAddress, - expectedRes: nil, - expectedErr: errors.New("empty input"), - }, - { - name: "should return ECDSA Validators", - transition: newTestTransitionWithPredeployedStakingContract( - t, - ecdsaValidators, - ), - from: types.ZeroAddress, - expectedRes: ecdsaValidators, - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := FetchValidators( - validators.ECDSAValidatorType, - test.transition, - test.from, - ) - - assert.Equal(t, test.expectedRes, res) - 
testHelper.AssertErrorMessageContains(t, test.expectedErr, err) - }) - } -} - -func TestFetchBLSValidators(t *testing.T) { - t.Parallel() - - var ( - blsValidators = validators.NewBLSValidatorSet( - validators.NewBLSValidator(addr1, testBLSPubKey1), - validators.NewBLSValidator(addr2, []byte{}), // validator 2 has not set BLS Public Key - ) - ) - - tests := []struct { - name string - transition *state.Transition - from types.Address - expectedRes validators.Validators - expectedErr error - }{ - { - name: "should return error if QueryValidators failed", - transition: newTestTransition( - t, - ), - from: types.ZeroAddress, - expectedRes: nil, - expectedErr: errors.New("empty input"), - }, - { - name: "should return ECDSA Validators", - transition: newTestTransitionWithPredeployedStakingContract( - t, - blsValidators, - ), - from: types.ZeroAddress, - expectedRes: validators.NewBLSValidatorSet( - validators.NewBLSValidator(addr1, testBLSPubKey1), - ), - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := FetchValidators( - validators.BLSValidatorType, - test.transition, - test.from, - ) - - assert.Equal(t, test.expectedRes, res) - testHelper.AssertErrorMessageContains(t, test.expectedErr, err) - }) - } -} diff --git a/validators/store/snapshot/helper.go b/validators/store/snapshot/helper.go deleted file mode 100644 index ce2bda7d82..0000000000 --- a/validators/store/snapshot/helper.go +++ /dev/null @@ -1,47 +0,0 @@ -package snapshot - -import ( - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" -) - -// isAuthorize is a helper function to return the bool value from Nonce -func isAuthorize( - nonce types.Nonce, -) (bool, error) { - switch nonce { - case nonceAuthVote: - return true, nil - case nonceDropVote: - return false, nil - default: - return false, ErrIncorrectNonce - } -} - -// shouldProcessVote is a helper function to return -// the flag indicating whether vote should be processed or not -// based on vote action and validator set -func shouldProcessVote( - validators validators.Validators, - candidate types.Address, - voteAction bool, // true => add, false => remove -) bool { - // if vote action is... 
- // true => validator set expects not to have a candidate - // false => validator set expects to have a candidate - return voteAction != validators.Includes(candidate) -} - -// addsOrDelsCandidate is a helper function to add/remove candidate to/from validators -func addsOrDelsCandidate( - validators validators.Validators, - candidate validators.Validator, - updateAction bool, -) error { - if updateAction { - return validators.Add(candidate) - } else { - return validators.Del(candidate) - } -} diff --git a/validators/store/snapshot/helper_test.go b/validators/store/snapshot/helper_test.go deleted file mode 100644 index 3f90027a99..0000000000 --- a/validators/store/snapshot/helper_test.go +++ /dev/null @@ -1,309 +0,0 @@ -package snapshot - -import ( - "testing" - - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/stretchr/testify/assert" -) - -var ( - addr1 = types.StringToAddress("1") - addr2 = types.StringToAddress("2") - addr3 = types.StringToAddress("3") - testBLSPubKey1 = validators.BLSValidatorPublicKey([]byte("bls_pubkey1")) - testBLSPubKey2 = validators.BLSValidatorPublicKey([]byte("bls_pubkey2")) - testBLSPubKey3 = validators.BLSValidatorPublicKey([]byte("bls_pubkey3")) - - ecdsaValidator1 = validators.NewECDSAValidator(addr1) - ecdsaValidator2 = validators.NewECDSAValidator(addr2) - ecdsaValidator3 = validators.NewECDSAValidator(addr3) - blsValidator1 = validators.NewBLSValidator(addr1, testBLSPubKey1) - blsValidator2 = validators.NewBLSValidator(addr2, testBLSPubKey2) - blsValidator3 = validators.NewBLSValidator(addr3, testBLSPubKey3) -) - -func Test_isAuthorize(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - nonce types.Nonce - expectedFlag bool - expectedErr error - }{ - { - name: "nonceAuthVote", - nonce: nonceAuthVote, - expectedFlag: true, - expectedErr: nil, - }, - { - name: "nonceDropVote", - nonce: nonceDropVote, - expectedFlag: false, - expectedErr: nil, - }, - { - name: "invalid nonce", - nonce: types.Nonce{0x1}, - expectedFlag: false, - expectedErr: ErrIncorrectNonce, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := isAuthorize(test.nonce) - - assert.Equal(t, test.expectedFlag, res) - assert.ErrorIs(t, test.expectedErr, err) - }) - } -} - -func Test_shouldProcessVote(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - validators validators.Validators - candidate types.Address - voteAction bool - expected bool - }{ - // ECDSA - { - name: "ECDSA: vote for addition when the candidate isn't in validators", - validators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - candidate: ecdsaValidator2.Addr(), - voteAction: true, - expected: true, - }, - { - name: "ECDSA: vote for addition when the candidate is already in validators", - validators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - candidate: ecdsaValidator2.Addr(), - voteAction: true, - expected: false, - }, - { - name: "ECDSA: vote for deletion when the candidate is in validators", - validators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - candidate: ecdsaValidator2.Addr(), - voteAction: false, - expected: true, - }, - { - name: "ECDSA: vote for deletion when the candidate isn't in validators", - validators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - candidate: ecdsaValidator2.Addr(), - voteAction: false, - expected: false, - }, - // BLS - { - name: 
"BLS: vote for addition when the candidate isn't in validators", - validators: validators.NewBLSValidatorSet( - blsValidator1, - ), - candidate: blsValidator2.Addr(), - voteAction: true, - expected: true, - }, - { - name: "BLS: vote for addition when the candidate is already in validators", - validators: validators.NewBLSValidatorSet( - blsValidator1, - blsValidator2, - ), - candidate: blsValidator2.Addr(), - voteAction: true, - expected: false, - }, - { - name: "BLS: vote for deletion when the candidate is in validators", - validators: validators.NewBLSValidatorSet( - blsValidator1, - blsValidator2, - ), - candidate: blsValidator1.Addr(), - voteAction: false, - expected: true, - }, - { - name: "BLS: vote for deletion when the candidate isn't in validators", - validators: validators.NewBLSValidatorSet( - blsValidator1, - ), - candidate: blsValidator2.Addr(), - voteAction: false, - expected: false, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.expected, - shouldProcessVote(test.validators, test.candidate, test.voteAction), - ) - }) - } -} - -func Test_addsOrDelsCandidate(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - // inputs - validators validators.Validators - candidate validators.Validator - updateAction bool - // outputs - expectedErr error - newValidators validators.Validators - }{ - // ECDSA - { - name: "ECDSA: addition when the candidate isn't in validators", - validators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - candidate: ecdsaValidator2, - updateAction: true, - expectedErr: nil, - newValidators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - }, - { - name: "ECDSA: addition when the candidate is already in validators", - validators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - candidate: ecdsaValidator2, - updateAction: true, - expectedErr: validators.ErrValidatorAlreadyExists, - newValidators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - }, - { - name: "ECDSA: deletion when the candidate is in validators", - validators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - candidate: ecdsaValidator1, - updateAction: false, - expectedErr: nil, - newValidators: validators.NewECDSAValidatorSet( - ecdsaValidator2, - ), - }, - { - name: "ECDSA: deletion when the candidate isn't in validators", - validators: validators.NewECDSAValidatorSet( - ecdsaValidator2, - ), - candidate: ecdsaValidator1, - updateAction: false, - expectedErr: validators.ErrValidatorNotFound, - newValidators: validators.NewECDSAValidatorSet( - ecdsaValidator2, - ), - }, - // BLS - { - name: "BLS: addition when the candidate isn't in validators", - validators: validators.NewBLSValidatorSet( - blsValidator1, - ), - candidate: blsValidator2, - updateAction: true, - expectedErr: nil, - newValidators: validators.NewBLSValidatorSet( - blsValidator1, - blsValidator2, - ), - }, - { - name: "BLS: addition when the candidate is already in validators", - validators: validators.NewBLSValidatorSet( - blsValidator1, - blsValidator2, - ), - candidate: blsValidator2, - updateAction: true, - expectedErr: validators.ErrValidatorAlreadyExists, - newValidators: validators.NewBLSValidatorSet( - blsValidator1, - blsValidator2, - ), - }, - { - name: "BLS: deletion when the candidate is in validators", - validators: validators.NewBLSValidatorSet( - blsValidator1, - blsValidator2, - ), 
- candidate: blsValidator1, - updateAction: false, - expectedErr: nil, - newValidators: validators.NewBLSValidatorSet( - blsValidator2, - ), - }, - { - name: "BLS: deletion when the candidate is in validators", - validators: validators.NewBLSValidatorSet( - blsValidator2, - ), - candidate: blsValidator1, - updateAction: false, - expectedErr: validators.ErrValidatorNotFound, - newValidators: validators.NewBLSValidatorSet( - blsValidator2, - ), - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - err := addsOrDelsCandidate( - test.validators, - test.candidate, - test.updateAction, - ) - - assert.ErrorIs(t, test.expectedErr, err) - assert.Equal(t, test.newValidators, test.validators) - }) - } -} diff --git a/validators/store/snapshot/snapshot.go b/validators/store/snapshot/snapshot.go deleted file mode 100644 index a16df59dd4..0000000000 --- a/validators/store/snapshot/snapshot.go +++ /dev/null @@ -1,631 +0,0 @@ -package snapshot - -import ( - "bytes" - "errors" - "fmt" - "sync" - - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/0xPolygon/polygon-edge/validators/store" - "github.com/hashicorp/go-hclog" -) - -const ( - loggerName = "snapshot_validator_set" - preservedEpochs = 2 -) - -// SignerInterface is an interface of the Signer SnapshotValidatorStore calls -type SignerInterface interface { - Type() validators.ValidatorType - EcrecoverFromHeader(*types.Header) (types.Address, error) - GetValidators(*types.Header) (validators.Validators, error) -} - -var ( - // Magic nonce number to vote on adding a new validator - nonceAuthVote = types.Nonce{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} - - // Magic nonce number to vote on removing a validator. 
- nonceDropVote = types.Nonce{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} -) - -var ( - ErrInvalidNonce = errors.New("invalid nonce specified") - ErrSnapshotNotFound = errors.New("not found snapshot") - ErrUnauthorizedProposer = errors.New("unauthorized proposer") - ErrIncorrectNonce = errors.New("incorrect vote nonce") - ErrAlreadyCandidate = errors.New("already a candidate") - ErrCandidateIsValidator = errors.New("the candidate is already a validator") - ErrCandidateNotExistInSet = errors.New("cannot remove a validator if they're not in the snapshot") - ErrAlreadyVoted = errors.New("already voted for this address") - ErrMultipleVotesBySameValidator = errors.New("more than one proposal per validator per address found") -) - -type SnapshotValidatorStore struct { - // interface - logger hclog.Logger - blockchain store.HeaderGetter - getSigner func(uint64) (SignerInterface, error) - - // configuration - epochSize uint64 - - // data - store *snapshotStore - candidates []*store.Candidate - candidatesLock sync.RWMutex -} - -// NewSnapshotValidatorStore creates and initializes *SnapshotValidatorStore -func NewSnapshotValidatorStore( - logger hclog.Logger, - blockchain store.HeaderGetter, - getSigner func(uint64) (SignerInterface, error), - epochSize uint64, - metadata *SnapshotMetadata, - snapshots []*Snapshot, -) (*SnapshotValidatorStore, error) { - set := &SnapshotValidatorStore{ - logger: logger.Named(loggerName), - store: newSnapshotStore(metadata, snapshots), - blockchain: blockchain, - getSigner: getSigner, - candidates: make([]*store.Candidate, 0), - candidatesLock: sync.RWMutex{}, - epochSize: epochSize, - } - - if err := set.initialize(); err != nil { - return nil, err - } - - return set, nil -} - -// initialize setup the snapshots to catch up latest header in blockchain -func (s *SnapshotValidatorStore) initialize() error { - header := s.blockchain.Header() - meta := s.GetSnapshotMetadata() - - if header.Number == 0 { - // Genesis header needs to be set by hand, all the other - // snapshots are set as part of processHeaders - if err := s.addHeaderSnap(header); err != nil { - return err - } - } - - // If the snapshot is not found, or the latest snapshot belongs to a previous epoch, - // we need to start rebuilding the snapshot from the beginning of the current epoch - // in order to have all the votes and validators correctly set in the snapshot, - // since they reset every epoch. 
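// Editor's aside: an illustrative sketch, not part of the deleted source above or
// below. The epoch bookkeeping that the surrounding initialize logic relies on is
// plain integer division: a header's epoch is header.Number / epochSize, and the
// snapshot is rebuilt from that epoch's first block. The helper name epochStart is
// hypothetical and only mirrors the currentEpoch / beginHeight calculation that
// follows; it assumes nothing beyond the standard integer semantics of Go.
//
// epochStart returns the first block height of the epoch containing height,
// e.g. with epochSize = 10: epochStart(27, 10) == 20 and epochStart(20, 10) == 20.
func epochStart(height, epochSize uint64) uint64 {
	// integer division truncates toward zero, so multiplying back yields
	// the epoch's opening block height
	return (height / epochSize) * epochSize
}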
- - // Get epoch of latest header and saved metadata - var ( - currentEpoch = header.Number / s.epochSize - metaEpoch = meta.LastBlock / s.epochSize - - snapshot = s.getSnapshot(header.Number) - ) - - if snapshot == nil || metaEpoch < currentEpoch { - // Restore snapshot at the beginning of the current epoch by block header - // if list doesn't have any snapshots to calculate snapshot for the next header - s.logger.Info("snapshot was not found, restore snapshot at beginning of current epoch", "current epoch", currentEpoch) - beginHeight := currentEpoch * s.epochSize - - beginHeader, ok := s.blockchain.GetHeaderByNumber(beginHeight) - if !ok { - return fmt.Errorf("header at %d not found", beginHeight) - } - - if err := s.addHeaderSnap(beginHeader); err != nil { - return err - } - - s.store.updateLastBlock(beginHeight) - - meta = s.GetSnapshotMetadata() - } - - // Process headers if we missed some blocks in the current epoch - if header.Number > meta.LastBlock { - s.logger.Info("syncing past snapshots", "from", meta.LastBlock, "to", header.Number) - - if err := s.ProcessHeadersInRange(meta.LastBlock+1, header.Number); err != nil { - return err - } - } - - return nil -} - -// SourceType returns validator store type -func (s *SnapshotValidatorStore) SourceType() store.SourceType { - return store.Snapshot -} - -// GetSnapshotMetadata returns metadata -func (s *SnapshotValidatorStore) GetSnapshotMetadata() *SnapshotMetadata { - return &SnapshotMetadata{ - LastBlock: s.store.getLastBlock(), - } -} - -// GetSnapshots returns all Snapshots -func (s *SnapshotValidatorStore) GetSnapshots() []*Snapshot { - return s.store.list -} - -// Candidates returns the current candidates -func (s *SnapshotValidatorStore) Candidates() []*store.Candidate { - return s.candidates -} - -// GetValidators returns the validator set in the Snapshot for the given height -func (s *SnapshotValidatorStore) GetValidatorsByHeight(height uint64) (validators.Validators, error) { - snapshot := s.getSnapshot(height) - if snapshot == nil { - return nil, ErrSnapshotNotFound - } - - return snapshot.Set, nil -} - -// Votes returns the votes in the snapshot at the specified height -func (s *SnapshotValidatorStore) Votes(height uint64) ([]*store.Vote, error) { - snapshot := s.getSnapshot(height) - if snapshot == nil { - return nil, ErrSnapshotNotFound - } - - return snapshot.Votes, nil -} - -// UpdateValidatorSet resets Snapshot with given validators at specified height -func (s *SnapshotValidatorStore) UpdateValidatorSet( - // new validators to be overwritten - newValidators validators.Validators, - // the height from which new validators are used - fromHeight uint64, -) error { - snapshotHeight := fromHeight - 1 - - header, ok := s.blockchain.GetHeaderByNumber(snapshotHeight) - if !ok { - return fmt.Errorf("header at %d not found", snapshotHeight) - } - - s.store.putByNumber(&Snapshot{ - Number: header.Number, - Hash: header.Hash.String(), - // reset validators & votes - Set: newValidators, - Votes: []*store.Vote{}, - }) - - return nil -} - -// ModifyHeader updates Header to vote -func (s *SnapshotValidatorStore) ModifyHeader(header *types.Header, proposer types.Address) error { - snapshot := s.getSnapshot(header.Number) - if snapshot == nil { - return ErrSnapshotNotFound - } - - if candidate := s.getNextCandidate(snapshot, proposer); candidate != nil { - var err error - - header.Miner, err = validatorToMiner(candidate.Validator) - if err != nil { - return err - } - - if candidate.Authorize { - header.Nonce = nonceAuthVote - } else { - 
header.Nonce = nonceDropVote - } - } - - return nil -} - -// VerifyHeader verifies the fields of Header which are modified in ModifyHeader -func (s *SnapshotValidatorStore) VerifyHeader(header *types.Header) error { - // Check the nonce format. - // The nonce field must have either an AUTH or DROP vote value. - // Block nonce values are not taken into account when the Miner field is set to zeroes, indicating - // no vote casting is taking place within a block - if header.Nonce != nonceAuthVote && header.Nonce != nonceDropVote { - return ErrInvalidNonce - } - - return nil -} - -// ProcessHeadersInRange is a helper function process headers in the given range -func (s *SnapshotValidatorStore) ProcessHeadersInRange( - from, to uint64, -) error { - for i := from; i <= to; i++ { - if i == 0 { - continue - } - - header, ok := s.blockchain.GetHeaderByNumber(i) - if !ok { - return fmt.Errorf("header %d not found", i) - } - - if err := s.ProcessHeader(header); err != nil { - return err - } - } - - return nil -} - -// ProcessHeader processes the header and updates snapshots -func (s *SnapshotValidatorStore) ProcessHeader( - header *types.Header, -) error { - signer, err := s.getSigner(header.Number) - if err != nil { - return err - } - - if signer == nil { - return fmt.Errorf("signer not found at %d", header.Number) - } - - proposer, err := signer.EcrecoverFromHeader(header) - if err != nil { - return err - } - - parentSnap := s.getSnapshot(header.Number - 1) - if parentSnap == nil { - return ErrSnapshotNotFound - } - - if !parentSnap.Set.Includes(proposer) { - return ErrUnauthorizedProposer - } - - snap := parentSnap.Copy() - - // Reset votes when new epoch - if header.Number%s.epochSize == 0 { - s.resetSnapshot(parentSnap, snap, header) - s.removeLowerSnapshots(header.Number) - s.store.updateLastBlock(header.Number) - - return nil - } - - // no vote if miner field is not set - if bytes.Equal(header.Miner, types.ZeroAddress[:]) { - s.store.updateLastBlock(header.Number) - - return nil - } - - // Process votes in the middle of epoch - if err := processVote(snap, header, signer.Type(), proposer); err != nil { - return err - } - - s.store.updateLastBlock(header.Number) - s.saveSnapshotIfChanged(parentSnap, snap, header) - - return nil -} - -// Propose adds new candidate for vote -func (s *SnapshotValidatorStore) Propose(candidate validators.Validator, auth bool, proposer types.Address) error { - s.candidatesLock.Lock() - defer s.candidatesLock.Unlock() - - candidateAddr := candidate.Addr() - - for _, c := range s.candidates { - if c.Validator.Addr() == candidateAddr { - return ErrAlreadyCandidate - } - } - - snap := s.getLatestSnapshot() - if snap == nil { - return ErrSnapshotNotFound - } - - included := snap.Set.Includes(candidateAddr) - - // safe checks - if auth && included { - return ErrCandidateIsValidator - } else if !auth && !included { - return ErrCandidateNotExistInSet - } - - // check if we have already voted for this candidate - count := snap.CountByVoterAndCandidate(proposer, candidate) - if count == 1 { - return ErrAlreadyVoted - } - - return s.addCandidate( - snap.Set, - candidate, - auth, - ) -} - -// AddCandidate adds new candidate to candidate list -// unsafe against concurrent access -func (s *SnapshotValidatorStore) addCandidate( - validators validators.Validators, - candidate validators.Validator, - authrorize bool, -) error { - if authrorize { - s.candidates = append(s.candidates, &store.Candidate{ - Validator: candidate, - Authorize: authrorize, - }) - - return nil - } - - // Get 
candidate validator information from set - // because don't want user to specify data except for address - // in case of removal - validatorIndex := validators.Index(candidate.Addr()) - if validatorIndex == -1 { - return ErrCandidateNotExistInSet - } - - s.candidates = append(s.candidates, &store.Candidate{ - Validator: validators.At(uint64(validatorIndex)), - Authorize: authrorize, - }) - - return nil -} - -// addHeaderSnap creates the initial snapshot, and adds it to the snapshot store -func (s *SnapshotValidatorStore) addHeaderSnap(header *types.Header) error { - signer, err := s.getSigner(header.Number) - if err != nil { - return err - } - - if signer == nil { - return fmt.Errorf("signer not found %d", header.Number) - } - - validators, err := signer.GetValidators(header) - if err != nil { - return err - } - - // Create the first snapshot from the genesis - s.store.add(&Snapshot{ - Hash: header.Hash.String(), - Number: header.Number, - Votes: []*store.Vote{}, - Set: validators, - }) - - return nil -} - -// getSnapshot returns a snapshot for specified height -func (s *SnapshotValidatorStore) getSnapshot(height uint64) *Snapshot { - return s.store.find(height) -} - -// getLatestSnapshot returns a snapshot for latest height -func (s *SnapshotValidatorStore) getLatestSnapshot() *Snapshot { - return s.getSnapshot(s.store.lastNumber) -} - -// getNextCandidate returns a possible candidate from candidates list -func (s *SnapshotValidatorStore) getNextCandidate( - snap *Snapshot, - proposer types.Address, -) *store.Candidate { - s.candidatesLock.Lock() - defer s.candidatesLock.Unlock() - - // first, we need to remove any candidates that have already been - // selected as validators - s.cleanObsoleteCandidates(snap.Set) - - // now pick the first candidate that has not received a vote yet - return s.pickOneCandidate(snap, proposer) -} - -// cleanObsolateCandidates removes useless candidates from candidates field -// Unsafe against concurrent accesses -func (s *SnapshotValidatorStore) cleanObsoleteCandidates(set validators.Validators) { - newCandidates := make([]*store.Candidate, 0, len(s.candidates)) - - for _, candidate := range s.candidates { - // If Authorize is - // true => Candidate needs to be in Set - // false => Candidate needs not to be in Set - // if the current situetion is not so, it's still a candidate - if candidate.Authorize != set.Includes(candidate.Validator.Addr()) { - newCandidates = append(newCandidates, candidate) - } - } - - s.candidates = newCandidates -} - -// pickOneCandidate returns a proposer candidate from candidates field -// Unsafe against concurrent accesses -func (s *SnapshotValidatorStore) pickOneCandidate( - snap *Snapshot, - proposer types.Address, -) *store.Candidate { - for _, c := range s.candidates { - addr := c.Validator.Addr() - - count := snap.Count(func(v *store.Vote) bool { - return v.Candidate.Addr() == addr && v.Validator == proposer - }) - - if count == 0 { - return c - } - } - - return nil -} - -// saveSnapshotIfChanged is a helper method to save snapshot updated by the given header -// only if the snapshot is updated from parent snapshot -func (s *SnapshotValidatorStore) saveSnapshotIfChanged( - parentSnapshot, snapshot *Snapshot, - header *types.Header, -) { - if snapshot.Equal(parentSnapshot) { - return - } - - snapshot.Number = header.Number - snapshot.Hash = header.Hash.String() - - s.store.add(snapshot) -} - -// resetSnapshot is a helper method to save a snapshot that clears votes -func (s *SnapshotValidatorStore) resetSnapshot( - 
parentSnapshot, snapshot *Snapshot, - header *types.Header, -) { - snapshot.Votes = nil - - s.saveSnapshotIfChanged(parentSnapshot, snapshot, header) -} - -// removeLowerSnapshots is a helper function to removes old snapshots -func (s *SnapshotValidatorStore) removeLowerSnapshots( - currentHeight uint64, -) { - currentEpoch := currentHeight / s.epochSize - if currentEpoch < preservedEpochs { - return - } - - // remove in-memory snapshots from two epochs before this one - lowerEpoch := currentEpoch - preservedEpochs - purgeBlock := lowerEpoch * s.epochSize - s.store.deleteLower(purgeBlock) -} - -// processVote processes vote in the given header and update snapshot -func processVote( - snapshot *Snapshot, - header *types.Header, - candidateType validators.ValidatorType, - proposer types.Address, -) error { - // the nonce selects the action - authorize, err := isAuthorize(header.Nonce) - if err != nil { - return err - } - - // parse candidate validator set from header.Miner - candidate, err := minerToValidator(candidateType, header.Miner) - if err != nil { - return err - } - - // if candidate has been processed as expected, just update last block - if !shouldProcessVote(snapshot.Set, candidate.Addr(), authorize) { - return nil - } - - voteCount := snapshot.CountByVoterAndCandidate(proposer, candidate) - if voteCount > 1 { - // there can only be one vote per validator per address - return ErrMultipleVotesBySameValidator - } - - if voteCount == 0 { - // cast the new vote since there is no one yet - snapshot.AddVote(proposer, candidate, authorize) - } - - // check the tally for the proposed validator - totalVotes := snapshot.CountByCandidate(candidate) - - // If more than a half of all validators voted - if totalVotes > snapshot.Set.Len()/2 { - if err := addsOrDelsCandidate( - snapshot.Set, - candidate, - authorize, - ); err != nil { - return err - } - - if !authorize { - // remove any votes casted by the removed validator - snapshot.RemoveVotesByVoter(candidate.Addr()) - } - - // remove all the votes that promoted this validator - snapshot.RemoveVotesByCandidate(candidate) - } - - return nil -} - -// validatorToMiner converts validator to bytes for miner field in header -func validatorToMiner(validator validators.Validator) ([]byte, error) { - switch validator.(type) { - case *validators.ECDSAValidator: - // Return Address directly - // to support backward compatibility - return validator.Addr().Bytes(), nil - case *validators.BLSValidator: - return validator.Bytes(), nil - default: - return nil, validators.ErrInvalidValidatorType - } -} - -// minerToValidator converts bytes to validator for miner field in header -func minerToValidator( - validatorType validators.ValidatorType, - miner []byte, -) (validators.Validator, error) { - validator, err := validators.NewValidatorFromType(validatorType) - if err != nil { - return nil, err - } - - switch typedVal := validator.(type) { - case *validators.ECDSAValidator: - typedVal.Address = types.BytesToAddress(miner) - case *validators.BLSValidator: - if err := typedVal.SetFromBytes(miner); err != nil { - return nil, err - } - default: - // shouldn't reach here - return nil, validators.ErrInvalidValidatorType - } - - return validator, nil -} diff --git a/validators/store/snapshot/snapshot_test.go b/validators/store/snapshot/snapshot_test.go deleted file mode 100644 index 58276aa1e7..0000000000 --- a/validators/store/snapshot/snapshot_test.go +++ /dev/null @@ -1,2804 +0,0 @@ -package snapshot - -import ( - "errors" - "fmt" - "math/big" - "sync" - "testing" 
- - "github.com/0xPolygon/polygon-edge/crypto" - testHelper "github.com/0xPolygon/polygon-edge/helper/tests" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/0xPolygon/polygon-edge/validators/store" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/assert" -) - -var ( - errTest = errors.New("test error") -) - -// fakeValidator is a invalid validator -type fakeValidator struct { - validators.Validator -} - -func (f *fakeValidator) Addr() types.Address { - return types.ZeroAddress -} - -type mockSigner struct { - TypeFn func() validators.ValidatorType - EcrecoverFromHeaderFn func(*types.Header) (types.Address, error) - GetValidatorsFn func(*types.Header) (validators.Validators, error) -} - -func (m *mockSigner) Type() validators.ValidatorType { - return m.TypeFn() -} - -func (m *mockSigner) EcrecoverFromHeader(h *types.Header) (types.Address, error) { - return m.EcrecoverFromHeaderFn(h) -} - -func (m *mockSigner) GetValidators(h *types.Header) (validators.Validators, error) { - return m.GetValidatorsFn(h) -} - -func newTestHeaderHash(height uint64) types.Hash { - return types.BytesToHash(crypto.Keccak256(big.NewInt(int64(height)).Bytes())) -} - -func newTestHeader(height uint64, miner []byte, nonce types.Nonce) *types.Header { - return &types.Header{ - Number: height, - Hash: newTestHeaderHash(height), - Miner: miner, - Nonce: nonce, - } -} - -func newMockBlockchain(latestHeight uint64, headers map[uint64]*types.Header) store.HeaderGetter { - return &store.MockBlockchain{ - HeaderFn: func() *types.Header { - return headers[latestHeight] - }, - GetHeaderByNumberFn: func(height uint64) (*types.Header, bool) { - header, ok := headers[height] - - return header, ok - }, - } -} - -func newTestSnapshotValidatorStore( - blockchain store.HeaderGetter, - getSigner func(uint64) (SignerInterface, error), - lastBlock uint64, - snapshots []*Snapshot, - candidates []*store.Candidate, - epochSize uint64, -) *SnapshotValidatorStore { - return &SnapshotValidatorStore{ - logger: hclog.NewNullLogger(), - store: newSnapshotStore( - &SnapshotMetadata{ - LastBlock: lastBlock, - }, - snapshots, - ), - blockchain: blockchain, - getSigner: getSigner, - candidates: candidates, - candidatesLock: sync.RWMutex{}, - epochSize: epochSize, - } -} - -func TestNewSnapshotValidatorStore(t *testing.T) { - t.Parallel() - - var ( - logger = hclog.NewNullLogger() - blockchain = newMockBlockchain( - 0, - map[uint64]*types.Header{ - 0: newTestHeader( - 0, - types.ZeroAddress.Bytes(), - types.Nonce{}, - ), - }, - ) - - epochSize uint64 = 10 - metadata = &SnapshotMetadata{ - LastBlock: 20, - } - snapshots = []*Snapshot{} - ) - - t.Run("should return error", func(t *testing.T) { - t.Parallel() - - store, err := NewSnapshotValidatorStore( - logger, - blockchain, - func(u uint64) (SignerInterface, error) { - return nil, errTest - }, - epochSize, - metadata, - snapshots, - ) - - assert.Nil( - t, - store, - ) - - assert.Equal( - t, - errTest, - err, - ) - }) - - t.Run("should succeed", func(t *testing.T) { - t.Parallel() - - vals := validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ) - - getSigner := func(u uint64) (SignerInterface, error) { - return &mockSigner{ - GetValidatorsFn: func(h *types.Header) (validators.Validators, error) { - return vals, nil - }, - }, nil - } - - snapshotStore, err := NewSnapshotValidatorStore( - logger, - blockchain, - getSigner, - epochSize, - metadata, - snapshots, - ) - - assert.Equal( - t, - 
snapshotStore.store, - newSnapshotStore(metadata, []*Snapshot{ - { - Number: 0, - Hash: newTestHeaderHash(0).String(), - Set: vals, - Votes: []*store.Vote{}, - }, - }), - ) - - assert.Equal( - t, - make([]*store.Candidate, 0), - snapshotStore.candidates, - ) - - assert.Equal( - t, - snapshotStore.epochSize, - epochSize, - ) - - assert.NoError( - t, - err, - ) - }) -} - -func TestSnapshotValidatorStore_initialize(t *testing.T) { - t.Parallel() - - var ( - initialCandidates = []*store.Candidate{} - epochSize uint64 = 10 - ) - - newGetSigner := func( - validatorType validators.ValidatorType, - headerCreators map[uint64]types.Address, - headerValidators map[uint64]validators.Validators, - ) func(uint64) (SignerInterface, error) { - return func(u uint64) (SignerInterface, error) { - return &mockSigner{ - TypeFn: func() validators.ValidatorType { - return validatorType - }, - EcrecoverFromHeaderFn: func(h *types.Header) (types.Address, error) { - creator, ok := headerCreators[h.Number] - if !ok { - return types.ZeroAddress, errTest - } - - return creator, nil - }, - GetValidatorsFn: func(h *types.Header) (validators.Validators, error) { - validators, ok := headerValidators[h.Number] - if !ok { - return nil, errTest - } - - return validators, nil - }, - }, nil - } - } - - tests := []struct { - name string - latestHeaderNumber uint64 - headers map[uint64]*types.Header - headerCreators map[uint64]types.Address - headerValidators map[uint64]validators.Validators - validatorType validators.ValidatorType - initialLastHeight uint64 - initialSnapshots []*Snapshot - expectedErr error - finalLastHeight uint64 - finalSnapshots []*Snapshot - }{ - { - name: "should a add snapshot created by genesis", - latestHeaderNumber: 0, - headers: map[uint64]*types.Header{ - 0: newTestHeader( - 0, - types.ZeroAddress.Bytes(), - types.Nonce{}, - ), - }, - headerCreators: nil, - headerValidators: map[uint64]validators.Validators{ - 0: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - }, - validatorType: validators.ECDSAValidatorType, - initialLastHeight: 0, - initialSnapshots: []*Snapshot{}, - expectedErr: nil, - finalLastHeight: 0, - finalSnapshots: []*Snapshot{ - { - Number: 0, - Hash: newTestHeaderHash(0).String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - Votes: []*store.Vote{}, - }, - }, - }, - { - name: "should add a snapshot on the latest epoch if initial snapshots are empty", - latestHeaderNumber: 20, - headers: map[uint64]*types.Header{ - 20: newTestHeader( - 20, - types.ZeroAddress.Bytes(), - types.Nonce{}, - ), - }, - headerCreators: nil, - headerValidators: map[uint64]validators.Validators{ - 20: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - }, - validatorType: validators.ECDSAValidatorType, - initialLastHeight: 10, - initialSnapshots: []*Snapshot{}, - expectedErr: nil, - finalLastHeight: 20, - finalSnapshots: []*Snapshot{ - { - Number: 20, - Hash: newTestHeaderHash(20).String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - Votes: []*store.Vote{}, - }, - }, - }, - { - name: "should add a snapshot on the latest epoch if the latest snapshot is not for the latest epoch", - latestHeaderNumber: 20, - headers: map[uint64]*types.Header{ - 20: newTestHeader( - 20, - types.ZeroAddress.Bytes(), - types.Nonce{}, - ), - }, - headerCreators: nil, - headerValidators: map[uint64]validators.Validators{ - 20: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - 
}, - validatorType: validators.ECDSAValidatorType, - initialLastHeight: 10, - initialSnapshots: []*Snapshot{ - { - Number: 10, - Hash: newTestHeaderHash(10).String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - Votes: []*store.Vote{}, - }, - }, - expectedErr: nil, - finalLastHeight: 20, - finalSnapshots: []*Snapshot{ - { - Number: 10, - Hash: newTestHeaderHash(10).String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - Votes: []*store.Vote{}, - }, - { - Number: 20, - Hash: newTestHeaderHash(20).String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - Votes: []*store.Vote{}, - }, - }, - }, - { - name: "should catch up latest header", - latestHeaderNumber: 22, - headers: map[uint64]*types.Header{ - 20: newTestHeader( - 20, - types.ZeroAddress.Bytes(), - types.Nonce{}, - ), - 21: newTestHeader( - 21, - ecdsaValidator3.Address.Bytes(), - nonceAuthVote, - ), - 22: newTestHeader( - 22, - ecdsaValidator1.Address.Bytes(), - nonceDropVote, - ), - }, - headerCreators: map[uint64]types.Address{ - 21: ecdsaValidator1.Address, - 22: ecdsaValidator2.Address, - }, - headerValidators: map[uint64]validators.Validators{ - 20: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - }, - validatorType: validators.ECDSAValidatorType, - initialLastHeight: 20, - initialSnapshots: []*Snapshot{ - { - Number: 20, - Hash: newTestHeaderHash(20).String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - Votes: []*store.Vote{}, - }, - }, - expectedErr: nil, - finalLastHeight: 22, - finalSnapshots: []*Snapshot{ - { - Number: 20, - Hash: newTestHeaderHash(20).String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - Votes: []*store.Vote{}, - }, - { - Number: 21, - Hash: newTestHeaderHash(21).String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - Votes: []*store.Vote{ - { - Candidate: ecdsaValidator3, - Validator: ecdsaValidator1.Address, - Authorize: true, - }, - }, - }, - { - Number: 22, - Hash: newTestHeaderHash(22).String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - Votes: []*store.Vote{ - { - Candidate: ecdsaValidator3, - Validator: ecdsaValidator1.Address, - Authorize: true, - }, - { - Candidate: ecdsaValidator1, - Validator: ecdsaValidator2.Address, - Authorize: false, - }, - }, - }, - }, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - snapshotStore := newTestSnapshotValidatorStore( - newMockBlockchain(test.latestHeaderNumber, test.headers), - newGetSigner(test.validatorType, test.headerCreators, test.headerValidators), - test.initialLastHeight, - test.initialSnapshots, - initialCandidates, - epochSize, - ) - - testHelper.AssertErrorMessageContains( - t, - test.expectedErr, - snapshotStore.initialize(), - ) - - assert.Equal( - t, - test.finalSnapshots, - snapshotStore.GetSnapshots(), - ) - assert.Equal( - t, - test.finalLastHeight, - snapshotStore.GetSnapshotMetadata().LastBlock, - ) - assert.Equal( - t, - initialCandidates, - snapshotStore.candidates, - ) - }) - } -} - -func TestSnapshotValidatorStoreSourceType(t *testing.T) { - t.Parallel() - - snapshotStore := newTestSnapshotValidatorStore( - nil, - nil, - 0, - nil, - nil, - 0, - ) - - assert.Equal( - t, - store.Snapshot, - snapshotStore.SourceType(), - ) -} - -func 
TestSnapshotValidatorStoreGetSnapshotMetadata(t *testing.T) { - t.Parallel() - - var ( - lastBlock = uint64(10) - - snapshotStore = newTestSnapshotValidatorStore( - nil, - nil, - lastBlock, - nil, - nil, - 0, - ) - ) - - assert.Equal( - t, - &SnapshotMetadata{ - LastBlock: lastBlock, - }, - snapshotStore.GetSnapshotMetadata(), - ) -} - -func TestSnapshotValidatorStoreGetSnapshots(t *testing.T) { - t.Parallel() - - var ( - snapshots = []*Snapshot{ - {Number: 10}, - {Number: 20}, - {Number: 30}, - } - - snapshotStore = newTestSnapshotValidatorStore( - nil, - nil, - 0, - snapshots, - nil, - 0, - ) - ) - - assert.Equal( - t, - snapshots, - snapshotStore.GetSnapshots(), - ) -} - -func TestSnapshotValidatorStoreCandidates(t *testing.T) { - t.Parallel() - - var ( - candidates = []*store.Candidate{ - { - Validator: ecdsaValidator1, - Authorize: true, - }, - } - - snapshotStore = newTestSnapshotValidatorStore( - nil, - nil, - 0, - nil, - nil, - 0, - ) - ) - - snapshotStore.candidates = candidates - - assert.Equal( - t, - candidates, - snapshotStore.Candidates(), - ) -} - -func TestSnapshotValidatorStoreGetValidators(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - snapshots []*Snapshot - height uint64 - expectedRes validators.Validators - expectedErr error - }{ - { - name: "should return ErrSnapshotNotFound is the list is empty", - snapshots: []*Snapshot{}, - height: 10, - expectedRes: nil, - expectedErr: ErrSnapshotNotFound, - }, - { - name: "should return validators in the Snapshot for the given height", - snapshots: []*Snapshot{ - {Number: 10}, - { - Number: 20, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - }, - {Number: 30}, - }, - height: 25, - expectedRes: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - snapshotStore := newTestSnapshotValidatorStore( - nil, - nil, - 0, - test.snapshots, - nil, - 0, - ) - - res, err := snapshotStore.GetValidatorsByHeight(test.height) - - assert.Equal(t, test.expectedRes, res) - assert.Equal(t, test.expectedErr, err) - }) - } -} - -func TestSnapshotValidatorStoreVotes(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - snapshots []*Snapshot - height uint64 - expectedRes []*store.Vote - expectedErr error - }{ - { - name: "should return ErrSnapshotNotFound is the list is empty", - snapshots: []*Snapshot{}, - height: 10, - expectedRes: nil, - expectedErr: ErrSnapshotNotFound, - }, - { - name: "should return validators in the Snapshot for the given height", - snapshots: []*Snapshot{ - {Number: 10}, - { - Number: 20, - Votes: []*store.Vote{ - newTestVote(ecdsaValidator2, ecdsaValidator1.Address, true), - }, - }, - {Number: 30}, - }, - height: 25, - expectedRes: []*store.Vote{ - newTestVote(ecdsaValidator2, ecdsaValidator1.Address, true), - }, - expectedErr: nil, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - snapshotStore := newTestSnapshotValidatorStore( - nil, - nil, - 0, - test.snapshots, - nil, - 0, - ) - - res, err := snapshotStore.Votes(test.height) - - assert.Equal(t, test.expectedRes, res) - assert.Equal(t, test.expectedErr, err) - }) - } -} - -func TestSnapshotValidatorStoreUpdateValidatorSet(t *testing.T) { - t.Parallel() - - var ( - // Add a snapshot so that snapshot can be used from the target height - targetHeight uint64 = 21 - - header = 
newTestHeader(targetHeight-1, nil, types.Nonce{}) - - oldValidators = validators.NewECDSAValidatorSet( - ecdsaValidator1, - ) - - newValidators = validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ecdsaValidator3, - ) - ) - - tests := []struct { - name string - initialSnapshots []*Snapshot - blockchain *store.MockBlockchain - // input - newValidators validators.Validators - height uint64 - // output - expectedErr error - finalSnapshots []*Snapshot - }{ - { - name: "should return an error if the blockchain doesn't have the header", - initialSnapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - }, - blockchain: &store.MockBlockchain{ - GetHeaderByNumberFn: func(height uint64) (*types.Header, bool) { - assert.Equal(t, targetHeight-1, height) - - // not found - return nil, false - }, - }, - newValidators: nil, - height: targetHeight, - expectedErr: fmt.Errorf("header at %d not found", targetHeight-1), - finalSnapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - }, - }, - { - name: "should replace a new snapshot with the snapshot that has the same height", - initialSnapshots: []*Snapshot{ - {Number: 10}, - { - Number: 20, - Set: oldValidators, - Votes: []*store.Vote{ - newTestVote(ecdsaValidator2, ecdsaValidator2.Address, true), - newTestVote(ecdsaValidator3, ecdsaValidator2.Address, true), - }, - }, - }, - blockchain: &store.MockBlockchain{ - GetHeaderByNumberFn: func(height uint64) (*types.Header, bool) { - assert.Equal(t, targetHeight-1, height) - - return header, true - }, - }, - newValidators: newValidators, - height: targetHeight, - expectedErr: nil, - finalSnapshots: []*Snapshot{ - {Number: 10}, - { - Number: header.Number, - Hash: header.Hash.String(), - Set: newValidators, - Votes: []*store.Vote{}, - }, - }, - }, - { - name: "should add a new snapshot when the snapshot with the same height doesn't exist", - initialSnapshots: []*Snapshot{ - {Number: 10}, - }, - blockchain: &store.MockBlockchain{ - GetHeaderByNumberFn: func(height uint64) (*types.Header, bool) { - assert.Equal(t, targetHeight-1, height) - - return header, true - }, - }, - newValidators: newValidators, - height: targetHeight, - expectedErr: nil, - finalSnapshots: []*Snapshot{ - {Number: 10}, - { - Number: header.Number, - Hash: header.Hash.String(), - Set: newValidators, - Votes: []*store.Vote{}, - }, - }, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - snapshotStore := newTestSnapshotValidatorStore( - test.blockchain, - nil, - 20, - test.initialSnapshots, - nil, - 0, - ) - - err := snapshotStore.UpdateValidatorSet( - test.newValidators, - test.height, - ) - - testHelper.AssertErrorMessageContains(t, test.expectedErr, err) - assert.Equal(t, test.finalSnapshots, snapshotStore.GetSnapshots()) - }) - } -} - -func TestSnapshotValidatorStoreModifyHeader(t *testing.T) { - t.Parallel() - - var ( - targetNumber uint64 = 20 - ) - - newInitialHeader := func() *types.Header { - return newTestHeader(0, types.ZeroAddress.Bytes(), types.Nonce{}) - } - - tests := []struct { - name string - initialSnapshots []*Snapshot - initialCandidates []*store.Candidate - // input - proposer types.Address - // output - expectedErr error - expectedHeader *types.Header - }{ - { - name: "should return ErrSnapshotNotFound if the snapshot not found", - initialSnapshots: []*Snapshot{}, - proposer: addr1, - expectedErr: ErrSnapshotNotFound, - expectedHeader: newInitialHeader(), - }, - { - name: "should return validators.ErrInvalidValidatorType if the candidate 
is invalid", - initialSnapshots: []*Snapshot{ - { - Number: targetNumber - 1, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - }, - }, - initialCandidates: []*store.Candidate{ - { - Validator: &fakeValidator{}, - Authorize: true, - }, - }, - proposer: addr1, - expectedErr: validators.ErrInvalidValidatorType, - expectedHeader: newTestHeader( - 0, - nil, - types.Nonce{}, - ), - }, - { - name: "should update miner and nonce for the addition", - initialSnapshots: []*Snapshot{ - { - Number: targetNumber - 1, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - }, - }, - initialCandidates: []*store.Candidate{ - { - Validator: ecdsaValidator2, - Authorize: true, - }, - }, - proposer: addr1, - expectedErr: nil, - expectedHeader: newTestHeader( - 0, - ecdsaValidator2.Address.Bytes(), - nonceAuthVote, - ), - }, - { - name: "should update miner and nonce for the deletion", - initialSnapshots: []*Snapshot{ - { - Number: targetNumber - 1, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - }, - }, - initialCandidates: []*store.Candidate{ - { - Validator: ecdsaValidator2, - Authorize: false, - }, - }, - proposer: addr1, - expectedErr: nil, - expectedHeader: newTestHeader( - 0, - ecdsaValidator2.Address.Bytes(), - nonceDropVote, - ), - }, - { - name: "should ignore the candidate for the addition if the candidate is in the validator set already", - initialSnapshots: []*Snapshot{ - { - Number: targetNumber - 1, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - }, - }, - initialCandidates: []*store.Candidate{ - { - Validator: ecdsaValidator2, - Authorize: true, - }, - }, - proposer: addr1, - expectedErr: nil, - expectedHeader: newInitialHeader(), - }, - { - name: "should ignore the candidate for the deletion if the candidate isn't in the validator set", - initialSnapshots: []*Snapshot{ - { - Number: targetNumber - 1, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - }, - }, - initialCandidates: []*store.Candidate{ - { - Validator: ecdsaValidator2, - Authorize: false, - }, - }, - proposer: addr1, - expectedErr: nil, - expectedHeader: newInitialHeader(), - }, - { - name: "should ignore the candidate if the candidate has been voted", - initialSnapshots: []*Snapshot{ - { - Number: targetNumber - 1, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - Votes: []*store.Vote{ - newTestVote(ecdsaValidator2, ecdsaValidator1.Address, true), - }, - }, - }, - initialCandidates: []*store.Candidate{ - { - Validator: ecdsaValidator2, - Authorize: true, - }, - }, - proposer: addr1, - expectedErr: nil, - expectedHeader: newInitialHeader(), - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - header := newInitialHeader() - - snapshotStore := newTestSnapshotValidatorStore( - nil, - nil, - 20, - test.initialSnapshots, - test.initialCandidates, - 0, - ) - - assert.Equal( - t, - test.expectedErr, - snapshotStore.ModifyHeader( - header, - test.proposer, - ), - ) - - assert.Equal(t, test.expectedHeader, header) - }) - } -} - -func TestSnapshotValidatorStoreVerifyHeader(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - nonce types.Nonce - expectedErr error - }{ - { - name: "should return nil in case of zero value nonce", - // same with nonceDropVote - nonce: types.Nonce{}, - expectedErr: nil, - }, - { - name: "should return nil in case of nonceAuthVote", - nonce: nonceAuthVote, - expectedErr: nil, - }, - { - name: "should 
return nil in case of nonceDropVote", - nonce: nonceDropVote, - expectedErr: nil, - }, - { - name: "should return ErrInvalidNonce in case of other nonces", - nonce: types.Nonce{0xff, 0x00}, - expectedErr: ErrInvalidNonce, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - header := &types.Header{ - Nonce: test.nonce, - } - - snapshotStore := newTestSnapshotValidatorStore( - nil, - nil, - 20, - nil, - nil, - 0, - ) - - assert.Equal( - t, - test.expectedErr, - snapshotStore.VerifyHeader(header), - ) - }) - } -} - -func TestSnapshotValidatorStoreProcessHeadersInRange(t *testing.T) { - t.Parallel() - - var ( - epochSize uint64 = 10 - validatorType = validators.ECDSAValidatorType - - initialValidators = validators.NewECDSAValidatorSet( - ecdsaValidator1, - ) - initialLastHeight uint64 = 0 - initialSnapshot = &Snapshot{ - Number: initialLastHeight, - Set: initialValidators, - Votes: []*store.Vote{}, - } - initialSnapshots = []*Snapshot{ - initialSnapshot, - } - initialCandidates = []*store.Candidate{} - ) - - createHeaderWithVote := func(height uint64, candidate validators.Validator, nonce types.Nonce) *types.Header { - candidateBytes, _ := validatorToMiner(candidate) - - return &types.Header{ - Number: height, - Miner: candidateBytes, - Nonce: nonce, - } - } - - tests := []struct { - name string - from uint64 - to uint64 - headers map[uint64]*types.Header - headerCreators map[uint64]types.Address - expectedErr error - finalSnapshots []*Snapshot - finalLastHeight uint64 - }{ - { - name: "should return error if header not found", - from: 0, - to: 5, - headers: map[uint64]*types.Header{}, - headerCreators: map[uint64]types.Address{}, - expectedErr: fmt.Errorf("header %d not found", 1), - finalSnapshots: initialSnapshots, - finalLastHeight: initialLastHeight, - }, - { - name: "should return error if ProcessHeader fails", - from: 0, - to: 2, - headers: map[uint64]*types.Header{ - 1: createHeaderWithVote(1, ecdsaValidator2, nonceAuthVote), - 2: createHeaderWithVote(2, ecdsaValidator2, nonceAuthVote), - }, - headerCreators: map[uint64]types.Address{ - 1: ecdsaValidator1.Address, - }, - expectedErr: errTest, - finalSnapshots: []*Snapshot{ - initialSnapshot, - { - Number: 1, - Hash: types.ZeroHash.String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - Votes: []*store.Vote{}, - }, - }, - finalLastHeight: 1, - }, - { - name: "should process all headers for ECDSAValidators", - from: 1, - to: 6, - headers: map[uint64]*types.Header{ - 1: createHeaderWithVote(1, ecdsaValidator2, nonceAuthVote), - 2: createHeaderWithVote(2, ecdsaValidator3, nonceAuthVote), - 3: createHeaderWithVote(3, ecdsaValidator3, nonceAuthVote), - 4: createHeaderWithVote(4, ecdsaValidator2, nonceDropVote), - 5: createHeaderWithVote(5, ecdsaValidator1, nonceDropVote), - 6: createHeaderWithVote(6, ecdsaValidator1, nonceDropVote), - }, - headerCreators: map[uint64]types.Address{ - 1: ecdsaValidator1.Address, - 2: ecdsaValidator2.Address, - 3: ecdsaValidator1.Address, - 4: ecdsaValidator1.Address, - 5: ecdsaValidator2.Address, - 6: ecdsaValidator3.Address, - }, - expectedErr: nil, - finalSnapshots: []*Snapshot{ - initialSnapshot, - { - Number: 1, - Hash: types.ZeroHash.String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - Votes: []*store.Vote{}, - }, - { - Number: 2, - Hash: types.ZeroHash.String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - Votes: 
[]*store.Vote{ - { - Candidate: ecdsaValidator3, - Validator: ecdsaValidator2.Address, - Authorize: true, - }, - }, - }, - { - Number: 3, - Hash: types.ZeroHash.String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ecdsaValidator3, - ), - Votes: []*store.Vote{}, - }, - { - Number: 4, - Hash: types.ZeroHash.String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ecdsaValidator3, - ), - Votes: []*store.Vote{ - { - Candidate: ecdsaValidator2, - Validator: ecdsaValidator1.Address, - Authorize: false, - }, - }, - }, - { - Number: 5, - Hash: types.ZeroHash.String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ecdsaValidator3, - ), - Votes: []*store.Vote{ - { - Candidate: ecdsaValidator2, - Validator: ecdsaValidator1.Address, - Authorize: false, - }, - { - Candidate: ecdsaValidator1, - Validator: ecdsaValidator2.Address, - Authorize: false, - }, - }, - }, - { - Number: 6, - Hash: types.ZeroHash.String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator2, - ecdsaValidator3, - ), - Votes: []*store.Vote{}, - }, - }, - finalLastHeight: 6, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - snapshotStore := newTestSnapshotValidatorStore( - newMockBlockchain(0, test.headers), - func(u uint64) (SignerInterface, error) { - return &mockSigner{ - TypeFn: func() validators.ValidatorType { - return validatorType - }, - EcrecoverFromHeaderFn: func(h *types.Header) (types.Address, error) { - creator, ok := test.headerCreators[h.Number] - if !ok { - return types.ZeroAddress, errTest - } - - return creator, nil - }, - }, nil - }, - initialLastHeight, - initialSnapshots, - initialCandidates, - epochSize, - ) - - testHelper.AssertErrorMessageContains( - t, - test.expectedErr, - snapshotStore.ProcessHeadersInRange(test.from, test.to), - ) - - assert.Equal( - t, - test.finalSnapshots, - snapshotStore.GetSnapshots(), - ) - assert.Equal( - t, - test.finalLastHeight, - snapshotStore.GetSnapshotMetadata().LastBlock, - ) - }) - } -} - -func TestSnapshotValidatorStoreProcessHeader(t *testing.T) { - t.Parallel() - - var ( - epochSize uint64 = 10 - initialLastHeight uint64 = 49 - headerHeight1 uint64 = 50 - headerHeight2 uint64 = 51 - - initialValidators = validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ) - initialCandidates = []*store.Candidate{} - initialSnapshot = &Snapshot{ - Number: initialLastHeight, - Set: initialValidators, - } - ) - - newGetSigner := func( - validatorType validators.ValidatorType, - expectedHeight uint64, - expectedHeader *types.Header, - returnAddress types.Address, - returnError error, - ) func(uint64) (SignerInterface, error) { - t.Helper() - - return func(height uint64) (SignerInterface, error) { - assert.Equal(t, expectedHeight, height) - - return &mockSigner{ - TypeFn: func() validators.ValidatorType { - return validatorType - }, - EcrecoverFromHeaderFn: func(header *types.Header) (types.Address, error) { - assert.Equal(t, expectedHeader, header) - - return returnAddress, returnError - }, - }, nil - } - } - - tests := []struct { - name string - getSigner func(uint64) (SignerInterface, error) - initialSnapshots []*Snapshot - header *types.Header - expectedErr error - finalSnapshots []*Snapshot - finalLastBlock uint64 - }{ - { - name: "should return error if getSigner returns error", - getSigner: func(height uint64) (SignerInterface, error) { - assert.Equal(t, headerHeight1, height) - - return 
nil, errTest - }, - initialSnapshots: []*Snapshot{}, - header: &types.Header{ - Number: headerHeight1, - }, - expectedErr: errTest, - finalSnapshots: []*Snapshot{}, - finalLastBlock: initialLastHeight, - }, - { - name: "should return error if the signer is nil", - getSigner: func(height uint64) (SignerInterface, error) { - assert.Equal(t, headerHeight1, height) - - return nil, nil - }, - initialSnapshots: []*Snapshot{}, - header: &types.Header{ - Number: headerHeight1, - }, - expectedErr: fmt.Errorf("signer not found at %d", headerHeight1), - finalSnapshots: []*Snapshot{}, - finalLastBlock: initialLastHeight, - }, - { - name: "should return error if EcrecoverFromHeader fails", - getSigner: newGetSigner( - validators.ECDSAValidatorType, - headerHeight1, - &types.Header{Number: headerHeight1}, - types.ZeroAddress, - errTest, - ), - initialSnapshots: []*Snapshot{}, - header: &types.Header{ - Number: headerHeight1, - }, - expectedErr: errTest, - finalSnapshots: []*Snapshot{}, - finalLastBlock: initialLastHeight, - }, - { - name: "should return error if snapshot not found", - getSigner: newGetSigner( - validators.ECDSAValidatorType, - headerHeight1, - &types.Header{Number: headerHeight1}, - ecdsaValidator3.Address, - nil, - ), - initialSnapshots: []*Snapshot{}, - header: &types.Header{ - Number: headerHeight1, - }, - expectedErr: ErrSnapshotNotFound, - finalSnapshots: []*Snapshot{}, - finalLastBlock: initialLastHeight, - }, - { - name: "should return ErrUnauthorizedProposer if the header creator is not the validator in the snapshot", - getSigner: newGetSigner( - validators.ECDSAValidatorType, - headerHeight1, - &types.Header{Number: headerHeight1}, - ecdsaValidator3.Address, - nil, - ), - initialSnapshots: []*Snapshot{ - initialSnapshot, - }, - header: &types.Header{ - Number: headerHeight1, - }, - expectedErr: ErrUnauthorizedProposer, - finalSnapshots: []*Snapshot{ - initialSnapshot, - }, - finalLastBlock: initialLastHeight, - }, - { - name: "should reset votes and remove lower snapshots if the height is the beginning of the epoch", - getSigner: newGetSigner( - validators.ECDSAValidatorType, headerHeight1, - &types.Header{Number: headerHeight1}, - ecdsaValidator1.Address, nil, - ), - initialSnapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - {Number: 30}, - {Number: 40}, - { - Number: initialLastHeight, - Set: initialValidators, - Votes: []*store.Vote{ - newTestVote(ecdsaValidator3, ecdsaValidator1.Address, true), - newTestVote(ecdsaValidator1, ecdsaValidator2.Address, false), - }, - }, - }, - header: &types.Header{ - Number: headerHeight1, - }, - expectedErr: nil, - finalSnapshots: []*Snapshot{ - {Number: 30}, - {Number: 40}, - { - Number: initialLastHeight, - Set: initialValidators, - Votes: []*store.Vote{ - newTestVote(ecdsaValidator3, ecdsaValidator1.Address, true), - newTestVote(ecdsaValidator1, ecdsaValidator2.Address, false), - }, - }, - { - Number: headerHeight1, - Hash: types.ZeroHash.String(), - Set: initialValidators, - }, - }, - finalLastBlock: headerHeight1, - }, - { - name: "should just update latest height if miner is zero", - getSigner: newGetSigner( - validators.ECDSAValidatorType, - headerHeight2, &types.Header{Number: headerHeight2, Miner: types.ZeroAddress.Bytes()}, - ecdsaValidator1.Address, nil, - ), - initialSnapshots: []*Snapshot{ - { - Number: initialLastHeight, - Set: initialValidators, - Votes: []*store.Vote{ - newTestVote(ecdsaValidator3, ecdsaValidator1.Address, true), - newTestVote(ecdsaValidator1, ecdsaValidator2.Address, false), - }, - }, - }, - header: 
&types.Header{ - Number: headerHeight2, - Miner: types.ZeroAddress.Bytes(), - }, - expectedErr: nil, - finalSnapshots: []*Snapshot{ - { - Number: initialLastHeight, - Set: initialValidators, - Votes: []*store.Vote{ - newTestVote(ecdsaValidator3, ecdsaValidator1.Address, true), - newTestVote(ecdsaValidator1, ecdsaValidator2.Address, false), - }, - }, - }, - finalLastBlock: headerHeight2, - }, - { - name: "should process the vote in the header and update snapshots and latest height", - getSigner: newGetSigner( - validators.ECDSAValidatorType, headerHeight2, &types.Header{ - Number: headerHeight2, - Miner: ecdsaValidator3.Address.Bytes(), - Nonce: nonceAuthVote, - }, ecdsaValidator1.Address, nil, - ), - initialSnapshots: []*Snapshot{ - { - Number: initialLastHeight, - Set: initialValidators, - Votes: []*store.Vote{ - newTestVote(ecdsaValidator3, ecdsaValidator2.Address, true), - }, - }, - }, - header: &types.Header{ - Number: headerHeight2, - Miner: ecdsaValidator3.Address.Bytes(), - Nonce: nonceAuthVote, - }, - expectedErr: nil, - finalSnapshots: []*Snapshot{ - { - Number: initialLastHeight, - Set: initialValidators, - Votes: []*store.Vote{ - newTestVote(ecdsaValidator3, ecdsaValidator2.Address, true), - }, - }, - { - Number: headerHeight2, - Hash: types.ZeroHash.String(), - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ecdsaValidator3, - ), - Votes: []*store.Vote{}, - }, - }, - finalLastBlock: headerHeight2, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - snapshotStore := newTestSnapshotValidatorStore( - nil, - test.getSigner, - initialLastHeight, - test.initialSnapshots, - initialCandidates, - epochSize, - ) - - testHelper.AssertErrorMessageContains( - t, - test.expectedErr, - snapshotStore.ProcessHeader( - test.header, - ), - ) - - assert.Equal( - t, - test.finalSnapshots, - snapshotStore.GetSnapshots(), - ) - - assert.Equal( - t, - test.finalLastBlock, - snapshotStore.GetSnapshotMetadata().LastBlock, - ) - }) - } -} - -func TestSnapshotValidatorStorePropose(t *testing.T) { - t.Parallel() - - var ( - latestHeight uint64 = 20 - ) - - tests := []struct { - name string - initialSnapshots []*Snapshot - initialCandidates []*store.Candidate - candidate validators.Validator - auth bool - proposer types.Address - expectedErr error - finalCandidates []*store.Candidate - }{ - { - name: "should return ErrAlreadyCandidate if the candidate exists in the candidates already", - initialSnapshots: nil, - initialCandidates: []*store.Candidate{ - { - Validator: ecdsaValidator2, - Authorize: true, - }, - }, - candidate: ecdsaValidator2, - auth: true, - proposer: ecdsaValidator1.Address, - expectedErr: ErrAlreadyCandidate, - finalCandidates: []*store.Candidate{ - { - Validator: ecdsaValidator2, - Authorize: true, - }, - }, - }, - { - name: "should return ErrSnapshotNotFound if snapshot not found", - initialSnapshots: []*Snapshot{}, - initialCandidates: []*store.Candidate{}, - candidate: ecdsaValidator2, - auth: true, - proposer: ecdsaValidator1.Address, - expectedErr: ErrSnapshotNotFound, - finalCandidates: []*store.Candidate{}, - }, - { - name: "should return ErrCandidateIsValidator if the candidate for addition exists in the validator set already", - initialSnapshots: []*Snapshot{ - { - Number: latestHeight, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - }, - }, - initialCandidates: []*store.Candidate{}, - candidate: ecdsaValidator2, - auth: true, - proposer: 
ecdsaValidator1.Address, - expectedErr: ErrCandidateIsValidator, - finalCandidates: []*store.Candidate{}, - }, - { - name: "should return ErrCandidateNotExistInSet if the candidate for deletion doesn't exist in the validator set", - initialSnapshots: []*Snapshot{ - { - Number: latestHeight, - Set: validators.NewBLSValidatorSet( - blsValidator1, - blsValidator2, - ), - }, - }, - initialCandidates: []*store.Candidate{}, - candidate: blsValidator3, - auth: false, - proposer: blsValidator1.Address, - expectedErr: ErrCandidateNotExistInSet, - finalCandidates: []*store.Candidate{}, - }, - { - name: "should return ErrAlreadyVoted if the proposer has voted for the same candidate", - initialSnapshots: []*Snapshot{ - { - Number: latestHeight, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - Votes: []*store.Vote{ - newTestVote(ecdsaValidator3, ecdsaValidator1.Address, true), - }, - }, - }, - initialCandidates: []*store.Candidate{}, - candidate: ecdsaValidator3, - auth: true, - proposer: ecdsaValidator1.Address, - expectedErr: ErrAlreadyVoted, - finalCandidates: []*store.Candidate{}, - }, - { - name: "should add a new candidate", - initialSnapshots: []*Snapshot{ - { - Number: latestHeight, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - Votes: []*store.Vote{}, - }, - }, - initialCandidates: []*store.Candidate{}, - candidate: ecdsaValidator3, - auth: true, - proposer: ecdsaValidator1.Address, - expectedErr: nil, - finalCandidates: []*store.Candidate{ - { - Validator: ecdsaValidator3, - Authorize: true, - }, - }, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - snapshotStore := newTestSnapshotValidatorStore( - nil, - nil, - latestHeight, - test.initialSnapshots, - test.initialCandidates, - 0, - ) - - assert.Equal( - t, - test.expectedErr, - snapshotStore.Propose( - test.candidate, - test.auth, - test.proposer, - ), - ) - }) - } -} - -func TestSnapshotValidatorStore_addCandidate(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - initialCandidates []*store.Candidate - // input - validators validators.Validators - candidate validators.Validator - authorize bool - // output - expectedErr error - finalCandidates []*store.Candidate - }{ - { - name: "should add a new candidate for addition", - initialCandidates: []*store.Candidate{}, - validators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - candidate: ecdsaValidator2, - authorize: true, - expectedErr: nil, - finalCandidates: []*store.Candidate{ - { - Validator: ecdsaValidator2, - Authorize: true, - }, - }, - }, - { - name: "should return ErrCandidateNotExistInSet if the candidate to be removed doesn't exist in set", - initialCandidates: []*store.Candidate{}, - validators: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - candidate: ecdsaValidator2, - authorize: false, - expectedErr: ErrCandidateNotExistInSet, - finalCandidates: []*store.Candidate{}, - }, - { - name: "should add a new candidate for deletion", - initialCandidates: []*store.Candidate{}, - validators: validators.NewBLSValidatorSet( - blsValidator1, - blsValidator2, - ), - // candidate just has to have the Address field only - candidate: validators.NewBLSValidator(blsValidator2.Addr(), nil), - authorize: false, - expectedErr: nil, - finalCandidates: []*store.Candidate{ - { - Validator: blsValidator2, - Authorize: false, - }, - }, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t 
*testing.T) { - t.Parallel() - - snapshotStore := newTestSnapshotValidatorStore( - nil, - nil, - 20, - nil, - test.initialCandidates, - 0, - ) - - assert.Equal( - t, - test.expectedErr, - snapshotStore.addCandidate( - test.validators, - test.candidate, - test.authorize, - ), - ) - - assert.Equal( - t, - test.finalCandidates, - snapshotStore.candidates, - ) - }) - } -} - -func TestSnapshotValidatorStore_addHeaderSnap(t *testing.T) { - t.Parallel() - - var ( - headerHeight uint64 = 10 - headerHash = types.BytesToHash(crypto.Keccak256([]byte{byte(headerHeight)})) - header = &types.Header{ - Number: headerHeight, - Hash: headerHash, - } - - newValidators = validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ) - ) - - tests := []struct { - name string - getSigner func(uint64) (SignerInterface, error) - initialSnapshots []*Snapshot - header *types.Header - finalSnapshots []*Snapshot - expectedErr error - }{ - { - name: "should return error if getSigner fails", - getSigner: func(height uint64) (SignerInterface, error) { - assert.Equal(t, headerHeight, height) - - return nil, errTest - }, - initialSnapshots: []*Snapshot{}, - header: header, - expectedErr: errTest, - finalSnapshots: []*Snapshot{}, - }, - { - name: "should return error if getSigner returns nil", - getSigner: func(height uint64) (SignerInterface, error) { - assert.Equal(t, headerHeight, height) - - return nil, nil - }, - initialSnapshots: []*Snapshot{}, - header: header, - expectedErr: fmt.Errorf("signer not found %d", headerHeight), - finalSnapshots: []*Snapshot{}, - }, - { - name: "should return error if signer.GetValidators fails", - getSigner: func(height uint64) (SignerInterface, error) { - assert.Equal(t, headerHeight, height) - - return &mockSigner{ - GetValidatorsFn: func(h *types.Header) (validators.Validators, error) { - return nil, errTest - }, - }, nil - }, - initialSnapshots: []*Snapshot{}, - header: header, - expectedErr: errTest, - finalSnapshots: []*Snapshot{}, - }, - { - name: "should add a new snapshot", - getSigner: func(height uint64) (SignerInterface, error) { - assert.Equal(t, headerHeight, height) - - return &mockSigner{ - GetValidatorsFn: func(h *types.Header) (validators.Validators, error) { - return newValidators, nil - }, - }, nil - }, - initialSnapshots: []*Snapshot{}, - header: header, - expectedErr: nil, - finalSnapshots: []*Snapshot{ - { - Number: headerHeight, - Hash: headerHash.String(), - Votes: []*store.Vote{}, - Set: newValidators, - }, - }, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - snapshotStore := newTestSnapshotValidatorStore( - nil, - test.getSigner, - 20, - test.initialSnapshots, - nil, - 0, - ) - - testHelper.AssertErrorMessageContains( - t, - test.expectedErr, - snapshotStore.addHeaderSnap( - test.header, - ), - ) - - assert.Equal( - t, - test.finalSnapshots, - snapshotStore.GetSnapshots(), - ) - }) - } -} - -func TestSnapshotValidatorStore_getSnapshot(t *testing.T) { - t.Parallel() - - var ( - snapthots = []*Snapshot{ - {Number: 10}, - {Number: 20}, - {Number: 30}, - } - - expectedSnapshot = &Snapshot{ - Number: 20, - } - - targetHeight uint64 = 20 - ) - - snapshotStore := newTestSnapshotValidatorStore( - nil, - nil, - 20, - snapthots, - nil, - 0, - ) - - assert.Equal( - t, - expectedSnapshot, - snapshotStore.getSnapshot(targetHeight), - ) -} - -func TestSnapshotValidatorStore_getLatestSnapshot(t *testing.T) { - t.Parallel() - - var ( - snapthots = []*Snapshot{ - {Number: 10}, - {Number: 20}, - 
{Number: 30}, - } - - expectedSnapshot = &Snapshot{ - Number: 10, - } - - latestHeight uint64 = 11 - ) - - snapshotStore := newTestSnapshotValidatorStore( - nil, - nil, - latestHeight, - snapthots, - nil, - 0, - ) - - assert.Equal( - t, - expectedSnapshot, - snapshotStore.getLatestSnapshot(), - ) -} - -func TestSnapshotValidatorStore_cleanObsoleteCandidates(t *testing.T) { - t.Parallel() - - var ( - validators = validators.NewECDSAValidatorSet( - ecdsaValidator1, - ) - - initialCandidates = []*store.Candidate{ - { - Validator: ecdsaValidator1, - Authorize: true, - }, - { - Validator: ecdsaValidator1, - Authorize: false, - }, - { - Validator: ecdsaValidator2, - Authorize: true, - }, - { - Validator: ecdsaValidator2, - Authorize: false, - }, - } - - finalCandidates = []*store.Candidate{ - { - Validator: ecdsaValidator1, - Authorize: false, - }, - { - Validator: ecdsaValidator2, - Authorize: true, - }, - } - ) - - snapshotStore := newTestSnapshotValidatorStore( - nil, - nil, - 0, - nil, - initialCandidates, - 0, - ) - - snapshotStore.cleanObsoleteCandidates(validators) - - assert.Equal( - t, - finalCandidates, - snapshotStore.candidates, - ) -} - -func TestSnapshotValidatorStore_pickOneCandidate(t *testing.T) { - t.Parallel() - - var ( - proposer = ecdsaValidator1.Addr() - - candidates = []*store.Candidate{ - { - Validator: ecdsaValidator2, - Authorize: true, - }, - { - Validator: ecdsaValidator3, - Authorize: true, - }, - } - - snapshot = &Snapshot{ - Votes: []*store.Vote{ - // validator1 has voted to validator2 already - newTestVote(ecdsaValidator2, ecdsaValidator1.Address, true), - }, - } - - expected = &store.Candidate{ - Validator: ecdsaValidator3, - Authorize: true, - } - ) - - snapshotStore := newTestSnapshotValidatorStore( - nil, - nil, - 0, - nil, - candidates, - 0, - ) - - candidate := snapshotStore.pickOneCandidate(snapshot, proposer) - - assert.Equal( - t, - expected, - candidate, - ) -} - -func TestSnapshotValidatorStore_saveSnapshotIfChanged(t *testing.T) { - t.Parallel() - - var ( - headerHeight uint64 = 30 - headerHash = types.BytesToHash(crypto.Keccak256([]byte{byte(headerHeight)})) - header = &types.Header{ - Number: headerHeight, - Hash: headerHash, - } - - parentVals = validators.NewECDSAValidatorSet( - ecdsaValidator1, - ) - - newVals = validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ) - - parentVotes = []*store.Vote{} - - newVotes = []*store.Vote{ - newTestVote(ecdsaValidator2, ecdsaValidator1.Address, true), - } - ) - - tests := []struct { - name string - initialSnapshots []*Snapshot - parentSnapshot *Snapshot - snapshot *Snapshot - finalSnapshots []*Snapshot - }{ - { - name: "shouldn't add a new snapshot if the snapshot equals to the parent snapshot", - initialSnapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - }, - parentSnapshot: &Snapshot{Number: 20, Set: parentVals, Votes: parentVotes}, - snapshot: &Snapshot{Number: headerHeight, Set: parentVals, Votes: parentVotes}, - finalSnapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - }, - }, - { - name: "should add a new snapshot if the snapshot equals to the parent snapshot", - initialSnapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - }, - parentSnapshot: &Snapshot{ - Number: 20, - Set: parentVals, - Votes: parentVotes, - }, - snapshot: &Snapshot{ - Number: headerHeight, - Hash: header.Hash.String(), - Set: newVals, - Votes: newVotes, - }, - finalSnapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - { - Number: headerHeight, - Hash: header.Hash.String(), - Set: newVals, - 
Votes: newVotes, - }, - }, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - snapshotStore := newTestSnapshotValidatorStore( - nil, - nil, - 20, - test.initialSnapshots, - nil, - 0, - ) - - snapshotStore.saveSnapshotIfChanged(test.parentSnapshot, test.snapshot, header) - - assert.Equal( - t, - test.finalSnapshots, - snapshotStore.GetSnapshots(), - ) - }) - } -} - -func TestSnapshotValidatorStore_resetSnapshot(t *testing.T) { - t.Parallel() - - var ( - headerHeight uint64 = 30 - headerHash = types.BytesToHash(crypto.Keccak256([]byte{byte(headerHeight)})) - header = &types.Header{ - Number: headerHeight, - Hash: headerHash, - } - - vals = validators.NewECDSAValidatorSet( - ecdsaValidator1, - ) - - parentVotes = []*store.Vote{ - newTestVote(ecdsaValidator2, ecdsaValidator1.Address, true), - } - ) - - tests := []struct { - name string - initialSnapshots []*Snapshot - parentSnapshot *Snapshot - snapshot *Snapshot - finalSnapshots []*Snapshot - }{ - { - name: "should add a new snapshot without votes if the parent snapshot has votes", - initialSnapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - }, - parentSnapshot: &Snapshot{Number: 20, Set: vals, Votes: parentVotes}, - snapshot: &Snapshot{Number: headerHeight, Set: vals, Votes: parentVotes}, - finalSnapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - { - Number: headerHeight, - Hash: headerHash.String(), - Set: vals, - Votes: nil, - }, - }, - }, - { - name: "shouldn't add if the parent snapshot doesn't have votes", - initialSnapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - }, - parentSnapshot: &Snapshot{Number: 20, Set: vals, Votes: []*store.Vote{}}, - snapshot: &Snapshot{Number: headerHeight, Set: vals, Votes: parentVotes}, - finalSnapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - }, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - snapshotStore := newTestSnapshotValidatorStore( - nil, - nil, - 20, - test.initialSnapshots, - nil, - 0, - ) - - snapshotStore.resetSnapshot(test.parentSnapshot, test.snapshot, header) - - assert.Equal( - t, - test.finalSnapshots, - snapshotStore.GetSnapshots(), - ) - }) - } -} - -func TestSnapshotValidatorStore_removeLowerSnapshots(t *testing.T) { - t.Parallel() - - var ( - epochSize uint64 = 10 - ) - - tests := []struct { - name string - initialSnapshots []*Snapshot - height uint64 - finalSnapshots []*Snapshot - }{ - { - name: "should remove the old snapshots", - initialSnapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - {Number: 30}, - {Number: 40}, - {Number: 50}, - }, - height: 51, // the beginning of the current epoch is 50 - finalSnapshots: []*Snapshot{ - {Number: 30}, - {Number: 40}, - {Number: 50}, - }, - }, - { - name: "shouldn't remove in case of epoch 0-2", - initialSnapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - }, - height: 20, - finalSnapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - }, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - snapshotStore := newTestSnapshotValidatorStore( - nil, - nil, - 20, - test.initialSnapshots, - nil, - epochSize, - ) - - snapshotStore.removeLowerSnapshots(test.height) - - assert.Equal( - t, - test.finalSnapshots, - snapshotStore.GetSnapshots(), - ) - }) - } -} - -func TestSnapshotValidatorStore_processVote(t *testing.T) { - var ( - headerNumber uint64 = 21 - - initialECDSAValidatorSet = validators.NewECDSAValidatorSet( - 
ecdsaValidator1, - ecdsaValidator2, - ) - - initialBLSValidatorSet = validators.NewBLSValidatorSet( - blsValidator1, - blsValidator2, - ) - ) - - tests := []struct { - name string - header *types.Header - candidateType validators.ValidatorType - proposer types.Address - snapshot *Snapshot - expectedErr error - expectedSnapshot *Snapshot - }{ - { - name: "should return ErrIncorrectNonce if header.Nonce is invalid", - header: &types.Header{ - Nonce: types.Nonce{0x01}, - }, - proposer: types.ZeroAddress, - snapshot: nil, - expectedErr: ErrIncorrectNonce, - expectedSnapshot: nil, - }, - { - name: "should return ErrInvalidValidatorType if the signer returns invalid type", - header: &types.Header{ - Nonce: nonceAuthVote, - }, - candidateType: validators.ValidatorType("fake type"), - proposer: types.ZeroAddress, - snapshot: nil, - expectedErr: validators.ErrInvalidValidatorType, - expectedSnapshot: nil, - }, - { - name: "should return error when failing parse Miner field as a validator", - header: &types.Header{ - Nonce: nonceAuthVote, - Miner: []byte{0x1, 0x1}, - }, - candidateType: validators.BLSValidatorType, - proposer: types.ZeroAddress, - snapshot: nil, - expectedErr: errors.New("value is not of type array"), - expectedSnapshot: nil, - }, - { - name: "should update latest block height if the ECDSA candidate for addition is in the validator set already", - header: &types.Header{ - Number: headerNumber, - Nonce: nonceAuthVote, - Miner: ecdsaValidator2.Address.Bytes(), - }, - candidateType: validators.ECDSAValidatorType, - proposer: types.ZeroAddress, - snapshot: &Snapshot{ - Set: initialECDSAValidatorSet, - }, - expectedErr: nil, - expectedSnapshot: &Snapshot{ - Set: initialECDSAValidatorSet, - }, - }, - { - name: "should update latest block height if the BLS candidate for deletion isn't in the validator set", - header: &types.Header{ - Number: headerNumber, - Nonce: nonceDropVote, - Miner: blsValidator3.Bytes(), - }, - candidateType: validators.BLSValidatorType, - proposer: types.ZeroAddress, - snapshot: &Snapshot{ - Set: initialBLSValidatorSet, - }, - expectedErr: nil, - expectedSnapshot: &Snapshot{ - Set: initialBLSValidatorSet, - }, - }, - { - name: "should return ErrMultipleVotesBySameValidator" + - " if the snapshot has multiple votes for the same candidate by the same proposer", - header: &types.Header{ - Number: headerNumber, - Nonce: nonceAuthVote, - Miner: ecdsaValidator3.Bytes(), - }, - candidateType: validators.ECDSAValidatorType, - proposer: ecdsaValidator1.Address, - snapshot: &Snapshot{ - Set: initialECDSAValidatorSet, - Votes: []*store.Vote{ - // duplicated votes - newTestVote(ecdsaValidator3, ecdsaValidator1.Address, true), - newTestVote(ecdsaValidator3, ecdsaValidator1.Address, true), - }, - }, - expectedErr: ErrMultipleVotesBySameValidator, - expectedSnapshot: &Snapshot{ - Set: initialECDSAValidatorSet, - Votes: []*store.Vote{ - newTestVote(ecdsaValidator3, ecdsaValidator1.Address, true), - newTestVote(ecdsaValidator3, ecdsaValidator1.Address, true), - }, - }, - }, - { - name: "should add a vote to the snapshot and save in the snapshots", - header: &types.Header{ - Number: headerNumber, - Nonce: nonceAuthVote, - Miner: ecdsaValidator3.Bytes(), - }, - candidateType: validators.ECDSAValidatorType, - proposer: ecdsaValidator1.Address, - snapshot: &Snapshot{ - Set: initialECDSAValidatorSet, - Votes: []*store.Vote{}, - }, - expectedErr: nil, - expectedSnapshot: &Snapshot{ - Set: initialECDSAValidatorSet, - Votes: []*store.Vote{ - newTestVote(ecdsaValidator3, 
ecdsaValidator1.Address, true), - }, - }, - }, - { - name: "should drop a BLS validator from validator set and remove the all votes by the deleted validator", - header: &types.Header{ - Number: headerNumber, - Nonce: nonceDropVote, - Miner: blsValidator2.Bytes(), - }, - candidateType: validators.BLSValidatorType, - proposer: blsValidator1.Address, - snapshot: &Snapshot{ - Set: initialBLSValidatorSet, - Votes: []*store.Vote{ - // Votes by Validator 1 - { - Candidate: blsValidator3, - Validator: blsValidator1.Address, - Authorize: true, - }, - // Votes by Validator 2 - { - Candidate: blsValidator2, - Validator: blsValidator2.Address, - Authorize: false, - }, - { - Candidate: blsValidator3, - Validator: blsValidator2.Address, - Authorize: true, - }, - }, - }, - expectedErr: nil, - expectedSnapshot: &Snapshot{ - Set: validators.NewBLSValidatorSet( - blsValidator1, - ), - Votes: []*store.Vote{ - // keep only the votes by validator 1 - { - Candidate: blsValidator3, - Validator: blsValidator1.Address, - Authorize: true, - }, - }, - }, - }, - { - name: "should add a ECDSA candidate to validator set and clear votes for the candidate", - header: &types.Header{ - Number: headerNumber, - Nonce: nonceAuthVote, - Miner: ecdsaValidator3.Bytes(), - }, - candidateType: validators.ECDSAValidatorType, - proposer: ecdsaValidator1.Address, - snapshot: &Snapshot{ - Set: initialECDSAValidatorSet, - Votes: []*store.Vote{ - // Validator2 has voted already - { - Candidate: ecdsaValidator3, - Validator: ecdsaValidator2.Address, - Authorize: true, - }, - }, - }, - expectedErr: nil, - expectedSnapshot: &Snapshot{ - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - // add the new validator - ecdsaValidator3, - ), - // clear votes - Votes: []*store.Vote{}, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - testHelper.AssertErrorMessageContains( - t, - test.expectedErr, - processVote( - test.snapshot, - test.header, - test.candidateType, - test.proposer, - ), - ) - - assert.Equal( - t, - test.expectedSnapshot, - test.snapshot, - ) - }) - } -} - -func Test_validatorToMiner(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - validator validators.Validator - expectedRes []byte - expectedErr error - }{ - { - name: "ECDSAValidator", - validator: ecdsaValidator1, - expectedRes: ecdsaValidator1.Address.Bytes(), - expectedErr: nil, - }, - { - name: "BLSValidator", - validator: blsValidator1, - expectedRes: blsValidator1.Bytes(), - expectedErr: nil, - }, - { - name: "fake validator", - validator: &fakeValidator{}, - expectedRes: nil, - expectedErr: validators.ErrInvalidValidatorType, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := validatorToMiner(test.validator) - - assert.Equal( - t, - test.expectedRes, - res, - ) - - assert.Equal( - t, - test.expectedErr, - err, - ) - }) - } -} - -func Test_minerToValidator(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - validatorType validators.ValidatorType - miner []byte - expectedRes validators.Validator - expectedErr error - }{ - { - name: "ECDSAValidator", - validatorType: validators.ECDSAValidatorType, - miner: ecdsaValidator1.Address.Bytes(), - expectedRes: ecdsaValidator1, - expectedErr: nil, - }, - { - name: "BLSValidator", - validatorType: validators.BLSValidatorType, - miner: blsValidator1.Bytes(), - expectedRes: blsValidator1, - expectedErr: nil, - }, - { - name: "fake validator", - 
validatorType: validators.ValidatorType("fake"), - miner: ecdsaValidator1.Address.Bytes(), - expectedRes: nil, - expectedErr: validators.ErrInvalidValidatorType, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - res, err := minerToValidator(test.validatorType, test.miner) - - assert.Equal( - t, - test.expectedRes, - res, - ) - - assert.Equal( - t, - test.expectedErr, - err, - ) - }) - } -} diff --git a/validators/store/snapshot/types.go b/validators/store/snapshot/types.go deleted file mode 100644 index d7a0f19e9d..0000000000 --- a/validators/store/snapshot/types.go +++ /dev/null @@ -1,438 +0,0 @@ -package snapshot - -import ( - "encoding/json" - "sort" - "sync" - "sync/atomic" - - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/0xPolygon/polygon-edge/validators/store" -) - -// snapshotMetadata defines the metadata for the snapshot -type SnapshotMetadata struct { - // LastBlock represents the latest block in the snapshot - LastBlock uint64 -} - -// Snapshot is the current state at a given point in time for validators and votes -type Snapshot struct { - // block number when the snapshot was created - Number uint64 - - // block hash when the snapshot was created - Hash string - - // votes casted in chronological order - Votes []*store.Vote - - // current set of validators - Set validators.Validators -} - -func (s *Snapshot) MarshalJSON() ([]byte, error) { - jsonData := struct { - Number uint64 - Hash string - Votes []*store.Vote - Type validators.ValidatorType - Set validators.Validators - }{ - Number: s.Number, - Hash: s.Hash, - Votes: s.Votes, - Type: s.Set.Type(), - Set: s.Set, - } - - return json.Marshal(jsonData) -} - -func (s *Snapshot) UnmarshalJSON(data []byte) error { - raw := struct { - Number uint64 - Hash string - Type string - Votes []json.RawMessage - Set json.RawMessage - }{} - - var err error - - if err = json.Unmarshal(data, &raw); err != nil { - return err - } - - s.Number = raw.Number - s.Hash = raw.Hash - - isLegacyFormat := raw.Type == "" - - // determine validators type - valType := validators.ECDSAValidatorType - - if !isLegacyFormat { - if valType, err = validators.ParseValidatorType(raw.Type); err != nil { - return err - } - } - - // Votes - if err := s.unmarshalVotesJSON(valType, raw.Votes); err != nil { - return err - } - - if err := s.unmarshalSetJSON(valType, raw.Set, isLegacyFormat); err != nil { - return err - } - - return nil -} - -// unmarshalVotesJSON is a helper function to unmarshal for Votes field -func (s *Snapshot) unmarshalVotesJSON( - valType validators.ValidatorType, - rawVotes []json.RawMessage, -) error { - votes := make([]*store.Vote, len(rawVotes)) - for idx := range votes { - candidate, err := validators.NewValidatorFromType(valType) - if err != nil { - return err - } - - votes[idx] = &store.Vote{ - Candidate: candidate, - } - - if err := json.Unmarshal(rawVotes[idx], votes[idx]); err != nil { - return err - } - } - - s.Votes = votes - - return nil -} - -// unmarshalSetJSON is a helper function to unmarshal for Set field -func (s *Snapshot) unmarshalSetJSON( - valType validators.ValidatorType, - rawSet json.RawMessage, - isLegacyFormat bool, -) error { - // Set - if isLegacyFormat { - addrs := []types.Address{} - if err := json.Unmarshal(rawSet, &addrs); err != nil { - return err - } - - vals := make([]*validators.ECDSAValidator, len(addrs)) - for idx, addr := range addrs { - vals[idx] = validators.NewECDSAValidator(addr) - 
} - - s.Set = validators.NewECDSAValidatorSet(vals...) - - return nil - } - - s.Set = validators.NewValidatorSetFromType(valType) - - return json.Unmarshal(rawSet, s.Set) -} - -// Equal checks if two snapshots are equal -func (s *Snapshot) Equal(ss *Snapshot) bool { - // we only check if Votes and Set are equal since Number and Hash - // are only meant to be used for indexing - if len(s.Votes) != len(ss.Votes) { - return false - } - - for indx := range s.Votes { - if !s.Votes[indx].Equal(ss.Votes[indx]) { - return false - } - } - - return s.Set.Equal(ss.Set) -} - -// Count returns the vote tally. -// The count increases if the callback function returns true -func (s *Snapshot) Count(h func(v *store.Vote) bool) (count int) { - for _, v := range s.Votes { - if h(v) { - count++ - } - } - - return -} - -// AddVote adds a vote to snapshot -func (s *Snapshot) AddVote( - voter types.Address, - candidate validators.Validator, - authorize bool, -) { - s.Votes = append(s.Votes, &store.Vote{ - Validator: voter, - Candidate: candidate, - Authorize: authorize, - }) -} - -// Copy makes a copy of the snapshot -func (s *Snapshot) Copy() *Snapshot { - // Do not need to copy Number and Hash - ss := &Snapshot{ - Votes: make([]*store.Vote, len(s.Votes)), - Set: s.Set.Copy(), - } - - for indx, vote := range s.Votes { - ss.Votes[indx] = vote.Copy() - } - - return ss -} - -// CountByCandidateAndVoter is a helper method to count votes by voter address and candidate -func (s *Snapshot) CountByVoterAndCandidate( - voter types.Address, - candidate validators.Validator, -) int { - return s.Count(func(v *store.Vote) bool { - return v.Validator == voter && v.Candidate.Equal(candidate) - }) -} - -// CountByCandidateAndVoter is a helper method to count votes by candidate -func (s *Snapshot) CountByCandidate( - candidate validators.Validator, -) int { - return s.Count(func(v *store.Vote) bool { - return v.Candidate.Equal(candidate) - }) -} - -// RemoveVotes removes the Votes that meet condition defined in the given function -func (s *Snapshot) RemoveVotes(shouldRemoveFn func(v *store.Vote) bool) { - newVotes := make([]*store.Vote, 0, len(s.Votes)) - - for _, vote := range s.Votes { - if shouldRemoveFn(vote) { - continue - } - - newVotes = append(newVotes, vote) - } - - // match capacity with size in order to shrink array - s.Votes = newVotes[:len(newVotes):len(newVotes)] -} - -// RemoveVotesByVoter is a helper method to remove all votes created by specified address -func (s *Snapshot) RemoveVotesByVoter( - address types.Address, -) { - s.RemoveVotes(func(v *store.Vote) bool { - return v.Validator == address - }) -} - -// RemoveVotesByCandidate is a helper method to remove all votes to specified candidate -func (s *Snapshot) RemoveVotesByCandidate( - candidate validators.Validator, -) { - s.RemoveVotes(func(v *store.Vote) bool { - return v.Candidate.Equal(candidate) - }) -} - -// snapshotStore defines the structure of the stored snapshots -type snapshotStore struct { - sync.RWMutex - - // lastNumber is the latest block number stored - lastNumber uint64 - - // list represents the actual snapshot sorted list - list snapshotSortedList -} - -// newSnapshotStore returns a new snapshot store -func newSnapshotStore( - metadata *SnapshotMetadata, - snapshots []*Snapshot, -) *snapshotStore { - store := &snapshotStore{ - list: snapshotSortedList{}, - } - - store.loadData(metadata, snapshots) - - return store -} - -func (s *snapshotStore) loadData( - metadata *SnapshotMetadata, - snapshots []*Snapshot, -) { - if metadata != nil { - 
s.lastNumber = metadata.LastBlock - } - - for _, snap := range snapshots { - s.add(snap) - } -} - -// getLastBlock returns the latest block number from the snapshot store. [Thread safe] -func (s *snapshotStore) getLastBlock() uint64 { - return atomic.LoadUint64(&s.lastNumber) -} - -// updateLastBlock sets the latest block number in the snapshot store. [Thread safe] -func (s *snapshotStore) updateLastBlock(num uint64) { - atomic.StoreUint64(&s.lastNumber, num) -} - -// deleteLower deletes snapshots that have a block number lower than the passed in parameter -func (s *snapshotStore) deleteLower(num uint64) { - s.Lock() - defer s.Unlock() - - pruneIndex := s.findClosestSnapshotIndex(num) - s.list = s.list[pruneIndex:] -} - -// findClosestSnapshotIndex finds the closest snapshot index for the specified -// block number -func (s *snapshotStore) findClosestSnapshotIndex(blockNum uint64) int { - // Check if the block number is lower than the highest saved snapshot - if blockNum < s.list[0].Number { - return 0 - } - - // Check if the block number if higher than the highest saved snapshot - if blockNum > s.list[len(s.list)-1].Number { - return len(s.list) - 1 - } - - var ( - low = 0 - high = len(s.list) - 1 - ) - - // Find the closest value using binary search - for low <= high { - mid := (high + low) / 2 - - if blockNum < s.list[mid].Number { - high = mid - 1 - } else if blockNum > s.list[mid].Number { - low = mid + 1 - } else { - return mid - } - } - - // Check which of the two positions is closest (and has a higher block num) - if s.list[low].Number-blockNum < blockNum-s.list[high].Number { - return high - } - - return low -} - -// find returns the index of the first closest snapshot to the number specified -func (s *snapshotStore) find(num uint64) *Snapshot { - s.RLock() - defer s.RUnlock() - - if len(s.list) == 0 { - return nil - } - - // fast track, check the last item - if last := s.list[len(s.list)-1]; last.Number < num { - return last - } - - // find the index of the element - // whose Number is bigger than or equals to num, and smallest - i := sort.Search(len(s.list), func(i int) bool { - return s.list[i].Number >= num - }) - - if i < len(s.list) { - if i == 0 { - return s.list[0] - } - - if s.list[i].Number == num { - return s.list[i] - } - - return s.list[i-1] - } - - // should not reach here - return nil -} - -// add adds a new snapshot to the snapshot store -func (s *snapshotStore) add(snap *Snapshot) { - s.Lock() - defer s.Unlock() - - // append and sort the list - s.list = append(s.list, snap) - sort.Sort(&s.list) -} - -// putByNumber replaces snapshot if the snapshot whose Number matches with the given snapshot's Number -// otherwise adds the given snapshot to the list -func (s *snapshotStore) putByNumber(snap *Snapshot) { - s.Lock() - defer s.Unlock() - - i := sort.Search(len(s.list), func(i int) bool { - return s.list[i].Number == snap.Number - }) - - if i < len(s.list) { - // replace if found - s.list[i] = snap - - return - } - - // append if not found - s.list = append(s.list, snap) - sort.Sort(&s.list) -} - -// snapshotSortedList defines the sorted snapshot list -type snapshotSortedList []*Snapshot - -// Len returns the size of the sorted snapshot list -func (s snapshotSortedList) Len() int { - return len(s) -} - -// Swap swaps two values in the sorted snapshot list -func (s snapshotSortedList) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Less checks if the element at index I has a lower number than the element at index J -func (s snapshotSortedList) Less(i, j int) bool 
{ - return s[i].Number < s[j].Number -} diff --git a/validators/store/snapshot/types_test.go b/validators/store/snapshot/types_test.go deleted file mode 100644 index 9119eca745..0000000000 --- a/validators/store/snapshot/types_test.go +++ /dev/null @@ -1,1369 +0,0 @@ -package snapshot - -import ( - "encoding/json" - "fmt" - "strings" - "testing" - - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/0xPolygon/polygon-edge/validators/store" - "github.com/stretchr/testify/assert" -) - -var ( - testNumber uint64 = 10 - testHash = types.BytesToHash(crypto.Keccak256([]byte{byte(testNumber)})) -) - -func createExampleECDSASnapshotJSON( - hash types.Hash, - number uint64, - voteAuthorize bool, - voteCandidate *validators.ECDSAValidator, - voteValidator types.Address, - setValidator *validators.ECDSAValidator, -) string { - return fmt.Sprintf(`{ - "Hash": "%s", - "Number": %d, - "Type": "%s", - "Votes": [ - { - "Authorize": %t, - "Candidate": { - "Address": "%s" - }, - "Validator": "%s" - } - ], - "Set": [ - { - "Address": "%s" - } - ] - }`, - hash, - number, - validators.ECDSAValidatorType, - voteAuthorize, - voteCandidate.Addr(), - voteValidator, - setValidator.String(), - ) -} - -func createExampleBLSSnapshotJSON( - hash types.Hash, - number uint64, - voteAuthorize bool, - voteCandidate *validators.BLSValidator, - voteValidator types.Address, - setValidator *validators.BLSValidator, -) string { - return fmt.Sprintf(`{ - "Hash": "%s", - "Number": %d, - "Type": "%s", - "Votes": [ - { - "Authorize": %t, - "Candidate": { - "Address": "%s", - "BLSPublicKey": "%s" - }, - "Validator": "%s" - } - ], - "Set": [ - { - "Address": "%s", - "BLSPublicKey": "%s" - } - ] - }`, - hash, - number, - validators.BLSValidatorType, - voteAuthorize, - voteCandidate.Address, - voteCandidate.BLSPublicKey, - voteValidator, - setValidator.Address, - setValidator.BLSPublicKey, - ) -} - -func newTestVote( - candidate validators.Validator, - validator types.Address, - authorize bool, -) *store.Vote { - return &store.Vote{ - Validator: validator, - Candidate: candidate, - Authorize: authorize, - } -} - -func TestSnapshotMarshalJSON(t *testing.T) { - t.Parallel() - - testMarshalJSON := func( - t *testing.T, - data interface{}, - expectedJSON string, // can be beautified - ) { - t.Helper() - - res, err := json.Marshal(data) - - assert.NoError(t, err) - assert.JSONEq( - t, - strings.TrimSpace(expectedJSON), - string(res), - ) - } - - t.Run("ECDSAValidators", func(t *testing.T) { - t.Parallel() - - vote := newTestVote(ecdsaValidator2, addr1, true) - - testMarshalJSON( - t, - &Snapshot{ - Number: testNumber, - Hash: testHash.String(), - Votes: []*store.Vote{ - vote, - }, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - }, - createExampleECDSASnapshotJSON( - testHash, - testNumber, - vote.Authorize, - ecdsaValidator2, - vote.Validator, - ecdsaValidator1, - ), - ) - }) - - t.Run("BLSValidators", func(t *testing.T) { - t.Parallel() - - vote := newTestVote(blsValidator2, addr1, false) - - testMarshalJSON( - t, - &Snapshot{ - Number: testNumber, - Hash: testHash.String(), - Votes: []*store.Vote{ - vote, - }, - Set: validators.NewBLSValidatorSet( - blsValidator1, - ), - }, - createExampleBLSSnapshotJSON( - testHash, - testNumber, - vote.Authorize, - blsValidator2, - blsValidator1.Addr(), - blsValidator1, - ), - ) - }) -} - -func TestSnapshotUnmarshalJSON(t *testing.T) { - t.Parallel() - - testUnmarshalJSON := func( - t 
*testing.T, - jsonStr string, - target interface{}, - expected interface{}, - ) { - t.Helper() - - err := json.Unmarshal([]byte(jsonStr), target) - - assert.NoError(t, err) - assert.Equal(t, expected, target) - } - - t.Run("ECDSAValidators", func(t *testing.T) { - t.Parallel() - - testUnmarshalJSON( - t, - createExampleECDSASnapshotJSON( - testHash, - testNumber, - true, - ecdsaValidator1, - ecdsaValidator2.Addr(), - ecdsaValidator2, - ), - &Snapshot{}, - &Snapshot{ - Number: testNumber, - Hash: testHash.String(), - Votes: []*store.Vote{ - newTestVote(ecdsaValidator1, ecdsaValidator2.Addr(), true), - }, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator2, - ), - }, - ) - }) - - t.Run("ECDSAValidators (Legacy format)", func(t *testing.T) { - t.Parallel() - - testUnmarshalJSON( - t, - fmt.Sprintf(` - { - "Number": %d, - "Hash": "%s", - "Votes": [ - { - "Validator": "%s", - "Address": "%s", - "Authorize": %t - } - ], - "Set": [ - "%s" - ] - } - `, - testNumber, - testHash, - ecdsaValidator2.Addr(), - ecdsaValidator1.Addr(), - true, - ecdsaValidator2.Addr(), - ), - &Snapshot{}, - &Snapshot{ - Number: testNumber, - Hash: testHash.String(), - Votes: []*store.Vote{ - newTestVote(ecdsaValidator1, ecdsaValidator2.Addr(), true), - }, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator2, - ), - }, - ) - }) - - t.Run("BLSValidators", func(t *testing.T) { - t.Parallel() - - testUnmarshalJSON( - t, - createExampleBLSSnapshotJSON( - testHash, - testNumber, - false, - blsValidator1, - ecdsaValidator2.Addr(), - blsValidator2, - ), - &Snapshot{}, - &Snapshot{ - Number: testNumber, - Hash: testHash.String(), - Votes: []*store.Vote{ - newTestVote(blsValidator1, blsValidator2.Addr(), false), - }, - Set: validators.NewBLSValidatorSet( - blsValidator2, - ), - }, - ) - }) - - t.Run("error handling", func(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - jsonStr string - }{ - { - name: "should return error if UnmarshalJSON for raw failed", - jsonStr: "[]", - }, - { - name: "should error if parsing Type is failed", - jsonStr: `{ - "Number": 0, - "Hash": "0x1", - "Type": "fake", - "Votes": [], - "Set": [] - }`, - }, - { - name: "should error if unmarshal Votes is failed", - jsonStr: `{ - "Number": 0, - "Hash": "0x1", - "Type": "ecdsa", - "Votes": [ - 1 - ], - "Set": [] - }`, - }, - { - name: "should return error if unmarshal Set is failed", - jsonStr: `{ - "Number": 0, - "Hash": "0x1", - "Type": "ecdsa", - "Votes": [], - "Set": [ - 1 - ] - }`, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Error( - t, - json.Unmarshal([]byte(test.jsonStr), &Snapshot{}), - ) - }) - } - }) -} - -func TestSnapshotEqual(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - s1 *Snapshot - s2 *Snapshot - expected bool - }{ - { - name: "should return true if they're equal", - s1: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(ecdsaValidator2, ecdsaValidator1.Addr(), true), - }, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - }, - s2: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(ecdsaValidator2, ecdsaValidator1.Addr(), true), - }, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - }, - expected: true, - }, - { - name: "should return false if the sizes of Votes doesn't match with each other", - s1: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(ecdsaValidator2, ecdsaValidator1.Addr(), true), - }, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - }, - s2: 
&Snapshot{ - Votes: []*store.Vote{}, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - }, - expected: false, - }, - { - name: "should return false if Votes don't match with each other", - s1: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(ecdsaValidator2, ecdsaValidator1.Addr(), true), - }, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - }, - s2: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(ecdsaValidator3, ecdsaValidator1.Addr(), true), - }, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - }, - expected: false, - }, - { - name: "should return true if Sets doesn't match with each other", - s1: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(ecdsaValidator3, ecdsaValidator1.Addr(), true), - }, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ), - }, - s2: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(ecdsaValidator3, ecdsaValidator1.Addr(), true), - }, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator2, - ), - }, - expected: false, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.expected, - test.s1.Equal(test.s2), - ) - }) - } -} - -func TestSnapshotCount(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - snapshot *Snapshot - fn func(v *store.Vote) bool - expected int - visited []*store.Vote - }{ - { - name: "should return true if they're equal", - snapshot: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(ecdsaValidator1, ecdsaValidator1.Addr(), true), - newTestVote(ecdsaValidator2, ecdsaValidator2.Addr(), false), - newTestVote(ecdsaValidator3, ecdsaValidator3.Addr(), true), - }, - }, - fn: func(v *store.Vote) bool { - // count all - return true - }, - expected: 3, - visited: []*store.Vote{ - newTestVote(ecdsaValidator1, ecdsaValidator1.Addr(), true), - newTestVote(ecdsaValidator2, ecdsaValidator2.Addr(), false), - newTestVote(ecdsaValidator3, ecdsaValidator3.Addr(), true), - }, - }, - { - name: "shouldn't count but visit all", - snapshot: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(blsValidator1, ecdsaValidator1.Addr(), true), - newTestVote(blsValidator2, ecdsaValidator2.Addr(), false), - }, - }, - fn: func(v *store.Vote) bool { - // don't count - return false - }, - expected: 0, - visited: []*store.Vote{ - newTestVote(blsValidator1, ecdsaValidator1.Addr(), true), - newTestVote(blsValidator2, ecdsaValidator2.Addr(), false), - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - visited := make([]*store.Vote, 0, len(test.snapshot.Votes)) - - res := test.snapshot.Count(func(v *store.Vote) bool { - visited = append(visited, v) - - return test.fn(v) - }) - - assert.Equal(t, test.expected, res) - assert.Equal(t, test.visited, visited) - }) - } -} - -func TestSnapshotAddVote(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - snapshot *Snapshot - vote *store.Vote - expected []*store.Vote - }{ - { - name: "should add ECDSA Validator Vote", - snapshot: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(ecdsaValidator1, ecdsaValidator1.Addr(), true), - }, - }, - vote: newTestVote(ecdsaValidator2, ecdsaValidator1.Addr(), false), - expected: []*store.Vote{ - newTestVote(ecdsaValidator1, ecdsaValidator1.Addr(), true), - newTestVote(ecdsaValidator2, ecdsaValidator1.Addr(), false), - }, - }, - { - name: "should add BLS Validator Vote", - snapshot: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(blsValidator1, 
ecdsaValidator1.Addr(), true), - }, - }, - vote: newTestVote(blsValidator2, ecdsaValidator2.Addr(), false), - expected: []*store.Vote{ - newTestVote(blsValidator1, ecdsaValidator1.Addr(), true), - newTestVote(blsValidator2, ecdsaValidator2.Addr(), false), - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - test.snapshot.AddVote( - test.vote.Validator, - test.vote.Candidate, - test.vote.Authorize, - ) - - assert.Equal(t, test.expected, test.snapshot.Votes) - }) - } -} - -func TestSnapshotCopy(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - snapshot *Snapshot - }{ - { - name: "should copy ECDSA Snapshot", - snapshot: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(ecdsaValidator1, ecdsaValidator1.Addr(), true), - }, - Set: validators.NewECDSAValidatorSet( - ecdsaValidator1, - ecdsaValidator2, - ), - }, - }, - { - name: "should copy BLS Snapshot", - snapshot: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(blsValidator1, ecdsaValidator1.Addr(), true), - }, - Set: validators.NewBLSValidatorSet( - blsValidator1, - blsValidator2, - ), - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - copied := test.snapshot.Copy() - - // check fields - assert.Equal(t, test.snapshot, copied) - - // check addresses of Set are different - assert.NotSame(t, test.snapshot.Set, copied.Set) - - // check addresses of Votes are different - assert.Equal(t, len(test.snapshot.Votes), len(copied.Votes)) - for idx := range test.snapshot.Votes { - assert.NotSame(t, test.snapshot.Votes[idx], copied.Votes[idx]) - } - }) - } -} - -func TestSnapshotCountByVoterAndCandidate(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - snapshot *Snapshot - voter types.Address - candidate validators.Validator - expected int - }{ - { - name: "should return count of the votes whose Validator and Candidate equal to the given fields", - snapshot: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(ecdsaValidator1, ecdsaValidator1.Addr(), true), // not match - newTestVote(ecdsaValidator2, ecdsaValidator2.Addr(), true), // not match - newTestVote(ecdsaValidator2, ecdsaValidator1.Addr(), true), // match - }, - }, - voter: ecdsaValidator1.Addr(), - candidate: ecdsaValidator2, - expected: 1, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.expected, - test.snapshot.CountByVoterAndCandidate( - test.voter, - test.candidate, - ), - ) - }) - } -} - -func TestSnapshotCountByCandidate(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - snapshot *Snapshot - candidate validators.Validator - expected int - }{ - { - name: "should return count of the votes whose Candidate equal to the given field", - snapshot: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(ecdsaValidator1, ecdsaValidator1.Addr(), true), // match - newTestVote(ecdsaValidator2, ecdsaValidator1.Addr(), true), // not match - newTestVote(ecdsaValidator3, ecdsaValidator2.Addr(), true), // not match - }, - }, - candidate: ecdsaValidator1, - expected: 1, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.expected, - test.snapshot.CountByCandidate( - test.candidate, - ), - ) - }) - } -} - -func TestSnapshotRemoveVotes(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - snapshot *Snapshot - fn func(v 
*store.Vote) bool - expected []*store.Vote - }{ - { - name: "should remove all Votes from Votes", - snapshot: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(ecdsaValidator1, ecdsaValidator1.Addr(), true), - newTestVote(ecdsaValidator2, ecdsaValidator1.Addr(), true), - newTestVote(ecdsaValidator3, ecdsaValidator2.Addr(), true), - }, - }, - fn: func(v *store.Vote) bool { - // remove all - return true - }, - expected: []*store.Vote{}, - }, - { - name: "should removes only Votes created by Validator 1", - snapshot: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(blsValidator1, ecdsaValidator1.Addr(), true), - newTestVote(blsValidator2, ecdsaValidator2.Addr(), true), - }, - }, - fn: func(v *store.Vote) bool { - return v.Validator == ecdsaValidator1.Address - }, - expected: []*store.Vote{ - newTestVote(blsValidator2, ecdsaValidator2.Addr(), true), - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - test.snapshot.RemoveVotes( - test.fn, - ) - - assert.Equal(t, test.expected, test.snapshot.Votes) - // make sure the size and capacity equal with each other - assert.Equal(t, len(test.snapshot.Votes), cap(test.snapshot.Votes)) - }) - } -} - -func TestSnapshotRemoveVotesByVoter(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - snapshot *Snapshot - voter types.Address - expected []*store.Vote - }{ - { - name: "should remove all Votes", - snapshot: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(blsValidator1, ecdsaValidator1.Addr(), true), - newTestVote(blsValidator2, ecdsaValidator1.Addr(), false), - }, - }, - voter: ecdsaValidator1.Address, - expected: []*store.Vote{}, - }, - { - name: "should removes only Votes created by Validator 1", - snapshot: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(ecdsaValidator1, ecdsaValidator1.Addr(), true), - newTestVote(ecdsaValidator2, ecdsaValidator1.Addr(), false), - newTestVote(ecdsaValidator3, ecdsaValidator2.Addr(), false), - }, - }, - voter: ecdsaValidator1.Address, - expected: []*store.Vote{ - newTestVote(ecdsaValidator3, ecdsaValidator2.Addr(), false), - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - test.snapshot.RemoveVotesByVoter( - test.voter, - ) - - assert.Equal(t, test.expected, test.snapshot.Votes) - // make sure the size and capacity equal with each other - assert.Equal(t, len(test.snapshot.Votes), cap(test.snapshot.Votes)) - }) - } -} - -func TestSnapshotRemoveVotesByCandidate(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - snapshot *Snapshot - candidate validators.Validator - expected []*store.Vote - }{ - { - name: "should remove all Votes", - snapshot: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(blsValidator1, ecdsaValidator1.Addr(), true), - newTestVote(blsValidator1, ecdsaValidator2.Addr(), false), - }, - }, - candidate: blsValidator1, - expected: []*store.Vote{}, - }, - { - name: "should removes only Votes for Validator 1", - snapshot: &Snapshot{ - Votes: []*store.Vote{ - newTestVote(ecdsaValidator1, ecdsaValidator1.Addr(), true), - newTestVote(ecdsaValidator2, ecdsaValidator1.Addr(), false), - newTestVote(ecdsaValidator3, ecdsaValidator2.Addr(), false), - }, - }, - candidate: ecdsaValidator1, - expected: []*store.Vote{ - newTestVote(ecdsaValidator2, ecdsaValidator1.Addr(), false), - newTestVote(ecdsaValidator3, ecdsaValidator2.Addr(), false), - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { 
- t.Parallel() - - test.snapshot.RemoveVotesByCandidate( - test.candidate, - ) - - assert.Equal(t, test.expected, test.snapshot.Votes) - // make sure the size and capacity equal with each other - assert.Equal(t, len(test.snapshot.Votes), cap(test.snapshot.Votes)) - }) - } -} - -func Test_snapshotSortedListLen(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - list *snapshotSortedList - expected int - }{ - { - name: "should return the size", - list: &snapshotSortedList{ - &Snapshot{}, - &Snapshot{}, - }, - expected: 2, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.expected, - test.list.Len(), - ) - }) - } -} - -func Test_snapshotSortedListSwap(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - list *snapshotSortedList - i, j int - expected *snapshotSortedList - }{ - { - name: "should swap elements", - list: &snapshotSortedList{ - &Snapshot{Number: 3}, - &Snapshot{Number: 2}, - &Snapshot{Number: 1}, - }, - i: 0, - j: 2, - expected: &snapshotSortedList{ - &Snapshot{Number: 1}, - &Snapshot{Number: 2}, - &Snapshot{Number: 3}, - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - test.list.Swap(test.i, test.j) - - assert.Equal( - t, - test.expected, - test.list, - ) - }) - } -} - -func Test_snapshotSortedListLess(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - list *snapshotSortedList - i, j int - expected bool - }{ - { - name: "should return true when list[i].Number < list[j].Number", - list: &snapshotSortedList{ - &Snapshot{Number: 1}, - &Snapshot{Number: 3}, - }, - expected: true, - }, - { - name: "should return false when list[i].Number == list[j].Number", - list: &snapshotSortedList{ - &Snapshot{Number: 2}, - &Snapshot{Number: 2}, - }, - expected: false, - }, - { - name: "should return false when list[i].Number > list[j].Number", - list: &snapshotSortedList{ - &Snapshot{Number: 2}, - &Snapshot{Number: 1}, - }, - expected: false, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.expected, - test.list.Less(0, 1), - ) - }) - } -} - -func Test_newSnapshotStore(t *testing.T) { - t.Parallel() - - var ( - metadata = &SnapshotMetadata{ - LastBlock: 10, - } - - snapshots = []*Snapshot{ - {Number: 1}, - {Number: 3}, - } - ) - - assert.Equal( - t, - &snapshotStore{ - lastNumber: metadata.LastBlock, - list: snapshotSortedList( - snapshots, - ), - }, - newSnapshotStore( - metadata, - snapshots, - ), - ) -} - -func Test_snapshotStore_getLastBlock(t *testing.T) { - t.Parallel() - - var ( - metadata = &SnapshotMetadata{ - LastBlock: 10, - } - ) - - store := newSnapshotStore( - metadata, - nil, - ) - - assert.Equal( - t, - metadata.LastBlock, - store.getLastBlock(), - ) -} - -func Test_snapshotStore_updateLastBlock(t *testing.T) { - t.Parallel() - - var ( - metadata = &SnapshotMetadata{ - LastBlock: 10, - } - - newLastBlock = uint64(20) - ) - - store := newSnapshotStore( - metadata, - nil, - ) - - store.updateLastBlock(newLastBlock) - - assert.Equal( - t, - newLastBlock, - store.getLastBlock(), - ) -} - -func Test_snapshotStore_deleteLower(t *testing.T) { - t.Parallel() - - metadata := &SnapshotMetadata{ - LastBlock: 10, - } - - testTable := []struct { - name string - snapshots []*Snapshot - boundary uint64 - expectedSnapshots []*Snapshot - }{ - { - "Drop lower-number snapshots", - 
[]*Snapshot{ - {Number: 10}, - {Number: 19}, - {Number: 25}, - {Number: 30}, - }, - uint64(20), - []*Snapshot{ - {Number: 25}, - {Number: 30}, - }, - }, - { - "Higher block value", - []*Snapshot{ - {Number: 10}, - {Number: 11}, - {Number: 12}, - {Number: 13}, - {Number: 14}, - }, - uint64(15), - []*Snapshot{ - {Number: 14}, - }, - }, - { - // Single snapshots shouldn't be dropped - "Single snapshot", - []*Snapshot{ - {Number: 10}, - }, - uint64(15), - []*Snapshot{ - {Number: 10}, - }, - }, - } - - for _, testCase := range testTable { - testCase := testCase - - t.Run(testCase.name, func(t *testing.T) { - t.Parallel() - - store := newSnapshotStore( - metadata, - testCase.snapshots, - ) - - store.deleteLower(testCase.boundary) - - assert.Equal( - t, - &snapshotStore{ - lastNumber: metadata.LastBlock, - list: testCase.expectedSnapshots, - }, - store, - ) - }) - } -} - -func Test_snapshotStore_find(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - snapshots []*Snapshot - input uint64 - expected *Snapshot - }{ - { - name: "should return nil if the list is empty", - snapshots: nil, - input: 1, - expected: nil, - }, - { - name: "should return the last element if it's lower than given number", - snapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - {Number: 30}, - }, - input: 40, - expected: &Snapshot{ - Number: 30, - }, - }, - { - name: "should return the first element if the given value is less than any snapshot", - snapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - {Number: 30}, - }, - input: 5, - expected: &Snapshot{ - Number: 10, - }, - }, - { - name: "should return the element whose Number matches with the given number", - snapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - {Number: 30}, - }, - input: 20, - expected: &Snapshot{ - Number: 20, - }, - }, - { - name: "should return the one before the element whose Number is bigger than the given value", - snapshots: []*Snapshot{ - {Number: 10}, - {Number: 20}, - {Number: 30}, - }, - input: 29, - expected: &Snapshot{ - Number: 20, - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - store := newSnapshotStore( - &SnapshotMetadata{}, - test.snapshots, - ) - - assert.Equal( - t, - test.expected, - store.find(test.input), - ) - }) - } -} - -func Test_snapshotStore_add(t *testing.T) { - t.Parallel() - - var ( - snapshots = []*Snapshot{ - {Number: 30}, - {Number: 25}, - {Number: 20}, - {Number: 15}, - {Number: 10}, - } - - newSnapshot = &Snapshot{Number: 12} - - expected = []*Snapshot{ - // should be sorted in asc - {Number: 10}, - {Number: 12}, - {Number: 15}, - {Number: 20}, - {Number: 25}, - {Number: 30}, - } - ) - - store := newSnapshotStore( - &SnapshotMetadata{}, - snapshots, - ) - - store.add(newSnapshot) - - assert.Equal( - t, - &snapshotStore{ - list: snapshotSortedList( - expected, - ), - }, - store, - ) -} - -func Test_snapshotStore_putByNumber(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - initialSnapshots []*Snapshot - newSnapshot *Snapshot - finalSnapshots []*Snapshot - }{ - { - name: "should replace if the same Number snapshot exists in the list", - initialSnapshots: []*Snapshot{ - {Number: 10, Hash: "10"}, - {Number: 20, Hash: "20"}, - {Number: 30, Hash: "30"}, - }, - newSnapshot: &Snapshot{ - Number: 20, - Hash: "xxx", - }, - finalSnapshots: []*Snapshot{ - {Number: 10, Hash: "10"}, - {Number: 20, Hash: "xxx"}, - {Number: 30, Hash: "30"}, - }, - }, - { - name: "should add if the same Number snapshot doesn't 
exist in the list", - initialSnapshots: []*Snapshot{ - {Number: 10, Hash: "10"}, - {Number: 20, Hash: "20"}, - {Number: 30, Hash: "30"}, - }, - newSnapshot: &Snapshot{ - Number: 25, - Hash: "25", - }, - finalSnapshots: []*Snapshot{ - {Number: 10, Hash: "10"}, - {Number: 20, Hash: "20"}, - {Number: 25, Hash: "25"}, - {Number: 30, Hash: "30"}, - }, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - store := newSnapshotStore( - &SnapshotMetadata{}, - test.initialSnapshots, - ) - - store.putByNumber(test.newSnapshot) - - assert.Equal( - t, - test.finalSnapshots, - []*Snapshot(store.list), - ) - }) - } -} diff --git a/validators/store/test_helper.go b/validators/store/test_helper.go deleted file mode 100644 index 32870f166d..0000000000 --- a/validators/store/test_helper.go +++ /dev/null @@ -1,30 +0,0 @@ -package store - -import ( - "github.com/0xPolygon/polygon-edge/consensus/ibft/signer" - "github.com/0xPolygon/polygon-edge/types" -) - -// Utilities for test -const ( - TestEpochSize = 100 -) - -func NewMockGetSigner(s signer.Signer) func(uint64) (signer.Signer, error) { - return func(u uint64) (signer.Signer, error) { - return s, nil - } -} - -type MockBlockchain struct { - HeaderFn func() *types.Header - GetHeaderByNumberFn func(uint64) (*types.Header, bool) -} - -func (m *MockBlockchain) Header() *types.Header { - return m.HeaderFn() -} - -func (m *MockBlockchain) GetHeaderByNumber(height uint64) (*types.Header, bool) { - return m.GetHeaderByNumberFn(height) -} diff --git a/validators/store/types.go b/validators/store/types.go deleted file mode 100644 index c995eb5d26..0000000000 --- a/validators/store/types.go +++ /dev/null @@ -1,113 +0,0 @@ -package store - -import ( - "encoding/json" - "fmt" - - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" -) - -type ValidatorTypeGetter func(uint64) (validators.ValidatorType, error) - -// Define the type of the validator set -type SourceType string - -const ( - // For validators saved in-memory - Snapshot SourceType = "Snapshot" - - // For validators managed in contract - Contract SourceType = "Contract" -) - -// String is a helper method for casting a SourceType to a string representation -func (t SourceType) String() string { - return string(t) -} - -// ValidatorStore is an interface that ValidatorStore needs to implement -type ValidatorStore interface { - SourceType() SourceType -} - -// HeaderGetter is an interface in order each ValidatorStore gets latest header and header by number -type HeaderGetter interface { - Header() *types.Header - GetHeaderByNumber(uint64) (*types.Header, bool) -} - -// Vote defines the vote structure -type Vote struct { - Validator types.Address // Voter - Candidate validators.Validator // Candidate - Authorize bool // Add or Remove -} - -// Equal checks if two votes are equal -func (v *Vote) Equal(vv *Vote) bool { - if v.Validator != vv.Validator { - return false - } - - if !v.Candidate.Equal(vv.Candidate) { - return false - } - - if v.Authorize != vv.Authorize { - return false - } - - return true -} - -// Copy makes a copy of the vote, and returns it -func (v *Vote) Copy() *Vote { - return &Vote{ - Validator: v.Validator, - Candidate: v.Candidate.Copy(), - Authorize: v.Authorize, - } -} - -// UnmarshalJSON is JSON unmarshaler -func (v *Vote) UnmarshalJSON(data []byte) error { - rawVote := struct { - Validator types.Address // Voter - Authorize bool // Add or Remove - - Address *types.Address // Field in legacy 
format - Candidate json.RawMessage // New field in new format - }{} - - var err error - - if err = json.Unmarshal(data, &rawVote); err != nil { - return err - } - - v.Validator = rawVote.Validator - v.Authorize = rawVote.Authorize - - // new format - if rawVote.Candidate != nil { - return json.Unmarshal(rawVote.Candidate, v.Candidate) - } - - // legacy format - if rawVote.Address != nil { - ecdsaCandidate, ok := v.Candidate.(*validators.ECDSAValidator) - if !ok { - return fmt.Errorf("expects ECDSAValidator but got %s", v.Candidate.Type()) - } - - ecdsaCandidate.Address = *rawVote.Address - } - - return nil -} - -type Candidate struct { - Validator validators.Validator - Authorize bool -} diff --git a/validators/store/types_test.go b/validators/store/types_test.go deleted file mode 100644 index b3a36c1f42..0000000000 --- a/validators/store/types_test.go +++ /dev/null @@ -1,347 +0,0 @@ -package store - -import ( - "encoding/json" - "fmt" - "testing" - - "github.com/0xPolygon/polygon-edge/types" - "github.com/0xPolygon/polygon-edge/validators" - "github.com/stretchr/testify/assert" -) - -var ( - addr1 = types.StringToAddress("1") - addr2 = types.StringToAddress("2") - addr3 = types.StringToAddress("3") - - testBLSPubKey1 = validators.BLSValidatorPublicKey([]byte("bls_pubkey1")) - - ecdsaValidator1 = validators.NewECDSAValidator(addr1) - ecdsaValidator2 = validators.NewECDSAValidator(addr2) - blsValidator1 = validators.NewBLSValidator(addr1, testBLSPubKey1) -) - -func createExampleECDSAVoteJSON( - authorize bool, - candidate *validators.ECDSAValidator, - validator types.Address, -) string { - return fmt.Sprintf(`{ - "Authorize": %t, - "Candidate": { - "Address": "%s" - }, - "Validator": "%s" - }`, - authorize, - candidate.Addr(), - validator, - ) -} - -func createExampleLegacyECDSAVoteJSON( - authorize bool, - candidate *validators.ECDSAValidator, - validator types.Address, -) string { - return fmt.Sprintf(`{ - "Authorize": %t, - "Address": "%s", - "Validator": "%s" - }`, - authorize, - candidate.Addr(), - validator, - ) -} - -func createExampleBLSVoteJSON( - authorize bool, - candidate *validators.BLSValidator, - validator types.Address, -) string { - return fmt.Sprintf(` - { - "Authorize": %t, - "Candidate": { - "Address": "%s", - "BLSPublicKey": "%s" - }, - "Validator": "%s" - }`, - authorize, - candidate.Address, - candidate.BLSPublicKey, - validator, - ) -} - -func TestSourceTypeString(t *testing.T) { - t.Parallel() - - tests := []struct { - sourceType SourceType - expected string - }{ - { - sourceType: Snapshot, - expected: "Snapshot", - }, - { - sourceType: Contract, - expected: "Contract", - }, - } - - for _, test := range tests { - test := test - - t.Run(test.expected, func(t *testing.T) { - t.Parallel() - - assert.Equal(t, test.expected, test.sourceType.String()) - }) - } -} - -func TestVoteJSONMarshal(t *testing.T) { - t.Parallel() - - testMarshalJSON := func( - t *testing.T, - data interface{}, - expectedJSON string, // can be beautified - ) { - t.Helper() - - res, err := json.Marshal(data) - - assert.NoError(t, err) - assert.JSONEq( - t, - expectedJSON, - string(res), - ) - } - - t.Run("ECDSAValidator", func(t *testing.T) { - t.Parallel() - - testMarshalJSON( - t, - &Vote{ - Authorize: true, - Candidate: ecdsaValidator2, - Validator: addr1, - }, - createExampleECDSAVoteJSON( - true, - ecdsaValidator2, - addr1, - ), - ) - }) - - t.Run("BLSValidator", func(t *testing.T) { - t.Parallel() - - testMarshalJSON( - t, - &Vote{ - Authorize: false, - Candidate: blsValidator1, - Validator: 
addr2, - }, - createExampleBLSVoteJSON( - false, - blsValidator1, - addr2, - ), - ) - }) -} - -func TestVoteJSONUnmarshal(t *testing.T) { - t.Parallel() - - testUnmarshalJSON := func( - t *testing.T, - jsonStr string, - target interface{}, - expected interface{}, - ) { - t.Helper() - - err := json.Unmarshal([]byte(jsonStr), target) - - assert.NoError(t, err) - assert.Equal(t, expected, target) - } - - t.Run("ECDSAValidator", func(t *testing.T) { - t.Parallel() - - testUnmarshalJSON( - t, - createExampleECDSAVoteJSON( - false, - ecdsaValidator1, - addr2, - ), - &Vote{ - // need to initialize Candidate before unmarshalling - Candidate: new(validators.ECDSAValidator), - }, - &Vote{ - Authorize: false, - Candidate: ecdsaValidator1, - Validator: addr2, - }, - ) - }) - - t.Run("ECDSAValidator (legacy format)", func(t *testing.T) { - t.Parallel() - - testUnmarshalJSON( - t, - createExampleLegacyECDSAVoteJSON( - false, - ecdsaValidator1, - addr2, - ), - &Vote{ - Candidate: new(validators.ECDSAValidator), - }, - &Vote{ - Authorize: false, - Candidate: ecdsaValidator1, - Validator: addr2, - }, - ) - }) - - t.Run("BLSValidator", func(t *testing.T) { - t.Parallel() - - testUnmarshalJSON( - t, - createExampleBLSVoteJSON( - true, - blsValidator1, - addr2, - ), - &Vote{ - // need to initialize Candidate before unmarshalling - Candidate: new(validators.BLSValidator), - }, - &Vote{ - Authorize: true, - Candidate: blsValidator1, - Validator: addr2, - }, - ) - }) -} - -func TestVoteEqual(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - v1 *Vote - v2 *Vote - expected bool - }{ - { - name: "equal", - v1: &Vote{ - Validator: addr1, - Candidate: validators.NewECDSAValidator(addr2), - Authorize: true, - }, - v2: &Vote{ - Validator: addr1, - Candidate: validators.NewECDSAValidator(addr2), - Authorize: true, - }, - expected: true, - }, - { - name: "Validators don't match with each other", - v1: &Vote{ - Validator: addr1, - Candidate: validators.NewECDSAValidator(addr2), - Authorize: true, - }, - v2: &Vote{ - Validator: addr2, - Candidate: validators.NewECDSAValidator(addr2), - Authorize: true, - }, - expected: false, - }, - { - name: "Candidates don't match with each other", - v1: &Vote{ - Validator: addr1, - Candidate: validators.NewECDSAValidator(addr2), - Authorize: true, - }, - v2: &Vote{ - Validator: addr1, - Candidate: validators.NewECDSAValidator(addr3), - Authorize: true, - }, - expected: false, - }, - { - name: "Authorizes don't match with each other", - v1: &Vote{ - Validator: addr1, - Candidate: validators.NewECDSAValidator(addr2), - Authorize: true, - }, - v2: &Vote{ - Validator: addr1, - Candidate: validators.NewECDSAValidator(addr2), - Authorize: false, - }, - expected: false, - }, - } - - for _, test := range tests { - test := test - - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - assert.Equal( - t, - test.expected, - test.v1.Equal(test.v2), - ) - }) - } -} - -func TestVoteCopy(t *testing.T) { - t.Parallel() - - v1 := &Vote{ - Validator: addr1, - Candidate: validators.NewECDSAValidator(addr2), - Authorize: true, - } - - v2 := v1.Copy() - - assert.Equal(t, v1, v2) - - // check the addresses are different - assert.NotSame(t, v1.Validator, v2.Validator) - assert.NotSame(t, v1.Candidate, v2.Candidate) - assert.NotSame(t, v1.Authorize, v2.Authorize) -} diff --git a/validators/types.go b/validators/types.go deleted file mode 100644 index e8a60d17c9..0000000000 --- a/validators/types.go +++ /dev/null @@ -1,91 +0,0 @@ -package validators - -import ( - "errors" - - 
"github.com/0xPolygon/polygon-edge/types" - "github.com/umbracle/fastrlp" -) - -var ( - ErrInvalidValidatorType = errors.New("invalid validator type") - ErrMismatchValidatorType = errors.New("mismatch between validator and validators") - ErrMismatchValidatorsType = errors.New("mismatch between two validators") - ErrValidatorAlreadyExists = errors.New("validator already exists in validators") - ErrValidatorNotFound = errors.New("validator not found in validators") - ErrInvalidValidators = errors.New("container is not ") -) - -type ValidatorType string - -const ( - ECDSAValidatorType ValidatorType = "ecdsa" - BLSValidatorType ValidatorType = "bls" -) - -// validatorTypes is the map used for easy string -> ValidatorType lookups -var validatorTypes = map[string]ValidatorType{ - string(ECDSAValidatorType): ECDSAValidatorType, - string(BLSValidatorType): BLSValidatorType, -} - -// ParseValidatorType converts a validatorType string representation to a ValidatorType -func ParseValidatorType(validatorType string) (ValidatorType, error) { - // Check if the cast is possible - castType, ok := validatorTypes[validatorType] - if !ok { - return castType, ErrInvalidValidatorType - } - - return castType, nil -} - -// Validator defines the interface of the methods a validator implements -type Validator interface { - // Return the validator type - Type() ValidatorType - // Return the string representation - String() string - // Return the address of the validator - Addr() types.Address - // Return of copy of the validator - Copy() Validator - // Check the same validator or not - Equal(Validator) bool - // RLP Marshaller to encode to bytes - MarshalRLPWith(*fastrlp.Arena) *fastrlp.Value - // RLP Unmarshaller to encode from bytes - UnmarshalRLPFrom(*fastrlp.Parser, *fastrlp.Value) error - // Return bytes in RLP encode - Bytes() []byte - // Decode bytes in RLP encode and map to the fields - SetFromBytes([]byte) error -} - -// Validators defines the interface of the methods validator collection implements -type Validators interface { - // Return the type of the validators - Type() ValidatorType - // Return the size of collection - Len() int - // Check equality of each element - Equal(Validators) bool - // Return of the whole collection - Copy() Validators - // Get validator at specified height - At(uint64) Validator - // Find the index of the validator that has specified address - Index(types.Address) int64 - // Check the validator that has specified address exists in the collection - Includes(types.Address) bool - // Add a validator into collection - Add(Validator) error - // Remove a validator from collection - Del(Validator) error - // Merge 2 collections into one collection - Merge(Validators) error - // RLP Marshaller to encode to bytes - MarshalRLPWith(*fastrlp.Arena) *fastrlp.Value - // Decode bytes in RLP encode and map to the elements - UnmarshalRLPFrom(*fastrlp.Parser, *fastrlp.Value) error -} diff --git a/validators/types_test.go b/validators/types_test.go deleted file mode 100644 index d9d19525ae..0000000000 --- a/validators/types_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package validators - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestParseValidatorType(t *testing.T) { - t.Parallel() - - t.Run("ECDSA", func(t *testing.T) { - t.Parallel() - - res, err := ParseValidatorType("ecdsa") - - assert.Equal( - t, - ECDSAValidatorType, - res, - ) - - assert.NoError( - t, - err, - ) - }) - - t.Run("BLS", func(t *testing.T) { - t.Parallel() - - res, err := 
ParseValidatorType("bls")
-
-		assert.Equal(
-			t,
-			BLSValidatorType,
-			res,
-		)
-
-		assert.NoError(
-			t,
-			err,
-		)
-	})
-
-	t.Run("other type", func(t *testing.T) {
-		t.Parallel()
-
-		_, err := ParseValidatorType("fake")
-
-		assert.Equal(
-			t,
-			ErrInvalidValidatorType,
-			err,
-		)
-	})
-}
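Note on the removed snapshot lookup: the deleted snapshotStore.find relied on a sorted list plus sort.Search to return the snapshot that covers a given block number, and the deleted tests pin down its edge cases (a number before the first snapshot, an exact match, a number between snapshots, and a number past the last snapshot). The standalone sketch below is not part of this patch; it restates that selection logic over a locally defined snapshot type so the behaviour being removed stays documented. The package name, the trimmed snapshot struct, and the find helper here are illustrative only.

package main

import (
	"fmt"
	"sort"
)

// snapshot keeps only the field the removed store keyed on.
type snapshot struct {
	Number uint64
}

// find mirrors the removed snapshotStore.find: given an ascending list,
// it returns the entry with the largest Number <= num, or the first
// entry when num precedes every snapshot, or nil for an empty list.
func find(list []*snapshot, num uint64) *snapshot {
	if len(list) == 0 {
		return nil
	}

	// fast path: every snapshot is older than num, use the last one
	if last := list[len(list)-1]; last.Number < num {
		return last
	}

	// index of the first snapshot whose Number >= num
	i := sort.Search(len(list), func(i int) bool {
		return list[i].Number >= num
	})

	if i == 0 || list[i].Number == num {
		return list[i]
	}

	return list[i-1]
}

func main() {
	list := []*snapshot{{Number: 10}, {Number: 20}, {Number: 30}}

	fmt.Println(find(list, 5).Number)  // 10: before the first snapshot
	fmt.Println(find(list, 20).Number) // 20: exact match
	fmt.Println(find(list, 29).Number) // 20: previous snapshot covers 29
	fmt.Println(find(list, 40).Number) // 30: past the last snapshot
}

The fast path reproduces the original's last-element check, which keeps the common "latest block" lookup O(1) while sort.Search handles the general case; the deleted Test_snapshotStore_find cases above map one-to-one onto the four calls in main.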