diff --git a/Makefile b/Makefile index cd715b62..f906edf7 100644 --- a/Makefile +++ b/Makefile @@ -37,7 +37,7 @@ build: $(BUILD_DIR) ## Build go binary. .PHONY: install install: build ## Install the go binary. $(RM) $(INSTALL_DIR)/$(BIN_NAME) - cp $(BUILD_DIR)/$(BIN_NAME) $(INSTALL_DIR) + cp $(BUILD_DIR)/$(BIN_NAME) $(INSTALL_DIR)/ .PHONY: cross cross: $(BUILD_DIR) ## Cross-compile go binaries using CGO. @@ -119,4 +119,4 @@ geth-loadtest: build ## Fund test account with 5k ETH and run loadtest against a .PHONY: avail-loadtest avail-loadtest: build ## Run loadtest against an Avail chain. - $(BUILD_DIR)/$(BIN_NAME) loadtest --verbosity 700 --chain-id 1256 --concurrency 1 --requests 1000 --rate-limit 5 --mode t --data-avail http://127.0.0.1:$(PORT) \ No newline at end of file + $(BUILD_DIR)/$(BIN_NAME) loadtest --verbosity 700 --chain-id 1256 --concurrency 1 --requests 1000 --rate-limit 5 --mode t --data-avail http://127.0.0.1:$(PORT) diff --git a/README.md b/README.md index 56342fdc..b5290b98 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,9 @@ commonly needed tools and provide interfaces and formats. Requirements: - [Go](https://go.dev/) +- make +- jq +- bc To install, clone this repo and run: diff --git a/cmd/forge/forge.go b/cmd/forge/forge.go index 8919d827..8e8d75de 100644 --- a/cmd/forge/forge.go +++ b/cmd/forge/forge.go @@ -47,7 +47,6 @@ import ( "github.com/maticnetwork/polygon-cli/rpctypes" "golang.org/x/exp/slices" - "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -133,10 +132,6 @@ Here is an example usage: } inputForge.GenesisData = genesisData - zerolog.SetGlobalLevel(zerolog.TraceLevel) - log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) - log.Debug().Msg("Starting logger in console mode") - return nil }, } @@ -145,7 +140,7 @@ func init() { ForgeCmd.PersistentFlags().StringVarP(&inputForge.Client, "client", "c", "edge", "Specify which blockchain client should be use to forge the data") ForgeCmd.PersistentFlags().StringVarP(&inputForge.DataDir, "data-dir", "d", "./forged-data", "Specify a folder to be used to store the chain data") ForgeCmd.PersistentFlags().StringVarP(&inputForge.GenesisFile, "genesis", "g", "genesis.json", "Specify a file to be used for genesis configuration") - ForgeCmd.PersistentFlags().StringVarP(&inputForge.Verifier, "verifier", "v", "dummy", "Specify a consensus engine to use for forging") + ForgeCmd.PersistentFlags().StringVarP(&inputForge.Verifier, "verifier", "V", "dummy", "Specify a consensus engine to use for forging") ForgeCmd.PersistentFlags().StringVarP(&inputForge.Mode, "mode", "m", "json", "The forge mode indicates how we should get the transactions for our blocks [json, proto]") ForgeCmd.PersistentFlags().Uint64VarP(&inputForge.Count, "count", "C", 100, "The number of blocks to try to forge") ForgeCmd.PersistentFlags().StringVarP(&inputForge.BlocksFile, "blocks", "b", "", "A file of encoded blocks; the format of this file should match the mode") @@ -153,9 +148,9 @@ func init() { ForgeCmd.PersistentFlags().StringVarP(&inputForge.ReceiptsFile, "receipts", "r", "", "A file of encoded receipts; the format of this file should match the mode") ForgeCmd.PersistentFlags().BoolVarP(&inputForge.IncludeTxFees, "tx-fees", "t", false, "if the transaction fees should be included when computing block rewards") ForgeCmd.PersistentFlags().BoolVarP(&inputForge.ShouldReadFirstBlock, "read-first-block", "R", false, "whether to read the first block, leave false if first block is genesis") - 
ForgeCmd.PersistentFlags().BoolVarP(&inputForge.ShouldVerifyBlocks, "verify-blocks", "V", true, "whether to verify blocks, set false if forging nonconsecutive blocks") - ForgeCmd.PersistentFlags().BoolVarP(&inputForge.ShouldRewriteTxNonces, "rewrite-tx-nonces", "", false, "whether to rewrite transaction nonces, set true if forging nonconsecutive blocks") - ForgeCmd.PersistentFlags().BoolVarP(&inputForge.HasConsecutiveBlocks, "consecutive-blocks", "", true, "whether the blocks file has consecutive blocks") + ForgeCmd.PersistentFlags().BoolVar(&inputForge.ShouldVerifyBlocks, "verify-blocks", true, "whether to verify blocks, set false if forging nonconsecutive blocks") + ForgeCmd.PersistentFlags().BoolVar(&inputForge.ShouldRewriteTxNonces, "rewrite-tx-nonces", false, "whether to rewrite transaction nonces, set true if forging nonconsecutive blocks") + ForgeCmd.PersistentFlags().BoolVar(&inputForge.HasConsecutiveBlocks, "consecutive-blocks", true, "whether the blocks file has consecutive blocks") ForgeCmd.PersistentFlags().BoolVarP(&inputForge.ShouldProcessBlocks, "process-blocks", "p", true, "whether the transactions in blocks should be processed applied to the state") if err := cobra.MarkFlagRequired(ForgeCmd.PersistentFlags(), "blocks"); err != nil { @@ -345,7 +340,7 @@ func readAllBlocksToChain(bh *edgeBlockchainHandle, blockReader BlockReader, rec if inputForge.ShouldRewriteTxNonces { for nonce, tx := range edgeBlock.Transactions { tx.Nonce = uint64(nonce) - log.Logger.Debug().Int64("old nonce", int64(tx.Nonce)).Int64("new nonce", int64(nonce)).Str("tx hash", tx.Hash.String()).Msg("Rewrote tx nonce") + log.Debug().Int64("old nonce", int64(tx.Nonce)).Int64("new nonce", int64(nonce)).Str("tx hash", tx.Hash.String()).Msg("Rewrote tx nonce") } } diff --git a/cmd/fork/fork.go b/cmd/fork/fork.go index 26e2adaf..ab52404b 100644 --- a/cmd/fork/fork.go +++ b/cmd/fork/fork.go @@ -8,7 +8,6 @@ import ( "github.com/ethereum/go-ethereum/consensus/clique" "github.com/ethereum/go-ethereum/core/types" ethcrypto "github.com/ethereum/go-ethereum/crypto" - "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" "os" @@ -175,10 +174,3 @@ func ecrecover(block *types.Block) ([]byte, error) { return signer, nil } - -func init() { - // flagSet := ForkCmd.PersistentFlags() - zerolog.SetGlobalLevel(zerolog.TraceLevel) - log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) - -} diff --git a/cmd/loadtest/loadtest.go b/cmd/loadtest/loadtest.go index 614d4e12..250a6c0a 100644 --- a/cmd/loadtest/loadtest.go +++ b/cmd/loadtest/loadtest.go @@ -162,7 +162,6 @@ var LoadtestCmd = &cobra.Command{ return nil }, Args: func(cmd *cobra.Command, args []string) error { - setLogLevel(inputLoadTestParams) zerolog.DurationFieldUnit = time.Second zerolog.DurationFieldInteger = true @@ -189,31 +188,6 @@ var LoadtestCmd = &cobra.Command{ }, } -func setLogLevel(ltp loadTestParams) { - verbosity := *ltp.Verbosity - if verbosity < 100 { - zerolog.SetGlobalLevel(zerolog.PanicLevel) - } else if verbosity < 200 { - zerolog.SetGlobalLevel(zerolog.FatalLevel) - } else if verbosity < 300 { - zerolog.SetGlobalLevel(zerolog.ErrorLevel) - } else if verbosity < 400 { - zerolog.SetGlobalLevel(zerolog.WarnLevel) - } else if verbosity < 500 { - zerolog.SetGlobalLevel(zerolog.InfoLevel) - } else if verbosity < 600 { - zerolog.SetGlobalLevel(zerolog.DebugLevel) - } else { - zerolog.SetGlobalLevel(zerolog.TraceLevel) - } - if *ltp.PrettyLogs { - log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) - 
log.Debug().Msg("Starting logger in console mode") - } else { - log.Debug().Msg("Starting logger in JSON mode") - } -} - type ( blockSummary struct { Block *rpctypes.RawBlockResponse @@ -237,8 +211,6 @@ type ( Concurrency *int64 BatchSize *uint64 TimeLimit *int64 - Verbosity *int64 - PrettyLogs *bool ToRandom *bool URL *url.URL ChainID *uint64 @@ -293,10 +265,8 @@ func init() { ltp.Concurrency = LoadtestCmd.PersistentFlags().Int64P("concurrency", "c", 1, "Number of multiple requests to perform at a time. Default is one request at a time.") ltp.TimeLimit = LoadtestCmd.PersistentFlags().Int64P("time-limit", "t", -1, "Maximum number of seconds to spend for benchmarking. Use this to benchmark within a fixed total amount of time. Per default there is no timelimit.") // https://logging.apache.org/log4j/2.x/manual/customloglevels.html - ltp.Verbosity = LoadtestCmd.PersistentFlags().Int64P("verbosity", "v", 200, "0 - Silent\n100 Fatals\n200 Errors\n300 Warnings\n400 INFO\n500 Debug\n600 Trace") // extended parameters - ltp.PrettyLogs = LoadtestCmd.PersistentFlags().Bool("pretty-logs", true, "Should we log in pretty format or JSON") ltp.PrivateKey = LoadtestCmd.PersistentFlags().String("private-key", codeQualityPrivateKey, "The hex encoded private key that we'll use to sending transactions") ltp.ChainID = LoadtestCmd.PersistentFlags().Uint64("chain-id", 1256, "The chain id for the transactions that we're going to send") ltp.ToAddress = LoadtestCmd.PersistentFlags().String("to-address", "0xDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF", "The address that we're going to send to") @@ -577,32 +547,35 @@ func getTxPoolSize(rpc *ethrpc.Client) (uint64, error) { return pendingCount + queuedCount, nil } -func updateRateLimit(rl *rate.Limiter, rpc *ethrpc.Client, steadyStateQueueSize uint64, rateLimitIncrement uint64, cycleDuration time.Duration, backoff float64) { +func updateRateLimit(ctx context.Context, rl *rate.Limiter, rpc *ethrpc.Client, steadyStateQueueSize uint64, rateLimitIncrement uint64, cycleDuration time.Duration, backoff float64) { ticker := time.NewTicker(cycleDuration) defer ticker.Stop() + for { + select { + case <-ticker.C: + txPoolSize, err := getTxPoolSize(rpc) + if err != nil { + log.Error().Err(err).Msg("Error getting txpool size") + return + } - for range ticker.C { - txPoolSize, err := getTxPoolSize(rpc) - if err != nil { - log.Error().Err(err).Msg("Error getting txpool size") + if txPoolSize < steadyStateQueueSize { + // additively increment requests per second if txpool less than queue steady state + newRateLimit := rate.Limit(float64(rl.Limit()) + float64(rateLimitIncrement)) + rl.SetLimit(newRateLimit) + log.Info().Float64("New Rate Limit (RPS)", float64(rl.Limit())).Uint64("Current Tx Pool Size", txPoolSize).Uint64("Steady State Tx Pool Size", steadyStateQueueSize).Msg("Increased rate limit") + } else if txPoolSize > steadyStateQueueSize { + // halve rate limit requests per second if txpool greater than queue steady state + rl.SetLimit(rl.Limit() / rate.Limit(backoff)) + log.Info().Float64("New Rate Limit (RPS)", float64(rl.Limit())).Uint64("Current Tx Pool Size", txPoolSize).Uint64("Steady State Tx Pool Size", steadyStateQueueSize).Msg("Backed off rate limit") + } + case <-ctx.Done(): return } - - if txPoolSize < steadyStateQueueSize { - // additively increment requests per second if txpool less than queue steady state - newRateLimit := rate.Limit(float64(rl.Limit()) + float64(rateLimitIncrement)) - rl.SetLimit(newRateLimit) - log.Info().Float64("New Rate Limit (RPS)", 
float64(rl.Limit())).Uint64("Current Tx Pool Size", txPoolSize).Uint64("Steady State Tx Pool Size", steadyStateQueueSize).Msg("Increased rate limit") - } else if txPoolSize > steadyStateQueueSize { - // halve rate limit requests per second if txpool greater than queue steady state - rl.SetLimit(rl.Limit() / rate.Limit(backoff)) - log.Info().Float64("New Rate Limit (RPS)", float64(rl.Limit())).Uint64("Current Tx Pool Size", txPoolSize).Uint64("Steady State Tx Pool Size", steadyStateQueueSize).Msg("Backed off rate limit") - } } } func mainLoop(ctx context.Context, c *ethclient.Client, rpc *ethrpc.Client) error { - ltp := inputLoadTestParams log.Trace().Interface("Input Params", ltp).Msg("Params") @@ -619,8 +592,10 @@ func mainLoop(ctx context.Context, c *ethclient.Client, rpc *ethrpc.Client) erro if *ltp.RateLimit <= 0.0 { rl = nil } + rateLimitCtx, cancel := context.WithCancel(ctx) + defer cancel() if *ltp.AdaptiveRateLimit && rl != nil { - go updateRateLimit(rl, rpc, steadyStateTxPoolSize, adaptiveRateLimitIncrement, time.Duration(*ltp.AdaptiveCycleDuration)*time.Second, *ltp.AdaptiveBackoffFactor) + go updateRateLimit(rateLimitCtx, rl, rpc, steadyStateTxPoolSize, adaptiveRateLimitIncrement, time.Duration(*ltp.AdaptiveCycleDuration)*time.Second, *ltp.AdaptiveBackoffFactor) } tops, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID) @@ -872,10 +847,10 @@ func mainLoop(ctx context.Context, c *ethclient.Client, rpc *ethrpc.Client) erro } wg.Done() }(i) - } log.Trace().Msg("Finished starting go routines. Waiting..") wg.Wait() + cancel() log.Debug().Uint64("currentNonce", currentNonce).Msg("Finished main loadtest loop") log.Debug().Msg("Waiting for transactions to actually be mined") finalBlockNumber, err := waitForFinalBlock(ctx, c, rpc, startBlockNumber, startNonce, currentNonce) diff --git a/cmd/monitor/monitor.go b/cmd/monitor/monitor.go index 03015f0b..92705617 100644 --- a/cmd/monitor/monitor.go +++ b/cmd/monitor/monitor.go @@ -21,7 +21,6 @@ import ( "fmt" "math/big" "net/url" - "os" "sort" "sync" "time" @@ -34,7 +33,6 @@ import ( "github.com/gizak/termui/v3/widgets" "github.com/maticnetwork/polygon-cli/metrics" "github.com/maticnetwork/polygon-cli/rpctypes" - "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -42,10 +40,8 @@ import ( var ( batchSize uint64 windowSize int - verbosity int64 intervalStr string interval time.Duration - logFile string one = big.NewInt(1) zero = big.NewInt(0) @@ -132,7 +128,7 @@ var MonitorCmd = &cobra.Command{ return err } - return setMonitorLogLevel(verbosity) + return nil }, RunE: func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -268,9 +264,7 @@ func (ms *monitorStatus) getBlockRange(ctx context.Context, from, to *big.Int, c func init() { MonitorCmd.PersistentFlags().Uint64VarP(&batchSize, "batch-size", "b", 25, "Number of requests per batch") MonitorCmd.PersistentFlags().IntVarP(&windowSize, "window-size", "w", 25, "Number of blocks visible in the window") - MonitorCmd.PersistentFlags().Int64VarP(&verbosity, "verbosity", "v", 200, "0 - Silent\n100 Fatal\n200 Error\n300 Warning\n400 Info\n500 Debug\n600 Trace") MonitorCmd.PersistentFlags().StringVarP(&intervalStr, "interval", "i", "5s", "Amount of time between batch block rpc calls") - MonitorCmd.PersistentFlags().StringVarP(&logFile, "log-file", "l", "", "Write logs to a file (default stderr)") } func renderMonitorUI(ms *monitorStatus) error { @@ -586,34 +580,3 @@ func max(nums ...int) int { } return m } - -// setMonitorLogLevel sets the log 
level based on the flags. If the log file flag -// is set, then output will be written to the file instead of stdout. Use -// `tail -f ` to see the log output in real time. -func setMonitorLogLevel(verbosity int64) error { - if len(logFile) > 0 { - file, err := os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0664) - if err != nil { - return err - } - log.Logger = zerolog.New(file).With().Logger() - } - - if verbosity < 100 { - zerolog.SetGlobalLevel(zerolog.PanicLevel) - } else if verbosity < 200 { - zerolog.SetGlobalLevel(zerolog.FatalLevel) - } else if verbosity < 300 { - zerolog.SetGlobalLevel(zerolog.ErrorLevel) - } else if verbosity < 400 { - zerolog.SetGlobalLevel(zerolog.WarnLevel) - } else if verbosity < 500 { - zerolog.SetGlobalLevel(zerolog.InfoLevel) - } else if verbosity < 600 { - zerolog.SetGlobalLevel(zerolog.DebugLevel) - } else { - zerolog.SetGlobalLevel(zerolog.TraceLevel) - } - - return nil -} diff --git a/cmd/p2p/p2p.go b/cmd/p2p/p2p.go index f8bd33ce..799345ab 100644 --- a/cmd/p2p/p2p.go +++ b/cmd/p2p/p2p.go @@ -1,7 +1,6 @@ package p2p import ( - "github.com/rs/zerolog" "github.com/spf13/cobra" "github.com/maticnetwork/polygon-cli/cmd/p2p/crawl" @@ -9,39 +8,13 @@ import ( "github.com/maticnetwork/polygon-cli/cmd/p2p/sensor" ) -var verbosity int - var P2pCmd = &cobra.Command{ Use: "p2p", Short: "Commands related to devp2p", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - setMonitorLogLevel(verbosity) - }, } func init() { - P2pCmd.PersistentFlags().IntVarP(&verbosity, "verbosity", "v", 400, "0 - Silent\n100 Fatal\n200 Error\n300 Warning\n400 Info\n500 Debug\n600 Trace") - P2pCmd.AddCommand(sensor.SensorCmd) P2pCmd.AddCommand(crawl.CrawlCmd) P2pCmd.AddCommand(ping.PingCmd) } - -// setMonitorLogLevel sets the log level based on the flags. 
-func setMonitorLogLevel(verbosity int) { - if verbosity < 100 { - zerolog.SetGlobalLevel(zerolog.PanicLevel) - } else if verbosity < 200 { - zerolog.SetGlobalLevel(zerolog.FatalLevel) - } else if verbosity < 300 { - zerolog.SetGlobalLevel(zerolog.ErrorLevel) - } else if verbosity < 400 { - zerolog.SetGlobalLevel(zerolog.WarnLevel) - } else if verbosity < 500 { - zerolog.SetGlobalLevel(zerolog.InfoLevel) - } else if verbosity < 600 { - zerolog.SetGlobalLevel(zerolog.DebugLevel) - } else { - zerolog.SetGlobalLevel(zerolog.TraceLevel) - } -} diff --git a/cmd/p2p/ping/ping.go b/cmd/p2p/ping/ping.go index 6d195431..7426a5d7 100644 --- a/cmd/p2p/ping/ping.go +++ b/cmd/p2p/ping/ping.go @@ -19,7 +19,6 @@ type ( OutputFile string NodesFile string Listen bool - Verbosity int } pingNodeJSON struct { Record *enode.Node `json:"record"` diff --git a/cmd/p2p/sensor/sensor.go b/cmd/p2p/sensor/sensor.go index 49270157..dd9416d7 100644 --- a/cmd/p2p/sensor/sensor.go +++ b/cmd/p2p/sensor/sensor.go @@ -19,21 +19,23 @@ import ( type ( sensorParams struct { - Bootnodes string - Threads int - NetworkID uint64 - NodesFile string - Database string - ProjectID string - SensorID string - MaxPeers int - MaxConcurrentDatabaseWrites int - ShouldWriteBlocks bool - ShouldWriteTransactions bool - RevalidationInterval string - revalidationInterval time.Duration - ShouldRunPprof bool - PprofPort uint + Bootnodes string + Threads int + NetworkID uint64 + NodesFile string + Database string + ProjectID string + SensorID string + MaxPeers int + MaxConcurrentDatabaseWrites int + ShouldWriteBlocks bool + ShouldWriteBlockEvents bool + ShouldWriteTransactions bool + ShouldWriteTransactionEvents bool + RevalidationInterval string + revalidationInterval time.Duration + ShouldRunPprof bool + PprofPort uint } ) @@ -135,8 +137,12 @@ required, so other nodes in the network can discover each other.`) this will result in less chance of missing data (i.e. broken pipes) but can significantly increase memory usage.`) SensorCmd.PersistentFlags().BoolVarP(&inputSensorParams.ShouldWriteBlocks, "write-blocks", "B", true, "Whether to write blocks to the database.") + SensorCmd.PersistentFlags().BoolVar(&inputSensorParams.ShouldWriteBlockEvents, "write-block-events", true, "Whether to write block events to the database.") SensorCmd.PersistentFlags().BoolVarP(&inputSensorParams.ShouldWriteTransactions, "write-txs", "t", true, `Whether to write transactions to the database. This option could significantly +increase CPU and memory usage.`) + SensorCmd.PersistentFlags().BoolVar(&inputSensorParams.ShouldWriteTransactionEvents, "write-tx-events", true, + `Whether to write transaction events to the database. 
This option could significantly increase CPU and memory usage.`) SensorCmd.PersistentFlags().StringVarP(&inputSensorParams.RevalidationInterval, "revalidation-interval", "r", "10m", "The amount of time it takes to retry connecting to a failed peer.") SensorCmd.PersistentFlags().BoolVar(&inputSensorParams.ShouldRunPprof, "pprof", false, "Whether to run pprof.") diff --git a/cmd/p2p/sensor/sensor_util.go b/cmd/p2p/sensor/sensor_util.go index f58d36dc..6275aecd 100644 --- a/cmd/p2p/sensor/sensor_util.go +++ b/cmd/p2p/sensor/sensor_util.go @@ -52,14 +52,15 @@ func newSensor(input p2p.NodeSet, disc resolver, iters ...enode.Iterator) *senso iters: iters, inputIter: enode.IterNodes(input.Nodes()), nodeCh: make(chan *enode.Node), - db: database.NewDatastore( - context.Background(), - inputSensorParams.ProjectID, - inputSensorParams.SensorID, - inputSensorParams.MaxConcurrentDatabaseWrites, - inputSensorParams.ShouldWriteBlocks, - inputSensorParams.ShouldWriteTransactions, - ), + db: database.NewDatastore(context.Background(), database.DatastoreOptions{ + ProjectID: inputSensorParams.ProjectID, + SensorID: inputSensorParams.SensorID, + MaxConcurrentWrites: inputSensorParams.MaxConcurrentDatabaseWrites, + ShouldWriteBlocks: inputSensorParams.ShouldWriteBlocks, + ShouldWriteBlockEvents: inputSensorParams.ShouldWriteBlockEvents, + ShouldWriteTransactions: inputSensorParams.ShouldWriteTransactions, + ShouldWriteTransactionEvents: inputSensorParams.ShouldWriteTransactionEvents, + }), peers: make(map[string]struct{}), count: &p2p.MessageCount{}, } diff --git a/cmd/root.go b/cmd/root.go index 355bad42..58f485b3 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -23,6 +23,8 @@ import ( "github.com/maticnetwork/polygon-cli/cmd/fork" "github.com/maticnetwork/polygon-cli/cmd/p2p" "github.com/maticnetwork/polygon-cli/cmd/parseethwallet" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -42,15 +44,21 @@ import ( "github.com/maticnetwork/polygon-cli/cmd/wallet" ) -var cfgFile string +var ( + cfgFile string + verbosity int + pretty bool +) // rootCmd represents the base command when called without any subcommands var rootCmd = &cobra.Command{ Use: "polycli", Short: "A Swiss Army knife of blockchain tools", Long: `Polycli is a collection of tools that are meant to be useful while -building, testing, and running block chain applications. -`, +building, testing, and running block chain applications.`, + PersistentPreRun: func(cmd *cobra.Command, args []string) { + setLogLevel(verbosity, pretty) + }, } // Execute adds all child commands to the root command and sets flags appropriately. @@ -70,6 +78,8 @@ func init() { // will be global for your application. rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.polygon-cli.yaml)") + rootCmd.PersistentFlags().IntVarP(&verbosity, "verbosity", "v", 400, "0 - Silent\n100 Fatal\n200 Error\n300 Warning\n400 Info\n500 Debug\n600 Trace") + rootCmd.PersistentFlags().BoolVar(&pretty, "pretty-logs", true, "Should logs be in pretty format or JSON") // Cobra also supports local flags, which will only run // when this action is called directly. @@ -117,3 +127,29 @@ func initConfig() { fmt.Fprintln(os.Stderr, "Using config file:", viper.ConfigFileUsed()) } } + +// setLogLevel sets the log level based on the flags. 
+func setLogLevel(verbosity int, pretty bool) { + if verbosity < 100 { + zerolog.SetGlobalLevel(zerolog.PanicLevel) + } else if verbosity < 200 { + zerolog.SetGlobalLevel(zerolog.FatalLevel) + } else if verbosity < 300 { + zerolog.SetGlobalLevel(zerolog.ErrorLevel) + } else if verbosity < 400 { + zerolog.SetGlobalLevel(zerolog.WarnLevel) + } else if verbosity < 500 { + zerolog.SetGlobalLevel(zerolog.InfoLevel) + } else if verbosity < 600 { + zerolog.SetGlobalLevel(zerolog.DebugLevel) + } else { + zerolog.SetGlobalLevel(zerolog.TraceLevel) + } + + if pretty { + log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) + log.Debug().Msg("Starting logger in console mode") + } else { + log.Debug().Msg("Starting logger in JSON mode") + } +} diff --git a/cmd/rpcfuzz/README.org b/cmd/rpcfuzz/README.org index 9e1d9921..0c39b97b 100644 --- a/cmd/rpcfuzz/README.org +++ b/cmd/rpcfuzz/README.org @@ -1,15 +1,15 @@ Current list of RPCs: -- [ ] ~debug_getBadBlocks~ -- [ ] ~debug_getRawBlock~ -- [ ] ~debug_getRawHeader~ -- [ ] ~debug_getRawReceipts~ -- [ ] ~debug_getRawTransaction~ -- [ ] ~debug_traceBlockByHash~ -- [ ] ~debug_traceBlockByNumber~ -- [ ] ~debug_traceBlock~ -- [ ] ~debug_traceCall~ -- [ ] ~debug_traceTransaction~ +- [X] ~debug_getBadBlocks~ +- [X] ~debug_getRawBlock~ +- [X] ~debug_getRawHeader~ +- [X] ~debug_getRawReceipts~ +- [X] ~debug_getRawTransaction~ +- [X] ~debug_traceBlockByHash~ +- [X] ~debug_traceBlockByNumber~ +- [X] ~debug_traceBlock~ +- [X] ~debug_traceCall~ +- [X] ~debug_traceTransaction~ - [ ] ~engine_exchangeCapabilities~ - [ ] ~engine_exchangeTransitionConfigurationV1~ - [ ] ~engine_forkchoiceUpdatedV1~ @@ -62,3 +62,17 @@ Current list of RPCs: - [X] ~eth_submitWork~ - [X] ~eth_syncing~ - [X] ~eth_uninstallFilter~ + +TODO: +- Add post merge tags + - latest + - earliest + - pending + - safe + - finalized +- Add batch calls to confirm batch behaviors +- Add nonce behavioral issues + - Replacement + - Duplicate + - Too low + - Huge gap diff --git a/cmd/rpcfuzz/rpcfuzz.go b/cmd/rpcfuzz/rpcfuzz.go index 6e4e1913..2324cedc 100644 --- a/cmd/rpcfuzz/rpcfuzz.go +++ b/cmd/rpcfuzz/rpcfuzz.go @@ -28,7 +28,6 @@ import ( "github.com/maticnetwork/polygon-cli/cmd/rpcfuzz/argfuzz" "github.com/maticnetwork/polygon-cli/cmd/rpcfuzz/testreporter" "github.com/maticnetwork/polygon-cli/rpctypes" - "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/xeipuuv/gojsonschema" @@ -110,13 +109,21 @@ type ( Address string `json:"address,omitempty"` Topics []interface{} `json:"topics,omitempty"` } + + // RPCJSONError can be used to unmarshal a raw error response + RPCJSONError struct { + Code int `json:"code"` + Message string `json:"message"` + Data interface{} `json:"data,omitempty"` + } ) const ( FlagStrictValidation RPCTestFlag = 1 << iota // strict means the test is unsuitable for fuzzing / mutation because it most likely won't match - FlagErrorValidation // error validation means the result is expecte to be an error + FlagErrorValidation // error validation means the result is expected to be an error FlagRequiresUnlock // unlock means the test depends on unlocked accounts FlagEIP1559 // tests that would only exist with EIP-1559 enabled + FlagOrderDependent // This flag indicates that the particular test might fail if shuffled codeQualityPrivateKey = "42b6e34dc21598a807dc19d7784c71b2a7a01f6480dc6f58258f78e539f1a1fa" @@ -124,6 +131,8 @@ const ( defaultGasPrice = "0x1000000000" defaultMaxFeePerGas = "0x1000000000" defaultMaxPriorityFeePerGas = "0x1000000000" + + 
defaultNonceTestOffset uint64 = 0x100000000 ) var ( @@ -187,7 +196,7 @@ func setupTests(ctx context.Context, rpcClient *rpc.Client) { Flags: FlagErrorValidation | FlagStrictValidation, Method: "web3_sha3", Args: []interface{}{"68656c6c6f20776f726c64"}, - Validator: ValidateError(`cannot unmarshal hex string without 0x prefix`), + Validator: ValidateError(-32602, `cannot unmarshal hex string without 0x prefix`), }) // cast rpc --rpc-url localhost:8545 net_listening @@ -212,7 +221,7 @@ func setupTests(ctx context.Context, rpcClient *rpc.Client) { Flags: FlagErrorValidation | FlagStrictValidation, Method: "eth_protocolVersion", Args: []interface{}{}, - Validator: ValidateError(`method eth_protocolVersion does not exist`), + Validator: ValidateError(-32601, `method eth_protocolVersion does not exist`), }) // cast rpc --rpc-url localhost:8545 eth_syncing @@ -490,7 +499,7 @@ func setupTests(ctx context.Context, rpcClient *rpc.Client) { Name: "RPCTestEthSignFail", Method: "eth_sign", Args: []interface{}{testEthAddress.String(), "0xdeadbeef"}, - Validator: ValidateError(`unknown account`), + Validator: ValidateError(-32000, `unknown account`), Flags: FlagErrorValidation | FlagStrictValidation | FlagRequiresUnlock, }) @@ -519,6 +528,34 @@ func setupTests(ctx context.Context, rpcClient *rpc.Client) { Args: ArgsSignTransaction(ctx, rpcClient, &RPCTestTransactionArgs{To: testEthAddress.String(), Value: "0x123", Gas: "0x5208", Data: "0x", MaxFeePerGas: defaultMaxFeePerGas, MaxPriorityFeePerGas: defaultMaxPriorityFeePerGas}), Validator: ValidateRegexString(`^0x[[:xdigit:]]{64}$`), }) + allTests = append(allTests, &RPCTestDynamicArgs{ + Name: "RPCTestEthSendRawTransactionNonceTooLow", + Method: "eth_sendRawTransaction", + Args: ArgsSignTransactionWithNonce(ctx, rpcClient, &RPCTestTransactionArgs{To: testEthAddress.String(), Value: "0x123", Gas: "0x5208", Data: "0x", MaxFeePerGas: defaultMaxFeePerGas, MaxPriorityFeePerGas: defaultMaxPriorityFeePerGas}, 0), + Validator: ValidateError(-32000, `nonce too low`), + Flags: FlagErrorValidation | FlagStrictValidation, + }) + allTests = append(allTests, &RPCTestDynamicArgs{ + Name: "RPCTestEthSendRawTransactionNonceHigh", + Method: "eth_sendRawTransaction", + Args: ArgsSignTransactionWithNonce(ctx, rpcClient, &RPCTestTransactionArgs{To: testEthAddress.String(), Value: "0x123", Gas: "0x5208", Data: "0x", MaxFeePerGas: defaultMaxFeePerGas, MaxPriorityFeePerGas: defaultMaxPriorityFeePerGas}, testAccountNonce|defaultNonceTestOffset), + Validator: ValidateRegexString(`^0x[[:xdigit:]]{64}$`), + Flags: FlagOrderDependent, + }) + allTests = append(allTests, &RPCTestDynamicArgs{ + Name: "RPCTestEthSendRawTransactionNonceKnown", + Method: "eth_sendRawTransaction", + Args: ArgsSignTransactionWithNonce(ctx, rpcClient, &RPCTestTransactionArgs{To: testEthAddress.String(), Value: "0x123", Gas: "0x5208", Data: "0x", MaxFeePerGas: defaultMaxFeePerGas, MaxPriorityFeePerGas: defaultMaxPriorityFeePerGas}, testAccountNonce|defaultNonceTestOffset), + Validator: ValidateError(-32000, `already known`), + Flags: FlagErrorValidation | FlagStrictValidation | FlagOrderDependent, + }) + allTests = append(allTests, &RPCTestDynamicArgs{ + Name: "RPCTestEthSendRawTransactionNonceUnderpriced", + Method: "eth_sendRawTransaction", + Args: ArgsSignTransactionWithNonce(ctx, rpcClient, &RPCTestTransactionArgs{To: testEthAddress.String(), Value: "0x1234", Gas: "0x5208", Data: "0x", MaxFeePerGas: defaultMaxFeePerGas, MaxPriorityFeePerGas: defaultMaxPriorityFeePerGas}, 
testAccountNonce|defaultNonceTestOffset), + Validator: ValidateError(-32000, `replacement`), + Flags: FlagErrorValidation | FlagStrictValidation | FlagOrderDependent, + }) // cat contracts/ERC20.abi| go run main.go abi // cast call --rpc-url localhost:8545 0x6fda56c57b0acadb96ed5624ac500c0429d59429 'function name() view returns(string)' @@ -669,28 +706,28 @@ func setupTests(ctx context.Context, rpcClient *rpc.Client) { Flags: FlagErrorValidation | FlagStrictValidation, Method: "eth_getCompilers", Args: []interface{}{}, - Validator: ValidateError(`method eth_getCompilers does not exist`), + Validator: ValidateError(-32601, `method eth_getCompilers does not exist`), }) allTests = append(allTests, &RPCTestGeneric{ Name: "RPCTestEthCompileSolidity", Flags: FlagErrorValidation | FlagStrictValidation, Method: "eth_compileSolidity", Args: []interface{}{}, - Validator: ValidateError(`method eth_compileSolidity does not exist`), + Validator: ValidateError(-32601, `method eth_compileSolidity does not exist`), }) allTests = append(allTests, &RPCTestGeneric{ Name: "RPCTestEthCompileLLL", Flags: FlagErrorValidation | FlagStrictValidation, Method: "eth_compileLLL", Args: []interface{}{}, - Validator: ValidateError(`method eth_compileLLL does not exist`), + Validator: ValidateError(-32601, `method eth_compileLLL does not exist`), }) allTests = append(allTests, &RPCTestGeneric{ Name: "RPCTestEthCompileSerpent", Flags: FlagErrorValidation | FlagStrictValidation, Method: "eth_compileSerpent", Args: []interface{}{}, - Validator: ValidateError(`method eth_compileSerpent does not exist`), + Validator: ValidateError(-32601, `method eth_compileSerpent does not exist`), }) // cast rpc --rpc-url localhost:8545 eth_newFilter "{}" @@ -814,7 +851,7 @@ func setupTests(ctx context.Context, rpcClient *rpc.Client) { Method: "eth_getWork", Args: []interface{}{}, Flags: FlagErrorValidation | FlagStrictValidation, - Validator: ValidateError(`method eth_getWork does not exist`), + Validator: ValidateError(-32601, `method eth_getWork does not exist`), }) // cast rpc --rpc-url localhost:8545 eth_submitWork 0x0011223344556677 0x00112233445566778899AABBCCDDEEFF 0x00112233445566778899AABBCCDDEEFF allTests = append(allTests, &RPCTestGeneric{ @@ -822,7 +859,7 @@ func setupTests(ctx context.Context, rpcClient *rpc.Client) { Method: "eth_submitWork", Args: []interface{}{"0x0011223344556677", "0x00112233445566778899AABBCCDDEEFF", "0x00112233445566778899AABBCCDDEEFF"}, Flags: FlagErrorValidation | FlagStrictValidation, - Validator: ValidateError(`method eth_submitWork does not exist`), + Validator: ValidateError(-32601, `method eth_submitWork does not exist`), }) // cast rpc --rpc-url localhost:8545 eth_submitHashrate 0x00112233445566778899AABBCCDDEEFF00112233445566778899AABBCCDDEEFF 0x00112233445566778899AABBCCDDEEFF00112233445566778899AABBCCDDEEFF allTests = append(allTests, &RPCTestGeneric{ @@ -830,7 +867,7 @@ func setupTests(ctx context.Context, rpcClient *rpc.Client) { Method: "eth_submitHashrate", Args: []interface{}{"0x00112233445566778899AABBCCDDEEFF00112233445566778899AABBCCDDEEFF", "0x00112233445566778899AABBCCDDEEFF00112233445566778899AABBCCDDEEFF"}, Flags: FlagErrorValidation | FlagStrictValidation, - Validator: ValidateError(`method eth_submitHashrate does not exist`), + Validator: ValidateError(-32601, `method eth_submitHashrate does not exist`), }) // cast rpc --rpc-url localhost:8545 eth_feeHistory 128 latest [] @@ -868,6 +905,194 @@ func setupTests(ctx context.Context, rpcClient *rpc.Client) { Validator: 
ValidateJSONSchema(rpctypes.RPCSchemaEthProof), }) + // cat contracts/ERC20.abi| go run main.go abi + // cast abi-encode 'function mint(uint256 amount) returns()' 1000000000000000000000000 + // cast rpc --rpc-url localhost:8545 debug_traceCall '{"to": "0x6fda56c57b0acadb96ed5624ac500c0429d59429", "data":"0xa0712d6800000000000000000000000000000000000000000000d3c21bcecceda1000000"}' latest | jq '.' + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugTraceCallSimple", + Method: "debug_traceCall", + Args: []interface{}{&RPCTestTransactionArgs{To: *testContractAddress, Value: "0x0", Data: "0x06fdde03"}, "latest"}, + Validator: ValidateJSONSchema(rpctypes.RPCSchemaDebugTrace), + }) + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugTraceCallMint", + Method: "debug_traceCall", + Args: []interface{}{&RPCTestTransactionArgs{To: *testContractAddress, Value: "0x0", Data: "0xa0712d6800000000000000000000000000000000000000000000d3c21bcecceda1000000"}, "latest"}, + Validator: ValidateJSONSchema(rpctypes.RPCSchemaDebugTrace), + }) + allTests = append(allTests, &RPCTestDynamicArgs{ + Name: "RPCTestDebugTraceTransactionSimple", + Method: "debug_traceTransaction", + Args: ArgsTransactionHash(ctx, rpcClient, &RPCTestTransactionArgs{To: *testContractAddress, Value: "0x0", Data: "0x06fdde03", MaxFeePerGas: defaultMaxFeePerGas, MaxPriorityFeePerGas: defaultMaxPriorityFeePerGas, Gas: defaultGas}), + Validator: ValidateJSONSchema(rpctypes.RPCSchemaDebugTrace), + }) + allTests = append(allTests, &RPCTestDynamicArgs{ + Name: "RPCTestDebugTraceTransactionMint", + Method: "debug_traceTransaction", + Args: ArgsTransactionHash(ctx, rpcClient, &RPCTestTransactionArgs{To: *testContractAddress, Value: "0x0", Data: "0xa0712d6800000000000000000000000000000000000000000000d3c21bcecceda1000000", MaxFeePerGas: defaultMaxFeePerGas, MaxPriorityFeePerGas: defaultMaxPriorityFeePerGas, Gas: defaultGas}), + Validator: ValidateJSONSchema(rpctypes.RPCSchemaDebugTrace), + }) + + // cast rpc --rpc-url localhost:8545 debug_getRawBlock latest + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugGetRawBlockLatest", + Method: "debug_getRawBlock", + Args: []interface{}{"latest"}, + Validator: ValidateRegexString(`^0x[0-9a-f]*`), + }) + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugGetRawBlockPending", + Method: "debug_getRawBlock", + Args: []interface{}{"pending"}, + Flags: FlagErrorValidation | FlagStrictValidation, + Validator: ValidateError(-32000, `not found`), + }) + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugGetRawBlockEarliest", + Method: "debug_getRawBlock", + Args: []interface{}{"earliest"}, + Validator: ValidateRegexString(`^0x[0-9a-f]*`), + }) + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugGetRawBlockZero", + Method: "debug_getRawBlock", + Args: []interface{}{"0x0"}, + Validator: ValidateRegexString(`^0x[0-9a-f]*`), + }) + + // cast rpc --rpc-url localhost:8545 debug_getBadBlocks + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugGetBadBlocks", + Method: "debug_getBadBlocks", + Args: []interface{}{}, + Validator: ValidateJSONSchema(rpctypes.RPCSchemaBadBlocks), + }) + + // cast rpc --rpc-url localhost:8545 debug_getRawHeader latest + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugGetRawHeaderLatest", + Method: "debug_getRawHeader", + Args: []interface{}{"latest"}, + Validator: ValidateRegexString(`^0x[0-9a-f]*`), + }) + allTests = append(allTests, &RPCTestGeneric{ + Name: 
"RPCTestDebugGetRawHeaderPending", + Method: "debug_getRawHeader", + Args: []interface{}{"pending"}, + Flags: FlagErrorValidation | FlagStrictValidation, + Validator: ValidateError(-32000, `not found`), + }) + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugGetRawHeaderEarliest", + Method: "debug_getRawHeader", + Args: []interface{}{"earliest"}, + Validator: ValidateRegexString(`^0x[0-9a-f]*`), + }) + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugGetRawHeaderZero", + Method: "debug_getRawHeader", + Args: []interface{}{"0x0"}, + Validator: ValidateRegexString(`^0x[0-9a-f]*`), + }) + + // cast rpc --rpc-url localhost:8545 debug_getRawReceipts latest + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugGetRawReceiptsLatest", + Method: "debug_getRawReceipts", + Args: []interface{}{"latest"}, + Validator: ValidateJSONSchema(rpctypes.RPCSchemaHexArray), + }) + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugGetRawReceiptsPending", + Method: "debug_getRawReceipts", + Args: []interface{}{"pending"}, + Validator: ValidateJSONSchema(rpctypes.RPCSchemaHexArray), + }) + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugGetRawReceiptsEarliest", + Method: "debug_getRawReceipts", + Args: []interface{}{"earliest"}, + Validator: ValidateJSONSchema(rpctypes.RPCSchemaHexArray), + }) + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugGetRawReceiptsZero", + Method: "debug_getRawReceipts", + Args: []interface{}{"0x0"}, + Validator: ValidateJSONSchema(rpctypes.RPCSchemaHexArray), + }) + + // get raw tx + allTests = append(allTests, &RPCTestDynamicArgs{ + Name: "RPCTestDebugGetRawTransactionSimple", + Method: "debug_getRawTransaction", + Args: ArgsTransactionHash(ctx, rpcClient, &RPCTestTransactionArgs{To: *testContractAddress, Value: "0x0", Data: "0x06fdde03", MaxFeePerGas: defaultMaxFeePerGas, MaxPriorityFeePerGas: defaultMaxPriorityFeePerGas, Gas: defaultGas}), + Validator: ValidateRegexString(`^0x[0-9a-f]*`), + }) + allTests = append(allTests, &RPCTestDynamicArgs{ + Name: "RPCTestDebugGetRawTransactionMint", + Method: "debug_getRawTransaction", + Args: ArgsTransactionHash(ctx, rpcClient, &RPCTestTransactionArgs{To: *testContractAddress, Value: "0x0", Data: "0xa0712d6800000000000000000000000000000000000000000000d3c21bcecceda1000000", MaxFeePerGas: defaultMaxFeePerGas, MaxPriorityFeePerGas: defaultMaxPriorityFeePerGas, Gas: defaultGas}), + Validator: ValidateRegexString(`^0x[0-9a-f]*`), + }) + + // cast rpc --rpc-url localhost:8545 debug_traceBlockByNumber 0x0 + // cast rpc --rpc-url localhost:8545 debug_traceBlockByNumber 0x1 + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugTraceBlockByNumberZero", + Method: "debug_traceBlockByNumber", + Args: []interface{}{"0x0", nil}, + Flags: FlagErrorValidation | FlagStrictValidation, + Validator: ValidateError(-32000, `genesis is not traceable`), + }) + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugTraceBlockByNumberOne", + Method: "debug_traceBlockByNumber", + Args: []interface{}{"0x1", nil}, + Validator: ValidateJSONSchema(rpctypes.RPCSchemaDebugTraceBlock), + }) + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugTraceBlockByNumberLatest", + Method: "debug_traceBlockByNumber", + Args: []interface{}{"latest", nil}, + Validator: ValidateJSONSchema(rpctypes.RPCSchemaDebugTraceBlock), + }) + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugTraceBlockByNumberEarliest", + Method: 
"debug_traceBlockByNumber", + Args: []interface{}{"earliest", nil}, + Flags: FlagErrorValidation | FlagStrictValidation, + Validator: ValidateError(-32000, `genesis is not traceable`), + }) + allTests = append(allTests, &RPCTestGeneric{ + Name: "RPCTestDebugTraceBlockByNumberPending", + Method: "debug_traceBlockByNumber", + Args: []interface{}{"pending", nil}, + Validator: ValidateJSONSchema(rpctypes.RPCSchemaDebugTraceBlock), + }) + + // debug_traceBlockByHash + allTests = append(allTests, &RPCTestDynamicArgs{ + Name: "RPCTestDebugTraceBlockByHash", + Method: "debug_traceBlockByHash", + Args: ArgsLatestBlockHash(ctx, rpcClient, nil), + Validator: ValidateJSONSchema(rpctypes.RPCSchemaDebugTraceBlock), + }) + + // debug_traceBlock + allTests = append(allTests, &RPCTestDynamicArgs{ + Name: "RPCTestDebugTraceBlock", + Method: "debug_traceBlock", + Args: ArgsRawBlock(ctx, rpcClient, "latest", nil), + Validator: ValidateJSONSchema(rpctypes.RPCSchemaDebugTraceBlock), + }) + allTests = append(allTests, &RPCTestDynamicArgs{ + Name: "RPCTestDebugTraceBlockZero", + Method: "debug_traceBlock", + Args: ArgsRawBlock(ctx, rpcClient, "0x0", nil), + Flags: FlagErrorValidation | FlagStrictValidation, + Validator: ValidateError(-32000, `genesis is not traceable`), + }) + uniqueTests := make(map[RPCTest]struct{}) uniqueTestNames := make(map[string]struct{}) for _, v := range allTests { @@ -995,16 +1220,20 @@ func ValidateRegexString(regEx string) func(result interface{}) error { } // ValidateError will check the error message text against the provide regular expression -func ValidateError(errorMessageRegex string) func(result interface{}) error { +func ValidateError(code int, errorMessageRegex string) func(result interface{}) error { r := regexp.MustCompile(errorMessageRegex) return func(result interface{}) error { - resultError, isValid := result.(error) - if !isValid { - return fmt.Errorf("Invalid result type. 
Expected error but got %T", result)
+		fullError, err := genericResultToError(result)
+		if err != nil {
+			return err
+		}
+		if !r.MatchString(fullError.Error()) {
+			return fmt.Errorf("The regex %s failed to match result %s", errorMessageRegex, fullError.Error())
 		}
-		if !r.MatchString(resultError.Error()) {
-			return fmt.Errorf("The regex %s failed to match result %s", errorMessageRegex, resultError.Error())
+		if code != fullError.Code {
+			return fmt.Errorf("Expected error code %d but got %d", code, fullError.Code)
 		}
+		return nil
 	}
 }
@@ -1087,12 +1316,25 @@ func genericResultToTransaction(result interface{}) (*ethtypes.Transaction, stri
 	}
 	return &tx, genericHash, nil
 }
+func genericResultToError(result interface{}) (*RPCJSONError, error) {
+	jsonErrorData, err := json.Marshal(result)
+	if err != nil {
+		return nil, fmt.Errorf("Unable to json marshal error result: %w", err)
+	}
+	fullError := new(RPCJSONError)
+	err = json.Unmarshal(jsonErrorData, fullError)
+	if err != nil {
+		return nil, fmt.Errorf("Unable to unmarshal json error: %w", err)
+	}
+	return fullError, nil
+}

 // ArgsLatestBlockHash is meant to generate an argument with the
 // latest block hash for testing
 func ArgsLatestBlockHash(ctx context.Context, rpcClient *rpc.Client, extraArgs ...interface{}) func() []interface{} {
 	return func() []interface{} {
-		blockData, err := getLatestBlock(ctx, rpcClient)
+		blockData, err := getBlock(ctx, rpcClient, "latest")
 		if err != nil {
 			log.Error().Err(err).Msg("Unable to retreive latest block hash")
 			return []interface{}{"latest"}
@@ -1115,7 +1357,7 @@ // most recent block's number
 func ArgsLatestBlockNumber(ctx context.Context, rpcClient *rpc.Client, extraArgs ...interface{}) func() []interface{} {
 	return func() []interface{} {
-		blockData, err := getLatestBlock(ctx, rpcClient)
+		blockData, err := getBlock(ctx, rpcClient, "latest")
 		if err != nil {
 			log.Error().Err(err).Msg("Unable to retreive latest block hash")
 			return []interface{}{"latest"}
@@ -1135,9 +1377,28 @@
 	}
 }

-func getLatestBlock(ctx context.Context, rpcClient *rpc.Client) (map[string]interface{}, error) {
+// ArgsRawBlock will inject raw block RLP data into the arguments
+func ArgsRawBlock(ctx context.Context, rpcClient *rpc.Client, blockNumOrHash string, extraArgs ...interface{}) func() []interface{} {
+	return func() []interface{} {
+		blockData, err := getRawBlock(ctx, rpcClient, blockNumOrHash)
+		if err != nil {
+			log.Error().Err(err).Msg("Unable to retrieve raw block")
+			return []interface{}{"latest"}
+		}
+		args := []interface{}{blockData}
+		args = append(args, extraArgs...)
+		return args
+	}
+}
+
+func getBlock(ctx context.Context, rpcClient *rpc.Client, blockNumOrHash string) (map[string]interface{}, error) {
 	blockData := make(map[string]interface{})
-	err := rpcClient.CallContext(ctx, &blockData, "eth_getBlockByNumber", "latest", false)
+	err := rpcClient.CallContext(ctx, &blockData, "eth_getBlockByNumber", blockNumOrHash, false)
 	return blockData, err
+}
+func getRawBlock(ctx context.Context, rpcClient *rpc.Client, blockNumOrHash string) (string, error) {
+	var blockData string
+	err := rpcClient.CallContext(ctx, &blockData, "debug_getRawBlock", blockNumOrHash)
 	return blockData, err
 }

@@ -1221,22 +1482,27 @@ func ArgsSignTransaction(ctx context.Context, rpcClient *rpc.Client, tx *RPCTest
 	defer testAccountNonceMutex.Unlock()
 	curNonce := testAccountNonce

-	chainId := currentChainID
-
-	dft := GenericTransactionToDynamicFeeTx(tx)
-	dft.ChainID = chainId
-	dft.Nonce = curNonce
-
-	londonSigner := ethtypes.NewLondonSigner(chainId)
-	signedTx, err := ethtypes.SignNewTx(testPrivateKey, londonSigner, &dft)
+	stringTx, err := getSignedRawTx(tx, curNonce)
 	if err != nil {
-		log.Fatal().Err(err).Msg("There was an issue signing the transaction")
+		log.Fatal().Err(err).Msg("Failed to sign tx")
 	}
-	stringTx, err := signedTx.MarshalBinary()
+
+	testAccountNonce += 1
+
+	args := []interface{}{hexutil.Encode(stringTx)}
+	args = append(args, extraArgs...)
+	return args
+	}
+}
+
+// ArgsSignTransactionWithNonce can be used to manipulate the nonce
+// directly in order to create some error cases.
+func ArgsSignTransactionWithNonce(ctx context.Context, rpcClient *rpc.Client, tx *RPCTestTransactionArgs, nonce uint64, extraArgs ...interface{}) func() []interface{} {
+	return func() []interface{} {
+		stringTx, err := getSignedRawTx(tx, nonce)
 	if err != nil {
-		log.Fatal().Err(err).Msg("Unable to marshal binary for transaction")
+		log.Fatal().Err(err).Msg("Failed to sign tx")
 	}
-	testAccountNonce += 1

 	args := []interface{}{hexutil.Encode(stringTx)}
 	args = append(args, extraArgs...)
@@ -1244,6 +1510,27 @@
 	}
 }

+func getSignedRawTx(tx *RPCTestTransactionArgs, curNonce uint64) ([]byte, error) {
+	chainId := currentChainID
+
+	dft := GenericTransactionToDynamicFeeTx(tx)
+	dft.ChainID = chainId
+	dft.Nonce = curNonce
+
+	londonSigner := ethtypes.NewLondonSigner(chainId)
+	signedTx, err := ethtypes.SignNewTx(testPrivateKey, londonSigner, &dft)
+	if err != nil {
+		log.Error().Err(err).Msg("There was an issue signing the transaction")
+		return nil, err
+	}
+	stringTx, err := signedTx.MarshalBinary()
+	if err != nil {
+		log.Error().Err(err).Msg("Unable to marshal binary for transaction")
+		return nil, err
+	}
+	return stringTx, nil
+}
+
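A note on how these argument builders fit together: the Args fields hold closures, so signing is deferred until the moment a test actually executes, when testAccountNonce already reflects everything earlier tests sent. A sketch of the intended call pattern, written as a hypothetical helper as if it lived in this package (the helper name and wiring are illustrative, not part of this PR):

```go
// nonceTooLowSmoke shows how the deferred-argument closures are meant
// to be used; it mirrors RPCTestEthSendRawTransactionNonceTooLow.
func nonceTooLowSmoke(ctx context.Context, rpcClient *rpc.Client) error {
	// Nothing is signed yet: ArgsSignTransactionWithNonce only returns
	// a closure. The transaction is built when the test runs.
	argsFn := ArgsSignTransactionWithNonce(ctx, rpcClient, &RPCTestTransactionArgs{
		To:                   testEthAddress.String(),
		Value:                "0x123",
		Gas:                  "0x5208",
		Data:                 "0x",
		MaxFeePerGas:         defaultMaxFeePerGas,
		MaxPriorityFeePerGas: defaultMaxPriorityFeePerGas,
	}, 0) // a fixed nonce of 0 should be far below the funded account's live nonce

	var result interface{}
	// On a geth-like dev node this is expected to fail with code -32000
	// and a message matching `nonce too low`, which is exactly what
	// ValidateError(-32000, `nonce too low`) asserts.
	return rpcClient.CallContext(ctx, &result, "eth_sendRawTransaction", argsFn()...)
}
```

The same deferral is why the order-dependent tests work: NonceHigh queues a future-nonce transaction, and NonceKnown can then resend the identical payload to provoke `already known`.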
 // ArgsTransactionHash will execute the provided transaction and return
 // the transaction hash as an argument to be used in other tests.
 func ArgsTransactionHash(ctx context.Context, rpcClient *rpc.Client, tx *RPCTestTransactionArgs) func() []interface{} {
@@ -1291,23 +1578,11 @@ func prepareAndSendTransaction(ctx context.Context, rpcClient *rpc.Client, tx *R
 	defer testAccountNonceMutex.Unlock()
 	curNonce := testAccountNonce

-	chainId := currentChainID
-
-	dft := GenericTransactionToDynamicFeeTx(tx)
-	dft.ChainID = chainId
-	dft.Nonce = curNonce
-
-	londonSigner := ethtypes.NewLondonSigner(chainId)
-	signedTx, err := ethtypes.SignNewTx(testPrivateKey, londonSigner, &dft)
+	stringTx, err := getSignedRawTx(tx, curNonce)
 	if err != nil {
-		log.Error().Err(err).Msg("There was an issue signing the transaction")
-		return "", nil, err
-	}
-	stringTx, err := signedTx.MarshalBinary()
-	if err != nil {
-		log.Error().Err(err).Msg("Unable to marshal binary for transaction")
 		return "", nil, err
 	}
+
 	resultHash, receipt, err := executeRawTxAndWait(ctx, rpcClient, stringTx)
 	if err != nil {
 		log.Error().Err(err).Msg("Unable to execute transaction")
@@ -1320,22 +1595,30 @@
 }

 func executeRawTxAndWait(ctx context.Context, rpcClient *rpc.Client, rawTx []byte) (string, map[string]interface{}, error) {
+	rawHash, err := executeRawTx(ctx, rpcClient, rawTx)
+	if err != nil {
+		return "", nil, err
+	}
+
+	receipt, err := waitForReceipt(ctx, rpcClient, rawHash)
+	if err != nil {
+		return "", nil, err
+	}
+	return rawHash, receipt, nil
+}
+func executeRawTx(ctx context.Context, rpcClient *rpc.Client, rawTx []byte) (string, error) {
 	var result interface{}
 	err := rpcClient.CallContext(ctx, &result, "eth_sendRawTransaction", hexutil.Encode(rawTx))
 	if err != nil {
 		log.Error().Err(err).Msg("Unable to send raw transaction")
-		return "", nil, err
+		return "", err
 	}
 	rawHash, ok := result.(string)
 	if !ok {
-		return "", nil, fmt.Errorf("Invalid result type. Expected string but got %T", result)
+		return "", fmt.Errorf("Invalid result type. Expected string but got %T", result)
 	}
 	log.Info().Str("txHash", rawHash).Msg("Successfully sent transaction")
-	receipt, err := waitForReceipt(ctx, rpcClient, rawHash)
-	if err != nil {
-		return "", nil, err
-	}
-	return rawHash, receipt, nil
+	return rawHash, nil
 }

 func waitForReceipt(ctx context.Context, rpcClient *rpc.Client, txHash string) (map[string]interface{}, error) {
@@ -1405,6 +1688,14 @@ func CallRPCAndValidate(ctx context.Context, rpcClient *rpc.Client, currTest RPC
 		NumberOfTestsRan: n,
 	}
 	args := currTest.GetArgs()
 	idx := 0 // only one run happening
 	var result interface{}
@@ -1417,6 +1708,11 @@
 		currTestResult.Errors[idx] = errors.New("Method test failed: " + err.Error())
 		return currTestResult
 	}
+	if err == nil && currTest.ExpectError() {
+		currTestResult.NumberOfTestsFailed++
+		currTestResult.Errors[idx] = errors.New("Expected an error but did not get one")
+		return currTestResult
+	}

 	if currTest.ExpectError() {
 		err = currTest.Validate(err)
@@ -1499,6 +1795,10 @@ func (r *RPCTestDynamicArgs) ExpectError() bool {
 	return r.Flags&FlagErrorValidation != 0
 }

+func (r *RPCJSONError) Error() string {
+	return r.Message
+}
+
 var RPCFuzzCmd = &cobra.Command{
 	Use:   "rpcfuzz http://localhost:8545",
 	Short: "Continually run a variety of RPC calls and fuzzers",
@@ -1521,6 +1821,15 @@ in dev mode:
    --gpo.percentile 1 --gpo.maxprice 10 --gpo.ignoreprice 2 \
    --dev.gaslimit 50000000

+If we wanted to use erigon for testing, we could do something like this as well:
+
+# ./build/bin/erigon --chain dev --dev.period 5 --http --http.addr localhost \
+   --http.port 8545 \
+   --http.api 'admin,debug,web3,eth,txpool,personal,clique,miner,net' \
+   --verbosity 5 --rpc.gascap 50000000 \
+   --miner.gaslimit 10 --gpo.blocks 1 \
+   --gpo.percentile 1
+
 Once your Eth client is running and the RPC is functional, you'll need
 to transfer some amount of ether to a known account that ca be used
 for testing
@@ -1554,8 +1863,6 @@ Once this has been completed this will be the address of the contract:
 		if err != nil {
 			return err
 		}
-		log.Trace().Msg("Doing test setup")
-		setupTests(ctx, rpcClient)
 		nonce, err := GetTestAccountNonce(ctx, rpcClient)
 		if err != nil {
 			return err
@@ -1567,6 +1874,9 @@
 		testAccountNonce = nonce
 		currentChainID = chainId

+		log.Trace().Uint64("nonce", nonce).Uint64("chainid", chainId.Uint64()).Msg("Doing test setup")
+		setupTests(ctx, rpcClient)
+
 		for _, t := range allTests {
 			if !shouldRunTest(t) {
 				log.Trace().Str("name", t.GetName()).Str("method", t.GetMethod()).Msg("Skipping test")
@@ -1646,14 +1956,11 @@ func shouldRunTest(t RPCTest) bool {
 }

 func init() {
-	zerolog.SetGlobalLevel(zerolog.TraceLevel)
-	log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
-
 	flagSet := RPCFuzzCmd.PersistentFlags()

 	testPrivateHexKey = flagSet.String("private-key", codeQualityPrivateKey, "The hex encoded private key that we'll use to sending transactions")
 	testContractAddress = flagSet.String("contract-address", "0x6fda56c57b0acadb96ed5624ac500c0429d59429", "The address of a contract that can be used for testing")
-	testNamespaces = flagSet.String("namespaces", "eth,web3,net", "Comma separated list of rpc namespaces to test")
+	testNamespaces = flagSet.String("namespaces", "eth,web3,net,debug", "Comma separated list of rpc namespaces to test")
 	testFuzz = flagSet.Bool("fuzz", false, "Flag to indicate whether to fuzz input or not.")
 	testFuzzNum = flagSet.Int("fuzzn", 100, "Number of times to run the fuzzer per test.")
 	seed = flagSet.Int64("seed", 123456, "A seed for generating random values within the fuzzer")
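One consequence of the ValidateError change above: error validators now assert the JSON-RPC error code as well as the message, by round-tripping the raw result through JSON into the new RPCJSONError type. A self-contained sketch of that round-trip (the sample code and message are hypothetical values chosen for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
	"regexp"
)

// RPCJSONError mirrors the type added in rpcfuzz.go.
type RPCJSONError struct {
	Code    int         `json:"code"`
	Message string      `json:"message"`
	Data    interface{} `json:"data,omitempty"`
}

func (r *RPCJSONError) Error() string { return r.Message }

func main() {
	// A raw error result as it might arrive from the RPC layer, decoded
	// into a generic interface{}.
	var result interface{} = map[string]interface{}{
		"code":    -32601,
		"message": "method eth_getWork does not exist",
	}

	// Coerce the generic value into the typed error by marshaling and
	// unmarshaling, the same trick genericResultToError uses.
	raw, err := json.Marshal(result)
	if err != nil {
		panic(err)
	}
	var rpcErr RPCJSONError
	if err := json.Unmarshal(raw, &rpcErr); err != nil {
		panic(err)
	}

	// Validate both the code and the message, mirroring ValidateError.
	re := regexp.MustCompile(`does not exist`)
	fmt.Println(rpcErr.Code == -32601 && re.MatchString(rpcErr.Error())) // true
}
```

Checking the numeric code catches clients that reuse a message under a different code, which a message-only regex would silently accept.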
diff --git a/p2p/database/database.go b/p2p/database/database.go
index 307ff53c..0eb781d6 100644
--- a/p2p/database/database.go
+++ b/p2p/database/database.go
@@ -14,14 +14,32 @@
 // to. To use another database solution, just implement these methods and
 // update the sensor to use the new connection.
 type Database interface {
+	// WriteBlock will write both the block and the block event to the database
+	// if ShouldWriteBlocks and ShouldWriteBlockEvents return true, respectively.
 	WriteBlock(context.Context, *enode.Node, *types.Block, *big.Int)
+
+	// WriteBlockHeaders will write the block headers if ShouldWriteBlocks
+	// returns true.
 	WriteBlockHeaders(context.Context, []*types.Header)
+
+	// WriteBlockHashes will write the block hashes if ShouldWriteBlockEvents
+	// returns true.
 	WriteBlockHashes(context.Context, *enode.Node, []common.Hash)
+
+	// WriteBlockBody will write the block body if ShouldWriteBlocks returns
+	// true.
 	WriteBlockBody(context.Context, *eth.BlockBody, common.Hash)
+
+	// WriteTransactions will write both the transaction and the transaction
+	// event to the database if ShouldWriteTransactions and
+	// ShouldWriteTransactionEvents return true, respectively.
 	WriteTransactions(context.Context, *enode.Node, []*types.Transaction)
+
 	HasParentBlock(context.Context, common.Hash) bool

 	MaxConcurrentWrites() int
 	ShouldWriteBlocks() bool
+	ShouldWriteBlockEvents() bool
 	ShouldWriteTransactions() bool
+	ShouldWriteTransactionEvents() bool
 }
diff --git a/p2p/database/datastore.go b/p2p/database/datastore.go
index 51d7141c..216e9439 100644
--- a/p2p/database/datastore.go
+++ b/p2p/database/datastore.go
@@ -22,14 +22,16 @@ const (
 	transactionEventsKind = "transaction_events"
 )

-// datastoreWrapper wraps the datastore client and stores the sensorID so
-// writing block and transaction events possible.
-type datastoreWrapper struct {
-	client                  *datastore.Client
-	sensorID                string
-	maxConcurrentWrites     int
-	shouldWriteBlocks       bool
-	shouldWriteTransactions bool
+// Datastore wraps the datastore client, stores the sensorID, and other
+// information needed when writing blocks and transactions.
+type Datastore struct {
+	client                       *datastore.Client
+	sensorID                     string
+	maxConcurrentWrites          int
+	shouldWriteBlocks            bool
+	shouldWriteBlockEvents       bool
+	shouldWriteTransactions      bool
+	shouldWriteTransactionEvents bool
 }

 // DatastoreEvent can represent a peer sending the sensor a transaction hash or
@@ -90,72 +92,97 @@ type DatastoreTransaction struct {
 	Type int16
 }

+// DatastoreOptions is used when calling NewDatastore.
+type DatastoreOptions struct {
+	ProjectID                    string
+	SensorID                     string
+	MaxConcurrentWrites          int
+	ShouldWriteBlocks            bool
+	ShouldWriteBlockEvents       bool
+	ShouldWriteTransactions      bool
+	ShouldWriteTransactionEvents bool
+}
+
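Two design points in the datastore changes that follow: NewDatastore now takes a single DatastoreOptions struct rather than a growing list of positional booleans, and WriteBlock/WriteBlockBody move their read-modify-write of a block entity inside client.RunInTransaction. A minimal sketch of why the transaction matters, using the real cloud.google.com/go/datastore API but a toy Counter kind (illustrative only, not part of this PR):

```go
package datastoredemo

import (
	"context"

	"cloud.google.com/go/datastore"
)

// Counter is a toy kind standing in for DatastoreBlock.
type Counter struct {
	Value int64
}

// incrementInTx performs the same get-modify-put shape WriteBlock now
// uses. RunInTransaction retries the closure on contention, so two
// concurrent writers can no longer interleave Get and Put and silently
// drop each other's updates, which was possible with the old direct
// client.Get/client.Put pair.
func incrementInTx(ctx context.Context, client *datastore.Client, name string) error {
	key := datastore.NameKey("counter", name, nil)
	_, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
		var c Counter
		// A missing entity just starts from zero, mirroring how
		// WriteBlock tolerates a partially populated block.
		_ = tx.Get(key, &c)
		c.Value++
		_, err := tx.Put(key, &c)
		return err
	})
	return err
}
```

The options struct has the side benefit that call sites like the sensor_util.go change above stay self-documenting as more ShouldWrite* switches are added.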
 // NewDatastore connects to datastore and creates the client. This should
 // only be called once unless trying to write to different databases.
-func NewDatastore(ctx context.Context, projectID string, sensorID string, maxConcurrentWrites int, shouldWriteBlocks bool, shouldWriteTransactions bool) Database {
-	client, err := datastore.NewClient(ctx, projectID)
+func NewDatastore(ctx context.Context, opts DatastoreOptions) Database {
+	client, err := datastore.NewClient(ctx, opts.ProjectID)
 	if err != nil {
 		log.Error().Err(err).Msg("Could not connect to Datastore")
 		return nil
 	}

-	return &datastoreWrapper{
-		client:                  client,
-		sensorID:                sensorID,
-		maxConcurrentWrites:     maxConcurrentWrites,
-		shouldWriteBlocks:       shouldWriteBlocks,
-		shouldWriteTransactions: shouldWriteTransactions,
+	return &Datastore{
+		client:                       client,
+		sensorID:                     opts.SensorID,
+		maxConcurrentWrites:          opts.MaxConcurrentWrites,
+		shouldWriteBlocks:            opts.ShouldWriteBlocks,
+		shouldWriteBlockEvents:       opts.ShouldWriteBlockEvents,
+		shouldWriteTransactions:      opts.ShouldWriteTransactions,
+		shouldWriteTransactionEvents: opts.ShouldWriteTransactionEvents,
 	}
 }

 // WriteBlock writes the block and the block event to datastore.
-func (d *datastoreWrapper) WriteBlock(ctx context.Context, peer *enode.Node, block *types.Block, td *big.Int) {
-	d.writeEvent(peer, blockEventsKind, block.Hash(), blocksKind)
+func (d *Datastore) WriteBlock(ctx context.Context, peer *enode.Node, block *types.Block, td *big.Int) {
+	if d.ShouldWriteBlockEvents() {
+		d.writeEvent(peer, blockEventsKind, block.Hash(), blocksKind)
+	}
+
+	if !d.ShouldWriteBlocks() {
+		return
+	}

 	key := datastore.NameKey(blocksKind, block.Hash().Hex(), nil)

-	var dsBlock DatastoreBlock
-	// Fetch the block. We don't check the error because if some of the fields
-	// are nil we will just set them.
-	_ = d.client.Get(ctx, key, &dsBlock)

-	shouldWrite := false
+	_, err := d.client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
+		var dsBlock DatastoreBlock
+		// Fetch the block. We don't check the error because if some of the fields
+		// are nil we will just set them.
+ _ = tx.Get(key, &dsBlock) - if dsBlock.DatastoreHeader == nil { - shouldWrite = true - dsBlock.DatastoreHeader = newDatastoreHeader(block.Header()) - } + shouldWrite := false - if len(dsBlock.TotalDifficulty) == 0 { - shouldWrite = true - dsBlock.TotalDifficulty = td.String() - } + if dsBlock.DatastoreHeader == nil { + shouldWrite = true + dsBlock.DatastoreHeader = newDatastoreHeader(block.Header()) + } - if dsBlock.Transactions == nil && len(block.Transactions()) > 0 { - shouldWrite = true - if d.shouldWriteTransactions { - d.writeTransactions(ctx, block.Transactions()) + if len(dsBlock.TotalDifficulty) == 0 { + shouldWrite = true + dsBlock.TotalDifficulty = td.String() } - dsBlock.Transactions = make([]*datastore.Key, 0, len(block.Transactions())) - for _, tx := range block.Transactions() { - dsBlock.Transactions = append(dsBlock.Transactions, datastore.NameKey(transactionsKind, tx.Hash().Hex(), nil)) + if dsBlock.Transactions == nil && len(block.Transactions()) > 0 { + shouldWrite = true + if d.shouldWriteTransactions { + d.writeTransactions(ctx, block.Transactions()) + } + + dsBlock.Transactions = make([]*datastore.Key, 0, len(block.Transactions())) + for _, tx := range block.Transactions() { + dsBlock.Transactions = append(dsBlock.Transactions, datastore.NameKey(transactionsKind, tx.Hash().Hex(), nil)) + } } - } - if dsBlock.Uncles == nil && len(block.Uncles()) > 0 { - shouldWrite = true - dsBlock.Uncles = make([]*datastore.Key, 0, len(block.Uncles())) - for _, uncle := range block.Uncles() { - d.writeBlockHeader(ctx, uncle) - dsBlock.Uncles = append(dsBlock.Uncles, datastore.NameKey(blocksKind, uncle.Hash().Hex(), nil)) + if dsBlock.Uncles == nil && len(block.Uncles()) > 0 { + shouldWrite = true + dsBlock.Uncles = make([]*datastore.Key, 0, len(block.Uncles())) + for _, uncle := range block.Uncles() { + d.writeBlockHeader(ctx, uncle) + dsBlock.Uncles = append(dsBlock.Uncles, datastore.NameKey(blocksKind, uncle.Hash().Hex(), nil)) + } } - } - if !shouldWrite { - return - } + if shouldWrite { + _, err := tx.Put(key, &dsBlock) + return err + } + + return nil + }) - if _, err := d.client.Put(ctx, key, &dsBlock); err != nil { + if err != nil { log.Error().Err(err).Msg("Failed to write new block") } } @@ -164,7 +191,11 @@ func (d *datastoreWrapper) WriteBlock(ctx context.Context, peer *enode.Node, blo // write block events because headers will only be sent to the sensor when // requested. The block events will be written when the hash is received // instead. -func (d *datastoreWrapper) WriteBlockHeaders(ctx context.Context, headers []*types.Header) { +func (d *Datastore) WriteBlockHeaders(ctx context.Context, headers []*types.Header) { + if !d.ShouldWriteBlocks() { + return + } + for _, header := range headers { d.writeBlockHeader(ctx, header) } @@ -175,62 +206,99 @@ func (d *datastoreWrapper) WriteBlockHeaders(ctx context.Context, headers []*typ // requested. The block events will be written when the hash is received // instead. It will write the uncles and transactions to datastore if they // don't already exist. 
-func (d *datastoreWrapper) WriteBlockBody(ctx context.Context, body *eth.BlockBody, hash common.Hash) { +func (d *Datastore) WriteBlockBody(ctx context.Context, body *eth.BlockBody, hash common.Hash) { + if !d.ShouldWriteBlocks() { + return + } + key := datastore.NameKey(blocksKind, hash.Hex(), nil) - var block DatastoreBlock - if err := d.client.Get(ctx, key, &block); err != nil { - log.Debug().Err(err).Str("hash", hash.Hex()).Msg("Failed to fetch block when writing block body") - } + _, err := d.client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { + var block DatastoreBlock + if err := tx.Get(key, &block); err != nil { + log.Debug().Err(err).Str("hash", hash.Hex()).Msg("Failed to fetch block when writing block body") + } + + shouldWrite := false - if block.Transactions == nil && len(body.Transactions) > 0 { - if d.shouldWriteTransactions { - d.writeTransactions(ctx, body.Transactions) + if block.Transactions == nil && len(body.Transactions) > 0 { + shouldWrite = true + if d.shouldWriteTransactions { + d.writeTransactions(ctx, body.Transactions) + } + + block.Transactions = make([]*datastore.Key, 0, len(body.Transactions)) + for _, tx := range body.Transactions { + block.Transactions = append(block.Transactions, datastore.NameKey(transactionsKind, tx.Hash().Hex(), nil)) + } } - block.Transactions = make([]*datastore.Key, 0, len(body.Transactions)) - for _, tx := range body.Transactions { - block.Transactions = append(block.Transactions, datastore.NameKey(transactionsKind, tx.Hash().Hex(), nil)) + if block.Uncles == nil && len(body.Uncles) > 0 { + shouldWrite = true + block.Uncles = make([]*datastore.Key, 0, len(body.Uncles)) + for _, uncle := range body.Uncles { + d.writeBlockHeader(ctx, uncle) + block.Uncles = append(block.Uncles, datastore.NameKey(blocksKind, uncle.Hash().Hex(), nil)) + } } - } - if block.Uncles == nil && len(body.Uncles) > 0 { - block.Uncles = make([]*datastore.Key, 0, len(body.Uncles)) - for _, uncle := range body.Uncles { - d.writeBlockHeader(ctx, uncle) - block.Uncles = append(block.Uncles, datastore.NameKey(blocksKind, uncle.Hash().Hex(), nil)) + if shouldWrite { + _, err := tx.Put(key, &block) + return err } - } - if _, err := d.client.Put(ctx, key, &block); err != nil { - log.Error().Err(err).Msg("Failed to write block header") + return nil + }) + + if err != nil { + log.Error().Err(err).Msg("Failed to write block body") } } // WriteBlockHashes will write the block events to datastore. -func (d *datastoreWrapper) WriteBlockHashes(ctx context.Context, peer *enode.Node, hashes []common.Hash) { - d.writeEvents(ctx, peer, blockEventsKind, hashes, blocksKind) +func (d *Datastore) WriteBlockHashes(ctx context.Context, peer *enode.Node, hashes []common.Hash) { + if d.ShouldWriteBlockEvents() { + d.writeEvents(ctx, peer, blockEventsKind, hashes, blocksKind) + } } // WriteTransactions will write the transactions and transaction events to datastore. 
-func (d *datastoreWrapper) WriteTransactions(ctx context.Context, peer *enode.Node, txs []*types.Transaction) { - hashes := d.writeTransactions(ctx, txs) - d.writeEvents(ctx, peer, transactionEventsKind, hashes, transactionsKind) +func (d *Datastore) WriteTransactions(ctx context.Context, peer *enode.Node, txs []*types.Transaction) { + if d.ShouldWriteTransactions() { + d.writeTransactions(ctx, txs) + } + + if d.ShouldWriteTransactionEvents() { + hashes := make([]common.Hash, 0, len(txs)) + for _, tx := range txs { + hashes = append(hashes, tx.Hash()) + } + + d.writeEvents(ctx, peer, transactionEventsKind, hashes, transactionsKind) + } } -func (d *datastoreWrapper) MaxConcurrentWrites() int { +func (d *Datastore) MaxConcurrentWrites() int { return d.maxConcurrentWrites } -func (d *datastoreWrapper) ShouldWriteBlocks() bool { +func (d *Datastore) ShouldWriteBlocks() bool { return d.shouldWriteBlocks } -func (d *datastoreWrapper) ShouldWriteTransactions() bool { +func (d *Datastore) ShouldWriteBlockEvents() bool { + return d.shouldWriteBlockEvents +} + +func (d *Datastore) ShouldWriteTransactions() bool { return d.shouldWriteTransactions } -func (d *datastoreWrapper) HasParentBlock(ctx context.Context, hash common.Hash) bool { +func (d *Datastore) ShouldWriteTransactionEvents() bool { + return d.shouldWriteTransactionEvents +} + +func (d *Datastore) HasParentBlock(ctx context.Context, hash common.Hash) bool { key := datastore.NameKey(blocksKind, hash.Hex(), nil) var block DatastoreBlock err := d.client.Get(ctx, key, &block) @@ -296,7 +364,7 @@ func newDatastoreTransaction(tx *types.Transaction) *DatastoreTransaction { // writeEvent writes either a block or transaction event to datastore depending // on the provided eventKind and hashKind. -func (d *datastoreWrapper) writeEvent(peer *enode.Node, eventKind string, hash common.Hash, hashKind string) { +func (d *Datastore) writeEvent(peer *enode.Node, eventKind string, hash common.Hash, hashKind string) { key := datastore.IncompleteKey(eventKind, nil) event := DatastoreEvent{ SensorId: d.sensorID, @@ -312,7 +380,7 @@ func (d *datastoreWrapper) writeEvent(peer *enode.Node, eventKind string, hash c // writeEvents writes either block or transaction events to datastore depending // on the provided eventKind and hashKind. This is similar to writeEvent but // batches the request. -func (d *datastoreWrapper) writeEvents(ctx context.Context, peer *enode.Node, eventKind string, hashes []common.Hash, hashKind string) { +func (d *Datastore) writeEvents(ctx context.Context, peer *enode.Node, eventKind string, hashes []common.Hash, hashKind string) { keys := make([]*datastore.Key, 0, len(hashes)) events := make([]*DatastoreEvent, 0, len(hashes)) now := time.Now() @@ -336,30 +404,32 @@ func (d *datastoreWrapper) writeEvents(ctx context.Context, peer *enode.Node, ev // writeBlockHeader will write the block header to datastore if it doesn't // exist. 
-func (d *datastoreWrapper) writeBlockHeader(ctx context.Context, header *types.Header) { +func (d *Datastore) writeBlockHeader(ctx context.Context, header *types.Header) { key := datastore.NameKey(blocksKind, header.Hash().Hex(), nil) - var block DatastoreBlock - if err := d.client.Get(ctx, key, &block); err == nil && block.DatastoreHeader != nil { - return - } + _, err := d.client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { + var block DatastoreBlock + if err := tx.Get(key, &block); err == nil && block.DatastoreHeader != nil { + return nil + } - block.DatastoreHeader = newDatastoreHeader(header) + block.DatastoreHeader = newDatastoreHeader(header) + _, err := tx.Put(key, &block) + return err + }) - if _, err := d.client.Put(ctx, key, &block); err != nil { + if err != nil { log.Error().Err(err).Msg("Failed to write block header") } } // writeTransactions will write the transactions to datastore and return the // transaction hashes. -func (d *datastoreWrapper) writeTransactions(ctx context.Context, txs []*types.Transaction) []common.Hash { - hashes := make([]common.Hash, 0, len(txs)) +func (d *Datastore) writeTransactions(ctx context.Context, txs []*types.Transaction) { keys := make([]*datastore.Key, 0, len(txs)) transactions := make([]*DatastoreTransaction, 0, len(txs)) for _, tx := range txs { - hashes = append(hashes, tx.Hash()) keys = append(keys, datastore.NameKey(transactionsKind, tx.Hash().Hex(), nil)) transactions = append(transactions, newDatastoreTransaction(tx)) } @@ -367,6 +437,4 @@ func (d *datastoreWrapper) writeTransactions(ctx context.Context, txs []*types.T if _, err := d.client.PutMulti(ctx, keys, transactions); err != nil { log.Error().Err(err).Msg("Failed to write transactions") } - - return hashes } diff --git a/p2p/rlpx.go b/p2p/rlpx.go index 139b4d5e..fc910fdd 100644 --- a/p2p/rlpx.go +++ b/p2p/rlpx.go @@ -182,7 +182,7 @@ func (c *Conn) ReadAndServe(db database.Database, count *MessageCount) error { atomic.AddInt32(&count.BlockHeaders, int32(len(msg.BlockHeadersPacket))) c.logger.Trace().Msgf("Received %v BlockHeaders", len(msg.BlockHeadersPacket)) - if db != nil { + if db != nil && db.ShouldWriteBlocks() { for _, header := range msg.BlockHeadersPacket { if err := c.getParentBlock(ctx, db, header); err != nil { return err @@ -259,7 +259,7 @@ func (c *Conn) ReadAndServe(db database.Database, count *MessageCount) error { } } - if db != nil && db.ShouldWriteBlocks() && len(hashes) > 0 { + if db != nil && db.ShouldWriteBlockEvents() && len(hashes) > 0 { dbCh <- struct{}{} go func() { db.WriteBlockHashes(ctx, c.node, hashes) @@ -270,7 +270,7 @@ func (c *Conn) ReadAndServe(db database.Database, count *MessageCount) error { atomic.AddInt32(&count.Blocks, 1) c.logger.Trace().Str("hash", msg.Block.Hash().Hex()).Msg("Received NewBlock") - if db != nil && db.ShouldWriteBlocks() { + if db != nil && (db.ShouldWriteBlocks() || db.ShouldWriteBlockEvents()) { if err := c.getParentBlock(ctx, db, msg.Block.Header()); err != nil { return err } @@ -285,7 +285,7 @@ func (c *Conn) ReadAndServe(db database.Database, count *MessageCount) error { atomic.AddInt32(&count.Transactions, int32(len(*msg))) c.logger.Trace().Msgf("Received %v Transactions", len(*msg)) - if db != nil && db.ShouldWriteTransactions() { + if db != nil && (db.ShouldWriteTransactions() || db.ShouldWriteTransactionEvents()) { dbCh <- struct{}{} go func() { db.WriteTransactions(ctx, c.node, *msg) @@ -296,7 +296,7 @@ func (c *Conn) ReadAndServe(db database.Database, count *MessageCount) error { 
atomic.AddInt32(&count.Transactions, int32(len(msg.PooledTransactionsPacket))) c.logger.Trace().Msgf("Received %v PooledTransactions", len(msg.PooledTransactionsPacket)) - if db != nil && db.ShouldWriteTransactions() { + if db != nil && (db.ShouldWriteTransactions() || db.ShouldWriteTransactionEvents()) { dbCh <- struct{}{} go func() { db.WriteTransactions(ctx, c.node, msg.PooledTransactionsPacket) @@ -304,11 +304,11 @@ func (c *Conn) ReadAndServe(db database.Database, count *MessageCount) error { }() } case *NewPooledTransactionHashes: - if err := c.processNewPooledTransactionHashes(count, msg.Hashes); err != nil { + if err := c.processNewPooledTransactionHashes(db, count, msg.Hashes); err != nil { return err } case *NewPooledTransactionHashes66: - if err := c.processNewPooledTransactionHashes(count, *msg); err != nil { + if err := c.processNewPooledTransactionHashes(db, count, *msg); err != nil { return err } case *GetPooledTransactions: @@ -343,10 +343,14 @@ func (c *Conn) ReadAndServe(db database.Database, count *MessageCount) error { // processNewPooledTransactionHashes processes NewPooledTransactionHashes // messages by requesting the transaction bodies. -func (c *Conn) processNewPooledTransactionHashes(count *MessageCount, hashes []common.Hash) error { +func (c *Conn) processNewPooledTransactionHashes(db database.Database, count *MessageCount, hashes []common.Hash) error { atomic.AddInt32(&count.TransactionHashes, int32(len(hashes))) c.logger.Trace().Msgf("Received %v NewPooledTransactionHashes", len(hashes)) + if !db.ShouldWriteTransactions() { + return nil + } + req := &GetPooledTransactions{ RequestId: rand.Uint64(), GetPooledTransactionsPacket: hashes, diff --git a/rpctypes/schemas.go b/rpctypes/schemas.go index d473d4a7..44821279 100644 --- a/rpctypes/schemas.go +++ b/rpctypes/schemas.go @@ -33,3 +33,15 @@ var RPCSchemaEthAccessList string //go:embed schemas/rpcschemaethproof.json var RPCSchemaEthProof string + +//go:embed schemas/rpcschemadebugtrace.json +var RPCSchemaDebugTrace string + +//go:embed schemas/rpcschemahexarray.json +var RPCSchemaHexArray string + +//go:embed schemas/rpcschemabadblocks.json +var RPCSchemaBadBlocks string + +//go:embed schemas/rpcschemadebugblock.json +var RPCSchemaDebugTraceBlock string diff --git a/rpctypes/schemas/rpcschemabadblocks.json b/rpctypes/schemas/rpcschemabadblocks.json new file mode 100644 index 00000000..3059b207 --- /dev/null +++ b/rpctypes/schemas/rpcschemabadblocks.json @@ -0,0 +1,30 @@ +{ + "title": "Bad block array", + "type": "array", + "items": { + "title": "Bad block", + "type": "object", + "required": [ + "block", + "hash", + "rlp" + ], + "properties": { + "block": { + "title": "Block", + "type": "string", + "pattern": "^0x[0-9a-f]*$" + }, + "hash": { + "title": "Hash", + "type": "string", + "pattern": "^0x[0-9a-f]{64}$" + }, + "rlp": { + "title": "RLP", + "type": "string", + "pattern": "^0x[0-9a-f]*$" + } + } + } +} diff --git a/rpctypes/schemas/rpcschemadebugblock.json b/rpctypes/schemas/rpcschemadebugblock.json new file mode 100644 index 00000000..4dcf6638 --- /dev/null +++ b/rpctypes/schemas/rpcschemadebugblock.json @@ -0,0 +1,85 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Generated schema for Root", + "type": "array", + "items" : { + "type": "object", + "properties": { + "txHash": { + "type": "string", + "pattern": "^0x[0-9a-f]{64}$" + }, + "result" : { + "type": "object", + "properties": { + "gas": { + "type": "number" + }, + "failed": { + "type": "boolean" + }, + "returnValue": { + 
"type": "string" + }, + "structLogs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "pc": { + "type": "number" + }, + "op": { + "type": "string" + }, + "gas": { + "type": "number" + }, + "gasCost": { + "type": "number" + }, + "depth": { + "type": "number" + }, + "stack": { + "type": "array", + "items": { + "type": "string" + } + }, + "storage": { + "type": "object", + "patternProperties": { + "^0x[0-9a-f]{64}$": { + "type": "string", + "pattern": "^0x[0-9a-f]{64}$" + } + } + } + }, + "required": [ + "pc", + "op", + "gas", + "gasCost", + "depth", + "stack" + ] + } + } + }, + "required": [ + "gas", + "failed", + "returnValue", + "structLogs" + ] + } + + }, + "required": [ + "result", + "txHash" + ] + } +} diff --git a/rpctypes/schemas/rpcschemadebugtrace.json b/rpctypes/schemas/rpcschemadebugtrace.json new file mode 100644 index 00000000..af10bb7d --- /dev/null +++ b/rpctypes/schemas/rpcschemadebugtrace.json @@ -0,0 +1,68 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Generated schema for Root", + "type": "object", + "properties": { + "gas": { + "type": "number" + }, + "failed": { + "type": "boolean" + }, + "returnValue": { + "type": "string" + }, + "structLogs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "pc": { + "type": "number" + }, + "op": { + "type": "string" + }, + "gas": { + "type": "number" + }, + "gasCost": { + "type": "number" + }, + "depth": { + "type": "number" + }, + "stack": { + "type": "array", + "items": { + "type": "string" + } + }, + "storage": { + "type": "object", + "patternProperties": { + "^0x[0-9a-f]{64}$": { + "type": "string", + "pattern": "^0x[0-9a-f]{64}$" + } + } + } + }, + "required": [ + "pc", + "op", + "gas", + "gasCost", + "depth", + "stack" + ] + } + } + }, + "required": [ + "gas", + "failed", + "returnValue", + "structLogs" + ] +} diff --git a/rpctypes/schemas/rpcschemahexarray.json b/rpctypes/schemas/rpcschemahexarray.json new file mode 100644 index 00000000..6f8ca23f --- /dev/null +++ b/rpctypes/schemas/rpcschemahexarray.json @@ -0,0 +1,9 @@ +{ + "title": "Receipt array", + "type": "array", + "items": { + "title": "hex encoded bytes", + "type": "string", + "pattern": "^0x[0-9a-f]*$" + } +}