From 7359754b7a22a42b7cf3a4fa02bfce8482a224fa Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Sun, 10 Nov 2024 22:34:23 +0200 Subject: [PATCH 01/64] Added basic factory and data providers for blocks --- .../rest/common/convert/block_status.go | 22 ++ .../rest/common/convert/block_status_test.go | 39 +++ .../{http/request => common/convert}/id.go | 2 +- .../request => common/convert}/id_test.go | 2 +- engine/access/rest/http/request/get_block.go | 3 +- engine/access/rest/http/request/get_events.go | 3 +- .../rest/http/request/get_execution_result.go | 3 +- engine/access/rest/http/request/get_script.go | 3 +- .../rest/http/request/get_transaction.go | 5 +- engine/access/rest/http/request/helpers.go | 3 +- .../access/rest/http/request/transaction.go | 3 +- engine/access/rest/server.go | 4 + .../data_providers/base_provider.go | 99 ++++++ .../data_providers/blocks_data_provider.go | 282 ++++++++++++++++++ .../data_providers/data_provider.go | 7 + .../rest/websockets/data_providers/factory.go | 70 +++++ .../legacy/request/subscribe_events.go | 3 +- .../rest/websockets/models/block_models.go | 10 + engine/access/rest_api_test.go | 14 +- 19 files changed, 559 insertions(+), 18 deletions(-) create mode 100644 engine/access/rest/common/convert/block_status.go create mode 100644 engine/access/rest/common/convert/block_status_test.go rename engine/access/rest/{http/request => common/convert}/id.go (98%) rename engine/access/rest/{http/request => common/convert}/id_test.go (98%) create mode 100644 engine/access/rest/websockets/data_providers/base_provider.go create mode 100644 engine/access/rest/websockets/data_providers/blocks_data_provider.go create mode 100644 engine/access/rest/websockets/data_providers/data_provider.go create mode 100644 engine/access/rest/websockets/data_providers/factory.go create mode 100644 engine/access/rest/websockets/models/block_models.go diff --git a/engine/access/rest/common/convert/block_status.go 
b/engine/access/rest/common/convert/block_status.go new file mode 100644 index 00000000000..762508797fc --- /dev/null +++ b/engine/access/rest/common/convert/block_status.go @@ -0,0 +1,22 @@ +package convert + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" +) + +const ( + Finalized = "finalized" + Sealed = "sealed" +) + +func ParseBlockStatus(blockStatus string) (flow.BlockStatus, error) { + switch blockStatus { + case Finalized: + return flow.BlockStatusFinalized, nil + case Sealed: + return flow.BlockStatusSealed, nil + } + return flow.BlockStatusUnknown, fmt.Errorf("invalid 'block_status', must be '%s' or '%s'", Finalized, Sealed) +} diff --git a/engine/access/rest/common/convert/block_status_test.go b/engine/access/rest/common/convert/block_status_test.go new file mode 100644 index 00000000000..3313bbc788c --- /dev/null +++ b/engine/access/rest/common/convert/block_status_test.go @@ -0,0 +1,39 @@ +package convert + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/onflow/flow-go/model/flow" +) + +// TestParseBlockStatus_Invalid tests the ParseBlockStatus function with invalid inputs. +// It verifies that for each invalid block status string, the function returns an error +// matching the expected error message format. +func TestParseBlockStatus_Invalid(t *testing.T) { + tests := []string{"unknown", "pending", ""} + expectedErr := fmt.Sprintf("invalid 'block_status', must be '%s' or '%s'", Finalized, Sealed) + + for _, input := range tests { + _, err := ParseBlockStatus(input) + assert.EqualError(t, err, expectedErr) + } +} + +// TestParseBlockStatus_Valid tests the ParseBlockStatus function with valid inputs. +// It ensures that the function returns the correct flow.BlockStatus for valid status +// strings "finalized" and "sealed" without errors. 
+func TestParseBlockStatus_Valid(t *testing.T) { + tests := map[string]flow.BlockStatus{ + Finalized: flow.BlockStatusFinalized, + Sealed: flow.BlockStatusSealed, + } + + for input, expectedStatus := range tests { + status, err := ParseBlockStatus(input) + assert.NoError(t, err) + assert.Equal(t, expectedStatus, status) + } +} diff --git a/engine/access/rest/http/request/id.go b/engine/access/rest/common/convert/id.go similarity index 98% rename from engine/access/rest/http/request/id.go rename to engine/access/rest/common/convert/id.go index ba3c1200527..b0d7c2bbbf9 100644 --- a/engine/access/rest/http/request/id.go +++ b/engine/access/rest/common/convert/id.go @@ -1,4 +1,4 @@ -package request +package convert import ( "errors" diff --git a/engine/access/rest/http/request/id_test.go b/engine/access/rest/common/convert/id_test.go similarity index 98% rename from engine/access/rest/http/request/id_test.go rename to engine/access/rest/common/convert/id_test.go index 1096fdbe696..70621ddcbb5 100644 --- a/engine/access/rest/http/request/id_test.go +++ b/engine/access/rest/common/convert/id_test.go @@ -1,4 +1,4 @@ -package request +package convert import ( "testing" diff --git a/engine/access/rest/http/request/get_block.go b/engine/access/rest/http/request/get_block.go index fd74b0e4be0..903bfd6a02c 100644 --- a/engine/access/rest/http/request/get_block.go +++ b/engine/access/rest/http/request/get_block.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/convert" "github.com/onflow/flow-go/model/flow" ) @@ -122,7 +123,7 @@ func (g *GetBlockByIDs) Build(r *common.Request) error { } func (g *GetBlockByIDs) Parse(rawIds []string) error { - var ids IDs + var ids convert.IDs err := ids.Parse(rawIds) if err != nil { return err diff --git a/engine/access/rest/http/request/get_events.go b/engine/access/rest/http/request/get_events.go index 39f2ba9faef..f5aadf31369 100644 --- 
a/engine/access/rest/http/request/get_events.go +++ b/engine/access/rest/http/request/get_events.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/convert" "github.com/onflow/flow-go/model/flow" ) @@ -50,7 +51,7 @@ func (g *GetEvents) Parse(rawType string, rawStart string, rawEnd string, rawBlo } g.EndHeight = height.Flow() - var blockIDs IDs + var blockIDs convert.IDs err = blockIDs.Parse(rawBlockIDs) if err != nil { return err diff --git a/engine/access/rest/http/request/get_execution_result.go b/engine/access/rest/http/request/get_execution_result.go index cdf216766c1..8feb7aac51f 100644 --- a/engine/access/rest/http/request/get_execution_result.go +++ b/engine/access/rest/http/request/get_execution_result.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/convert" "github.com/onflow/flow-go/model/flow" ) @@ -30,7 +31,7 @@ func (g *GetExecutionResultByBlockIDs) Build(r *common.Request) error { } func (g *GetExecutionResultByBlockIDs) Parse(rawIDs []string) error { - var ids IDs + var ids convert.IDs err := ids.Parse(rawIDs) if err != nil { return err diff --git a/engine/access/rest/http/request/get_script.go b/engine/access/rest/http/request/get_script.go index de8da72cac1..01fbf996601 100644 --- a/engine/access/rest/http/request/get_script.go +++ b/engine/access/rest/http/request/get_script.go @@ -5,6 +5,7 @@ import ( "io" "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/convert" "github.com/onflow/flow-go/model/flow" ) @@ -42,7 +43,7 @@ func (g *GetScript) Parse(rawHeight string, rawID string, rawScript io.Reader) e } g.BlockHeight = height.Flow() - var id ID + var id convert.ID err = id.Parse(rawID) if err != nil { return err diff --git a/engine/access/rest/http/request/get_transaction.go 
b/engine/access/rest/http/request/get_transaction.go index 359570cd71d..ba80fad0105 100644 --- a/engine/access/rest/http/request/get_transaction.go +++ b/engine/access/rest/http/request/get_transaction.go @@ -2,6 +2,7 @@ package request import ( "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/convert" "github.com/onflow/flow-go/model/flow" ) @@ -15,14 +16,14 @@ type TransactionOptionals struct { } func (t *TransactionOptionals) Parse(r *common.Request) error { - var blockId ID + var blockId convert.ID err := blockId.Parse(r.GetQueryParam(blockIDQueryParam)) if err != nil { return err } t.BlockID = blockId.Flow() - var collectionId ID + var collectionId convert.ID err = collectionId.Parse(r.GetQueryParam(collectionIDQueryParam)) if err != nil { return err diff --git a/engine/access/rest/http/request/helpers.go b/engine/access/rest/http/request/helpers.go index 5591cc6df9b..8ad33bb1d81 100644 --- a/engine/access/rest/http/request/helpers.go +++ b/engine/access/rest/http/request/helpers.go @@ -8,6 +8,7 @@ import ( "strings" "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/convert" "github.com/onflow/flow-go/model/flow" ) @@ -60,7 +61,7 @@ func (g *GetByIDRequest) Build(r *common.Request) error { } func (g *GetByIDRequest) Parse(rawID string) error { - var id ID + var id convert.ID err := id.Parse(rawID) if err != nil { return err diff --git a/engine/access/rest/http/request/transaction.go b/engine/access/rest/http/request/transaction.go index 614d78f1e07..1ebe595da81 100644 --- a/engine/access/rest/http/request/transaction.go +++ b/engine/access/rest/http/request/transaction.go @@ -4,6 +4,7 @@ import ( "fmt" "io" + convert2 "github.com/onflow/flow-go/engine/access/rest/common/convert" "github.com/onflow/flow-go/engine/access/rest/http/models" "github.com/onflow/flow-go/engine/access/rest/util" "github.com/onflow/flow-go/engine/common/rpc/convert" @@ 
-89,7 +90,7 @@ func (t *Transaction) Parse(raw io.Reader, chain flow.Chain) error { return fmt.Errorf("invalid transaction script encoding") } - var blockID ID + var blockID convert2.ID err = blockID.Parse(tx.ReferenceBlockId) if err != nil { return fmt.Errorf("invalid reference block ID: %w", err) diff --git a/engine/access/rest/server.go b/engine/access/rest/server.go index d25044a60a5..f3c0a79194a 100644 --- a/engine/access/rest/server.go +++ b/engine/access/rest/server.go @@ -9,6 +9,7 @@ import ( "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/engine/access/rest/router" + "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers" "github.com/onflow/flow-go/engine/access/state_stream" "github.com/onflow/flow-go/engine/access/state_stream/backend" "github.com/onflow/flow-go/model/flow" @@ -48,6 +49,9 @@ func NewServer(serverAPI access.API, builder.AddWsLegacyRoutes(stateStreamApi, chain, stateStreamConfig, config.MaxRequestSize) } + // TODO: add new websocket routes + _ = data_providers.NewDataProviderFactory(logger, stateStreamConfig.EventFilterConfig, stateStreamApi, serverAPI) + c := cors.New(cors.Options{ AllowedOrigins: []string{"*"}, AllowedHeaders: []string{"*"}, diff --git a/engine/access/rest/websockets/data_providers/base_provider.go b/engine/access/rest/websockets/data_providers/base_provider.go new file mode 100644 index 00000000000..a30cf9a6887 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/base_provider.go @@ -0,0 +1,99 @@ +package data_providers + +import ( + "context" + "fmt" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/subscription" +) + +// BaseDataProvider defines the basic interface for a data provider. It provides methods +// for retrieving the provider's unique ID, topic, and a method to close the provider. +type BaseDataProvider interface { + // ID returns the unique identifier of subscription in the data provider. 
+ ID() string + // Topic returns the topic associated with the data provider. + Topic() string + // Close terminates the data provider. + Close() +} + +var _ BaseDataProvider = (*BaseDataProviderImpl)(nil) + +// BaseDataProviderImpl is the concrete implementation of the BaseDataProvider interface. +// It holds common objects for the provider. +type BaseDataProviderImpl struct { + topic string + + cancel context.CancelFunc + ctx context.Context + + api access.API + send chan<- interface{} + subscription subscription.Subscription +} + +// NewBaseDataProviderImpl creates a new instance of BaseDataProviderImpl. +func NewBaseDataProviderImpl( + ctx context.Context, + cancel context.CancelFunc, + api access.API, + topic string, + send chan<- interface{}, + subscription subscription.Subscription, +) *BaseDataProviderImpl { + return &BaseDataProviderImpl{ + topic: topic, + + ctx: ctx, + cancel: cancel, + + api: api, + send: send, + subscription: subscription, + } +} + +// ID returns the unique identifier of the data provider's subscription. +func (b *BaseDataProviderImpl) ID() string { + return b.subscription.ID() +} + +// Topic returns the topic associated with the data provider. +func (b *BaseDataProviderImpl) Topic() string { + return b.topic +} + +// Close terminates the data provider. 
+func (b *BaseDataProviderImpl) Close() { + b.cancel() +} + +// TODO: refactor rpc version of HandleSubscription and use it +func HandleSubscription[T any](ctx context.Context, sub subscription.Subscription, handleResponse func(resp T) error) error { + for { + select { + case v, ok := <-sub.Channel(): + if !ok { + if sub.Err() != nil { + return fmt.Errorf("stream encountered an error: %w", sub.Err()) + } + return nil + } + + resp, ok := v.(T) + if !ok { + return fmt.Errorf("unexpected subscription response type: %T", v) + } + + err := handleResponse(resp) + if err != nil { + return err + } + case <-ctx.Done(): + // context closed, subscription closed + return nil + } + } +} diff --git a/engine/access/rest/websockets/data_providers/blocks_data_provider.go b/engine/access/rest/websockets/data_providers/blocks_data_provider.go new file mode 100644 index 00000000000..f5a24139a87 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/blocks_data_provider.go @@ -0,0 +1,282 @@ +package data_providers + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/common/convert" + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/model/flow" +) + +// BlocksFromStartBlockIDArgs contains the arguments required for subscribing to blocks +// starting from a specific block ID. +type BlocksFromStartBlockIDArgs struct { + StartBlockID flow.Identifier + BlockStatus flow.BlockStatus +} + +// BlocksFromStartBlockIDProvider is responsible for providing blocks starting +// from a specific block ID. +type BlocksFromStartBlockIDProvider struct { + *BaseDataProviderImpl + + logger zerolog.Logger + args BlocksFromStartBlockIDArgs +} + +var _ DataProvider = (*BlocksFromStartBlockIDProvider)(nil) + +// NewBlocksFromStartBlockIDProvider creates a new instance of BlocksFromStartBlockIDProvider. 
+func NewBlocksFromStartBlockIDProvider( + ctx context.Context, + logger zerolog.Logger, + api access.API, + topic string, + arguments map[string]string, + send chan<- interface{}, +) (*BlocksFromStartBlockIDProvider, error) { + ctx, cancel := context.WithCancel(ctx) + + p := &BlocksFromStartBlockIDProvider{ + logger: logger.With().Str("component", "block-from-start-block-id-provider").Logger(), + } + + // Validate arguments passed to the provider. + err := p.validateArguments(arguments) + if err != nil { + return nil, fmt.Errorf("invalid arguments: %w", err) + } + + // Subscribe to blocks from the start block ID with the specified block status. + subscription := p.api.SubscribeBlocksFromStartBlockID(ctx, p.args.StartBlockID, p.args.BlockStatus) + p.BaseDataProviderImpl = NewBaseDataProviderImpl( + ctx, + cancel, + api, + topic, + send, + subscription, + ) + + return p, nil +} + +// Run starts processing the subscription for blocks from the start block ID and handles responses. +// +// No errors are expected during normal operations. +func (p *BlocksFromStartBlockIDProvider) Run() error { + return HandleSubscription(p.ctx, p.subscription, handleResponse(p.send, p.args.BlockStatus)) +} + +// validateArguments checks and validates the arguments passed to the provider. +// +// No errors are expected during normal operations. 
+func (p *BlocksFromStartBlockIDProvider) validateArguments(arguments map[string]string) error { + // Check for block_status argument and validate + if blockStatusIn, ok := arguments["block_status"]; ok { + blockStatus, err := convert.ParseBlockStatus(blockStatusIn) + if err != nil { + return err + } + p.args.BlockStatus = blockStatus + } else { + return fmt.Errorf("'block_status' must be provided") + } + + // Check for start_block_id argument and validate + if startBlockIDIn, ok := arguments["start_block_id"]; ok { + var startBlockID convert.ID + err := startBlockID.Parse(startBlockIDIn) + if err != nil { + return err + } + p.args.StartBlockID = startBlockID.Flow() + } else { + return fmt.Errorf("'start_block_id' must be provided") + } + + return nil +} + +// BlocksFromBlockHeightArgs contains the arguments required for subscribing to blocks +// starting from a specific block height. +type BlocksFromBlockHeightArgs struct { + StartBlockHeight uint64 + BlockStatus flow.BlockStatus +} + +// BlocksFromStartBlockHeightProvider is responsible for providing blocks starting +// from a specific block height. +type BlocksFromStartBlockHeightProvider struct { + *BaseDataProviderImpl + + logger zerolog.Logger + args BlocksFromBlockHeightArgs +} + +var _ DataProvider = (*BlocksFromStartBlockHeightProvider)(nil) + +// NewBlocksFromStartBlockHeightProvider creates a new instance of BlocksFromStartBlockHeightProvider. +func NewBlocksFromStartBlockHeightProvider( + ctx context.Context, + logger zerolog.Logger, + api access.API, + topic string, + arguments map[string]string, + send chan<- interface{}, +) (*BlocksFromStartBlockHeightProvider, error) { + ctx, cancel := context.WithCancel(ctx) + + p := &BlocksFromStartBlockHeightProvider{ + logger: logger.With().Str("component", "block-from-start-block-height-provider").Logger(), + } + + // Validate arguments passed to the provider. 
+ err := p.validateArguments(arguments) + if err != nil { + return nil, fmt.Errorf("invalid arguments: %w", err) + } + + // Subscribe to blocks from the start block height with the specified block status. + subscription := p.api.SubscribeBlocksFromStartHeight(ctx, p.args.StartBlockHeight, p.args.BlockStatus) + p.BaseDataProviderImpl = NewBaseDataProviderImpl( + ctx, + cancel, + api, + topic, + send, + subscription, + ) + + return p, nil +} + +// Run starts processing the subscription for blocks from the start block height and handles responses. +// +// No errors are expected during normal operations. +func (p *BlocksFromStartBlockHeightProvider) Run() error { + return HandleSubscription(p.ctx, p.subscription, handleResponse(p.send, p.args.BlockStatus)) +} + +// validateArguments checks and validates the arguments passed to the provider. +// +// No errors are expected during normal operations. +func (p *BlocksFromStartBlockHeightProvider) validateArguments(arguments map[string]string) error { + // Check for block_status argument and validate + if blockStatusIn, ok := arguments["block_status"]; ok { + blockStatus, err := convert.ParseBlockStatus(blockStatusIn) + if err != nil { + return err + } + p.args.BlockStatus = blockStatus + } else { + return fmt.Errorf("'block_status' must be provided") + } + + if startBlockHeightIn, ok := arguments["start_block_height"]; ok { + var err error + p.args.StartBlockHeight, err = util.ToUint64(startBlockHeightIn) + if err != nil { + return fmt.Errorf("invalid start height: %w", err) + } + } else { + return fmt.Errorf("'start_block_height' must be provided") + } + + return nil +} + +// BlocksFromLatestArgs contains the arguments required for subscribing to blocks +// starting from latest block. +type BlocksFromLatestArgs struct { + BlockStatus flow.BlockStatus +} + +// BlocksFromLatestProvider is responsible for providing blocks starting from latest block. 
+type BlocksFromLatestProvider struct { + *BaseDataProviderImpl + + logger zerolog.Logger + args BlocksFromLatestArgs +} + +var _ DataProvider = (*BlocksFromLatestProvider)(nil) + +// NewBlocksFromLatestProvider creates a new BlocksFromLatestProvider. +func NewBlocksFromLatestProvider( + ctx context.Context, + logger zerolog.Logger, + api access.API, + topic string, + arguments map[string]string, + send chan<- interface{}, +) (*BlocksFromLatestProvider, error) { + ctx, cancel := context.WithCancel(ctx) + + p := &BlocksFromLatestProvider{ + logger: logger.With().Str("component", "block-from-latest-provider").Logger(), + } + + // Validate arguments passed to the provider. + err := p.validateArguments(arguments) + if err != nil { + return nil, fmt.Errorf("invalid arguments: %w", err) + } + + // Subscribe to blocks from the latest block height with the specified block status. + subscription := p.api.SubscribeBlocksFromLatest(ctx, p.args.BlockStatus) + p.BaseDataProviderImpl = NewBaseDataProviderImpl( + ctx, + cancel, + api, + topic, + send, + subscription, + ) + + return p, nil +} + +// Run starts processing the subscription for blocks from the latest block and handles responses. +// +// No errors are expected during normal operations. +func (p *BlocksFromLatestProvider) Run() error { + return HandleSubscription(p.ctx, p.subscription, handleResponse(p.send, p.args.BlockStatus)) +} + +// validateArguments checks and validates the arguments passed to the provider. +// +// No errors are expected during normal operations. 
+func (p *BlocksFromLatestProvider) validateArguments(arguments map[string]string) error { + // Check for block_status argument and validate + if blockStatusIn, ok := arguments["block_status"]; ok { + blockStatus, err := convert.ParseBlockStatus(blockStatusIn) + if err != nil { + return err + } + p.args.BlockStatus = blockStatus + } else { + return fmt.Errorf("'block_status' must be provided") + } + + return nil +} + +// handleResponse processes a block and sends the formatted response. +// +// No errors are expected during normal operations. +func handleResponse(send chan<- interface{}, blockStatus flow.BlockStatus) func(*flow.Block) error { + return func(block *flow.Block) error { + send <- &models.BlockMessageResponse{ + Block: block, + BlockStatus: blockStatus, + } + + return nil + } +} diff --git a/engine/access/rest/websockets/data_providers/data_provider.go b/engine/access/rest/websockets/data_providers/data_provider.go new file mode 100644 index 00000000000..ba7dd815c9b --- /dev/null +++ b/engine/access/rest/websockets/data_providers/data_provider.go @@ -0,0 +1,7 @@ +package data_providers + +type DataProvider interface { + BaseDataProvider + + Run() error +} diff --git a/engine/access/rest/websockets/data_providers/factory.go b/engine/access/rest/websockets/data_providers/factory.go new file mode 100644 index 00000000000..5b9d0bad1ba --- /dev/null +++ b/engine/access/rest/websockets/data_providers/factory.go @@ -0,0 +1,70 @@ +package data_providers + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/state_stream" +) + +const ( + EventsTopic = "events" + AccountStatusesTopic = "account_statuses" + BlockHeadersTopic = "block_headers" + BlockDigestsTopic = "block_digests" + TransactionStatusesTopic = "transaction_statuses" + + BlocksFromStartBlockIDTopic = "blocks_from_start_block_id" + BlocksFromStartBlockHeightTopic = "blocks_from_start_block_height" + 
BlocksFromLatestTopic = "blocks_from_latest" +) + +type DataProviderFactory struct { + logger zerolog.Logger + eventFilterConfig state_stream.EventFilterConfig + + stateStreamApi state_stream.API + accessApi access.API +} + +func NewDataProviderFactory( + logger zerolog.Logger, + eventFilterConfig state_stream.EventFilterConfig, + stateStreamApi state_stream.API, + accessApi access.API, +) *DataProviderFactory { + return &DataProviderFactory{ + logger: logger, + eventFilterConfig: eventFilterConfig, + stateStreamApi: stateStreamApi, + accessApi: accessApi, + } +} + +func (s *DataProviderFactory) NewDataProvider( + ctx context.Context, + topic string, + arguments map[string]string, + ch chan<- interface{}, +) (DataProvider, error) { + switch topic { + case BlocksFromStartBlockIDTopic: + return NewBlocksFromStartBlockIDProvider(ctx, s.logger, s.accessApi, topic, arguments, ch) + case BlocksFromStartBlockHeightTopic: + return NewBlocksFromStartBlockHeightProvider(ctx, s.logger, s.accessApi, topic, arguments, ch) + case BlocksFromLatestTopic: + return NewBlocksFromLatestProvider(ctx, s.logger, s.accessApi, topic, arguments, ch) + // TODO: Implemented handlers for each topic should be added in respective case + case EventsTopic, + AccountStatusesTopic, + BlockHeadersTopic, + BlockDigestsTopic, + TransactionStatusesTopic: + return nil, fmt.Errorf("topic \"%s\" not implemented yet", topic) + default: + return nil, fmt.Errorf("unsupported topic \"%s\"", topic) + } +} diff --git a/engine/access/rest/websockets/legacy/request/subscribe_events.go b/engine/access/rest/websockets/legacy/request/subscribe_events.go index 5b2574ccc82..7f02ad1c10e 100644 --- a/engine/access/rest/websockets/legacy/request/subscribe_events.go +++ b/engine/access/rest/websockets/legacy/request/subscribe_events.go @@ -5,6 +5,7 @@ import ( "strconv" "github.com/onflow/flow-go/engine/access/rest/common" + "github.com/onflow/flow-go/engine/access/rest/common/convert" 
"github.com/onflow/flow-go/engine/access/rest/http/request" "github.com/onflow/flow-go/model/flow" ) @@ -56,7 +57,7 @@ func (g *SubscribeEvents) Parse( rawContracts []string, rawHeartbeatInterval string, ) error { - var startBlockID request.ID + var startBlockID convert.ID err := startBlockID.Parse(rawStartBlockID) if err != nil { return err diff --git a/engine/access/rest/websockets/models/block_models.go b/engine/access/rest/websockets/models/block_models.go new file mode 100644 index 00000000000..f363038808b --- /dev/null +++ b/engine/access/rest/websockets/models/block_models.go @@ -0,0 +1,10 @@ +package models + +import ( + "github.com/onflow/flow-go/model/flow" +) + +type BlockMessageResponse struct { + Block *flow.Block `json:"block"` + BlockStatus flow.BlockStatus `json:"block_status"` +} diff --git a/engine/access/rest_api_test.go b/engine/access/rest_api_test.go index 64dab073c1d..9c8eaa20881 100644 --- a/engine/access/rest_api_test.go +++ b/engine/access/rest_api_test.go @@ -22,7 +22,7 @@ import ( accessmock "github.com/onflow/flow-go/engine/access/mock" "github.com/onflow/flow-go/engine/access/rest" "github.com/onflow/flow-go/engine/access/rest/common" - "github.com/onflow/flow-go/engine/access/rest/http/request" + "github.com/onflow/flow-go/engine/access/rest/common/convert" "github.com/onflow/flow-go/engine/access/rest/router" "github.com/onflow/flow-go/engine/access/rpc" "github.com/onflow/flow-go/engine/access/rpc/backend" @@ -230,8 +230,8 @@ func TestRestAPI(t *testing.T) { func (suite *RestAPITestSuite) TestGetBlock() { - testBlockIDs := make([]string, request.MaxIDsLength) - testBlocks := make([]*flow.Block, request.MaxIDsLength) + testBlockIDs := make([]string, convert.MaxIDsLength) + testBlocks := make([]*flow.Block, convert.MaxIDsLength) for i := range testBlockIDs { collections := unittest.CollectionListFixture(1) block := unittest.BlockWithGuaranteesFixture( @@ -281,7 +281,7 @@ func (suite *RestAPITestSuite) TestGetBlock() { actualBlocks, 
resp, err := client.BlocksApi.BlocksIdGet(ctx, blockIDSlice, optionsForBlockByID()) require.NoError(suite.T(), err) assert.Equal(suite.T(), http.StatusOK, resp.StatusCode) - assert.Len(suite.T(), actualBlocks, request.MaxIDsLength) + assert.Len(suite.T(), actualBlocks, convert.MaxIDsLength) for i, b := range testBlocks { assert.Equal(suite.T(), b.ID().String(), actualBlocks[i].Header.Id) } @@ -379,13 +379,13 @@ func (suite *RestAPITestSuite) TestGetBlock() { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - blockIDs := make([]string, request.MaxIDsLength+1) + blockIDs := make([]string, convert.MaxIDsLength+1) copy(blockIDs, testBlockIDs) - blockIDs[request.MaxIDsLength] = unittest.IdentifierFixture().String() + blockIDs[convert.MaxIDsLength] = unittest.IdentifierFixture().String() blockIDSlice := []string{strings.Join(blockIDs, ",")} _, resp, err := client.BlocksApi.BlocksIdGet(ctx, blockIDSlice, optionsForBlockByID()) - assertError(suite.T(), resp, err, http.StatusBadRequest, fmt.Sprintf("at most %d IDs can be requested at a time", request.MaxIDsLength)) + assertError(suite.T(), resp, err, http.StatusBadRequest, fmt.Sprintf("at most %d IDs can be requested at a time", convert.MaxIDsLength)) }) suite.Run("GetBlockByID with one non-existing block ID", func() { From 9f9154015655a31a2726755c566a7b20e6c3f7fd Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Mon, 11 Nov 2024 10:40:34 +0200 Subject: [PATCH 02/64] Added godoc for DataProvider interface --- .../websockets/data_providers/blocks_data_provider_test.go | 0 engine/access/rest/websockets/data_providers/data_provider.go | 4 ++++ 2 files changed, 4 insertions(+) create mode 100644 engine/access/rest/websockets/data_providers/blocks_data_provider_test.go diff --git a/engine/access/rest/websockets/data_providers/blocks_data_provider_test.go b/engine/access/rest/websockets/data_providers/blocks_data_provider_test.go new file mode 100644 index 00000000000..e69de29bb2d diff 
--git a/engine/access/rest/websockets/data_providers/data_provider.go b/engine/access/rest/websockets/data_providers/data_provider.go index ba7dd815c9b..11a8bc16c38 100644 --- a/engine/access/rest/websockets/data_providers/data_provider.go +++ b/engine/access/rest/websockets/data_providers/data_provider.go @@ -1,7 +1,11 @@ package data_providers +// The DataProvider is the interface abstracts of the actual subscriptions used by the WebSocketCollector. type DataProvider interface { BaseDataProvider + // Run starts processing the subscription and handles responses. + // + // No errors are expected during normal operations. Run() error } From 7502a798ab9f74b3a5f482408259428716f94a2b Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Mon, 11 Nov 2024 11:30:02 +0200 Subject: [PATCH 03/64] Added godoc for factory --- .../rest/websockets/data_providers/factory.go | 22 +++++++++++++++++++ .../websockets/data_providers/factory_test.go | 0 2 files changed, 22 insertions(+) create mode 100644 engine/access/rest/websockets/data_providers/factory_test.go diff --git a/engine/access/rest/websockets/data_providers/factory.go b/engine/access/rest/websockets/data_providers/factory.go index 5b9d0bad1ba..d8285743d5e 100644 --- a/engine/access/rest/websockets/data_providers/factory.go +++ b/engine/access/rest/websockets/data_providers/factory.go @@ -10,6 +10,8 @@ import ( "github.com/onflow/flow-go/engine/access/state_stream" ) +// Constants defining various topic names used to specify different types of +// data providers. const ( EventsTopic = "events" AccountStatusesTopic = "account_statuses" @@ -22,6 +24,9 @@ const ( BlocksFromLatestTopic = "blocks_from_latest" ) +// DataProviderFactory is responsible for creating data providers based on the +// requested topic. It manages access to logging, state stream configuration, +// and relevant APIs needed to retrieve data. 
type DataProviderFactory struct { logger zerolog.Logger eventFilterConfig state_stream.EventFilterConfig @@ -30,6 +35,13 @@ type DataProviderFactory struct { accessApi access.API } +// NewDataProviderFactory creates a new DataProviderFactory +// +// Parameters: +// - logger: Used for logging within the data providers. +// - eventFilterConfig: Configuration for filtering events from state streams. +// - stateStreamApi: API for accessing data from the Flow state stream API. +// - accessApi: API for accessing data from the Flow Access API. func NewDataProviderFactory( logger zerolog.Logger, eventFilterConfig state_stream.EventFilterConfig, @@ -44,6 +56,16 @@ func NewDataProviderFactory( } } +// NewDataProvider creates a new data provider based on the specified topic +// and configuration parameters. +// +// Parameters: +// - ctx: Context for managing request lifetime and cancellation. +// - topic: The topic for which a data provider is to be created. +// - arguments: Configuration arguments for the data provider. +// - ch: Channel to which the data provider sends data. +// +// No errors are expected during normal operations. 
func (s *DataProviderFactory) NewDataProvider( ctx context.Context, topic string, diff --git a/engine/access/rest/websockets/data_providers/factory_test.go b/engine/access/rest/websockets/data_providers/factory_test.go new file mode 100644 index 00000000000..e69de29bb2d From c30b1a00adfa3a1131a5041e89eeba12c933a0a6 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Tue, 12 Nov 2024 10:45:02 +0200 Subject: [PATCH 04/64] Refacored BlockProvider, added part of unit tests to provider factory and block provider --- .../{convert => parser}/block_status.go | 2 +- .../{convert => parser}/block_status_test.go | 2 +- .../rest/common/{convert => parser}/id.go | 2 +- .../common/{convert => parser}/id_test.go | 2 +- engine/access/rest/http/request/get_block.go | 4 +- engine/access/rest/http/request/get_events.go | 4 +- .../rest/http/request/get_execution_result.go | 4 +- engine/access/rest/http/request/get_script.go | 4 +- .../rest/http/request/get_transaction.go | 6 +- engine/access/rest/http/request/helpers.go | 4 +- .../access/rest/http/request/transaction.go | 2 +- engine/access/rest/http/routes/events.go | 1 - .../data_providers/blocks_data_provider.go | 282 ------------------ .../blocks_data_provider_test.go | 0 .../data_providers/blocks_provider.go | 145 +++++++++ .../data_providers/blocks_provider_test.go | 147 +++++++++ 16 files changed, 310 insertions(+), 301 deletions(-) rename engine/access/rest/common/{convert => parser}/block_status.go (96%) rename engine/access/rest/common/{convert => parser}/block_status_test.go (98%) rename engine/access/rest/common/{convert => parser}/id.go (98%) rename engine/access/rest/common/{convert => parser}/id_test.go (98%) delete mode 100644 engine/access/rest/websockets/data_providers/blocks_data_provider.go delete mode 100644 engine/access/rest/websockets/data_providers/blocks_data_provider_test.go create mode 100644 engine/access/rest/websockets/data_providers/blocks_provider.go create mode 100644 
engine/access/rest/websockets/data_providers/blocks_provider_test.go diff --git a/engine/access/rest/common/convert/block_status.go b/engine/access/rest/common/parser/block_status.go similarity index 96% rename from engine/access/rest/common/convert/block_status.go rename to engine/access/rest/common/parser/block_status.go index 762508797fc..a1b6e8a7b46 100644 --- a/engine/access/rest/common/convert/block_status.go +++ b/engine/access/rest/common/parser/block_status.go @@ -1,4 +1,4 @@ -package convert +package parser import ( "fmt" diff --git a/engine/access/rest/common/convert/block_status_test.go b/engine/access/rest/common/parser/block_status_test.go similarity index 98% rename from engine/access/rest/common/convert/block_status_test.go rename to engine/access/rest/common/parser/block_status_test.go index 3313bbc788c..0bbaa30c56b 100644 --- a/engine/access/rest/common/convert/block_status_test.go +++ b/engine/access/rest/common/parser/block_status_test.go @@ -1,4 +1,4 @@ -package convert +package parser import ( "fmt" diff --git a/engine/access/rest/common/convert/id.go b/engine/access/rest/common/parser/id.go similarity index 98% rename from engine/access/rest/common/convert/id.go rename to engine/access/rest/common/parser/id.go index b0d7c2bbbf9..7b1436b4761 100644 --- a/engine/access/rest/common/convert/id.go +++ b/engine/access/rest/common/parser/id.go @@ -1,4 +1,4 @@ -package convert +package parser import ( "errors" diff --git a/engine/access/rest/common/convert/id_test.go b/engine/access/rest/common/parser/id_test.go similarity index 98% rename from engine/access/rest/common/convert/id_test.go rename to engine/access/rest/common/parser/id_test.go index 70621ddcbb5..a663c915e7a 100644 --- a/engine/access/rest/common/convert/id_test.go +++ b/engine/access/rest/common/parser/id_test.go @@ -1,4 +1,4 @@ -package convert +package parser import ( "testing" diff --git a/engine/access/rest/http/request/get_block.go b/engine/access/rest/http/request/get_block.go 
index 903bfd6a02c..972cd2ee97b 100644 --- a/engine/access/rest/http/request/get_block.go +++ b/engine/access/rest/http/request/get_block.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/onflow/flow-go/engine/access/rest/common" - "github.com/onflow/flow-go/engine/access/rest/common/convert" + "github.com/onflow/flow-go/engine/access/rest/common/parser" "github.com/onflow/flow-go/model/flow" ) @@ -123,7 +123,7 @@ func (g *GetBlockByIDs) Build(r *common.Request) error { } func (g *GetBlockByIDs) Parse(rawIds []string) error { - var ids convert.IDs + var ids parser.IDs err := ids.Parse(rawIds) if err != nil { return err diff --git a/engine/access/rest/http/request/get_events.go b/engine/access/rest/http/request/get_events.go index f5aadf31369..c864cf24a47 100644 --- a/engine/access/rest/http/request/get_events.go +++ b/engine/access/rest/http/request/get_events.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/onflow/flow-go/engine/access/rest/common" - "github.com/onflow/flow-go/engine/access/rest/common/convert" + "github.com/onflow/flow-go/engine/access/rest/common/parser" "github.com/onflow/flow-go/model/flow" ) @@ -51,7 +51,7 @@ func (g *GetEvents) Parse(rawType string, rawStart string, rawEnd string, rawBlo } g.EndHeight = height.Flow() - var blockIDs convert.IDs + var blockIDs parser.IDs err = blockIDs.Parse(rawBlockIDs) if err != nil { return err diff --git a/engine/access/rest/http/request/get_execution_result.go b/engine/access/rest/http/request/get_execution_result.go index 8feb7aac51f..4947cd8f07f 100644 --- a/engine/access/rest/http/request/get_execution_result.go +++ b/engine/access/rest/http/request/get_execution_result.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/onflow/flow-go/engine/access/rest/common" - "github.com/onflow/flow-go/engine/access/rest/common/convert" + "github.com/onflow/flow-go/engine/access/rest/common/parser" "github.com/onflow/flow-go/model/flow" ) @@ -31,7 +31,7 @@ func (g *GetExecutionResultByBlockIDs) Build(r *common.Request) error { 
} func (g *GetExecutionResultByBlockIDs) Parse(rawIDs []string) error { - var ids convert.IDs + var ids parser.IDs err := ids.Parse(rawIDs) if err != nil { return err diff --git a/engine/access/rest/http/request/get_script.go b/engine/access/rest/http/request/get_script.go index 01fbf996601..a01a025465a 100644 --- a/engine/access/rest/http/request/get_script.go +++ b/engine/access/rest/http/request/get_script.go @@ -5,7 +5,7 @@ import ( "io" "github.com/onflow/flow-go/engine/access/rest/common" - "github.com/onflow/flow-go/engine/access/rest/common/convert" + "github.com/onflow/flow-go/engine/access/rest/common/parser" "github.com/onflow/flow-go/model/flow" ) @@ -43,7 +43,7 @@ func (g *GetScript) Parse(rawHeight string, rawID string, rawScript io.Reader) e } g.BlockHeight = height.Flow() - var id convert.ID + var id parser.ID err = id.Parse(rawID) if err != nil { return err diff --git a/engine/access/rest/http/request/get_transaction.go b/engine/access/rest/http/request/get_transaction.go index ba80fad0105..0d5df1e541e 100644 --- a/engine/access/rest/http/request/get_transaction.go +++ b/engine/access/rest/http/request/get_transaction.go @@ -2,7 +2,7 @@ package request import ( "github.com/onflow/flow-go/engine/access/rest/common" - "github.com/onflow/flow-go/engine/access/rest/common/convert" + "github.com/onflow/flow-go/engine/access/rest/common/parser" "github.com/onflow/flow-go/model/flow" ) @@ -16,14 +16,14 @@ type TransactionOptionals struct { } func (t *TransactionOptionals) Parse(r *common.Request) error { - var blockId convert.ID + var blockId parser.ID err := blockId.Parse(r.GetQueryParam(blockIDQueryParam)) if err != nil { return err } t.BlockID = blockId.Flow() - var collectionId convert.ID + var collectionId parser.ID err = collectionId.Parse(r.GetQueryParam(collectionIDQueryParam)) if err != nil { return err diff --git a/engine/access/rest/http/request/helpers.go b/engine/access/rest/http/request/helpers.go index 8ad33bb1d81..38a669d0ad1 100644 --- 
a/engine/access/rest/http/request/helpers.go +++ b/engine/access/rest/http/request/helpers.go @@ -8,7 +8,7 @@ import ( "strings" "github.com/onflow/flow-go/engine/access/rest/common" - "github.com/onflow/flow-go/engine/access/rest/common/convert" + "github.com/onflow/flow-go/engine/access/rest/common/parser" "github.com/onflow/flow-go/model/flow" ) @@ -61,7 +61,7 @@ func (g *GetByIDRequest) Build(r *common.Request) error { } func (g *GetByIDRequest) Parse(rawID string) error { - var id convert.ID + var id parser.ID err := id.Parse(rawID) if err != nil { return err diff --git a/engine/access/rest/http/request/transaction.go b/engine/access/rest/http/request/transaction.go index 1ebe595da81..a26f929ca8e 100644 --- a/engine/access/rest/http/request/transaction.go +++ b/engine/access/rest/http/request/transaction.go @@ -4,7 +4,7 @@ import ( "fmt" "io" - convert2 "github.com/onflow/flow-go/engine/access/rest/common/convert" + convert2 "github.com/onflow/flow-go/engine/access/rest/common/parser" "github.com/onflow/flow-go/engine/access/rest/http/models" "github.com/onflow/flow-go/engine/access/rest/util" "github.com/onflow/flow-go/engine/common/rpc/convert" diff --git a/engine/access/rest/http/routes/events.go b/engine/access/rest/http/routes/events.go index 038a4a98aeb..fed682555d0 100644 --- a/engine/access/rest/http/routes/events.go +++ b/engine/access/rest/http/routes/events.go @@ -7,7 +7,6 @@ import ( "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/engine/access/rest/common" - "github.com/onflow/flow-go/engine/access/rest/http/models" "github.com/onflow/flow-go/engine/access/rest/http/request" ) diff --git a/engine/access/rest/websockets/data_providers/blocks_data_provider.go b/engine/access/rest/websockets/data_providers/blocks_data_provider.go deleted file mode 100644 index f5a24139a87..00000000000 --- a/engine/access/rest/websockets/data_providers/blocks_data_provider.go +++ /dev/null @@ -1,282 +0,0 @@ -package data_providers - -import ( - "context" 
- "fmt" - - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/access" - "github.com/onflow/flow-go/engine/access/rest/common/convert" - "github.com/onflow/flow-go/engine/access/rest/util" - "github.com/onflow/flow-go/engine/access/rest/websockets/models" - "github.com/onflow/flow-go/model/flow" -) - -// BlocksFromStartBlockIDArgs contains the arguments required for subscribing to blocks -// starting from a specific block ID. -type BlocksFromStartBlockIDArgs struct { - StartBlockID flow.Identifier - BlockStatus flow.BlockStatus -} - -// BlocksFromStartBlockIDProvider is responsible for providing blocks starting -// from a specific block ID. -type BlocksFromStartBlockIDProvider struct { - *BaseDataProviderImpl - - logger zerolog.Logger - args BlocksFromStartBlockIDArgs -} - -var _ DataProvider = (*BlocksFromStartBlockIDProvider)(nil) - -// NewBlocksFromStartBlockIDProvider creates a new instance of BlocksFromStartBlockIDProvider. -func NewBlocksFromStartBlockIDProvider( - ctx context.Context, - logger zerolog.Logger, - api access.API, - topic string, - arguments map[string]string, - send chan<- interface{}, -) (*BlocksFromStartBlockIDProvider, error) { - ctx, cancel := context.WithCancel(ctx) - - p := &BlocksFromStartBlockIDProvider{ - logger: logger.With().Str("component", "block-from-start-block-id-provider").Logger(), - } - - // Validate arguments passed to the provider. - err := p.validateArguments(arguments) - if err != nil { - return nil, fmt.Errorf("invalid arguments: %w", err) - } - - // Subscribe to blocks from the start block ID with the specified block status. - subscription := p.api.SubscribeBlocksFromStartBlockID(ctx, p.args.StartBlockID, p.args.BlockStatus) - p.BaseDataProviderImpl = NewBaseDataProviderImpl( - ctx, - cancel, - api, - topic, - send, - subscription, - ) - - return p, nil -} - -// Run starts processing the subscription for blocks from the start block ID and handles responses. -// -// No errors are expected during normal operations. 
-func (p *BlocksFromStartBlockIDProvider) Run() error { - return HandleSubscription(p.ctx, p.subscription, handleResponse(p.send, p.args.BlockStatus)) -} - -// validateArguments checks and validates the arguments passed to the provider. -// -// No errors are expected during normal operations. -func (p *BlocksFromStartBlockIDProvider) validateArguments(arguments map[string]string) error { - // Check for block_status argument and validate - if blockStatusIn, ok := arguments["block_status"]; ok { - blockStatus, err := convert.ParseBlockStatus(blockStatusIn) - if err != nil { - return err - } - p.args.BlockStatus = blockStatus - } else { - return fmt.Errorf("'block_status' must be provided") - } - - // Check for start_block_id argument and validate - if startBlockIDIn, ok := arguments["start_block_id"]; ok { - var startBlockID convert.ID - err := startBlockID.Parse(startBlockIDIn) - if err != nil { - return err - } - p.args.StartBlockID = startBlockID.Flow() - } else { - return fmt.Errorf("'start_block_id' must be provided") - } - - return nil -} - -// BlocksFromBlockHeightArgs contains the arguments required for subscribing to blocks -// starting from a specific block height. -type BlocksFromBlockHeightArgs struct { - StartBlockHeight uint64 - BlockStatus flow.BlockStatus -} - -// BlocksFromStartBlockHeightProvider is responsible for providing blocks starting -// from a specific block height. -type BlocksFromStartBlockHeightProvider struct { - *BaseDataProviderImpl - - logger zerolog.Logger - args BlocksFromBlockHeightArgs -} - -var _ DataProvider = (*BlocksFromStartBlockHeightProvider)(nil) - -// NewBlocksFromStartBlockHeightProvider creates a new instance of BlocksFromStartBlockHeightProvider. 
-func NewBlocksFromStartBlockHeightProvider( - ctx context.Context, - logger zerolog.Logger, - api access.API, - topic string, - arguments map[string]string, - send chan<- interface{}, -) (*BlocksFromStartBlockHeightProvider, error) { - ctx, cancel := context.WithCancel(ctx) - - p := &BlocksFromStartBlockHeightProvider{ - logger: logger.With().Str("component", "block-from-start-block-height-provider").Logger(), - } - - // Validate arguments passed to the provider. - err := p.validateArguments(arguments) - if err != nil { - return nil, fmt.Errorf("invalid arguments: %w", err) - } - - // Subscribe to blocks from the start block height with the specified block status. - subscription := p.api.SubscribeBlocksFromStartHeight(ctx, p.args.StartBlockHeight, p.args.BlockStatus) - p.BaseDataProviderImpl = NewBaseDataProviderImpl( - ctx, - cancel, - api, - topic, - send, - subscription, - ) - - return p, nil -} - -// Run starts processing the subscription for blocks from the start block height and handles responses. -// -// No errors are expected during normal operations. -func (p *BlocksFromStartBlockHeightProvider) Run() error { - return HandleSubscription(p.ctx, p.subscription, handleResponse(p.send, p.args.BlockStatus)) -} - -// validateArguments checks and validates the arguments passed to the provider. -// -// No errors are expected during normal operations. 
-func (p *BlocksFromStartBlockHeightProvider) validateArguments(arguments map[string]string) error { - // Check for block_status argument and validate - if blockStatusIn, ok := arguments["block_status"]; ok { - blockStatus, err := convert.ParseBlockStatus(blockStatusIn) - if err != nil { - return err - } - p.args.BlockStatus = blockStatus - } else { - return fmt.Errorf("'block_status' must be provided") - } - - if startBlockHeightIn, ok := arguments["start_block_height"]; ok { - var err error - p.args.StartBlockHeight, err = util.ToUint64(startBlockHeightIn) - if err != nil { - return fmt.Errorf("invalid start height: %w", err) - } - } else { - return fmt.Errorf("'start_block_height' must be provided") - } - - return nil -} - -// BlocksFromLatestArgs contains the arguments required for subscribing to blocks -// starting from latest block. -type BlocksFromLatestArgs struct { - BlockStatus flow.BlockStatus -} - -// BlocksFromLatestProvider is responsible for providing blocks starting from latest block. -type BlocksFromLatestProvider struct { - *BaseDataProviderImpl - - logger zerolog.Logger - args BlocksFromLatestArgs -} - -var _ DataProvider = (*BlocksFromLatestProvider)(nil) - -// NewBlocksFromLatestProvider creates a new BlocksFromLatestProvider. -func NewBlocksFromLatestProvider( - ctx context.Context, - logger zerolog.Logger, - api access.API, - topic string, - arguments map[string]string, - send chan<- interface{}, -) (*BlocksFromLatestProvider, error) { - ctx, cancel := context.WithCancel(ctx) - - p := &BlocksFromLatestProvider{ - logger: logger.With().Str("component", "block-from-latest-provider").Logger(), - } - - // Validate arguments passed to the provider. - err := p.validateArguments(arguments) - if err != nil { - return nil, fmt.Errorf("invalid arguments: %w", err) - } - - // Subscribe to blocks from the latest block height with the specified block status. 
- subscription := p.api.SubscribeBlocksFromLatest(ctx, p.args.BlockStatus) - p.BaseDataProviderImpl = NewBaseDataProviderImpl( - ctx, - cancel, - api, - topic, - send, - subscription, - ) - - return p, nil -} - -// Run starts processing the subscription for blocks from the latest block and handles responses. -// -// No errors are expected during normal operations. -func (p *BlocksFromLatestProvider) Run() error { - return HandleSubscription(p.ctx, p.subscription, handleResponse(p.send, p.args.BlockStatus)) -} - -// validateArguments checks and validates the arguments passed to the provider. -// -// No errors are expected during normal operations. -func (p *BlocksFromLatestProvider) validateArguments(arguments map[string]string) error { - // Check for block_status argument and validate - if blockStatusIn, ok := arguments["block_status"]; ok { - blockStatus, err := convert.ParseBlockStatus(blockStatusIn) - if err != nil { - return err - } - p.args.BlockStatus = blockStatus - } else { - return fmt.Errorf("'block_status' must be provided") - } - - return nil -} - -// handleResponse processes a block and sends the formatted response. -// -// No errors are expected during normal operations. 
-func handleResponse(send chan<- interface{}, blockStatus flow.BlockStatus) func(*flow.Block) error { - return func(block *flow.Block) error { - send <- &models.BlockMessageResponse{ - Block: block, - BlockStatus: blockStatus, - } - - return nil - } -} diff --git a/engine/access/rest/websockets/data_providers/blocks_data_provider_test.go b/engine/access/rest/websockets/data_providers/blocks_data_provider_test.go deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/engine/access/rest/websockets/data_providers/blocks_provider.go b/engine/access/rest/websockets/data_providers/blocks_provider.go new file mode 100644 index 00000000000..679724c0346 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/blocks_provider.go @@ -0,0 +1,145 @@ +package data_providers + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/engine/access/rest/http/request" + "github.com/onflow/flow-go/engine/access/rest/util" + "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/model/flow" +) + +// BlocksArguments contains the arguments required for subscribing to blocks +type BlocksArguments struct { + StartBlockID flow.Identifier + StartBlockHeight uint64 + BlockStatus flow.BlockStatus +} + +// BlocksDataProvider is responsible for providing blocks +type BlocksDataProvider struct { + *BaseDataProviderImpl + + logger zerolog.Logger + args BlocksArguments + api access.API +} + +var _ DataProvider = (*BlocksDataProvider)(nil) + +// NewBlocksDataProvider creates a new instance of BlocksDataProvider. 
+func NewBlocksDataProvider( + ctx context.Context, + logger zerolog.Logger, + api access.API, + topic string, + arguments map[string]string, + send chan<- interface{}, +) (*BlocksDataProvider, error) { + ctx, cancel := context.WithCancel(ctx) + + p := &BlocksDataProvider{ + logger: logger.With().Str("component", "block-data-provider").Logger(), + api: api, + } + + // Validate arguments passed to the provider. + err := p.validateArguments(arguments) + if err != nil { + return nil, fmt.Errorf("invalid arguments: %w", err) + } + + // Subscribe to blocks from the start block ID with the specified block status. + subscription := p.createSubscription(ctx) + p.BaseDataProviderImpl = NewBaseDataProviderImpl( + ctx, + cancel, + topic, + send, + subscription, + ) + + return p, nil +} + +// Run starts processing the subscription for blocks and handles responses. +// +// No errors are expected during normal operations. +func (p *BlocksDataProvider) Run() error { + return HandleSubscription(p.ctx, p.subscription, handleResponse(p.send, p.args.BlockStatus)) +} + +// validateArguments checks and validates the arguments passed to the provider. +// +// No errors are expected during normal operations. 
+func (p *BlocksDataProvider) validateArguments(arguments map[string]string) error { + // Check for block_status argument and validate + if blockStatusIn, ok := arguments["block_status"]; ok { + blockStatus, err := parser.ParseBlockStatus(blockStatusIn) + if err != nil { + return err + } + p.args.BlockStatus = blockStatus + } else { + return fmt.Errorf("'block_status' must be provided") + } + + if startBlockIDIn, ok := arguments["start_block_id"]; ok { + var startBlockID parser.ID + err := startBlockID.Parse(startBlockIDIn) + if err != nil { + return err + } + p.args.StartBlockID = startBlockID.Flow() + } + + if startBlockHeightIn, ok := arguments["start_block_height"]; ok { + var err error + p.args.StartBlockHeight, err = util.ToUint64(startBlockHeightIn) + if err != nil { + return fmt.Errorf("invalid 'start_block_height': %w", err) + } + } else { + p.args.StartBlockHeight = request.EmptyHeight + } + + // if both start_block_id and start_height are provided + if p.args.StartBlockID != flow.ZeroID && p.args.StartBlockHeight != request.EmptyHeight { + return fmt.Errorf("can only provide either 'start_block_id' or 'start_block_height'") + } + + return nil +} + +// createSubscription +func (p *BlocksDataProvider) createSubscription(ctx context.Context) subscription.Subscription { + if p.args.StartBlockID != flow.ZeroID { + return p.api.SubscribeBlocksFromStartBlockID(ctx, p.args.StartBlockID, p.args.BlockStatus) + } + + if p.args.StartBlockHeight > 0 { + return p.api.SubscribeBlocksFromStartHeight(ctx, p.args.StartBlockHeight, p.args.BlockStatus) + } + + return p.api.SubscribeBlocksFromLatest(ctx, p.args.BlockStatus) +} + +// handleResponse processes a block and sends the formatted response. +// +// No errors are expected during normal operations. 
+func handleResponse(send chan<- interface{}, blockStatus flow.BlockStatus) func(*flow.Block) error { + return func(block *flow.Block) error { + send <- &models.BlockMessageResponse{ + Block: block, + BlockStatus: blockStatus, + } + + return nil + } +} diff --git a/engine/access/rest/websockets/data_providers/blocks_provider_test.go b/engine/access/rest/websockets/data_providers/blocks_provider_test.go new file mode 100644 index 00000000000..3b8a7fe2198 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/blocks_provider_test.go @@ -0,0 +1,147 @@ +package data_providers + +import ( + "context" + "fmt" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + accessmock "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/engine/access/rest/common/parser" + mockstatestream "github.com/onflow/flow-go/engine/access/state_stream/mock" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/unittest" +) + +const unknownBlockStatus = "unknown_block_status" + +type testErrType struct { + name string + arguments map[string]string + expectedErrorMsg string +} + +// BlocksProviderSuite is a test suite for testing the block providers functionality. 
+type BlocksProviderSuite struct { + suite.Suite + + log zerolog.Logger + api *accessmock.API + + blockMap map[uint64]*flow.Block + rootBlock flow.Block + finalizedBlock *flow.Header +} + +func TestBlocksProviderSuite(t *testing.T) { + suite.Run(t, new(BlocksProviderSuite)) +} + +func (s *BlocksProviderSuite) SetupTest() { + s.log = unittest.Logger() + s.api = accessmock.NewAPI(s.T()) + + blockCount := 5 + s.blockMap = make(map[uint64]*flow.Block, blockCount) + s.rootBlock = unittest.BlockFixture() + s.rootBlock.Header.Height = 0 + parent := s.rootBlock.Header + + for i := 0; i < blockCount; i++ { + block := unittest.BlockWithParentFixture(parent) + // update for next iteration + parent = block.Header + s.blockMap[block.Header.Height] = block + } + s.finalizedBlock = parent +} + +// TestBlocksDataProvider_InvalidArguments verifies that +func (s *BlocksProviderSuite) TestBlocksDataProvider_InvalidArguments() { + ctx := context.Background() + send := make(chan interface{}) + + testCases := []testErrType{ + { + name: "missing 'block_status' argument", + arguments: map[string]string{ + "start_block_id": s.rootBlock.ID().String(), + }, + expectedErrorMsg: "'block_status' must be provided", + }, + { + name: "unknown 'block_status' argument", + arguments: map[string]string{ + "block_status": unknownBlockStatus, + }, + expectedErrorMsg: fmt.Sprintf("invalid 'block_status', must be '%s' or '%s'", parser.Finalized, parser.Sealed), + }, + { + name: "provide both 'start_block_id' and 'start_block_height' arguments", + arguments: map[string]string{ + "block_status": parser.Finalized, + "start_block_id": s.rootBlock.ID().String(), + "start_block_height": fmt.Sprintf("%d", s.rootBlock.Header.Height), + }, + expectedErrorMsg: "can only provide either 'start_block_id' or 'start_block_height'", + }, + } + + topic := BlocksTopic + + for _, test := range testCases { + s.Run(test.name, func() { + provider, err := NewBlocksDataProvider(ctx, s.log, s.api, topic, test.arguments, send) + 
s.Require().Nil(provider) + s.Require().Error(err) + s.Require().Contains(err.Error(), test.expectedErrorMsg) + }) + } +} + +// TestBlocksDataProvider_ValidArguments tests +func (s *BlocksProviderSuite) TestBlocksDataProvider_ValidArguments() { + ctx := context.Background() + + topic := BlocksTopic + send := make(chan interface{}) + + s.Run("subscribe blocks from start block id", func() { + subscription := mockstatestream.NewSubscription(s.T()) + startBlockId := s.rootBlock.Header.ID() + + s.api.On("SubscribeBlocksFromStartBlockID", mock.Anything, startBlockId, flow.BlockStatusFinalized).Return(subscription).Once() + + arguments := map[string]string{ + "start_block_id": startBlockId.String(), + "block_status": parser.Finalized, + } + + provider, err := NewBlocksDataProvider(ctx, s.log, s.api, topic, arguments, send) + s.Require().NoError(err) + s.Require().NotNil(provider) + s.Require().Equal(flow.BlockStatusFinalized, provider.args.BlockStatus) + + // Create a channel to receive mock Blocks objects + ch := make(chan interface{}) + var chReadOnly <-chan interface{} + // Simulate sending a mock Blocks + go func() { + for _, block := range s.blockMap { + // Send the mock Blocks through the channel + ch <- block + } + }() + + chReadOnly = ch + subscription.Mock.On("Channel").Return(chReadOnly) + + err = provider.Run() + s.Require().NoError(err) + + provider.Close() + }) +} From 67a184a5ccd93a1b4eb001c7dc6606fe023b6096 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Tue, 12 Nov 2024 11:07:45 +0200 Subject: [PATCH 05/64] Updated last commit --- .../access/rest/http/request/transaction.go | 4 +- .../data_providers/base_provider.go | 33 ---------- .../data_providers/blocks_provider.go | 16 +++-- .../data_providers/blocks_provider_test.go | 46 ------------- .../rest/websockets/data_providers/factory.go | 12 +--- .../websockets/data_providers/factory_test.go | 66 +++++++++++++++++++ .../legacy/request/subscribe_events.go | 4 +- engine/access/rest_api_test.go | 14 ++-- 
8 files changed, 89 insertions(+), 106 deletions(-) diff --git a/engine/access/rest/http/request/transaction.go b/engine/access/rest/http/request/transaction.go index a26f929ca8e..68bad0009f2 100644 --- a/engine/access/rest/http/request/transaction.go +++ b/engine/access/rest/http/request/transaction.go @@ -4,7 +4,7 @@ import ( "fmt" "io" - convert2 "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/engine/access/rest/common/parser" "github.com/onflow/flow-go/engine/access/rest/http/models" "github.com/onflow/flow-go/engine/access/rest/util" "github.com/onflow/flow-go/engine/common/rpc/convert" @@ -90,7 +90,7 @@ func (t *Transaction) Parse(raw io.Reader, chain flow.Chain) error { return fmt.Errorf("invalid transaction script encoding") } - var blockID convert2.ID + var blockID parser.ID err = blockID.Parse(tx.ReferenceBlockId) if err != nil { return fmt.Errorf("invalid reference block ID: %w", err) diff --git a/engine/access/rest/websockets/data_providers/base_provider.go b/engine/access/rest/websockets/data_providers/base_provider.go index a30cf9a6887..567b7647eff 100644 --- a/engine/access/rest/websockets/data_providers/base_provider.go +++ b/engine/access/rest/websockets/data_providers/base_provider.go @@ -2,9 +2,7 @@ package data_providers import ( "context" - "fmt" - "github.com/onflow/flow-go/access" "github.com/onflow/flow-go/engine/access/subscription" ) @@ -29,7 +27,6 @@ type BaseDataProviderImpl struct { cancel context.CancelFunc ctx context.Context - api access.API send chan<- interface{} subscription subscription.Subscription } @@ -38,7 +35,6 @@ type BaseDataProviderImpl struct { func NewBaseDataProviderImpl( ctx context.Context, cancel context.CancelFunc, - api access.API, topic string, send chan<- interface{}, subscription subscription.Subscription, @@ -49,7 +45,6 @@ func NewBaseDataProviderImpl( ctx: ctx, cancel: cancel, - api: api, send: send, subscription: subscription, } @@ -69,31 +64,3 @@ func (b 
*BaseDataProviderImpl) Topic() string { func (b *BaseDataProviderImpl) Close() { b.cancel() } - -// TODO: refactor rpc version of HandleSubscription and use it -func HandleSubscription[T any](ctx context.Context, sub subscription.Subscription, handleResponse func(resp T) error) error { - for { - select { - case v, ok := <-sub.Channel(): - if !ok { - if sub.Err() != nil { - return fmt.Errorf("stream encountered an error: %w", sub.Err()) - } - return nil - } - - resp, ok := v.(T) - if !ok { - return fmt.Errorf("unexpected subscription response type: %T", v) - } - - err := handleResponse(resp) - if err != nil { - return err - } - case <-ctx.Done(): - // context closed, subscription closed - return nil - } - } -} diff --git a/engine/access/rest/websockets/data_providers/blocks_provider.go b/engine/access/rest/websockets/data_providers/blocks_provider.go index 679724c0346..2aba13337c2 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider.go @@ -17,9 +17,9 @@ import ( // BlocksArguments contains the arguments required for subscribing to blocks type BlocksArguments struct { - StartBlockID flow.Identifier - StartBlockHeight uint64 - BlockStatus flow.BlockStatus + StartBlockID flow.Identifier // ID of the block to start subscription from + StartBlockHeight uint64 // Height of the block to start subscription from + BlockStatus flow.BlockStatus // Status of blocks to subscribe to } // BlocksDataProvider is responsible for providing blocks @@ -55,7 +55,7 @@ func NewBlocksDataProvider( return nil, fmt.Errorf("invalid arguments: %w", err) } - // Subscribe to blocks from the start block ID with the specified block status. + // Set up a subscription to blocks based on the provided start block ID and block status. 
subscription := p.createSubscription(ctx) p.BaseDataProviderImpl = NewBaseDataProviderImpl( ctx, @@ -79,7 +79,7 @@ func (p *BlocksDataProvider) Run() error { // // No errors are expected during normal operations. func (p *BlocksDataProvider) validateArguments(arguments map[string]string) error { - // Check for block_status argument and validate + // Parse 'block_status' if blockStatusIn, ok := arguments["block_status"]; ok { blockStatus, err := parser.ParseBlockStatus(blockStatusIn) if err != nil { @@ -90,6 +90,7 @@ func (p *BlocksDataProvider) validateArguments(arguments map[string]string) erro return fmt.Errorf("'block_status' must be provided") } + // Parse 'start_block_id' if provided if startBlockIDIn, ok := arguments["start_block_id"]; ok { var startBlockID parser.ID err := startBlockID.Parse(startBlockIDIn) @@ -99,6 +100,7 @@ func (p *BlocksDataProvider) validateArguments(arguments map[string]string) erro p.args.StartBlockID = startBlockID.Flow() } + // Parse 'start_block_height' if provided if startBlockHeightIn, ok := arguments["start_block_height"]; ok { var err error p.args.StartBlockHeight, err = util.ToUint64(startBlockHeightIn) @@ -117,13 +119,13 @@ func (p *BlocksDataProvider) validateArguments(arguments map[string]string) erro return nil } -// createSubscription +// createSubscription creates a new subscription based on the specified start block ID or height. 
func (p *BlocksDataProvider) createSubscription(ctx context.Context) subscription.Subscription { if p.args.StartBlockID != flow.ZeroID { return p.api.SubscribeBlocksFromStartBlockID(ctx, p.args.StartBlockID, p.args.BlockStatus) } - if p.args.StartBlockHeight > 0 { + if p.args.StartBlockHeight != request.EmptyHeight { return p.api.SubscribeBlocksFromStartHeight(ctx, p.args.StartBlockHeight, p.args.BlockStatus) } diff --git a/engine/access/rest/websockets/data_providers/blocks_provider_test.go b/engine/access/rest/websockets/data_providers/blocks_provider_test.go index 3b8a7fe2198..db460d4f879 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider_test.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider_test.go @@ -6,12 +6,10 @@ import ( "testing" "github.com/rs/zerolog" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" accessmock "github.com/onflow/flow-go/access/mock" "github.com/onflow/flow-go/engine/access/rest/common/parser" - mockstatestream "github.com/onflow/flow-go/engine/access/state_stream/mock" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) @@ -101,47 +99,3 @@ func (s *BlocksProviderSuite) TestBlocksDataProvider_InvalidArguments() { }) } } - -// TestBlocksDataProvider_ValidArguments tests -func (s *BlocksProviderSuite) TestBlocksDataProvider_ValidArguments() { - ctx := context.Background() - - topic := BlocksTopic - send := make(chan interface{}) - - s.Run("subscribe blocks from start block id", func() { - subscription := mockstatestream.NewSubscription(s.T()) - startBlockId := s.rootBlock.Header.ID() - - s.api.On("SubscribeBlocksFromStartBlockID", mock.Anything, startBlockId, flow.BlockStatusFinalized).Return(subscription).Once() - - arguments := map[string]string{ - "start_block_id": startBlockId.String(), - "block_status": parser.Finalized, - } - - provider, err := NewBlocksDataProvider(ctx, s.log, s.api, topic, arguments, send) - s.Require().NoError(err) - 
s.Require().NotNil(provider) - s.Require().Equal(flow.BlockStatusFinalized, provider.args.BlockStatus) - - // Create a channel to receive mock Blocks objects - ch := make(chan interface{}) - var chReadOnly <-chan interface{} - // Simulate sending a mock Blocks - go func() { - for _, block := range s.blockMap { - // Send the mock Blocks through the channel - ch <- block - } - }() - - chReadOnly = ch - subscription.Mock.On("Channel").Return(chReadOnly) - - err = provider.Run() - s.Require().NoError(err) - - provider.Close() - }) -} diff --git a/engine/access/rest/websockets/data_providers/factory.go b/engine/access/rest/websockets/data_providers/factory.go index d8285743d5e..7061215ac20 100644 --- a/engine/access/rest/websockets/data_providers/factory.go +++ b/engine/access/rest/websockets/data_providers/factory.go @@ -19,9 +19,7 @@ const ( BlockDigestsTopic = "block_digests" TransactionStatusesTopic = "transaction_statuses" - BlocksFromStartBlockIDTopic = "blocks_from_start_block_id" - BlocksFromStartBlockHeightTopic = "blocks_from_start_block_height" - BlocksFromLatestTopic = "blocks_from_latest" + BlocksTopic = "blocks" ) // DataProviderFactory is responsible for creating data providers based on the @@ -73,12 +71,8 @@ func (s *DataProviderFactory) NewDataProvider( ch chan<- interface{}, ) (DataProvider, error) { switch topic { - case BlocksFromStartBlockIDTopic: - return NewBlocksFromStartBlockIDProvider(ctx, s.logger, s.accessApi, topic, arguments, ch) - case BlocksFromStartBlockHeightTopic: - return NewBlocksFromStartBlockHeightProvider(ctx, s.logger, s.accessApi, topic, arguments, ch) - case BlocksFromLatestTopic: - return NewBlocksFromLatestProvider(ctx, s.logger, s.accessApi, topic, arguments, ch) + case BlocksTopic: + return NewBlocksDataProvider(ctx, s.logger, s.accessApi, topic, arguments, ch) // TODO: Implemented handlers for each topic should be added in respective case case EventsTopic, AccountStatusesTopic, diff --git 
a/engine/access/rest/websockets/data_providers/factory_test.go b/engine/access/rest/websockets/data_providers/factory_test.go index e69de29bb2d..45b8aebbf67 100644 --- a/engine/access/rest/websockets/data_providers/factory_test.go +++ b/engine/access/rest/websockets/data_providers/factory_test.go @@ -0,0 +1,66 @@ +package data_providers + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/suite" + + accessmock "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/engine/access/state_stream" + statestreammock "github.com/onflow/flow-go/engine/access/state_stream/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +// DataProviderFactorySuite is a test suite for testing the DataProviderFactory functionality. +type DataProviderFactorySuite struct { + suite.Suite + + ctx context.Context + ch chan interface{} + + factory *DataProviderFactory +} + +func TestDataProviderFactorySuite(t *testing.T) { + suite.Run(t, new(DataProviderFactorySuite)) +} + +// SetupTest sets up the initial context and dependencies for each test case. +// It initializes the factory with mock instances and validates that it is created successfully. +func (s *DataProviderFactorySuite) SetupTest() { + log := unittest.Logger() + eventFilterConfig := state_stream.EventFilterConfig{} + stateStreamApi := statestreammock.NewAPI(s.T()) + accessApi := accessmock.NewAPI(s.T()) + + s.ctx = context.Background() + s.ch = make(chan interface{}) + + s.factory = NewDataProviderFactory(log, eventFilterConfig, stateStreamApi, accessApi) + s.Require().NotNil(s.factory) +} + +// TestSupportedTopics verifies that supported topics return a valid provider and no errors. +// Each test case includes a topic and arguments for which a data provider should be created. +func (s *DataProviderFactorySuite) TestSupportedTopics() { + +} + +// TestUnsupportedTopics verifies that unsupported topics do not return a provider +// and instead return an error indicating the topic is unsupported. 
+func (s *DataProviderFactorySuite) TestUnsupportedTopics() { + // Define unsupported topics + unsupportedTopics := []string{ + "unknown_topic", + "", + } + + for _, topic := range unsupportedTopics { + provider, err := s.factory.NewDataProvider(s.ctx, topic, nil, s.ch) + s.Require().Nil(provider, "Expected no provider for unsupported topic %s", topic) + s.Require().Error(err, "Expected error for unsupported topic %s", topic) + s.Require().EqualError(err, fmt.Sprintf("unsupported topic \"%s\"", topic)) + } +} diff --git a/engine/access/rest/websockets/legacy/request/subscribe_events.go b/engine/access/rest/websockets/legacy/request/subscribe_events.go index 7f02ad1c10e..1110d3582d4 100644 --- a/engine/access/rest/websockets/legacy/request/subscribe_events.go +++ b/engine/access/rest/websockets/legacy/request/subscribe_events.go @@ -5,7 +5,7 @@ import ( "strconv" "github.com/onflow/flow-go/engine/access/rest/common" - "github.com/onflow/flow-go/engine/access/rest/common/convert" + "github.com/onflow/flow-go/engine/access/rest/common/parser" "github.com/onflow/flow-go/engine/access/rest/http/request" "github.com/onflow/flow-go/model/flow" ) @@ -57,7 +57,7 @@ func (g *SubscribeEvents) Parse( rawContracts []string, rawHeartbeatInterval string, ) error { - var startBlockID convert.ID + var startBlockID parser.ID err := startBlockID.Parse(rawStartBlockID) if err != nil { return err diff --git a/engine/access/rest_api_test.go b/engine/access/rest_api_test.go index 9c8eaa20881..c1f6eeb0c21 100644 --- a/engine/access/rest_api_test.go +++ b/engine/access/rest_api_test.go @@ -22,7 +22,7 @@ import ( accessmock "github.com/onflow/flow-go/engine/access/mock" "github.com/onflow/flow-go/engine/access/rest" "github.com/onflow/flow-go/engine/access/rest/common" - "github.com/onflow/flow-go/engine/access/rest/common/convert" + "github.com/onflow/flow-go/engine/access/rest/common/parser" "github.com/onflow/flow-go/engine/access/rest/router" 
"github.com/onflow/flow-go/engine/access/rpc" "github.com/onflow/flow-go/engine/access/rpc/backend" @@ -230,8 +230,8 @@ func TestRestAPI(t *testing.T) { func (suite *RestAPITestSuite) TestGetBlock() { - testBlockIDs := make([]string, convert.MaxIDsLength) - testBlocks := make([]*flow.Block, convert.MaxIDsLength) + testBlockIDs := make([]string, parser.MaxIDsLength) + testBlocks := make([]*flow.Block, parser.MaxIDsLength) for i := range testBlockIDs { collections := unittest.CollectionListFixture(1) block := unittest.BlockWithGuaranteesFixture( @@ -281,7 +281,7 @@ func (suite *RestAPITestSuite) TestGetBlock() { actualBlocks, resp, err := client.BlocksApi.BlocksIdGet(ctx, blockIDSlice, optionsForBlockByID()) require.NoError(suite.T(), err) assert.Equal(suite.T(), http.StatusOK, resp.StatusCode) - assert.Len(suite.T(), actualBlocks, convert.MaxIDsLength) + assert.Len(suite.T(), actualBlocks, parser.MaxIDsLength) for i, b := range testBlocks { assert.Equal(suite.T(), b.ID().String(), actualBlocks[i].Header.Id) } @@ -379,13 +379,13 @@ func (suite *RestAPITestSuite) TestGetBlock() { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - blockIDs := make([]string, convert.MaxIDsLength+1) + blockIDs := make([]string, parser.MaxIDsLength+1) copy(blockIDs, testBlockIDs) - blockIDs[convert.MaxIDsLength] = unittest.IdentifierFixture().String() + blockIDs[parser.MaxIDsLength] = unittest.IdentifierFixture().String() blockIDSlice := []string{strings.Join(blockIDs, ",")} _, resp, err := client.BlocksApi.BlocksIdGet(ctx, blockIDSlice, optionsForBlockByID()) - assertError(suite.T(), resp, err, http.StatusBadRequest, fmt.Sprintf("at most %d IDs can be requested at a time", convert.MaxIDsLength)) + assertError(suite.T(), resp, err, http.StatusBadRequest, fmt.Sprintf("at most %d IDs can be requested at a time", parser.MaxIDsLength)) }) suite.Run("GetBlockByID with one non-existing block ID", func() { From e9c6ac8c149b7483414a10672929941cd5a4ca9c Mon 
Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Tue, 12 Nov 2024 11:08:44 +0200 Subject: [PATCH 06/64] Redactored HandleSubscription to use it in the rest subscription data providers --- access/handler.go | 20 +++---- engine/access/state_stream/backend/handler.go | 22 +++---- engine/access/subscription/util.go | 57 +++++++++++++------ 3 files changed, 61 insertions(+), 38 deletions(-) diff --git a/access/handler.go b/access/handler.go index 25316e7f3dd..b974e7034fc 100644 --- a/access/handler.go +++ b/access/handler.go @@ -1066,7 +1066,7 @@ func (h *Handler) SubscribeBlocksFromStartBlockID(request *access.SubscribeBlock } sub := h.api.SubscribeBlocksFromStartBlockID(stream.Context(), startBlockID, blockStatus) - return subscription.HandleSubscription(sub, h.handleBlocksResponse(stream.Send, request.GetFullBlockResponse(), blockStatus)) + return subscription.HandleRPCSubscription(sub, h.handleBlocksResponse(stream.Send, request.GetFullBlockResponse(), blockStatus)) } // SubscribeBlocksFromStartHeight handles subscription requests for blocks started from block height. @@ -1093,7 +1093,7 @@ func (h *Handler) SubscribeBlocksFromStartHeight(request *access.SubscribeBlocks } sub := h.api.SubscribeBlocksFromStartHeight(stream.Context(), request.GetStartBlockHeight(), blockStatus) - return subscription.HandleSubscription(sub, h.handleBlocksResponse(stream.Send, request.GetFullBlockResponse(), blockStatus)) + return subscription.HandleRPCSubscription(sub, h.handleBlocksResponse(stream.Send, request.GetFullBlockResponse(), blockStatus)) } // SubscribeBlocksFromLatest handles subscription requests for blocks started from latest sealed block. 
@@ -1120,7 +1120,7 @@ func (h *Handler) SubscribeBlocksFromLatest(request *access.SubscribeBlocksFromL } sub := h.api.SubscribeBlocksFromLatest(stream.Context(), blockStatus) - return subscription.HandleSubscription(sub, h.handleBlocksResponse(stream.Send, request.GetFullBlockResponse(), blockStatus)) + return subscription.HandleRPCSubscription(sub, h.handleBlocksResponse(stream.Send, request.GetFullBlockResponse(), blockStatus)) } // handleBlocksResponse handles the subscription to block updates and sends @@ -1179,7 +1179,7 @@ func (h *Handler) SubscribeBlockHeadersFromStartBlockID(request *access.Subscrib } sub := h.api.SubscribeBlockHeadersFromStartBlockID(stream.Context(), startBlockID, blockStatus) - return subscription.HandleSubscription(sub, h.handleBlockHeadersResponse(stream.Send)) + return subscription.HandleRPCSubscription(sub, h.handleBlockHeadersResponse(stream.Send)) } // SubscribeBlockHeadersFromStartHeight handles subscription requests for block headers started from block height. @@ -1206,7 +1206,7 @@ func (h *Handler) SubscribeBlockHeadersFromStartHeight(request *access.Subscribe } sub := h.api.SubscribeBlockHeadersFromStartHeight(stream.Context(), request.GetStartBlockHeight(), blockStatus) - return subscription.HandleSubscription(sub, h.handleBlockHeadersResponse(stream.Send)) + return subscription.HandleRPCSubscription(sub, h.handleBlockHeadersResponse(stream.Send)) } // SubscribeBlockHeadersFromLatest handles subscription requests for block headers started from latest sealed block. 
@@ -1233,7 +1233,7 @@ func (h *Handler) SubscribeBlockHeadersFromLatest(request *access.SubscribeBlock } sub := h.api.SubscribeBlockHeadersFromLatest(stream.Context(), blockStatus) - return subscription.HandleSubscription(sub, h.handleBlockHeadersResponse(stream.Send)) + return subscription.HandleRPCSubscription(sub, h.handleBlockHeadersResponse(stream.Send)) } // handleBlockHeadersResponse handles the subscription to block updates and sends @@ -1293,7 +1293,7 @@ func (h *Handler) SubscribeBlockDigestsFromStartBlockID(request *access.Subscrib } sub := h.api.SubscribeBlockDigestsFromStartBlockID(stream.Context(), startBlockID, blockStatus) - return subscription.HandleSubscription(sub, h.handleBlockDigestsResponse(stream.Send)) + return subscription.HandleRPCSubscription(sub, h.handleBlockDigestsResponse(stream.Send)) } // SubscribeBlockDigestsFromStartHeight handles subscription requests for lightweight blocks started from block height. @@ -1320,7 +1320,7 @@ func (h *Handler) SubscribeBlockDigestsFromStartHeight(request *access.Subscribe } sub := h.api.SubscribeBlockDigestsFromStartHeight(stream.Context(), request.GetStartBlockHeight(), blockStatus) - return subscription.HandleSubscription(sub, h.handleBlockDigestsResponse(stream.Send)) + return subscription.HandleRPCSubscription(sub, h.handleBlockDigestsResponse(stream.Send)) } // SubscribeBlockDigestsFromLatest handles subscription requests for lightweight block started from latest sealed block. 
@@ -1347,7 +1347,7 @@ func (h *Handler) SubscribeBlockDigestsFromLatest(request *access.SubscribeBlock } sub := h.api.SubscribeBlockDigestsFromLatest(stream.Context(), blockStatus) - return subscription.HandleSubscription(sub, h.handleBlockDigestsResponse(stream.Send)) + return subscription.HandleRPCSubscription(sub, h.handleBlockDigestsResponse(stream.Send)) } // handleBlockDigestsResponse handles the subscription to block updates and sends @@ -1433,7 +1433,7 @@ func (h *Handler) SendAndSubscribeTransactionStatuses( sub := h.api.SubscribeTransactionStatuses(ctx, &tx, request.GetEventEncodingVersion()) messageIndex := counters.NewMonotonousCounter(0) - return subscription.HandleSubscription(sub, func(txResults []*TransactionResult) error { + return subscription.HandleRPCSubscription(sub, func(txResults []*TransactionResult) error { for i := range txResults { index := messageIndex.Value() if ok := messageIndex.Set(index + 1); !ok { diff --git a/engine/access/state_stream/backend/handler.go b/engine/access/state_stream/backend/handler.go index b2066440bb8..3acf1bad6ca 100644 --- a/engine/access/state_stream/backend/handler.go +++ b/engine/access/state_stream/backend/handler.go @@ -102,7 +102,7 @@ func (h *Handler) SubscribeExecutionData(request *executiondata.SubscribeExecuti sub := h.api.SubscribeExecutionData(stream.Context(), startBlockID, request.GetStartBlockHeight()) - return subscription.HandleSubscription(sub, handleSubscribeExecutionData(stream.Send, request.GetEventEncodingVersion())) + return subscription.HandleRPCSubscription(sub, handleSubscribeExecutionData(stream.Send, request.GetEventEncodingVersion())) } // SubscribeExecutionDataFromStartBlockID handles subscription requests for @@ -129,7 +129,7 @@ func (h *Handler) SubscribeExecutionDataFromStartBlockID(request *executiondata. 
sub := h.api.SubscribeExecutionDataFromStartBlockID(stream.Context(), startBlockID) - return subscription.HandleSubscription(sub, handleSubscribeExecutionData(stream.Send, request.GetEventEncodingVersion())) + return subscription.HandleRPCSubscription(sub, handleSubscribeExecutionData(stream.Send, request.GetEventEncodingVersion())) } // SubscribeExecutionDataFromStartBlockHeight handles subscription requests for @@ -150,7 +150,7 @@ func (h *Handler) SubscribeExecutionDataFromStartBlockHeight(request *executiond sub := h.api.SubscribeExecutionDataFromStartBlockHeight(stream.Context(), request.GetStartBlockHeight()) - return subscription.HandleSubscription(sub, handleSubscribeExecutionData(stream.Send, request.GetEventEncodingVersion())) + return subscription.HandleRPCSubscription(sub, handleSubscribeExecutionData(stream.Send, request.GetEventEncodingVersion())) } // SubscribeExecutionDataFromLatest handles subscription requests for @@ -171,7 +171,7 @@ func (h *Handler) SubscribeExecutionDataFromLatest(request *executiondata.Subscr sub := h.api.SubscribeExecutionDataFromLatest(stream.Context()) - return subscription.HandleSubscription(sub, handleSubscribeExecutionData(stream.Send, request.GetEventEncodingVersion())) + return subscription.HandleRPCSubscription(sub, handleSubscribeExecutionData(stream.Send, request.GetEventEncodingVersion())) } // SubscribeEvents is deprecated and will be removed in a future version. 
@@ -213,7 +213,7 @@ func (h *Handler) SubscribeEvents(request *executiondata.SubscribeEventsRequest, sub := h.api.SubscribeEvents(stream.Context(), startBlockID, request.GetStartBlockHeight(), filter) - return subscription.HandleSubscription(sub, h.handleEventsResponse(stream.Send, request.HeartbeatInterval, request.GetEventEncodingVersion())) + return subscription.HandleRPCSubscription(sub, h.handleEventsResponse(stream.Send, request.HeartbeatInterval, request.GetEventEncodingVersion())) } // SubscribeEventsFromStartBlockID handles subscription requests for events starting at the specified block ID. @@ -248,7 +248,7 @@ func (h *Handler) SubscribeEventsFromStartBlockID(request *executiondata.Subscri sub := h.api.SubscribeEventsFromStartBlockID(stream.Context(), startBlockID, filter) - return subscription.HandleSubscription(sub, h.handleEventsResponse(stream.Send, request.HeartbeatInterval, request.GetEventEncodingVersion())) + return subscription.HandleRPCSubscription(sub, h.handleEventsResponse(stream.Send, request.HeartbeatInterval, request.GetEventEncodingVersion())) } // SubscribeEventsFromStartHeight handles subscription requests for events starting at the specified block height. @@ -278,7 +278,7 @@ func (h *Handler) SubscribeEventsFromStartHeight(request *executiondata.Subscrib sub := h.api.SubscribeEventsFromStartHeight(stream.Context(), request.GetStartBlockHeight(), filter) - return subscription.HandleSubscription(sub, h.handleEventsResponse(stream.Send, request.HeartbeatInterval, request.GetEventEncodingVersion())) + return subscription.HandleRPCSubscription(sub, h.handleEventsResponse(stream.Send, request.HeartbeatInterval, request.GetEventEncodingVersion())) } // SubscribeEventsFromLatest handles subscription requests for events started from latest sealed block.. 
@@ -308,7 +308,7 @@ func (h *Handler) SubscribeEventsFromLatest(request *executiondata.SubscribeEven sub := h.api.SubscribeEventsFromLatest(stream.Context(), filter) - return subscription.HandleSubscription(sub, h.handleEventsResponse(stream.Send, request.HeartbeatInterval, request.GetEventEncodingVersion())) + return subscription.HandleRPCSubscription(sub, h.handleEventsResponse(stream.Send, request.HeartbeatInterval, request.GetEventEncodingVersion())) } // handleSubscribeExecutionData handles the subscription to execution data and sends it to the client via the provided stream. @@ -546,7 +546,7 @@ func (h *Handler) SubscribeAccountStatusesFromStartBlockID( sub := h.api.SubscribeAccountStatusesFromStartBlockID(stream.Context(), startBlockID, filter) - return subscription.HandleSubscription(sub, h.handleAccountStatusesResponse(request.HeartbeatInterval, request.GetEventEncodingVersion(), stream.Send)) + return subscription.HandleRPCSubscription(sub, h.handleAccountStatusesResponse(request.HeartbeatInterval, request.GetEventEncodingVersion(), stream.Send)) } // SubscribeAccountStatusesFromStartHeight streams account statuses for all blocks starting at the requested @@ -573,7 +573,7 @@ func (h *Handler) SubscribeAccountStatusesFromStartHeight( sub := h.api.SubscribeAccountStatusesFromStartHeight(stream.Context(), request.GetStartBlockHeight(), filter) - return subscription.HandleSubscription(sub, h.handleAccountStatusesResponse(request.HeartbeatInterval, request.GetEventEncodingVersion(), stream.Send)) + return subscription.HandleRPCSubscription(sub, h.handleAccountStatusesResponse(request.HeartbeatInterval, request.GetEventEncodingVersion(), stream.Send)) } // SubscribeAccountStatusesFromLatestBlock streams account statuses for all blocks starting @@ -600,5 +600,5 @@ func (h *Handler) SubscribeAccountStatusesFromLatestBlock( sub := h.api.SubscribeAccountStatusesFromLatestBlock(stream.Context(), filter) - return subscription.HandleSubscription(sub, 
h.handleAccountStatusesResponse(request.HeartbeatInterval, request.GetEventEncodingVersion(), stream.Send)) + return subscription.HandleRPCSubscription(sub, h.handleAccountStatusesResponse(request.HeartbeatInterval, request.GetEventEncodingVersion(), stream.Send)) } diff --git a/engine/access/subscription/util.go b/engine/access/subscription/util.go index 593f3d78499..604cbf6e597 100644 --- a/engine/access/subscription/util.go +++ b/engine/access/subscription/util.go @@ -1,8 +1,10 @@ package subscription import ( + "context" + "fmt" + "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" "github.com/onflow/flow-go/engine/common/rpc" ) @@ -11,29 +13,50 @@ import ( // handles the received responses, and sends the processed information to the client via the provided stream using handleResponse. // // Parameters: +// - ctx: Context for the operation. // - sub: The subscription. // - handleResponse: The function responsible for handling the response of the subscribed type. // -// Expected errors during normal operation: -// - codes.Internal: If the subscription encounters an error or gets an unexpected response. -func HandleSubscription[T any](sub Subscription, handleResponse func(resp T) error) error { +// No errors are expected during normal operations. 
+func HandleSubscription[T any](ctx context.Context, sub Subscription, handleResponse func(resp T) error) error { for { - v, ok := <-sub.Channel() - if !ok { - if sub.Err() != nil { - return rpc.ConvertError(sub.Err(), "stream encountered an error", codes.Internal) + select { + case v, ok := <-sub.Channel(): + if !ok { + if sub.Err() != nil { + return fmt.Errorf("stream encountered an error: %w", sub.Err()) + } + return nil } - return nil - } - resp, ok := v.(T) - if !ok { - return status.Errorf(codes.Internal, "unexpected response type: %T", v) - } + resp, ok := v.(T) + if !ok { + return fmt.Errorf("unexpected response type: %T", v) + } - err := handleResponse(resp) - if err != nil { - return err + err := handleResponse(resp) + if err != nil { + return err + } + case <-ctx.Done(): + return nil } } } + +// HandleRPCSubscription is a generic handler for subscriptions to a specific type for rpc calls. +// +// Parameters: +// - sub: The subscription. +// - handleResponse: The function responsible for handling the response of the subscribed type. +// +// Expected errors during normal operation: +// - codes.Internal: If the subscription encounters an error or gets an unexpected response. 
+func HandleRPCSubscription[T any](sub Subscription, handleResponse func(resp T) error) error { + err := HandleSubscription(nil, sub, handleResponse) + if err != nil { + return rpc.ConvertError(err, "handle subscription error", codes.Internal) + } + + return nil +} From 5fdece5c296824d7b8651303e8a285101e572516 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Tue, 12 Nov 2024 11:10:38 +0200 Subject: [PATCH 07/64] Added missed package name for block provider --- engine/access/rest/websockets/data_providers/blocks_provider.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/access/rest/websockets/data_providers/blocks_provider.go b/engine/access/rest/websockets/data_providers/blocks_provider.go index 2aba13337c2..1443becd44a 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider.go @@ -72,7 +72,7 @@ func NewBlocksDataProvider( // // No errors are expected during normal operations. func (p *BlocksDataProvider) Run() error { - return HandleSubscription(p.ctx, p.subscription, handleResponse(p.send, p.args.BlockStatus)) + return subscription.HandleSubscription(p.ctx, p.subscription, handleResponse(p.send, p.args.BlockStatus)) } // validateArguments checks and validates the arguments passed to the provider. 
From 7c50eabf8e2bccda01c5d66f9b72405832a3269c Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Tue, 12 Nov 2024 11:34:24 +0200 Subject: [PATCH 08/64] Added test for suppoted topics for data provoder factory --- .../websockets/data_providers/factory_test.go | 44 +++++++++++++++++-- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/engine/access/rest/websockets/data_providers/factory_test.go b/engine/access/rest/websockets/data_providers/factory_test.go index 45b8aebbf67..cf258733d4f 100644 --- a/engine/access/rest/websockets/data_providers/factory_test.go +++ b/engine/access/rest/websockets/data_providers/factory_test.go @@ -5,11 +5,14 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/suite" accessmock "github.com/onflow/flow-go/access/mock" + "github.com/onflow/flow-go/engine/access/rest/common/parser" "github.com/onflow/flow-go/engine/access/state_stream" statestreammock "github.com/onflow/flow-go/engine/access/state_stream/mock" + "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) @@ -20,6 +23,9 @@ type DataProviderFactorySuite struct { ctx context.Context ch chan interface{} + accessApi *accessmock.API + stateStreamApi *statestreammock.API + factory *DataProviderFactory } @@ -32,20 +38,52 @@ func TestDataProviderFactorySuite(t *testing.T) { func (s *DataProviderFactorySuite) SetupTest() { log := unittest.Logger() eventFilterConfig := state_stream.EventFilterConfig{} - stateStreamApi := statestreammock.NewAPI(s.T()) - accessApi := accessmock.NewAPI(s.T()) + s.stateStreamApi = statestreammock.NewAPI(s.T()) + s.accessApi = accessmock.NewAPI(s.T()) s.ctx = context.Background() s.ch = make(chan interface{}) - s.factory = NewDataProviderFactory(log, eventFilterConfig, stateStreamApi, accessApi) + s.factory = NewDataProviderFactory(log, eventFilterConfig, s.stateStreamApi, s.accessApi) s.Require().NotNil(s.factory) } +// TODO: add others topic to check when they will be 
implemented // TestSupportedTopics verifies that supported topics return a valid provider and no errors. // Each test case includes a topic and arguments for which a data provider should be created. func (s *DataProviderFactorySuite) TestSupportedTopics() { + // Define supported topics and check if each returns the correct provider without errors + testCases := []struct { + name string + topic string + arguments map[string]string + mockSubscription func() + assertExpectations func() + }{ + { + name: "block topic", + topic: BlocksTopic, + arguments: map[string]string{"block_status": parser.Finalized}, + mockSubscription: func() { + s.accessApi.On("SubscribeBlocksFromLatest", mock.Anything, flow.BlockStatusFinalized).Return(nil).Once() + }, + assertExpectations: func() { + s.accessApi.AssertExpectations(s.T()) + }, + }, + } + + for _, test := range testCases { + s.Run(test.name, func() { + test.mockSubscription() + provider, err := s.factory.NewDataProvider(s.ctx, test.topic, test.arguments, s.ch) + s.Require().NotNil(provider, "Expected provider for topic %s", test.topic) + s.Require().NoError(err, "Expected no error for topic %s", test.topic) + + test.assertExpectations() + }) + } } // TestUnsupportedTopics verifies that unsupported topics do not return a provider From 5390d1a0c25e45c8c743d326c30bd760423cae16 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Tue, 12 Nov 2024 13:01:21 +0200 Subject: [PATCH 09/64] Added godoc to test --- .../websockets/data_providers/blocks_provider_test.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/engine/access/rest/websockets/data_providers/blocks_provider_test.go b/engine/access/rest/websockets/data_providers/blocks_provider_test.go index db460d4f879..7470a2e7bfe 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider_test.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider_test.go @@ -57,7 +57,13 @@ func (s *BlocksProviderSuite) SetupTest() { s.finalizedBlock = 
parent } -// TestBlocksDataProvider_InvalidArguments verifies that +// TestBlocksDataProvider_InvalidArguments tests the behavior of the block data provider +// when invalid arguments are provided. It verifies that appropriate errors are returned +// for missing or conflicting arguments. +// This test covers the test cases: +// 1. Missing 'block_status' argument. +// 2. Invalid 'block_status' argument. +// 3. Providing both 'start_block_id' and 'start_block_height' simultaneously. func (s *BlocksProviderSuite) TestBlocksDataProvider_InvalidArguments() { ctx := context.Background() send := make(chan interface{}) From 1d63e2d4e137cd0ce62ce118a41d4067332461f8 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Tue, 12 Nov 2024 13:43:32 +0200 Subject: [PATCH 10/64] Updated tests to check topic and subscription id, updated godoc --- .../websockets/data_providers/blocks_provider.go | 6 +++--- .../data_providers/blocks_provider_test.go | 2 ++ .../websockets/data_providers/data_provider.go | 2 +- .../websockets/data_providers/factory_test.go | 16 ++++++++++++---- 4 files changed, 18 insertions(+), 8 deletions(-) diff --git a/engine/access/rest/websockets/data_providers/blocks_provider.go b/engine/access/rest/websockets/data_providers/blocks_provider.go index 1443becd44a..f3acc76b9d4 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider.go @@ -42,8 +42,6 @@ func NewBlocksDataProvider( arguments map[string]string, send chan<- interface{}, ) (*BlocksDataProvider, error) { - ctx, cancel := context.WithCancel(ctx) - p := &BlocksDataProvider{ logger: logger.With().Str("component", "block-data-provider").Logger(), api: api, @@ -55,10 +53,12 @@ func NewBlocksDataProvider( return nil, fmt.Errorf("invalid arguments: %w", err) } + context, cancel := context.WithCancel(ctx) + // Set up a subscription to blocks based on the provided start block ID and block status. 
subscription := p.createSubscription(ctx) p.BaseDataProviderImpl = NewBaseDataProviderImpl( - ctx, + context, cancel, topic, send, diff --git a/engine/access/rest/websockets/data_providers/blocks_provider_test.go b/engine/access/rest/websockets/data_providers/blocks_provider_test.go index 7470a2e7bfe..4a619d7bfcf 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider_test.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider_test.go @@ -105,3 +105,5 @@ func (s *BlocksProviderSuite) TestBlocksDataProvider_InvalidArguments() { }) } } + +// TODO: add tests for responses after the WebsocketController is ready diff --git a/engine/access/rest/websockets/data_providers/data_provider.go b/engine/access/rest/websockets/data_providers/data_provider.go index 11a8bc16c38..6a3fcc8991b 100644 --- a/engine/access/rest/websockets/data_providers/data_provider.go +++ b/engine/access/rest/websockets/data_providers/data_provider.go @@ -1,6 +1,6 @@ package data_providers -// The DataProvider is the interface abstracts of the actual subscriptions used by the WebSocketCollector. +// The DataProvider is the interface abstracts of the actual data provider used by the WebSocketCollector. 
type DataProvider interface { BaseDataProvider diff --git a/engine/access/rest/websockets/data_providers/factory_test.go b/engine/access/rest/websockets/data_providers/factory_test.go index cf258733d4f..2cbbb1276df 100644 --- a/engine/access/rest/websockets/data_providers/factory_test.go +++ b/engine/access/rest/websockets/data_providers/factory_test.go @@ -57,15 +57,20 @@ func (s *DataProviderFactorySuite) TestSupportedTopics() { name string topic string arguments map[string]string - mockSubscription func() + mockSubscription func() string // return subscription id assertExpectations func() }{ { name: "block topic", topic: BlocksTopic, arguments: map[string]string{"block_status": parser.Finalized}, - mockSubscription: func() { - s.accessApi.On("SubscribeBlocksFromLatest", mock.Anything, flow.BlockStatusFinalized).Return(nil).Once() + mockSubscription: func() string { + subscription := statestreammock.NewSubscription(s.T()) + subscriptionID := unittest.IdentifierFixture().String() + subscription.On("ID").Return(subscriptionID).Once() + + s.accessApi.On("SubscribeBlocksFromLatest", mock.Anything, flow.BlockStatusFinalized).Return(subscription).Once() + return subscriptionID }, assertExpectations: func() { s.accessApi.AssertExpectations(s.T()) @@ -75,12 +80,15 @@ func (s *DataProviderFactorySuite) TestSupportedTopics() { for _, test := range testCases { s.Run(test.name, func() { - test.mockSubscription() + subscriptionID := test.mockSubscription() provider, err := s.factory.NewDataProvider(s.ctx, test.topic, test.arguments, s.ch) s.Require().NotNil(provider, "Expected provider for topic %s", test.topic) s.Require().NoError(err, "Expected no error for topic %s", test.topic) + s.Require().Equal(test.topic, provider.Topic()) + s.Require().Equal(subscriptionID, provider.ID()) + test.assertExpectations() }) } From c33dfa29fbaecf7ee261feb0a48b69680393b433 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Tue, 12 Nov 2024 13:51:29 +0200 Subject: [PATCH 11/64] Linted --- 
engine/access/subscription/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/access/subscription/util.go b/engine/access/subscription/util.go index 604cbf6e597..9f5b5b1bba9 100644 --- a/engine/access/subscription/util.go +++ b/engine/access/subscription/util.go @@ -53,7 +53,7 @@ func HandleSubscription[T any](ctx context.Context, sub Subscription, handleResp // Expected errors during normal operation: // - codes.Internal: If the subscription encounters an error or gets an unexpected response. func HandleRPCSubscription[T any](sub Subscription, handleResponse func(resp T) error) error { - err := HandleSubscription(nil, sub, handleResponse) + err := HandleSubscription(context.TODO(), sub, handleResponse) if err != nil { return rpc.ConvertError(err, "handle subscription error", codes.Internal) } From 7d60c33dfd19681c64f08ee9be5eac7832cec564 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Tue, 12 Nov 2024 17:17:52 +0200 Subject: [PATCH 12/64] Updated context for creating subscription for block data provider --- engine/access/rest/websockets/data_providers/blocks_provider.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/access/rest/websockets/data_providers/blocks_provider.go b/engine/access/rest/websockets/data_providers/blocks_provider.go index f3acc76b9d4..a1833e171bf 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider.go @@ -56,7 +56,7 @@ func NewBlocksDataProvider( context, cancel := context.WithCancel(ctx) // Set up a subscription to blocks based on the provided start block ID and block status. 
- subscription := p.createSubscription(ctx) + subscription := p.createSubscription(context) p.BaseDataProviderImpl = NewBaseDataProviderImpl( context, cancel, From 22ad4699dc75b52c6cd2e1682d0efdcabcd9fa0c Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Wed, 13 Nov 2024 11:37:14 +0200 Subject: [PATCH 13/64] Updated BlockMessageResponse, added godoc for constant values, removed ctx from HandleSubscription --- .../access/rest/common/parser/block_status.go | 2 ++ .../data_providers/base_provider.go | 15 +++----- .../data_providers/blocks_provider.go | 6 ++-- .../rest/websockets/models/block_models.go | 3 +- engine/access/subscription/util.go | 36 ++++++++----------- 5 files changed, 24 insertions(+), 38 deletions(-) diff --git a/engine/access/rest/common/parser/block_status.go b/engine/access/rest/common/parser/block_status.go index a1b6e8a7b46..efb34519894 100644 --- a/engine/access/rest/common/parser/block_status.go +++ b/engine/access/rest/common/parser/block_status.go @@ -6,6 +6,8 @@ import ( "github.com/onflow/flow-go/model/flow" ) +// Finalized and Sealed represents the status of a block. +// It is used in rest arguments to provide block status. const ( Finalized = "finalized" Sealed = "sealed" diff --git a/engine/access/rest/websockets/data_providers/base_provider.go b/engine/access/rest/websockets/data_providers/base_provider.go index 567b7647eff..86bbf9f2c0e 100644 --- a/engine/access/rest/websockets/data_providers/base_provider.go +++ b/engine/access/rest/websockets/data_providers/base_provider.go @@ -22,29 +22,22 @@ var _ BaseDataProvider = (*BaseDataProviderImpl)(nil) // BaseDataProviderImpl is the concrete implementation of the BaseDataProvider interface. // It holds common objects for the provider.
type BaseDataProviderImpl struct { - topic string - - cancel context.CancelFunc - ctx context.Context - + topic string + cancel context.CancelFunc send chan<- interface{} subscription subscription.Subscription } // NewBaseDataProviderImpl creates a new instance of BaseDataProviderImpl. func NewBaseDataProviderImpl( - ctx context.Context, cancel context.CancelFunc, topic string, send chan<- interface{}, subscription subscription.Subscription, ) *BaseDataProviderImpl { return &BaseDataProviderImpl{ - topic: topic, - - ctx: ctx, - cancel: cancel, - + topic: topic, + cancel: cancel, send: send, subscription: subscription, } diff --git a/engine/access/rest/websockets/data_providers/blocks_provider.go b/engine/access/rest/websockets/data_providers/blocks_provider.go index a1833e171bf..62da3a3cbd9 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider.go @@ -58,7 +58,6 @@ func NewBlocksDataProvider( // Set up a subscription to blocks based on the provided start block ID and block status. subscription := p.createSubscription(context) p.BaseDataProviderImpl = NewBaseDataProviderImpl( - context, cancel, topic, send, @@ -72,7 +71,7 @@ func NewBlocksDataProvider( // // No errors are expected during normal operations. func (p *BlocksDataProvider) Run() error { - return subscription.HandleSubscription(p.ctx, p.subscription, handleResponse(p.send, p.args.BlockStatus)) + return subscription.HandleSubscription(p.subscription, handleResponse(p.send, p.args.BlockStatus)) } // validateArguments checks and validates the arguments passed to the provider. 
@@ -138,8 +137,7 @@ func (p *BlocksDataProvider) createSubscription(ctx context.Context) subscriptio func handleResponse(send chan<- interface{}, blockStatus flow.BlockStatus) func(*flow.Block) error { return func(block *flow.Block) error { send <- &models.BlockMessageResponse{ - Block: block, - BlockStatus: blockStatus, + Block: block, } return nil diff --git a/engine/access/rest/websockets/models/block_models.go b/engine/access/rest/websockets/models/block_models.go index f363038808b..2e3e378d832 100644 --- a/engine/access/rest/websockets/models/block_models.go +++ b/engine/access/rest/websockets/models/block_models.go @@ -5,6 +5,5 @@ import ( ) type BlockMessageResponse struct { - Block *flow.Block `json:"block"` - BlockStatus flow.BlockStatus `json:"block_status"` + Block *flow.Block `json:"block"` } diff --git a/engine/access/subscription/util.go b/engine/access/subscription/util.go index 9f5b5b1bba9..2dadac441cf 100644 --- a/engine/access/subscription/util.go +++ b/engine/access/subscription/util.go @@ -1,7 +1,6 @@ package subscription import ( - "context" "fmt" "google.golang.org/grpc/codes" @@ -13,33 +12,28 @@ import ( // handles the received responses, and sends the processed information to the client via the provided stream using handleResponse. // // Parameters: -// - ctx: Context for the operation. // - sub: The subscription. // - handleResponse: The function responsible for handling the response of the subscribed type. // // No errors are expected during normal operations. 
-func HandleSubscription[T any](ctx context.Context, sub Subscription, handleResponse func(resp T) error) error { +func HandleSubscription[T any](sub Subscription, handleResponse func(resp T) error) error { for { - select { - case v, ok := <-sub.Channel(): - if !ok { - if sub.Err() != nil { - return fmt.Errorf("stream encountered an error: %w", sub.Err()) - } - return nil + v, ok := <-sub.Channel() + if !ok { + if sub.Err() != nil { + return fmt.Errorf("stream encountered an error: %w", sub.Err()) } + return nil + } - resp, ok := v.(T) - if !ok { - return fmt.Errorf("unexpected response type: %T", v) - } + resp, ok := v.(T) + if !ok { + return fmt.Errorf("unexpected response type: %T", v) + } - err := handleResponse(resp) - if err != nil { - return err - } - case <-ctx.Done(): - return nil + err := handleResponse(resp) + if err != nil { + return err } } } @@ -53,7 +47,7 @@ func HandleSubscription[T any](ctx context.Context, sub Subscription, handleResp // Expected errors during normal operation: // - codes.Internal: If the subscription encounters an error or gets an unexpected response. 
func HandleRPCSubscription[T any](sub Subscription, handleResponse func(resp T) error) error { - err := HandleSubscription(context.TODO(), sub, handleResponse) + err := HandleSubscription(sub, handleResponse) if err != nil { return rpc.ConvertError(err, "handle subscription error", codes.Internal) } From a9fe159f4c8a3ce82a7a854b8c7c617a4a54bdd7 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Wed, 13 Nov 2024 14:09:55 +0200 Subject: [PATCH 14/64] Updated according to comments --- .../websockets/data_providers/blocks_provider.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/engine/access/rest/websockets/data_providers/blocks_provider.go b/engine/access/rest/websockets/data_providers/blocks_provider.go index 62da3a3cbd9..605894b67ec 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider.go @@ -47,15 +47,15 @@ func NewBlocksDataProvider( api: api, } - // Validate arguments passed to the provider. - err := p.validateArguments(arguments) + // Initialize arguments passed to the provider. + err := p.initArguments(arguments) if err != nil { return nil, fmt.Errorf("invalid arguments: %w", err) } context, cancel := context.WithCancel(ctx) - // Set up a subscription to blocks based on the provided start block ID and block status. + // Set up a subscription to blocks based on arguments. subscription := p.createSubscription(context) p.BaseDataProviderImpl = NewBaseDataProviderImpl( cancel, topic, send, @@ -74,10 +74,10 @@ func NewBlocksDataProvider( // // No errors are expected during normal operations.
-func (p *BlocksDataProvider) validateArguments(arguments map[string]string) error { +func (p *BlocksDataProvider) initArguments(arguments map[string]string) error { // Parse 'block_status' if blockStatusIn, ok := arguments["block_status"]; ok { blockStatus, err := parser.ParseBlockStatus(blockStatusIn) @@ -118,7 +118,7 @@ func (p *BlocksDataProvider) validateArguments(arguments map[string]string) erro return nil } -// createSubscription creates a new subscription based on the specified start block ID or height. +// createSubscription creates a new subscription using the specified input arguments. func (p *BlocksDataProvider) createSubscription(ctx context.Context) subscription.Subscription { if p.args.StartBlockID != flow.ZeroID { return p.api.SubscribeBlocksFromStartBlockID(ctx, p.args.StartBlockID, p.args.BlockStatus) From 286d3d714cd502d346a77ef87e029d1d6f40dd49 Mon Sep 17 00:00:00 2001 From: Uliana Andrukhiv Date: Wed, 13 Nov 2024 15:18:52 +0200 Subject: [PATCH 15/64] Update topics order Co-authored-by: Andrii Slisarchuk --- engine/access/rest/websockets/data_providers/factory.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/engine/access/rest/websockets/data_providers/factory.go b/engine/access/rest/websockets/data_providers/factory.go index 7061215ac20..05b9aa29e40 100644 --- a/engine/access/rest/websockets/data_providers/factory.go +++ b/engine/access/rest/websockets/data_providers/factory.go @@ -15,11 +15,10 @@ import ( const ( EventsTopic = "events" AccountStatusesTopic = "account_statuses" + BlocksTopic = "blocks" BlockHeadersTopic = "block_headers" BlockDigestsTopic = "block_digests" TransactionStatusesTopic = "transaction_statuses" - - BlocksTopic = "blocks" ) // DataProviderFactory is responsible for creating data providers based on the From 08ec8cfc5e0630b638bc10ef28b94fc06e6b88e6 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Tue, 19 Nov 2024 14:01:57 +0200 Subject: [PATCH 16/64] Added implementation of block headers data 
provider, added unit tests --- .../data_providers/block_headers_provider.go | 93 +++++++++++++++++++ .../block_headers_provider_test.go | 46 +++++++++ .../data_providers/blocks_provider.go | 87 ++++++++--------- .../data_providers/blocks_provider_test.go | 36 ++++--- .../rest/websockets/data_providers/factory.go | 3 +- .../rest/websockets/models/block_models.go | 10 ++ 6 files changed, 218 insertions(+), 57 deletions(-) create mode 100644 engine/access/rest/websockets/data_providers/block_headers_provider.go create mode 100644 engine/access/rest/websockets/data_providers/block_headers_provider_test.go diff --git a/engine/access/rest/websockets/data_providers/block_headers_provider.go b/engine/access/rest/websockets/data_providers/block_headers_provider.go new file mode 100644 index 00000000000..969a7c60244 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/block_headers_provider.go @@ -0,0 +1,93 @@ +package data_providers + +import ( + "context" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/http/request" + "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/model/flow" +) + +// BlockHeadersDataProvider is responsible for providing block headers +type BlockHeadersDataProvider struct { + *BaseDataProviderImpl + + logger zerolog.Logger + args BlocksArguments + api access.API +} + +var _ DataProvider = (*BlockHeadersDataProvider)(nil) + +// NewBlockHeadersDataProvider creates a new instance of BlockHeadersDataProvider. 
+func NewBlockHeadersDataProvider( + ctx context.Context, + logger zerolog.Logger, + api access.API, + topic string, + arguments map[string]string, + send chan<- interface{}, +) (*BlockHeadersDataProvider, error) { + p := &BlockHeadersDataProvider{ + logger: logger.With().Str("component", "block-headers-data-provider").Logger(), + api: api, + } + + // Initialize arguments passed to the provider. + var err error + p.args, err = ParseBlocksArguments(arguments) + if err != nil { + return nil, fmt.Errorf("invalid arguments: %w", err) + } + + context, cancel := context.WithCancel(ctx) + + // Set up a subscription to block headers based on arguments. + subscription := p.createSubscription(context) + p.BaseDataProviderImpl = NewBaseDataProviderImpl( + cancel, + topic, + send, + subscription, + ) + + return p, nil +} + +// Run starts processing the subscription for block headers and handles responses. +// +// No errors are expected during normal operations. +func (p *BlockHeadersDataProvider) Run() error { + return subscription.HandleSubscription(p.subscription, p.handleResponse(p.send)) +} + +// createSubscription creates a new subscription using the specified input arguments. +func (p *BlockHeadersDataProvider) createSubscription(ctx context.Context) subscription.Subscription { + if p.args.StartBlockID != flow.ZeroID { + return p.api.SubscribeBlockHeadersFromStartBlockID(ctx, p.args.StartBlockID, p.args.BlockStatus) + } + + if p.args.StartBlockHeight != request.EmptyHeight { + return p.api.SubscribeBlockHeadersFromStartHeight(ctx, p.args.StartBlockHeight, p.args.BlockStatus) + } + + return p.api.SubscribeBlockHeadersFromLatest(ctx, p.args.BlockStatus) +} + +// handleResponse processes a block header and sends the formatted response. +// +// No errors are expected during normal operations. 
+func (p *BlockHeadersDataProvider) handleResponse(send chan<- interface{}) func(header *flow.Header) error { + return func(header *flow.Header) error { + send <- &models.BlockHeaderMessageResponse{ + Header: header, + } + + return nil + } +} diff --git a/engine/access/rest/websockets/data_providers/block_headers_provider_test.go b/engine/access/rest/websockets/data_providers/block_headers_provider_test.go new file mode 100644 index 00000000000..7d26e80d387 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/block_headers_provider_test.go @@ -0,0 +1,46 @@ +package data_providers + +import ( + "context" + "testing" + + "github.com/stretchr/testify/suite" +) + +type TestBlockHeadersProviderSuite struct { + BlocksProviderSuite +} + +func TestBackendBlockHeadersSuite(t *testing.T) { + suite.Run(t, new(TestBlockHeadersProviderSuite)) +} + +// SetupTest initializes the test suite with required dependencies. +func (s *TestBlockHeadersProviderSuite) SetupTest() { + s.BlocksProviderSuite.SetupTest() +} + +// TestBlockHeadersDataProvider_InvalidArguments tests the behavior of the block headers data provider +// when invalid arguments are provided. It verifies that appropriate errors are returned +// for missing or conflicting arguments. +// This test covers the test cases: +// 1. Missing 'block_status' argument. +// 2. Invalid 'block_status' argument. +// 3. Providing both 'start_block_id' and 'start_block_height' simultaneously. 
+func (s *TestBlockHeadersProviderSuite) TestBlockHeadersDataProvider_InvalidArguments() { + ctx := context.Background() + send := make(chan interface{}) + + topic := BlockHeadersTopic + + for _, test := range s.invalidArgumentsTestCases() { + s.Run(test.name, func() { + provider, err := NewBlockHeadersDataProvider(ctx, s.log, s.api, topic, test.arguments, send) + s.Require().Nil(provider) + s.Require().Error(err) + s.Require().Contains(err.Error(), test.expectedErrorMsg) + }) + } +} + +// TODO: add tests for responses after the WebsocketController is ready diff --git a/engine/access/rest/websockets/data_providers/blocks_provider.go b/engine/access/rest/websockets/data_providers/blocks_provider.go index 605894b67ec..01a18b5c4dd 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider.go @@ -43,12 +43,13 @@ func NewBlocksDataProvider( send chan<- interface{}, ) (*BlocksDataProvider, error) { p := &BlocksDataProvider{ - logger: logger.With().Str("component", "block-data-provider").Logger(), + logger: logger.With().Str("component", "blocks-data-provider").Logger(), api: api, } // Initialize arguments passed to the provider. - err := p.initArguments(arguments) + var err error + p.args, err = ParseBlocksArguments(arguments) if err != nil { return nil, fmt.Errorf("invalid arguments: %w", err) } @@ -71,22 +72,48 @@ func NewBlocksDataProvider( // // No errors are expected during normal operations. func (p *BlocksDataProvider) Run() error { - return subscription.HandleSubscription(p.subscription, handleResponse(p.send, p.args.BlockStatus)) + return subscription.HandleSubscription(p.subscription, p.handleResponse(p.send)) } -// initArguments checks and validates the arguments passed to the provider. +// createSubscription creates a new subscription using the specified input arguments. 
+func (p *BlocksDataProvider) createSubscription(ctx context.Context) subscription.Subscription { + if p.args.StartBlockID != flow.ZeroID { + return p.api.SubscribeBlocksFromStartBlockID(ctx, p.args.StartBlockID, p.args.BlockStatus) + } + + if p.args.StartBlockHeight != request.EmptyHeight { + return p.api.SubscribeBlocksFromStartHeight(ctx, p.args.StartBlockHeight, p.args.BlockStatus) + } + + return p.api.SubscribeBlocksFromLatest(ctx, p.args.BlockStatus) +} + +// handleResponse processes a block and sends the formatted response. // // No errors are expected during normal operations. -func (p *BlocksDataProvider) initArguments(arguments map[string]string) error { +func (p *BlocksDataProvider) handleResponse(send chan<- interface{}) func(*flow.Block) error { + return func(block *flow.Block) error { + send <- &models.BlockMessageResponse{ + Block: block, + } + + return nil + } +} + +// ParseBlocksArguments validates and initializes the blocks arguments. +func ParseBlocksArguments(arguments map[string]string) (BlocksArguments, error) { + var args BlocksArguments + // Parse 'block_status' if blockStatusIn, ok := arguments["block_status"]; ok { blockStatus, err := parser.ParseBlockStatus(blockStatusIn) if err != nil { - return err + return args, err } - p.args.BlockStatus = blockStatus + args.BlockStatus = blockStatus } else { - return fmt.Errorf("'block_status' must be provided") + return args, fmt.Errorf("'block_status' must be provided") } // Parse 'start_block_id' if provided @@ -94,52 +121,26 @@ func (p *BlocksDataProvider) initArguments(arguments map[string]string) error { var startBlockID parser.ID err := startBlockID.Parse(startBlockIDIn) if err != nil { - return err + return args, err } - p.args.StartBlockID = startBlockID.Flow() + args.StartBlockID = startBlockID.Flow() } // Parse 'start_block_height' if provided if startBlockHeightIn, ok := arguments["start_block_height"]; ok { var err error - p.args.StartBlockHeight, err = util.ToUint64(startBlockHeightIn) 
+ args.StartBlockHeight, err = util.ToUint64(startBlockHeightIn) if err != nil { - return fmt.Errorf("invalid 'start_block_height': %w", err) + return args, fmt.Errorf("invalid 'start_block_height': %w", err) } } else { - p.args.StartBlockHeight = request.EmptyHeight + args.StartBlockHeight = request.EmptyHeight } - // if both start_block_id and start_height are provided - if p.args.StartBlockID != flow.ZeroID && p.args.StartBlockHeight != request.EmptyHeight { - return fmt.Errorf("can only provide either 'start_block_id' or 'start_block_height'") + // Ensure only one of start_block_id or start_block_height is provided + if args.StartBlockID != flow.ZeroID && args.StartBlockHeight != request.EmptyHeight { + return args, fmt.Errorf("can only provide either 'start_block_id' or 'start_block_height'") } - return nil -} - -// createSubscription creates a new subscription using the specified input arguments. -func (p *BlocksDataProvider) createSubscription(ctx context.Context) subscription.Subscription { - if p.args.StartBlockID != flow.ZeroID { - return p.api.SubscribeBlocksFromStartBlockID(ctx, p.args.StartBlockID, p.args.BlockStatus) - } - - if p.args.StartBlockHeight != request.EmptyHeight { - return p.api.SubscribeBlocksFromStartHeight(ctx, p.args.StartBlockHeight, p.args.BlockStatus) - } - - return p.api.SubscribeBlocksFromLatest(ctx, p.args.BlockStatus) -} - -// handleResponse processes a block and sends the formatted response. -// -// No errors are expected during normal operations. 
-func handleResponse(send chan<- interface{}, blockStatus flow.BlockStatus) func(*flow.Block) error { - return func(block *flow.Block) error { - send <- &models.BlockMessageResponse{ - Block: block, - } - - return nil - } + return args, nil } diff --git a/engine/access/rest/websockets/data_providers/blocks_provider_test.go b/engine/access/rest/websockets/data_providers/blocks_provider_test.go index 4a619d7bfcf..9771cb54780 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider_test.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider_test.go @@ -57,18 +57,16 @@ func (s *BlocksProviderSuite) SetupTest() { s.finalizedBlock = parent } -// TestBlocksDataProvider_InvalidArguments tests the behavior of the block data provider -// when invalid arguments are provided. It verifies that appropriate errors are returned -// for missing or conflicting arguments. -// This test covers the test cases: -// 1. Missing 'block_status' argument. -// 2. Invalid 'block_status' argument. -// 3. Providing both 'start_block_id' and 'start_block_height' simultaneously. -func (s *BlocksProviderSuite) TestBlocksDataProvider_InvalidArguments() { - ctx := context.Background() - send := make(chan interface{}) - - testCases := []testErrType{ +// invalidArgumentsTestCases returns a list of test cases with invalid argument combinations +// for testing the behavior of block, block headers, block digests data providers. Each test case includes a name, +// a set of input arguments, and the expected error message that should be returned. +// +// The test cases cover scenarios such as: +// 1. Missing the required 'block_status' argument. +// 2. Providing an unknown or invalid 'block_status' value. +// 3. Supplying both 'start_block_id' and 'start_block_height' simultaneously, which is not allowed. 
+func (s *BlocksProviderSuite) invalidArgumentsTestCases() []testErrType { + return []testErrType{ { name: "missing 'block_status' argument", arguments: map[string]string{ @@ -93,10 +91,22 @@ func (s *BlocksProviderSuite) TestBlocksDataProvider_InvalidArguments() { expectedErrorMsg: "can only provide either 'start_block_id' or 'start_block_height'", }, } +} + +// TestBlocksDataProvider_InvalidArguments tests the behavior of the block data provider +// when invalid arguments are provided. It verifies that appropriate errors are returned +// for missing or conflicting arguments. +// This test covers the test cases: +// 1. Missing 'block_status' argument. +// 2. Invalid 'block_status' argument. +// 3. Providing both 'start_block_id' and 'start_block_height' simultaneously. +func (s *BlocksProviderSuite) TestBlocksDataProvider_InvalidArguments() { + ctx := context.Background() + send := make(chan interface{}) topic := BlocksTopic - for _, test := range testCases { + for _, test := range s.invalidArgumentsTestCases() { s.Run(test.name, func() { provider, err := NewBlocksDataProvider(ctx, s.log, s.api, topic, test.arguments, send) s.Require().Nil(provider) diff --git a/engine/access/rest/websockets/data_providers/factory.go b/engine/access/rest/websockets/data_providers/factory.go index 05b9aa29e40..c9439266f82 100644 --- a/engine/access/rest/websockets/data_providers/factory.go +++ b/engine/access/rest/websockets/data_providers/factory.go @@ -72,10 +72,11 @@ func (s *DataProviderFactory) NewDataProvider( switch topic { case BlocksTopic: return NewBlocksDataProvider(ctx, s.logger, s.accessApi, topic, arguments, ch) + case BlockHeadersTopic: + return NewBlockHeadersDataProvider(ctx, s.logger, s.accessApi, topic, arguments, ch) // TODO: Implemented handlers for each topic should be added in respective case case EventsTopic, AccountStatusesTopic, - BlockHeadersTopic, BlockDigestsTopic, TransactionStatusesTopic: return nil, fmt.Errorf("topic \"%s\" not implemented yet", 
topic) diff --git a/engine/access/rest/websockets/models/block_models.go b/engine/access/rest/websockets/models/block_models.go index 2e3e378d832..9eb8c30ee1f 100644 --- a/engine/access/rest/websockets/models/block_models.go +++ b/engine/access/rest/websockets/models/block_models.go @@ -4,6 +4,16 @@ import ( "github.com/onflow/flow-go/model/flow" ) +// BlockMessageResponse is the response message for 'blocks' topic. type BlockMessageResponse struct { + // The sealed or finalized blocks according to the block status + // in the request. Block *flow.Block `json:"block"` } + +// BlockHeaderMessageResponse is the response message for 'block_headers' topic. +type BlockHeaderMessageResponse struct { + // The sealed or finalized block headers according to the block status + // in the request. + Header *flow.Header `json:"header"` +} From 16100da4efcb2eedf6db95978df99d253006cd58 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Tue, 19 Nov 2024 14:28:20 +0200 Subject: [PATCH 17/64] Added implementation of block digests data provider, added unit tests --- .../data_providers/block_digests_provider.go | 93 +++++++++++++++++++ .../block_digests_provider_test.go | 46 +++++++++ .../block_headers_provider_test.go | 10 +- .../rest/websockets/data_providers/factory.go | 3 +- .../websockets/data_providers/factory_test.go | 32 +++++++ .../rest/websockets/models/block_models.go | 7 ++ 6 files changed, 185 insertions(+), 6 deletions(-) create mode 100644 engine/access/rest/websockets/data_providers/block_digests_provider.go create mode 100644 engine/access/rest/websockets/data_providers/block_digests_provider_test.go diff --git a/engine/access/rest/websockets/data_providers/block_digests_provider.go b/engine/access/rest/websockets/data_providers/block_digests_provider.go new file mode 100644 index 00000000000..30f7ded3b15 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/block_digests_provider.go @@ -0,0 +1,93 @@ +package data_providers + +import ( + "context" + "fmt" 
+ + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/http/request" + "github.com/onflow/flow-go/engine/access/rest/websockets/models" + "github.com/onflow/flow-go/engine/access/subscription" + "github.com/onflow/flow-go/model/flow" +) + +// BlockDigestsDataProvider is responsible for providing block digests +type BlockDigestsDataProvider struct { + *BaseDataProviderImpl + + logger zerolog.Logger + args BlocksArguments + api access.API +} + +var _ DataProvider = (*BlockDigestsDataProvider)(nil) + +// NewBlockDigestsDataProvider creates a new instance of BlockDigestsDataProvider. +func NewBlockDigestsDataProvider( + ctx context.Context, + logger zerolog.Logger, + api access.API, + topic string, + arguments map[string]string, + send chan<- interface{}, +) (*BlockDigestsDataProvider, error) { + p := &BlockDigestsDataProvider{ + logger: logger.With().Str("component", "block-digests-data-provider").Logger(), + api: api, + } + + // Initialize arguments passed to the provider. + var err error + p.args, err = ParseBlocksArguments(arguments) + if err != nil { + return nil, fmt.Errorf("invalid arguments: %w", err) + } + + context, cancel := context.WithCancel(ctx) + + // Set up a subscription to block digests based on arguments. + subscription := p.createSubscription(context) + p.BaseDataProviderImpl = NewBaseDataProviderImpl( + cancel, + topic, + send, + subscription, + ) + + return p, nil +} + +// Run starts processing the subscription for block digests and handles responses. +// +// No errors are expected during normal operations. +func (p *BlockDigestsDataProvider) Run() error { + return subscription.HandleSubscription(p.subscription, p.handleResponse(p.send)) +} + +// createSubscription creates a new subscription using the specified input arguments. 
+func (p *BlockDigestsDataProvider) createSubscription(ctx context.Context) subscription.Subscription { + if p.args.StartBlockID != flow.ZeroID { + return p.api.SubscribeBlockDigestsFromStartBlockID(ctx, p.args.StartBlockID, p.args.BlockStatus) + } + + if p.args.StartBlockHeight != request.EmptyHeight { + return p.api.SubscribeBlockDigestsFromStartHeight(ctx, p.args.StartBlockHeight, p.args.BlockStatus) + } + + return p.api.SubscribeBlockDigestsFromLatest(ctx, p.args.BlockStatus) +} + +// handleResponse processes a block digest and sends the formatted response. +// +// No errors are expected during normal operations. +func (p *BlockDigestsDataProvider) handleResponse(send chan<- interface{}) func(block *flow.BlockDigest) error { + return func(block *flow.BlockDigest) error { + send <- &models.BlockDigestMessageResponse{ + Block: block, + } + + return nil + } +} diff --git a/engine/access/rest/websockets/data_providers/block_digests_provider_test.go b/engine/access/rest/websockets/data_providers/block_digests_provider_test.go new file mode 100644 index 00000000000..57fdabd7994 --- /dev/null +++ b/engine/access/rest/websockets/data_providers/block_digests_provider_test.go @@ -0,0 +1,46 @@ +package data_providers + +import ( + "context" + "testing" + + "github.com/stretchr/testify/suite" +) + +type BlockDigestsProviderSuite struct { + BlocksProviderSuite +} + +func TestBlockDigestsProviderSuite(t *testing.T) { + suite.Run(t, new(BlockDigestsProviderSuite)) +} + +// SetupTest initializes the test suite with required dependencies. +func (s *BlockDigestsProviderSuite) SetupTest() { + s.BlocksProviderSuite.SetupTest() +} + +// TestBlockDigestsDataProvider_InvalidArguments tests the behavior of the block digests data provider +// when invalid arguments are provided. It verifies that appropriate errors are returned +// for missing or conflicting arguments. +// This test covers the test cases: +// 1. Missing 'block_status' argument. +// 2. Invalid 'block_status' argument. 
+// 3. Providing both 'start_block_id' and 'start_block_height' simultaneously. +func (s *BlockDigestsProviderSuite) TestBlockDigestsDataProvider_InvalidArguments() { + ctx := context.Background() + send := make(chan interface{}) + + topic := BlockDigestsTopic + + for _, test := range s.invalidArgumentsTestCases() { + s.Run(test.name, func() { + provider, err := NewBlockDigestsDataProvider(ctx, s.log, s.api, topic, test.arguments, send) + s.Require().Nil(provider) + s.Require().Error(err) + s.Require().Contains(err.Error(), test.expectedErrorMsg) + }) + } +} + +// TODO: add tests for responses after the WebsocketController is ready diff --git a/engine/access/rest/websockets/data_providers/block_headers_provider_test.go b/engine/access/rest/websockets/data_providers/block_headers_provider_test.go index 7d26e80d387..efd94916e92 100644 --- a/engine/access/rest/websockets/data_providers/block_headers_provider_test.go +++ b/engine/access/rest/websockets/data_providers/block_headers_provider_test.go @@ -7,16 +7,16 @@ import ( "github.com/stretchr/testify/suite" ) -type TestBlockHeadersProviderSuite struct { +type BlockHeadersProviderSuite struct { BlocksProviderSuite } -func TestBackendBlockHeadersSuite(t *testing.T) { - suite.Run(t, new(TestBlockHeadersProviderSuite)) +func TestBlockHeadersProviderSuite(t *testing.T) { + suite.Run(t, new(BlockHeadersProviderSuite)) } // SetupTest initializes the test suite with required dependencies. -func (s *TestBlockHeadersProviderSuite) SetupTest() { +func (s *BlockHeadersProviderSuite) SetupTest() { s.BlocksProviderSuite.SetupTest() } @@ -27,7 +27,7 @@ func (s *TestBlockHeadersProviderSuite) SetupTest() { // 1. Missing 'block_status' argument. // 2. Invalid 'block_status' argument. // 3. Providing both 'start_block_id' and 'start_block_height' simultaneously. 
-func (s *TestBlockHeadersProviderSuite) TestBlockHeadersDataProvider_InvalidArguments() { +func (s *BlockHeadersProviderSuite) TestBlockHeadersDataProvider_InvalidArguments() { ctx := context.Background() send := make(chan interface{}) diff --git a/engine/access/rest/websockets/data_providers/factory.go b/engine/access/rest/websockets/data_providers/factory.go index c9439266f82..ccff93488a0 100644 --- a/engine/access/rest/websockets/data_providers/factory.go +++ b/engine/access/rest/websockets/data_providers/factory.go @@ -74,10 +74,11 @@ func (s *DataProviderFactory) NewDataProvider( return NewBlocksDataProvider(ctx, s.logger, s.accessApi, topic, arguments, ch) case BlockHeadersTopic: return NewBlockHeadersDataProvider(ctx, s.logger, s.accessApi, topic, arguments, ch) + case BlockDigestsTopic: + return NewBlockDigestsDataProvider(ctx, s.logger, s.accessApi, topic, arguments, ch) // TODO: Implemented handlers for each topic should be added in respective case case EventsTopic, AccountStatusesTopic, - BlockDigestsTopic, TransactionStatusesTopic: return nil, fmt.Errorf("topic \"%s\" not implemented yet", topic) default: diff --git a/engine/access/rest/websockets/data_providers/factory_test.go b/engine/access/rest/websockets/data_providers/factory_test.go index 2cbbb1276df..3d85778f3a9 100644 --- a/engine/access/rest/websockets/data_providers/factory_test.go +++ b/engine/access/rest/websockets/data_providers/factory_test.go @@ -76,6 +76,38 @@ func (s *DataProviderFactorySuite) TestSupportedTopics() { s.accessApi.AssertExpectations(s.T()) }, }, + { + name: "block headers topic", + topic: BlockHeadersTopic, + arguments: map[string]string{"block_status": parser.Finalized}, + mockSubscription: func() string { + subscription := statestreammock.NewSubscription(s.T()) + subscriptionID := unittest.IdentifierFixture().String() + subscription.On("ID").Return(subscriptionID).Once() + + s.accessApi.On("SubscribeBlockHeadersFromLatest", mock.Anything, 
flow.BlockStatusFinalized).Return(subscription).Once() + return subscriptionID + }, + assertExpectations: func() { + s.accessApi.AssertExpectations(s.T()) + }, + }, + { + name: "block digests topic", + topic: BlockDigestsTopic, + arguments: map[string]string{"block_status": parser.Finalized}, + mockSubscription: func() string { + subscription := statestreammock.NewSubscription(s.T()) + subscriptionID := unittest.IdentifierFixture().String() + subscription.On("ID").Return(subscriptionID).Once() + + s.accessApi.On("SubscribeBlockDigestsFromLatest", mock.Anything, flow.BlockStatusFinalized).Return(subscription).Once() + return subscriptionID + }, + assertExpectations: func() { + s.accessApi.AssertExpectations(s.T()) + }, + }, } for _, test := range testCases { diff --git a/engine/access/rest/websockets/models/block_models.go b/engine/access/rest/websockets/models/block_models.go index 9eb8c30ee1f..fa7af987236 100644 --- a/engine/access/rest/websockets/models/block_models.go +++ b/engine/access/rest/websockets/models/block_models.go @@ -17,3 +17,10 @@ type BlockHeaderMessageResponse struct { // in the request. Header *flow.Header `json:"header"` } + +// BlockDigestMessageResponse is the response message for 'block_digests' topic. +type BlockDigestMessageResponse struct { + // The sealed or finalized block digest according to the block status + // in the request. 
+ Block *flow.BlockDigest `json:"block_digest"` +} From 2fa0767fcb527d97440bcb0c85e603e24e05ebcd Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Tue, 19 Nov 2024 17:19:17 +0200 Subject: [PATCH 18/64] Updated according to comments --- engine/access/rest/server.go | 2 +- .../data_providers/blocks_provider.go | 2 +- .../rest/websockets/data_providers/factory.go | 11 +++---- .../websockets/data_providers/factory_test.go | 33 ++++++++++--------- 4 files changed, 23 insertions(+), 25 deletions(-) diff --git a/engine/access/rest/server.go b/engine/access/rest/server.go index f3c0a79194a..efaf36bdd44 100644 --- a/engine/access/rest/server.go +++ b/engine/access/rest/server.go @@ -50,7 +50,7 @@ func NewServer(serverAPI access.API, } // TODO: add new websocket routes - _ = data_providers.NewDataProviderFactory(logger, stateStreamConfig.EventFilterConfig, stateStreamApi, serverAPI) + _ = data_providers.NewDataProviderFactory(logger, stateStreamApi, serverAPI) c := cors.New(cors.Options{ AllowedOrigins: []string{"*"}, diff --git a/engine/access/rest/websockets/data_providers/blocks_provider.go b/engine/access/rest/websockets/data_providers/blocks_provider.go index 01a18b5c4dd..691e165f9e8 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider.go @@ -15,7 +15,7 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// BlocksArguments contains the arguments required for subscribing to blocks +// BlocksArguments contains the arguments required for subscribing to blocks / block headers / block digests type BlocksArguments struct { StartBlockID flow.Identifier // ID of the block to start subscription from StartBlockHeight uint64 // Height of the block to start subscription from diff --git a/engine/access/rest/websockets/data_providers/factory.go b/engine/access/rest/websockets/data_providers/factory.go index ccff93488a0..6ec8dd3185a 100644 --- 
a/engine/access/rest/websockets/data_providers/factory.go +++ b/engine/access/rest/websockets/data_providers/factory.go @@ -25,8 +25,7 @@ const ( // requested topic. It manages access to logging, state stream configuration, // and relevant APIs needed to retrieve data. type DataProviderFactory struct { - logger zerolog.Logger - eventFilterConfig state_stream.EventFilterConfig + logger zerolog.Logger stateStreamApi state_stream.API accessApi access.API @@ -41,15 +40,13 @@ type DataProviderFactory struct { // - accessApi: API for accessing data from the Flow Access API. func NewDataProviderFactory( logger zerolog.Logger, - eventFilterConfig state_stream.EventFilterConfig, stateStreamApi state_stream.API, accessApi access.API, ) *DataProviderFactory { return &DataProviderFactory{ - logger: logger, - eventFilterConfig: eventFilterConfig, - stateStreamApi: stateStreamApi, - accessApi: accessApi, + logger: logger, + stateStreamApi: stateStreamApi, + accessApi: accessApi, } } diff --git a/engine/access/rest/websockets/data_providers/factory_test.go b/engine/access/rest/websockets/data_providers/factory_test.go index 3d85778f3a9..90c3c8d3b93 100644 --- a/engine/access/rest/websockets/data_providers/factory_test.go +++ b/engine/access/rest/websockets/data_providers/factory_test.go @@ -10,7 +10,6 @@ import ( accessmock "github.com/onflow/flow-go/access/mock" "github.com/onflow/flow-go/engine/access/rest/common/parser" - "github.com/onflow/flow-go/engine/access/state_stream" statestreammock "github.com/onflow/flow-go/engine/access/state_stream/mock" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" @@ -37,17 +36,26 @@ func TestDataProviderFactorySuite(t *testing.T) { // It initializes the factory with mock instances and validates that it is created successfully. 
func (s *DataProviderFactorySuite) SetupTest() { log := unittest.Logger() - eventFilterConfig := state_stream.EventFilterConfig{} s.stateStreamApi = statestreammock.NewAPI(s.T()) s.accessApi = accessmock.NewAPI(s.T()) s.ctx = context.Background() s.ch = make(chan interface{}) - s.factory = NewDataProviderFactory(log, eventFilterConfig, s.stateStreamApi, s.accessApi) + s.factory = NewDataProviderFactory(log, s.stateStreamApi, s.accessApi) s.Require().NotNil(s.factory) } +// mockSubscription creates a mock subscription instance for testing purposes. +// It sets up the mock subscription's ID method to return a predefined identifiers +func (s *DataProviderFactorySuite) mockSubscription() *statestreammock.Subscription { + subscription := statestreammock.NewSubscription(s.T()) + subscriptionID := unittest.IdentifierFixture().String() + subscription.On("ID").Return(subscriptionID).Twice() + + return subscription +} + // TODO: add others topic to check when they will be implemented // TestSupportedTopics verifies that supported topics return a valid provider and no errors. // Each test case includes a topic and arguments for which a data provider should be created. 
@@ -65,12 +73,9 @@ func (s *DataProviderFactorySuite) TestSupportedTopics() { topic: BlocksTopic, arguments: map[string]string{"block_status": parser.Finalized}, mockSubscription: func() string { - subscription := statestreammock.NewSubscription(s.T()) - subscriptionID := unittest.IdentifierFixture().String() - subscription.On("ID").Return(subscriptionID).Once() - + subscription := s.mockSubscription() s.accessApi.On("SubscribeBlocksFromLatest", mock.Anything, flow.BlockStatusFinalized).Return(subscription).Once() - return subscriptionID + return subscription.ID() }, assertExpectations: func() { s.accessApi.AssertExpectations(s.T()) @@ -81,12 +86,10 @@ func (s *DataProviderFactorySuite) TestSupportedTopics() { topic: BlockHeadersTopic, arguments: map[string]string{"block_status": parser.Finalized}, mockSubscription: func() string { - subscription := statestreammock.NewSubscription(s.T()) - subscriptionID := unittest.IdentifierFixture().String() - subscription.On("ID").Return(subscriptionID).Once() + subscription := s.mockSubscription() s.accessApi.On("SubscribeBlockHeadersFromLatest", mock.Anything, flow.BlockStatusFinalized).Return(subscription).Once() - return subscriptionID + return subscription.ID() }, assertExpectations: func() { s.accessApi.AssertExpectations(s.T()) @@ -97,12 +100,10 @@ func (s *DataProviderFactorySuite) TestSupportedTopics() { topic: BlockDigestsTopic, arguments: map[string]string{"block_status": parser.Finalized}, mockSubscription: func() string { - subscription := statestreammock.NewSubscription(s.T()) - subscriptionID := unittest.IdentifierFixture().String() - subscription.On("ID").Return(subscriptionID).Once() + subscription := s.mockSubscription() s.accessApi.On("SubscribeBlockDigestsFromLatest", mock.Anything, flow.BlockStatusFinalized).Return(subscription).Once() - return subscriptionID + return subscription.ID() }, assertExpectations: func() { s.accessApi.AssertExpectations(s.T()) From c3391936e4e4d4b0737bf9f05e6a4ba9eec405e8 Mon Sep 
17 00:00:00 2001 From: UlyanaAndrukhiv Date: Tue, 19 Nov 2024 17:59:36 +0200 Subject: [PATCH 19/64] Updated TestSupportedTopics unit test --- .../websockets/data_providers/factory_test.go | 36 ++++++++----------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/engine/access/rest/websockets/data_providers/factory_test.go b/engine/access/rest/websockets/data_providers/factory_test.go index 90c3c8d3b93..1b56a4d7cac 100644 --- a/engine/access/rest/websockets/data_providers/factory_test.go +++ b/engine/access/rest/websockets/data_providers/factory_test.go @@ -46,14 +46,16 @@ func (s *DataProviderFactorySuite) SetupTest() { s.Require().NotNil(s.factory) } -// mockSubscription creates a mock subscription instance for testing purposes. -// It sets up the mock subscription's ID method to return a predefined identifiers -func (s *DataProviderFactorySuite) mockSubscription() *statestreammock.Subscription { +// setupSubscription creates a mock subscription instance for testing purposes. +// It configures the mock subscription's `ID` method to return a predefined subscription identifier. +// Additionally, it sets the return value of the specified API call to the mock subscription. 
+func (s *DataProviderFactorySuite) setupSubscription(apiCall *mock.Call) string { subscription := statestreammock.NewSubscription(s.T()) subscriptionID := unittest.IdentifierFixture().String() - subscription.On("ID").Return(subscriptionID).Twice() + subscription.On("ID").Return(subscriptionID).Once() - return subscription + apiCall.Return(subscription).Once() + return subscriptionID } // TODO: add others topic to check when they will be implemented @@ -65,17 +67,15 @@ func (s *DataProviderFactorySuite) TestSupportedTopics() { name string topic string arguments map[string]string - mockSubscription func() string // return subscription id + setupSubscription func() string // return subscription id assertExpectations func() }{ { name: "block topic", topic: BlocksTopic, arguments: map[string]string{"block_status": parser.Finalized}, - mockSubscription: func() string { - subscription := s.mockSubscription() - s.accessApi.On("SubscribeBlocksFromLatest", mock.Anything, flow.BlockStatusFinalized).Return(subscription).Once() - return subscription.ID() + setupSubscription: func() string { + return s.setupSubscription(s.accessApi.On("SubscribeBlocksFromLatest", mock.Anything, flow.BlockStatusFinalized)) }, assertExpectations: func() { s.accessApi.AssertExpectations(s.T()) @@ -85,11 +85,8 @@ func (s *DataProviderFactorySuite) TestSupportedTopics() { name: "block headers topic", topic: BlockHeadersTopic, arguments: map[string]string{"block_status": parser.Finalized}, - mockSubscription: func() string { - subscription := s.mockSubscription() - - s.accessApi.On("SubscribeBlockHeadersFromLatest", mock.Anything, flow.BlockStatusFinalized).Return(subscription).Once() - return subscription.ID() + setupSubscription: func() string { + return s.setupSubscription(s.accessApi.On("SubscribeBlockHeadersFromLatest", mock.Anything, flow.BlockStatusFinalized)) }, assertExpectations: func() { s.accessApi.AssertExpectations(s.T()) @@ -99,11 +96,8 @@ func (s *DataProviderFactorySuite) 
TestSupportedTopics() { name: "block digests topic", topic: BlockDigestsTopic, arguments: map[string]string{"block_status": parser.Finalized}, - mockSubscription: func() string { - subscription := s.mockSubscription() - - s.accessApi.On("SubscribeBlockDigestsFromLatest", mock.Anything, flow.BlockStatusFinalized).Return(subscription).Once() - return subscription.ID() + setupSubscription: func() string { + return s.setupSubscription(s.accessApi.On("SubscribeBlockDigestsFromLatest", mock.Anything, flow.BlockStatusFinalized)) }, assertExpectations: func() { s.accessApi.AssertExpectations(s.T()) @@ -113,7 +107,7 @@ func (s *DataProviderFactorySuite) TestSupportedTopics() { for _, test := range testCases { s.Run(test.name, func() { - subscriptionID := test.mockSubscription() + subscriptionID := test.setupSubscription() provider, err := s.factory.NewDataProvider(s.ctx, test.topic, test.arguments, s.ch) s.Require().NotNil(provider, "Expected provider for topic %s", test.topic) From 9cc130194997a6baa1d561322bbb2b571f29e3ba Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Fri, 22 Nov 2024 16:30:17 +0200 Subject: [PATCH 20/64] Updated godoc, fixed warning with naming --- .../data_providers/block_digests_provider.go | 4 ++-- .../data_providers/block_headers_provider.go | 4 ++-- .../websockets/data_providers/blocks_provider.go | 4 ++-- .../access/rest/websockets/data_providers/factory.go | 12 ++++++------ 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/engine/access/rest/websockets/data_providers/block_digests_provider.go b/engine/access/rest/websockets/data_providers/block_digests_provider.go index 3bb9bc64288..1d99f1a7a8d 100644 --- a/engine/access/rest/websockets/data_providers/block_digests_provider.go +++ b/engine/access/rest/websockets/data_providers/block_digests_provider.go @@ -45,13 +45,13 @@ func NewBlockDigestsDataProvider( return nil, fmt.Errorf("invalid arguments: %w", err) } - context, cancel := context.WithCancel(ctx) + ctx, cancel := 
context.WithCancel(ctx) p.BaseDataProviderImpl = NewBaseDataProviderImpl( topic, cancel, send, - p.createSubscription(context), // Set up a subscription to block digests based on arguments. + p.createSubscription(ctx), // Set up a subscription to block digests based on arguments. ) return p, nil diff --git a/engine/access/rest/websockets/data_providers/block_headers_provider.go b/engine/access/rest/websockets/data_providers/block_headers_provider.go index 4886da3e18f..40bdb2ddd5c 100644 --- a/engine/access/rest/websockets/data_providers/block_headers_provider.go +++ b/engine/access/rest/websockets/data_providers/block_headers_provider.go @@ -45,13 +45,13 @@ func NewBlockHeadersDataProvider( return nil, fmt.Errorf("invalid arguments: %w", err) } - context, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(ctx) p.BaseDataProviderImpl = NewBaseDataProviderImpl( topic, cancel, send, - p.createSubscription(context), // Set up a subscription to block headers based on arguments. + p.createSubscription(ctx), // Set up a subscription to block headers based on arguments. ) return p, nil diff --git a/engine/access/rest/websockets/data_providers/blocks_provider.go b/engine/access/rest/websockets/data_providers/blocks_provider.go index eac39983863..f340c27ba54 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider.go @@ -54,13 +54,13 @@ func NewBlocksDataProvider( return nil, fmt.Errorf("invalid arguments: %w", err) } - context, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(ctx) p.BaseDataProviderImpl = NewBaseDataProviderImpl( topic, cancel, send, - p.createSubscription(context), // Set up a subscription to blocks based on arguments. + p.createSubscription(ctx), // Set up a subscription to blocks based on arguments. 
) return p, nil diff --git a/engine/access/rest/websockets/data_providers/factory.go b/engine/access/rest/websockets/data_providers/factory.go index 8761cb35a69..c9c187a0117 100644 --- a/engine/access/rest/websockets/data_providers/factory.go +++ b/engine/access/rest/websockets/data_providers/factory.go @@ -21,9 +21,9 @@ const ( TransactionStatusesTopic = "transaction_statuses" ) -// DataProviderFactory is responsible for creating data providers based on the -// requested topic. It manages access to logging, state stream configuration, -// and relevant APIs needed to retrieve data. +// DataProviderFactory defines an interface for creating data providers +// based on specified topics. The factory abstracts the creation process +// and ensures consistent access to required APIs. type DataProviderFactory interface { // NewDataProvider creates a new data provider based on the specified topic // and configuration parameters. @@ -34,9 +34,9 @@ type DataProviderFactory interface { var _ DataProviderFactory = (*DataProviderFactoryImpl)(nil) -// DataProviderFactoryImpl is responsible for creating data providers based on the -// requested topic. It manages access to logging, state stream configuration, -// and relevant APIs needed to retrieve data. +// DataProviderFactoryImpl is an implementation of the DataProviderFactory interface. +// It is responsible for creating data providers based on the +// requested topic. It manages access to logging and relevant APIs needed to retrieve data. 
type DataProviderFactoryImpl struct { logger zerolog.Logger From 64716a546f89b1beebdb96a71322d0ffcb922eec Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Fri, 22 Nov 2024 16:34:44 +0200 Subject: [PATCH 21/64] Updated mocks --- .../data_providers/mock/data_provider.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/engine/access/rest/websockets/data_providers/mock/data_provider.go b/engine/access/rest/websockets/data_providers/mock/data_provider.go index 478f1625ad5..48debb23ae3 100644 --- a/engine/access/rest/websockets/data_providers/mock/data_provider.go +++ b/engine/access/rest/websockets/data_providers/mock/data_provider.go @@ -2,7 +2,10 @@ package mock -import mock "github.com/stretchr/testify/mock" +import ( + uuid "github.com/google/uuid" + mock "github.com/stretchr/testify/mock" +) // DataProvider is an autogenerated mock type for the DataProvider type type DataProvider struct { @@ -15,18 +18,20 @@ func (_m *DataProvider) Close() { } // ID provides a mock function with given fields: -func (_m *DataProvider) ID() string { +func (_m *DataProvider) ID() uuid.UUID { ret := _m.Called() if len(ret) == 0 { panic("no return value specified for ID") } - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { + var r0 uuid.UUID + if rf, ok := ret.Get(0).(func() uuid.UUID); ok { r0 = rf() } else { - r0 = ret.Get(0).(string) + if ret.Get(0) != nil { + r0 = ret.Get(0).(uuid.UUID) + } } return r0 From 9bf550caeec2dce57939be29a75dad8ef7226fe7 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 20 Nov 2024 17:27:42 -0800 Subject: [PATCH 22/64] add testcase for offchain evm backward compatibilities --- fvm/evm/offchain/utils/collection_test.go | 326 ++++++++++++++++++++-- fvm/evm/testutils/backend.go | 23 +- 2 files changed, 323 insertions(+), 26 deletions(-) diff --git a/fvm/evm/offchain/utils/collection_test.go b/fvm/evm/offchain/utils/collection_test.go index a90a8f57bea..d4fe05dcf8f 100644 --- 
a/fvm/evm/offchain/utils/collection_test.go +++ b/fvm/evm/offchain/utils/collection_test.go @@ -2,45 +2,252 @@ package utils_test import ( "bufio" + "encoding/gob" "encoding/hex" "encoding/json" + "fmt" "os" + "path/filepath" "strings" "testing" - "github.com/onflow/cadence" - "github.com/onflow/cadence/encoding/ccf" "github.com/rs/zerolog" + "github.com/rs/zerolog/log" "github.com/stretchr/testify/require" + "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" + "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/evm" "github.com/onflow/flow-go/fvm/evm/events" "github.com/onflow/flow-go/fvm/evm/offchain/blocks" + "github.com/onflow/flow-go/fvm/evm/offchain/storage" "github.com/onflow/flow-go/fvm/evm/offchain/sync" "github.com/onflow/flow-go/fvm/evm/offchain/utils" . "github.com/onflow/flow-go/fvm/evm/testutils" - "github.com/onflow/flow-go/fvm/evm/types" "github.com/onflow/flow-go/model/flow" ) -func ReplyingCollectionFromScratch( +func TestTestnetBackwardCompatibility(t *testing.T) { + t.Skip("TIME CONSUMING TESTS. Enable the tests with the events files saved in local") + // how to run this tests + // Note: this is a time consuming tests, so please run it in local + // + // 1) run the following cli to get the events files across different sporks + + // flow events get A.8c5303eaa26202d6.EVM.TransactionExecuted A.8c5303eaa26202d6.EVM.BlockExecuted + // --start 211176670 --end 211176770 --network testnet --host access-001.devnet51.nodes.onflow.org:9000 + // > ~/Downloads/events_devnet51_1.jsonl + // ... 
+ // + // 2) comment the above t.Skip, and update the events file paths and checkpoint dir + // to run the tests + BackwardCompatibleSinceEVMGenesisBlock( + t, flow.Testnet, []string{ + "~/Downloads/events_devnet51_1.jsonl", + "~/Downloads/events_devnet51_2.jsonl", + }, + "~/Downloads/", + 0, + ) +} + +// BackwardCompatibilityTestSinceEVMGenesisBlock verifies that the offchain package +// is able to read EVM events from the given file paths and replay blocks since the +// EVM genesis block and derive a consistant state as the latest onchain EVM state. +// the eventsFilePaths is a list of file paths that contain ordered EVM events in JSONL format. +// The EVM events file can be queried by flow cli query, for instance: +// +// flow events get A.8c5303eaa26202d6.EVM.TransactionExecuted A.8c5303eaa26202d6.EVM.BlockExecuted +// --start 211176670 --end 211176770 --network testnet --host access-001.devnet51.nodes.onflow.org:9000 +// +// After replaying with each event json file, it will generate a values_.gob and +// allocators_.gob files as checkpoint, such that when the checkpoint exists, it will loaded +// and skil replaying the coresponding event json files. + +// backwardCompatibilityTestSinceEVMGenesisBlock ensures that the offchain package +// can read EVM events from the provided file paths, replay blocks starting from +// the EVM genesis block, and derive a consistent state matching the latest on-chain EVM state. +// +// The parameter `eventsFilePaths` is a list of file paths containing ordered EVM events in JSONL format. +// These EVM event files can be generated using the Flow CLI query command, for example: +// +// flow events get A.8c5303eaa26202d6.EVM.TransactionExecuted A.8c5303eaa26202d6.EVM.BlockExecuted +// +// --start 211176670 --end 211176770 --network testnet --host access-001.devnet51.nodes.onflow.org:9000 +// +// During the replay process, it will generate `values_.gob` and +// `allocators_.gob` checkpoint files for each height. 
If these checkpoint files exist, +// the corresponding event JSON files will be skipped to optimize replay. +func BackwardCompatibleSinceEVMGenesisBlock( t *testing.T, chainID flow.ChainID, - storage types.BackendStorage, - filePath string, + eventsFilePaths []string, // ordered EVM events in JSONL format + checkpointDir string, + checkpointEndHeight uint64, // EVM height of an EVM state that a checkpoint was created for ) { + // ensure that checkpoints are not more than the event files + require.True(t, len(eventsFilePaths) > 0) + + log.Info().Msgf("replaying EVM events from %v to %v, with checkpoints in %s, and checkpointEndHeight: %v", + eventsFilePaths[0], eventsFilePaths[len(eventsFilePaths)-1], + checkpointDir, checkpointEndHeight) + + store, checkpointEndHeightOrZero := initStorageWithCheckpoints(t, chainID, checkpointDir, checkpointEndHeight) + + // the events to replay + nextHeight := checkpointEndHeightOrZero + 1 + + // replay each event files + for _, eventsFilePath := range eventsFilePaths { + log.Info().Msgf("replaying events from %v, nextHeight: %v", eventsFilePath, nextHeight) + + checkpointEndHeight := replayEvents(t, chainID, store, eventsFilePath, checkpointDir, nextHeight) + nextHeight = checkpointEndHeight + 1 + } + log.Info(). + Msgf("succhessfully replayed all events and state changes are consistent with onchain state change. 
nextHeight: %v", nextHeight) +} + +func initStorageWithCheckpoints(t *testing.T, chainID flow.ChainID, checkpointDir string, checkpointEndHeight uint64) ( + *TestValueStore, uint64, +) { rootAddr := evm.StorageAccountAddress(chainID) - // setup the rootAddress account - as := environment.NewAccountStatus() - err := storage.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), as.ToBytes()) + // if there is no checkpoint, create a empty store and initialize the account status, + // return 0 as the genesis height + if checkpointEndHeight == 0 { + store := GetSimpleValueStore() + as := environment.NewAccountStatus() + require.NoError(t, store.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), as.ToBytes())) + + return store, 0 + } + + valueFileName, allocatorFileName := checkpointFileNamesByEndHeight(checkpointDir, checkpointEndHeight) + values, err := deserialize(valueFileName) require.NoError(t, err) + allocators, err := deserializeAllocator(allocatorFileName) + require.NoError(t, err) + store := GetSimpleValueStorePopulated(values, allocators) + return store, checkpointEndHeight +} - bp, err := blocks.NewBasicProvider(chainID, storage, rootAddr) +func replayEvents( + t *testing.T, + chainID flow.ChainID, + store *TestValueStore, eventsFilePath string, checkpointDir string, initialNextHeight uint64) uint64 { + + rootAddr := evm.StorageAccountAddress(chainID) + + bpStorage := storage.NewEphemeralStorage(store) + bp, err := blocks.NewBasicProvider(chainID, bpStorage, rootAddr) require.NoError(t, err) + nextHeight := initialNextHeight + + scanEventFilesAndRun(t, eventsFilePath, + func(blockEventPayload *events.BlockEventPayload, txEvents []events.TransactionEventPayload) error { + if blockEventPayload.Height != nextHeight { + return fmt.Errorf( + "expected height for next block event to be %v, but got %v", + nextHeight, blockEventPayload.Height) + } + + err = bp.OnBlockReceived(blockEventPayload) + require.NoError(t, err) + + sp := NewTestStorageProvider(store, 
blockEventPayload.Height) + cr := sync.NewReplayer(chainID, rootAddr, sp, bp, zerolog.Logger{}, nil, true) + res, err := cr.ReplayBlock(txEvents, blockEventPayload) + require.NoError(t, err) + + // commit all changes + for k, v := range res.StorageRegisterUpdates() { + err = store.SetValue([]byte(k.Owner), []byte(k.Key), v) + require.NoError(t, err) + } + + err = bp.OnBlockExecuted(blockEventPayload.Height, res) + require.NoError(t, err) + + // commit all block hash list changes + for k, v := range bpStorage.StorageRegisterUpdates() { + err = store.SetValue([]byte(k.Owner), []byte(k.Key), v) + require.NoError(t, err) + } + + // verify the block height is sequential without gap + nextHeight++ + + return nil + }) + + checkpointEndHeight := nextHeight - 1 + + log.Info().Msgf("finished replaying events from %v to %v, creating checkpoint", initialNextHeight, checkpointEndHeight) + valuesFile, allocatorsFile := dumpCheckpoint(t, store, checkpointDir, checkpointEndHeight) + log.Info().Msgf("checkpoint created: %v, %v", valuesFile, allocatorsFile) + + return checkpointEndHeight +} + +func checkpointFileNamesByEndHeight(dir string, endHeight uint64) (string, string) { + return filepath.Join(dir, fmt.Sprintf("values_%d.gob", endHeight)), + filepath.Join(dir, fmt.Sprintf("allocators_%d.gob", endHeight)) +} + +func dumpCheckpoint(t *testing.T, store *TestValueStore, dir string, checkpointEndHeight uint64) (string, string) { + valuesFileName, allocatorsFileName := checkpointFileNamesByEndHeight(dir, checkpointEndHeight) + values, allocators := store.Dump() + + require.NoError(t, serialize(valuesFileName, values)) + require.NoError(t, serializeAllocator(allocatorsFileName, allocators)) + return valuesFileName, allocatorsFileName +} + +const resume_height = 6559268 + +func decodeFullKey(encoded string) ([]byte, []byte, error) { + // Split the encoded string at the first occurrence of "~" + parts := strings.SplitN(encoded, "~", 2) + if len(parts) != 2 { + return nil, nil, 
fmt.Errorf("invalid encoded key: no delimiter found") + } + + // Convert the split parts back to byte slices + owner := []byte(parts[0]) + key := []byte(parts[1]) + return owner, key, nil +} + +type Subscription[T any] struct { + ch chan T + err error +} + +func NewSubscription[T any]() *Subscription[T] { + return &Subscription[T]{ + ch: make(chan T), + } +} + +func (s *Subscription[T]) Channel() <-chan T { + return s.ch +} + +func (s *Subscription[T]) Err() error { + return s.err +} + +// scanEventFilesAndRun +func scanEventFilesAndRun( + t *testing.T, + filePath string, + handler func(*events.BlockEventPayload, []events.TransactionEventPayload) error, +) { file, err := os.Open(filePath) require.NoError(t, err) defer file.Close() @@ -65,21 +272,8 @@ func ReplyingCollectionFromScratch( blockEventPayload, err := events.DecodeBlockEventPayload(ev.(cadence.Event)) require.NoError(t, err) - err = bp.OnBlockReceived(blockEventPayload) - require.NoError(t, err) - - sp := NewTestStorageProvider(storage, blockEventPayload.Height) - cr := sync.NewReplayer(chainID, rootAddr, sp, bp, zerolog.Logger{}, nil, true) - res, err := cr.ReplayBlock(txEvents, blockEventPayload) - require.NoError(t, err) - // commit all changes - for k, v := range res.StorageRegisterUpdates() { - err = storage.SetValue([]byte(k.Owner), []byte(k.Key), v) - require.NoError(t, err) - } - - err = bp.OnBlockExecuted(blockEventPayload.Height, res) - require.NoError(t, err) + require.NoError(t, handler(blockEventPayload, txEvents), fmt.Sprintf("fail to handle block at height %d", + blockEventPayload.Height)) txEvents = make([]events.TransactionEventPayload, 0) continue @@ -97,3 +291,85 @@ func ReplyingCollectionFromScratch( t.Fatal(err) } } + +// Serialize function: saves map data to a file +func serialize(filename string, data map[string][]byte) error { + // Create a file to save data + file, err := os.Create(filename) + if err != nil { + return err + } + defer file.Close() + + // Use gob to encode data + 
encoder := gob.NewEncoder(file) + err = encoder.Encode(data) + if err != nil { + return err + } + + return nil +} + +// Deserialize function: reads map data from a file +func deserialize(filename string) (map[string][]byte, error) { + // Open the file for reading + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + // Prepare the map to store decoded data + var data map[string][]byte + + // Use gob to decode data + decoder := gob.NewDecoder(file) + err = decoder.Decode(&data) + if err != nil { + return nil, err + } + + return data, nil +} + +// Serialize function: saves map data to a file +func serializeAllocator(filename string, data map[string]uint64) error { + // Create a file to save data + file, err := os.Create(filename) + if err != nil { + return err + } + defer file.Close() + + // Use gob to encode data + encoder := gob.NewEncoder(file) + err = encoder.Encode(data) + if err != nil { + return err + } + + return nil +} + +// Deserialize function: reads map data from a file +func deserializeAllocator(filename string) (map[string]uint64, error) { + // Open the file for reading + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + // Prepare the map to store decoded data + var data map[string]uint64 + + // Use gob to decode data + decoder := gob.NewDecoder(file) + err = decoder.Decode(&data) + if err != nil { + return nil, err + } + + return data, nil +} diff --git a/fvm/evm/testutils/backend.go b/fvm/evm/testutils/backend.go index 7e0f05cb201..8971b97c2b0 100644 --- a/fvm/evm/testutils/backend.go +++ b/fvm/evm/testutils/backend.go @@ -60,7 +60,7 @@ func ConvertToCadence(data []byte) []cadence.Value { } func fullKey(owner, key []byte) string { - return string(owner) + "~" + string(key) + return fmt.Sprintf("%x~%s", owner, key) } func GetSimpleValueStore() *TestValueStore { @@ -145,6 +145,19 @@ func GetSimpleValueStorePopulated( // clone allocator return 
GetSimpleValueStorePopulated(newData, newAllocator) }, + + DumpFunc: func() (map[string][]byte, map[string]uint64) { + // clone data + newData := make(map[string][]byte) + for k, v := range data { + newData[k] = v + } + newAllocator := make(map[string]uint64) + for k, v := range allocator { + newAllocator[k] = v + } + return newData, newAllocator + }, } } @@ -253,6 +266,7 @@ type TestValueStore struct { TotalStorageItemsFunc func() int ResetStatsFunc func() CloneFunc func() *TestValueStore + DumpFunc func() (map[string][]byte, map[string]uint64) } var _ environment.ValueStore = &TestValueStore{} @@ -327,6 +341,13 @@ func (vs *TestValueStore) Clone() *TestValueStore { return vs.CloneFunc() } +func (vs *TestValueStore) Dump() (map[string][]byte, map[string]uint64) { + if vs.DumpFunc == nil { + panic("method not set") + } + return vs.DumpFunc() +} + type testMeter struct { meterComputation func(common.ComputationKind, uint) error hasComputationCapacity func(common.ComputationKind, uint) bool From 27c0f3ae641b6525506c06eefb6958033a99f6e9 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 22 Nov 2024 20:46:42 -0800 Subject: [PATCH 23/64] review comments --- fvm/evm/offchain/utils/collection_test.go | 51 +---------------------- 1 file changed, 2 insertions(+), 49 deletions(-) diff --git a/fvm/evm/offchain/utils/collection_test.go b/fvm/evm/offchain/utils/collection_test.go index d4fe05dcf8f..a18ce4a81ac 100644 --- a/fvm/evm/offchain/utils/collection_test.go +++ b/fvm/evm/offchain/utils/collection_test.go @@ -53,20 +53,7 @@ func TestTestnetBackwardCompatibility(t *testing.T) { ) } -// BackwardCompatibilityTestSinceEVMGenesisBlock verifies that the offchain package -// is able to read EVM events from the given file paths and replay blocks since the -// EVM genesis block and derive a consistant state as the latest onchain EVM state. -// the eventsFilePaths is a list of file paths that contain ordered EVM events in JSONL format. 
-// The EVM events file can be queried by flow cli query, for instance: -// -// flow events get A.8c5303eaa26202d6.EVM.TransactionExecuted A.8c5303eaa26202d6.EVM.BlockExecuted -// --start 211176670 --end 211176770 --network testnet --host access-001.devnet51.nodes.onflow.org:9000 -// -// After replaying with each event json file, it will generate a values_.gob and -// allocators_.gob files as checkpoint, such that when the checkpoint exists, it will loaded -// and skil replaying the coresponding event json files. - -// backwardCompatibilityTestSinceEVMGenesisBlock ensures that the offchain package +// BackwardCompatibilityTestSinceEVMGenesisBlock ensures that the offchain package // can read EVM events from the provided file paths, replay blocks starting from // the EVM genesis block, and derive a consistent state matching the latest on-chain EVM state. // @@ -87,7 +74,7 @@ func BackwardCompatibleSinceEVMGenesisBlock( checkpointDir string, checkpointEndHeight uint64, // EVM height of an EVM state that a checkpoint was created for ) { - // ensure that checkpoints are not more than the event files + // ensure that event files is not an empty array require.True(t, len(eventsFilePaths) > 0) log.Info().Msgf("replaying EVM events from %v to %v, with checkpoints in %s, and checkpointEndHeight: %v", @@ -208,40 +195,6 @@ func dumpCheckpoint(t *testing.T, store *TestValueStore, dir string, checkpointE return valuesFileName, allocatorsFileName } -const resume_height = 6559268 - -func decodeFullKey(encoded string) ([]byte, []byte, error) { - // Split the encoded string at the first occurrence of "~" - parts := strings.SplitN(encoded, "~", 2) - if len(parts) != 2 { - return nil, nil, fmt.Errorf("invalid encoded key: no delimiter found") - } - - // Convert the split parts back to byte slices - owner := []byte(parts[0]) - key := []byte(parts[1]) - return owner, key, nil -} - -type Subscription[T any] struct { - ch chan T - err error -} - -func NewSubscription[T any]() 
*Subscription[T] { - return &Subscription[T]{ - ch: make(chan T), - } -} - -func (s *Subscription[T]) Channel() <-chan T { - return s.ch -} - -func (s *Subscription[T]) Err() error { - return s.err -} - // scanEventFilesAndRun func scanEventFilesAndRun( t *testing.T, From 25a8af57bea132f302a375d192ebfbbd7c92ca28 Mon Sep 17 00:00:00 2001 From: Janez Podhostnik Date: Mon, 25 Nov 2024 20:04:44 +0100 Subject: [PATCH 24/64] Fix testnet EVM replay --- fvm/evm/offchain/blocks/block_context.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/fvm/evm/offchain/blocks/block_context.go b/fvm/evm/offchain/blocks/block_context.go index a18c7077378..2da73f9cb32 100644 --- a/fvm/evm/offchain/blocks/block_context.go +++ b/fvm/evm/offchain/blocks/block_context.go @@ -67,7 +67,7 @@ func UseBlockHashCorrection(chainID flow.ChainID, evmHeightOfCurrentBlock uint64 // array of hashes. if chainID == flow.Mainnet && evmHeightOfCurrentBlock < blockHashListFixHCUEVMHeightMainnet { return fixedHashes[flow.Mainnet][queriedEVMHeight%256], true - } else if chainID == flow.Testnet && blockHashListBugIntroducedHCUEVMHeightTestnet <= evmHeightOfCurrentBlock && evmHeightOfCurrentBlock < blockHashListFixHCUEVMHeightTestnet { + } else if chainID == flow.Testnet && evmHeightOfCurrentBlock < blockHashListFixHCUEVMHeightTestnet { return fixedHashes[flow.Testnet][queriedEVMHeight%256], true } return gethCommon.Hash{}, false @@ -83,11 +83,6 @@ const blockHashListFixHCUEVMHeightMainnet = 8357079 // PR: https://github.com/onflow/flow-go/pull/6734 const blockHashListFixHCUEVMHeightTestnet = 16848829 -// Testnet52 - Spork -// Flow Block: 218215350 cc7188f0bdac4c442cc3ee072557d7f7c8ca4462537da945b148d5d0efa7a1ff -// PR: https://github.com/onflow/flow-go/pull/6377 -const blockHashListBugIntroducedHCUEVMHeightTestnet = 7038679 - // Testnet51 - Height Coordinated Upgrade 1 // Flow Block: 212562161 1a520608c5457f228405c4c30fc39c8a0af7cf915fb2ede7ec5ccffc2a000f57 // PR: 
https://github.com/onflow/flow-go/pull/6380 From 558155049a830b31cf5700a158ba878abc346c1a Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Tue, 26 Nov 2024 18:03:14 +0200 Subject: [PATCH 25/64] Moved Run to separate goroutine, paralleled test as suggested --- engine/access/rest/websockets/controller.go | 10 ++++++++-- .../rest/websockets/data_providers/factory_test.go | 5 +++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/engine/access/rest/websockets/controller.go b/engine/access/rest/websockets/controller.go index 7c77558e7c2..cbcdc55be0f 100644 --- a/engine/access/rest/websockets/controller.go +++ b/engine/access/rest/websockets/controller.go @@ -164,11 +164,17 @@ func (c *Controller) handleSubscribe(ctx context.Context, msg models.SubscribeMe dp, _ := c.dataProviderFactory.NewDataProvider(ctx, msg.Topic, msg.Arguments, c.communicationChannel) // TODO: handle error here c.dataProviders.Add(dp.ID(), dp) - _ = dp.Run() - // TODO: handle error here //TODO: return OK response to client c.communicationChannel <- msg + + go func() { + err := dp.Run() + if err != nil { + // Log or handle the error from Run + c.logger.Error().Err(err).Msgf("error while running data provider for topic: %s", msg.Topic) + } + }() } func (c *Controller) handleUnsubscribe(_ context.Context, msg models.UnsubscribeMessageRequest) { diff --git a/engine/access/rest/websockets/data_providers/factory_test.go b/engine/access/rest/websockets/data_providers/factory_test.go index ce4b16e97f6..602212e08f5 100644 --- a/engine/access/rest/websockets/data_providers/factory_test.go +++ b/engine/access/rest/websockets/data_providers/factory_test.go @@ -57,6 +57,8 @@ func (s *DataProviderFactorySuite) setupSubscription(apiCall *mock.Call) { // TestSupportedTopics verifies that supported topics return a valid provider and no errors. // Each test case includes a topic and arguments for which a data provider should be created. 
func (s *DataProviderFactorySuite) TestSupportedTopics() { + s.T().Parallel() + // Define supported topics and check if each returns the correct provider without errors testCases := []struct { name string @@ -102,6 +104,7 @@ func (s *DataProviderFactorySuite) TestSupportedTopics() { for _, test := range testCases { s.Run(test.name, func() { + s.T().Parallel() test.setupSubscription() provider, err := s.factory.NewDataProvider(s.ctx, test.topic, test.arguments, s.ch) @@ -117,6 +120,8 @@ func (s *DataProviderFactorySuite) TestSupportedTopics() { // TestUnsupportedTopics verifies that unsupported topics do not return a provider // and instead return an error indicating the topic is unsupported. func (s *DataProviderFactorySuite) TestUnsupportedTopics() { + s.T().Parallel() + // Define unsupported topics unsupportedTopics := []string{ "unknown_topic", From a4920e43570a1fd5c42197f4784055fe7a760745 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 22 Nov 2024 16:12:11 -0800 Subject: [PATCH 26/64] fix coinbase address change for old testnet --- fvm/evm/offchain/blocks/block_context.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fvm/evm/offchain/blocks/block_context.go b/fvm/evm/offchain/blocks/block_context.go index 2da73f9cb32..ecbc8813c76 100644 --- a/fvm/evm/offchain/blocks/block_context.go +++ b/fvm/evm/offchain/blocks/block_context.go @@ -86,7 +86,7 @@ const blockHashListFixHCUEVMHeightTestnet = 16848829 // Testnet51 - Height Coordinated Upgrade 1 // Flow Block: 212562161 1a520608c5457f228405c4c30fc39c8a0af7cf915fb2ede7ec5ccffc2a000f57 // PR: https://github.com/onflow/flow-go/pull/6380 -const coinbaseAddressChangeEVMHeightTestnet = 1385491 +const coinbaseAddressChangeEVMHeightTestnet = 1385490 var genesisCoinbaseAddressTestnet = types.Address(gethCommon.HexToAddress("0000000000000000000000021169100eecb7c1a6")) From c5a9f17c216247068050820f927f38c479ee4d69 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Tue, 26 Nov 2024 
18:47:58 +0200 Subject: [PATCH 27/64] Added names to interface arguments, created Arguments type for websockets --- .../data_providers/base_provider.go | 5 +++-- .../data_providers/block_digests_provider.go | 2 +- .../data_providers/block_headers_provider.go | 2 +- .../data_providers/blocks_provider.go | 4 ++-- .../data_providers/blocks_provider_test.go | 9 +++++---- .../rest/websockets/data_providers/factory.go | 5 +++-- .../websockets/data_providers/factory_test.go | 9 +++++---- .../data_providers/mock/data_provider.go | 17 ++++++++++++++-- .../mock/data_provider_factory.go | 20 ++++++++++--------- .../rest/websockets/models/subscribe.go | 6 ++++-- 10 files changed, 50 insertions(+), 29 deletions(-) diff --git a/engine/access/rest/websockets/data_providers/base_provider.go b/engine/access/rest/websockets/data_providers/base_provider.go index 7c5e4ccf548..2d81c3bf25d 100644 --- a/engine/access/rest/websockets/data_providers/base_provider.go +++ b/engine/access/rest/websockets/data_providers/base_provider.go @@ -16,7 +16,7 @@ type BaseDataProvider interface { // Topic returns the topic associated with the data provider. Topic() string // Close terminates the data provider. - Close() + Close() error } var _ BaseDataProvider = (*BaseDataProviderImpl)(nil) @@ -58,6 +58,7 @@ func (b *BaseDataProviderImpl) Topic() string { } // Close terminates the data provider. 
-func (b *BaseDataProviderImpl) Close() { +func (b *BaseDataProviderImpl) Close() error { b.cancel() + return nil } diff --git a/engine/access/rest/websockets/data_providers/block_digests_provider.go b/engine/access/rest/websockets/data_providers/block_digests_provider.go index 1d99f1a7a8d..e94ff1f2c4a 100644 --- a/engine/access/rest/websockets/data_providers/block_digests_provider.go +++ b/engine/access/rest/websockets/data_providers/block_digests_provider.go @@ -30,7 +30,7 @@ func NewBlockDigestsDataProvider( logger zerolog.Logger, api access.API, topic string, - arguments map[string]string, + arguments models.Arguments, send chan<- interface{}, ) (*BlockDigestsDataProvider, error) { p := &BlockDigestsDataProvider{ diff --git a/engine/access/rest/websockets/data_providers/block_headers_provider.go b/engine/access/rest/websockets/data_providers/block_headers_provider.go index 40bdb2ddd5c..42ede67703f 100644 --- a/engine/access/rest/websockets/data_providers/block_headers_provider.go +++ b/engine/access/rest/websockets/data_providers/block_headers_provider.go @@ -30,7 +30,7 @@ func NewBlockHeadersDataProvider( logger zerolog.Logger, api access.API, topic string, - arguments map[string]string, + arguments models.Arguments, send chan<- interface{}, ) (*BlockHeadersDataProvider, error) { p := &BlockHeadersDataProvider{ diff --git a/engine/access/rest/websockets/data_providers/blocks_provider.go b/engine/access/rest/websockets/data_providers/blocks_provider.go index f340c27ba54..1da9c58f4c8 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider.go @@ -39,7 +39,7 @@ func NewBlocksDataProvider( logger zerolog.Logger, api access.API, topic string, - arguments map[string]string, + arguments models.Arguments, send chan<- interface{}, ) (*BlocksDataProvider, error) { p := &BlocksDataProvider{ @@ -100,7 +100,7 @@ func (p *BlocksDataProvider) handleResponse(send chan<- interface{}) 
func(*flow. } // ParseBlocksArguments validates and initializes the blocks arguments. -func ParseBlocksArguments(arguments map[string]string) (BlocksArguments, error) { +func ParseBlocksArguments(arguments models.Arguments) (BlocksArguments, error) { var args BlocksArguments // Parse 'block_status' diff --git a/engine/access/rest/websockets/data_providers/blocks_provider_test.go b/engine/access/rest/websockets/data_providers/blocks_provider_test.go index 9771cb54780..8d3e984bd71 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider_test.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider_test.go @@ -10,6 +10,7 @@ import ( accessmock "github.com/onflow/flow-go/access/mock" "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/engine/access/rest/websockets/models" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) @@ -18,7 +19,7 @@ const unknownBlockStatus = "unknown_block_status" type testErrType struct { name string - arguments map[string]string + arguments models.Arguments expectedErrorMsg string } @@ -69,21 +70,21 @@ func (s *BlocksProviderSuite) invalidArgumentsTestCases() []testErrType { return []testErrType{ { name: "missing 'block_status' argument", - arguments: map[string]string{ + arguments: models.Arguments{ "start_block_id": s.rootBlock.ID().String(), }, expectedErrorMsg: "'block_status' must be provided", }, { name: "unknown 'block_status' argument", - arguments: map[string]string{ + arguments: models.Arguments{ "block_status": unknownBlockStatus, }, expectedErrorMsg: fmt.Sprintf("invalid 'block_status', must be '%s' or '%s'", parser.Finalized, parser.Sealed), }, { name: "provide both 'start_block_id' and 'start_block_height' arguments", - arguments: map[string]string{ + arguments: models.Arguments{ "block_status": parser.Finalized, "start_block_id": s.rootBlock.ID().String(), "start_block_height": fmt.Sprintf("%d", s.rootBlock.Header.Height), diff 
--git a/engine/access/rest/websockets/data_providers/factory.go b/engine/access/rest/websockets/data_providers/factory.go index c9c187a0117..51bf67b900d 100644 --- a/engine/access/rest/websockets/data_providers/factory.go +++ b/engine/access/rest/websockets/data_providers/factory.go @@ -7,6 +7,7 @@ import ( "github.com/rs/zerolog" "github.com/onflow/flow-go/access" + "github.com/onflow/flow-go/engine/access/rest/websockets/models" "github.com/onflow/flow-go/engine/access/state_stream" ) @@ -29,7 +30,7 @@ type DataProviderFactory interface { // and configuration parameters. // // No errors are expected during normal operations. - NewDataProvider(context.Context, string, map[string]string, chan<- interface{}) (DataProvider, error) + NewDataProvider(ctx context.Context, topic string, arguments models.Arguments, ch chan<- interface{}) (DataProvider, error) } var _ DataProviderFactory = (*DataProviderFactoryImpl)(nil) @@ -76,7 +77,7 @@ func NewDataProviderFactory( func (s *DataProviderFactoryImpl) NewDataProvider( ctx context.Context, topic string, - arguments map[string]string, + arguments models.Arguments, ch chan<- interface{}, ) (DataProvider, error) { switch topic { diff --git a/engine/access/rest/websockets/data_providers/factory_test.go b/engine/access/rest/websockets/data_providers/factory_test.go index 602212e08f5..1b33d892573 100644 --- a/engine/access/rest/websockets/data_providers/factory_test.go +++ b/engine/access/rest/websockets/data_providers/factory_test.go @@ -10,6 +10,7 @@ import ( accessmock "github.com/onflow/flow-go/access/mock" "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/engine/access/rest/websockets/models" statestreammock "github.com/onflow/flow-go/engine/access/state_stream/mock" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" @@ -63,14 +64,14 @@ func (s *DataProviderFactorySuite) TestSupportedTopics() { testCases := []struct { name string topic string - arguments 
map[string]string + arguments models.Arguments setupSubscription func() assertExpectations func() }{ { name: "block topic", topic: BlocksTopic, - arguments: map[string]string{"block_status": parser.Finalized}, + arguments: models.Arguments{"block_status": parser.Finalized}, setupSubscription: func() { s.setupSubscription(s.accessApi.On("SubscribeBlocksFromLatest", mock.Anything, flow.BlockStatusFinalized)) }, @@ -81,7 +82,7 @@ func (s *DataProviderFactorySuite) TestSupportedTopics() { { name: "block headers topic", topic: BlockHeadersTopic, - arguments: map[string]string{"block_status": parser.Finalized}, + arguments: models.Arguments{"block_status": parser.Finalized}, setupSubscription: func() { s.setupSubscription(s.accessApi.On("SubscribeBlockHeadersFromLatest", mock.Anything, flow.BlockStatusFinalized)) }, @@ -92,7 +93,7 @@ func (s *DataProviderFactorySuite) TestSupportedTopics() { { name: "block digests topic", topic: BlockDigestsTopic, - arguments: map[string]string{"block_status": parser.Finalized}, + arguments: models.Arguments{"block_status": parser.Finalized}, setupSubscription: func() { s.setupSubscription(s.accessApi.On("SubscribeBlockDigestsFromLatest", mock.Anything, flow.BlockStatusFinalized)) }, diff --git a/engine/access/rest/websockets/data_providers/mock/data_provider.go b/engine/access/rest/websockets/data_providers/mock/data_provider.go index 48debb23ae3..3fe8bc5d15b 100644 --- a/engine/access/rest/websockets/data_providers/mock/data_provider.go +++ b/engine/access/rest/websockets/data_providers/mock/data_provider.go @@ -13,8 +13,21 @@ type DataProvider struct { } // Close provides a mock function with given fields: -func (_m *DataProvider) Close() { - _m.Called() +func (_m *DataProvider) Close() error { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Close") + } + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 } // ID provides a mock 
function with given fields: diff --git a/engine/access/rest/websockets/data_providers/mock/data_provider_factory.go b/engine/access/rest/websockets/data_providers/mock/data_provider_factory.go index 2a959715511..c2e46e58d1d 100644 --- a/engine/access/rest/websockets/data_providers/mock/data_provider_factory.go +++ b/engine/access/rest/websockets/data_providers/mock/data_provider_factory.go @@ -7,6 +7,8 @@ import ( data_providers "github.com/onflow/flow-go/engine/access/rest/websockets/data_providers" mock "github.com/stretchr/testify/mock" + + models "github.com/onflow/flow-go/engine/access/rest/websockets/models" ) // DataProviderFactory is an autogenerated mock type for the DataProviderFactory type @@ -14,9 +16,9 @@ type DataProviderFactory struct { mock.Mock } -// NewDataProvider provides a mock function with given fields: _a0, _a1, _a2, _a3 -func (_m *DataProviderFactory) NewDataProvider(_a0 context.Context, _a1 string, _a2 map[string]string, _a3 chan<- interface{}) (data_providers.DataProvider, error) { - ret := _m.Called(_a0, _a1, _a2, _a3) +// NewDataProvider provides a mock function with given fields: ctx, topic, arguments, ch +func (_m *DataProviderFactory) NewDataProvider(ctx context.Context, topic string, arguments models.Arguments, ch chan<- interface{}) (data_providers.DataProvider, error) { + ret := _m.Called(ctx, topic, arguments, ch) if len(ret) == 0 { panic("no return value specified for NewDataProvider") @@ -24,19 +26,19 @@ func (_m *DataProviderFactory) NewDataProvider(_a0 context.Context, _a1 string, var r0 data_providers.DataProvider var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, map[string]string, chan<- interface{}) (data_providers.DataProvider, error)); ok { - return rf(_a0, _a1, _a2, _a3) + if rf, ok := ret.Get(0).(func(context.Context, string, models.Arguments, chan<- interface{}) (data_providers.DataProvider, error)); ok { + return rf(ctx, topic, arguments, ch) } - if rf, ok := ret.Get(0).(func(context.Context, 
string, map[string]string, chan<- interface{}) data_providers.DataProvider); ok { - r0 = rf(_a0, _a1, _a2, _a3) + if rf, ok := ret.Get(0).(func(context.Context, string, models.Arguments, chan<- interface{}) data_providers.DataProvider); ok { + r0 = rf(ctx, topic, arguments, ch) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(data_providers.DataProvider) } } - if rf, ok := ret.Get(1).(func(context.Context, string, map[string]string, chan<- interface{}) error); ok { - r1 = rf(_a0, _a1, _a2, _a3) + if rf, ok := ret.Get(1).(func(context.Context, string, models.Arguments, chan<- interface{}) error); ok { + r1 = rf(ctx, topic, arguments, ch) } else { r1 = ret.Error(1) } diff --git a/engine/access/rest/websockets/models/subscribe.go b/engine/access/rest/websockets/models/subscribe.go index d2cd007bd3c..95ad17e3708 100644 --- a/engine/access/rest/websockets/models/subscribe.go +++ b/engine/access/rest/websockets/models/subscribe.go @@ -1,10 +1,12 @@ package models +type Arguments map[string]string + // SubscribeMessageRequest represents a request to subscribe to a topic. type SubscribeMessageRequest struct { BaseMessageRequest - Topic string `json:"topic"` // Topic to subscribe to - Arguments map[string]string `json:"arguments"` // Additional arguments for subscription + Topic string `json:"topic"` // Topic to subscribe to + Arguments Arguments `json:"arguments"` // Additional arguments for subscription } // SubscribeMessageResponse represents the response to a subscription request. 
From 3cb67c424d6a609f21d85979677ce819f4b99240 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Wed, 27 Nov 2024 12:53:32 +0200 Subject: [PATCH 28/64] Updated ParseBlocksArguments for block data provider --- .../data_providers/blocks_provider.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/engine/access/rest/websockets/data_providers/blocks_provider.go b/engine/access/rest/websockets/data_providers/blocks_provider.go index 1da9c58f4c8..1c3d454b4c5 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider.go @@ -114,8 +114,16 @@ func ParseBlocksArguments(arguments models.Arguments) (BlocksArguments, error) { return args, fmt.Errorf("'block_status' must be provided") } + startBlockIDIn, hasStartBlockID := arguments["start_block_id"] + startBlockHeightIn, hasStartBlockHeight := arguments["start_block_height"] + + // Ensure only one of start_block_id or start_block_height is provided + if hasStartBlockID && hasStartBlockHeight { + return args, fmt.Errorf("can only provide either 'start_block_id' or 'start_block_height'") + } + // Parse 'start_block_id' if provided - if startBlockIDIn, ok := arguments["start_block_id"]; ok { + if hasStartBlockID { var startBlockID parser.ID err := startBlockID.Parse(startBlockIDIn) if err != nil { @@ -125,7 +133,7 @@ func ParseBlocksArguments(arguments models.Arguments) (BlocksArguments, error) { } // Parse 'start_block_height' if provided - if startBlockHeightIn, ok := arguments["start_block_height"]; ok { + if hasStartBlockHeight { var err error args.StartBlockHeight, err = util.ToUint64(startBlockHeightIn) if err != nil { @@ -135,10 +143,5 @@ func ParseBlocksArguments(arguments models.Arguments) (BlocksArguments, error) { args.StartBlockHeight = request.EmptyHeight } - // Ensure only one of start_block_id or start_block_height is provided - if args.StartBlockID != flow.ZeroID && args.StartBlockHeight != 
request.EmptyHeight { - return args, fmt.Errorf("can only provide either 'start_block_id' or 'start_block_height'") - } - return args, nil } From b9abf683c1324f9141b06bbac3f61d5b729bccba Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Fri, 29 Nov 2024 13:34:13 +0200 Subject: [PATCH 29/64] Updated comments and messages --- engine/access/rest/websockets/controller.go | 9 ++++++--- engine/access/rest/websockets/data_providers/factory.go | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/engine/access/rest/websockets/controller.go b/engine/access/rest/websockets/controller.go index cbcdc55be0f..df5d48b345c 100644 --- a/engine/access/rest/websockets/controller.go +++ b/engine/access/rest/websockets/controller.go @@ -161,8 +161,11 @@ func (c *Controller) handleAction(ctx context.Context, message interface{}) erro } func (c *Controller) handleSubscribe(ctx context.Context, msg models.SubscribeMessageRequest) { - dp, _ := c.dataProviderFactory.NewDataProvider(ctx, msg.Topic, msg.Arguments, c.communicationChannel) - // TODO: handle error here + dp, err := c.dataProviderFactory.NewDataProvider(ctx, msg.Topic, msg.Arguments, c.communicationChannel) + if err != nil { + // TODO: handle error here + } + c.dataProviders.Add(dp.ID(), dp) //TODO: return OK response to client @@ -171,7 +174,7 @@ func (c *Controller) handleSubscribe(ctx context.Context, msg models.SubscribeMe go func() { err := dp.Run() if err != nil { - // Log or handle the error from Run + //TODO: Log or handle the error from Run c.logger.Error().Err(err).Msgf("error while running data provider for topic: %s", msg.Topic) } }() diff --git a/engine/access/rest/websockets/data_providers/factory.go b/engine/access/rest/websockets/data_providers/factory.go index 51bf67b900d..72f4a6b7633 100644 --- a/engine/access/rest/websockets/data_providers/factory.go +++ b/engine/access/rest/websockets/data_providers/factory.go @@ -91,7 +91,7 @@ func (s *DataProviderFactoryImpl) NewDataProvider( case 
EventsTopic, AccountStatusesTopic, TransactionStatusesTopic: - return nil, fmt.Errorf("topic \"%s\" not implemented yet", topic) + return nil, fmt.Errorf(`topic "%s" not implemented yet`, topic) default: return nil, fmt.Errorf("unsupported topic \"%s\"", topic) } From 1a2698f62208b855cf7a87105f8ade5cd9038b6f Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Fri, 29 Nov 2024 13:45:12 +0200 Subject: [PATCH 30/64] Removed storing arguments in the data providers --- engine/access/rest/websockets/controller.go | 1 + .../data_providers/block_digests_provider.go | 23 ++++++++----------- .../data_providers/block_headers_provider.go | 23 ++++++++----------- .../data_providers/blocks_provider.go | 23 ++++++++----------- 4 files changed, 31 insertions(+), 39 deletions(-) diff --git a/engine/access/rest/websockets/controller.go b/engine/access/rest/websockets/controller.go index df5d48b345c..38bc7306b55 100644 --- a/engine/access/rest/websockets/controller.go +++ b/engine/access/rest/websockets/controller.go @@ -164,6 +164,7 @@ func (c *Controller) handleSubscribe(ctx context.Context, msg models.SubscribeMe dp, err := c.dataProviderFactory.NewDataProvider(ctx, msg.Topic, msg.Arguments, c.communicationChannel) if err != nil { // TODO: handle error here + c.logger.Error().Err(err).Msgf("error while creating data provider for topic: %s", msg.Topic) } c.dataProviders.Add(dp.ID(), dp) diff --git a/engine/access/rest/websockets/data_providers/block_digests_provider.go b/engine/access/rest/websockets/data_providers/block_digests_provider.go index e94ff1f2c4a..3352e0c0ac0 100644 --- a/engine/access/rest/websockets/data_providers/block_digests_provider.go +++ b/engine/access/rest/websockets/data_providers/block_digests_provider.go @@ -18,7 +18,6 @@ type BlockDigestsDataProvider struct { *BaseDataProviderImpl logger zerolog.Logger - args BlocksArguments api access.API } @@ -38,20 +37,18 @@ func NewBlockDigestsDataProvider( api: api, } - // Initialize arguments passed to the provider. 
- var err error - p.args, err = ParseBlocksArguments(arguments) + // Parse arguments passed to the provider. + blockArgs, err := ParseBlocksArguments(arguments) if err != nil { return nil, fmt.Errorf("invalid arguments: %w", err) } - ctx, cancel := context.WithCancel(ctx) - + subCtx, cancel := context.WithCancel(ctx) p.BaseDataProviderImpl = NewBaseDataProviderImpl( topic, cancel, send, - p.createSubscription(ctx), // Set up a subscription to block digests based on arguments. + p.createSubscription(subCtx, blockArgs), // Set up a subscription to block digests based on arguments. ) return p, nil @@ -65,16 +62,16 @@ func (p *BlockDigestsDataProvider) Run() error { } // createSubscription creates a new subscription using the specified input arguments. -func (p *BlockDigestsDataProvider) createSubscription(ctx context.Context) subscription.Subscription { - if p.args.StartBlockID != flow.ZeroID { - return p.api.SubscribeBlockDigestsFromStartBlockID(ctx, p.args.StartBlockID, p.args.BlockStatus) +func (p *BlockDigestsDataProvider) createSubscription(ctx context.Context, args BlocksArguments) subscription.Subscription { + if args.StartBlockID != flow.ZeroID { + return p.api.SubscribeBlockDigestsFromStartBlockID(ctx, args.StartBlockID, args.BlockStatus) } - if p.args.StartBlockHeight != request.EmptyHeight { - return p.api.SubscribeBlockDigestsFromStartHeight(ctx, p.args.StartBlockHeight, p.args.BlockStatus) + if args.StartBlockHeight != request.EmptyHeight { + return p.api.SubscribeBlockDigestsFromStartHeight(ctx, args.StartBlockHeight, args.BlockStatus) } - return p.api.SubscribeBlockDigestsFromLatest(ctx, p.args.BlockStatus) + return p.api.SubscribeBlockDigestsFromLatest(ctx, args.BlockStatus) } // handleResponse processes a block digest and sends the formatted response. 
diff --git a/engine/access/rest/websockets/data_providers/block_headers_provider.go b/engine/access/rest/websockets/data_providers/block_headers_provider.go index 42ede67703f..142b98f9e70 100644 --- a/engine/access/rest/websockets/data_providers/block_headers_provider.go +++ b/engine/access/rest/websockets/data_providers/block_headers_provider.go @@ -18,7 +18,6 @@ type BlockHeadersDataProvider struct { *BaseDataProviderImpl logger zerolog.Logger - args BlocksArguments api access.API } @@ -38,20 +37,18 @@ func NewBlockHeadersDataProvider( api: api, } - // Initialize arguments passed to the provider. - var err error - p.args, err = ParseBlocksArguments(arguments) + // Parse arguments passed to the provider. + blockArgs, err := ParseBlocksArguments(arguments) if err != nil { return nil, fmt.Errorf("invalid arguments: %w", err) } - ctx, cancel := context.WithCancel(ctx) - + subCtx, cancel := context.WithCancel(ctx) p.BaseDataProviderImpl = NewBaseDataProviderImpl( topic, cancel, send, - p.createSubscription(ctx), // Set up a subscription to block headers based on arguments. + p.createSubscription(subCtx, blockArgs), // Set up a subscription to block headers based on arguments. ) return p, nil @@ -65,16 +62,16 @@ func (p *BlockHeadersDataProvider) Run() error { } // createSubscription creates a new subscription using the specified input arguments. 
-func (p *BlockHeadersDataProvider) createSubscription(ctx context.Context) subscription.Subscription { - if p.args.StartBlockID != flow.ZeroID { - return p.api.SubscribeBlockHeadersFromStartBlockID(ctx, p.args.StartBlockID, p.args.BlockStatus) +func (p *BlockHeadersDataProvider) createSubscription(ctx context.Context, args BlocksArguments) subscription.Subscription { + if args.StartBlockID != flow.ZeroID { + return p.api.SubscribeBlockHeadersFromStartBlockID(ctx, args.StartBlockID, args.BlockStatus) } - if p.args.StartBlockHeight != request.EmptyHeight { - return p.api.SubscribeBlockHeadersFromStartHeight(ctx, p.args.StartBlockHeight, p.args.BlockStatus) + if args.StartBlockHeight != request.EmptyHeight { + return p.api.SubscribeBlockHeadersFromStartHeight(ctx, args.StartBlockHeight, args.BlockStatus) } - return p.api.SubscribeBlockHeadersFromLatest(ctx, p.args.BlockStatus) + return p.api.SubscribeBlockHeadersFromLatest(ctx, args.BlockStatus) } // handleResponse processes a block header and sends the formatted response. diff --git a/engine/access/rest/websockets/data_providers/blocks_provider.go b/engine/access/rest/websockets/data_providers/blocks_provider.go index 1c3d454b4c5..cccf8b5bed3 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider.go @@ -27,7 +27,6 @@ type BlocksDataProvider struct { *BaseDataProviderImpl logger zerolog.Logger - args BlocksArguments api access.API } @@ -47,20 +46,18 @@ func NewBlocksDataProvider( api: api, } - // Initialize arguments passed to the provider. - var err error - p.args, err = ParseBlocksArguments(arguments) + // Parse arguments passed to the provider. 
+ blockArgs, err := ParseBlocksArguments(arguments) if err != nil { return nil, fmt.Errorf("invalid arguments: %w", err) } - ctx, cancel := context.WithCancel(ctx) - + subCtx, cancel := context.WithCancel(ctx) p.BaseDataProviderImpl = NewBaseDataProviderImpl( topic, cancel, send, - p.createSubscription(ctx), // Set up a subscription to blocks based on arguments. + p.createSubscription(subCtx, blockArgs), // Set up a subscription to blocks based on arguments. ) return p, nil @@ -74,16 +71,16 @@ func (p *BlocksDataProvider) Run() error { } // createSubscription creates a new subscription using the specified input arguments. -func (p *BlocksDataProvider) createSubscription(ctx context.Context) subscription.Subscription { - if p.args.StartBlockID != flow.ZeroID { - return p.api.SubscribeBlocksFromStartBlockID(ctx, p.args.StartBlockID, p.args.BlockStatus) +func (p *BlocksDataProvider) createSubscription(ctx context.Context, args BlocksArguments) subscription.Subscription { + if args.StartBlockID != flow.ZeroID { + return p.api.SubscribeBlocksFromStartBlockID(ctx, args.StartBlockID, args.BlockStatus) } - if p.args.StartBlockHeight != request.EmptyHeight { - return p.api.SubscribeBlocksFromStartHeight(ctx, p.args.StartBlockHeight, p.args.BlockStatus) + if args.StartBlockHeight != request.EmptyHeight { + return p.api.SubscribeBlocksFromStartHeight(ctx, args.StartBlockHeight, args.BlockStatus) } - return p.api.SubscribeBlocksFromLatest(ctx, p.args.BlockStatus) + return p.api.SubscribeBlocksFromLatest(ctx, args.BlockStatus) } // handleResponse processes a block and sends the formatted response. 
From ff3851b346f1c9a5e638dd1f54ae1a5506c8e0bd Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Fri, 29 Nov 2024 15:07:41 +0200 Subject: [PATCH 31/64] Refactored base data provider according to comments --- .../data_providers/base_provider.go | 34 ++++++------------- .../data_providers/block_digests_provider.go | 4 +-- .../data_providers/block_headers_provider.go | 4 +-- .../data_providers/blocks_provider.go | 4 +-- .../data_providers/data_provider.go | 15 ++++++-- 5 files changed, 30 insertions(+), 31 deletions(-) diff --git a/engine/access/rest/websockets/data_providers/base_provider.go b/engine/access/rest/websockets/data_providers/base_provider.go index 2d81c3bf25d..cf1ee1313d9 100644 --- a/engine/access/rest/websockets/data_providers/base_provider.go +++ b/engine/access/rest/websockets/data_providers/base_provider.go @@ -8,22 +8,8 @@ import ( "github.com/onflow/flow-go/engine/access/subscription" ) -// BaseDataProvider defines the basic interface for a data provider. It provides methods -// for retrieving the provider's unique ID, topic, and a method to close the provider. -type BaseDataProvider interface { - // ID returns the unique identifier of the data provider. - ID() uuid.UUID - // Topic returns the topic associated with the data provider. - Topic() string - // Close terminates the data provider. - Close() error -} - -var _ BaseDataProvider = (*BaseDataProviderImpl)(nil) - -// BaseDataProviderImpl is the concrete implementation of the BaseDataProvider interface. -// It holds common objects for the provider. -type BaseDataProviderImpl struct { +// baseDataProvider holds common objects for the provider +type baseDataProvider struct { id uuid.UUID topic string cancel context.CancelFunc @@ -31,14 +17,14 @@ type BaseDataProviderImpl struct { subscription subscription.Subscription } -// NewBaseDataProviderImpl creates a new instance of BaseDataProviderImpl. -func NewBaseDataProviderImpl( +// newBaseDataProvider creates a new instance of baseDataProvider. 
+func newBaseDataProvider( topic string, cancel context.CancelFunc, send chan<- interface{}, subscription subscription.Subscription, -) *BaseDataProviderImpl { - return &BaseDataProviderImpl{ +) *baseDataProvider { + return &baseDataProvider{ id: uuid.New(), topic: topic, cancel: cancel, @@ -48,17 +34,19 @@ func NewBaseDataProviderImpl( } // ID returns the unique identifier of the data provider. -func (b *BaseDataProviderImpl) ID() uuid.UUID { +func (b *baseDataProvider) ID() uuid.UUID { return b.id } // Topic returns the topic associated with the data provider. -func (b *BaseDataProviderImpl) Topic() string { +func (b *baseDataProvider) Topic() string { return b.topic } // Close terminates the data provider. -func (b *BaseDataProviderImpl) Close() error { +// +// No errors are expected during normal operations. +func (b *baseDataProvider) Close() error { b.cancel() return nil } diff --git a/engine/access/rest/websockets/data_providers/block_digests_provider.go b/engine/access/rest/websockets/data_providers/block_digests_provider.go index 3352e0c0ac0..b2f5d3496b3 100644 --- a/engine/access/rest/websockets/data_providers/block_digests_provider.go +++ b/engine/access/rest/websockets/data_providers/block_digests_provider.go @@ -15,7 +15,7 @@ import ( // BlockDigestsDataProvider is responsible for providing block digests type BlockDigestsDataProvider struct { - *BaseDataProviderImpl + *baseDataProvider logger zerolog.Logger api access.API @@ -44,7 +44,7 @@ func NewBlockDigestsDataProvider( } subCtx, cancel := context.WithCancel(ctx) - p.BaseDataProviderImpl = NewBaseDataProviderImpl( + p.baseDataProvider = newBaseDataProvider( topic, cancel, send, diff --git a/engine/access/rest/websockets/data_providers/block_headers_provider.go b/engine/access/rest/websockets/data_providers/block_headers_provider.go index 142b98f9e70..7cd91fa4a38 100644 --- a/engine/access/rest/websockets/data_providers/block_headers_provider.go +++ 
b/engine/access/rest/websockets/data_providers/block_headers_provider.go @@ -15,7 +15,7 @@ import ( // BlockHeadersDataProvider is responsible for providing block headers type BlockHeadersDataProvider struct { - *BaseDataProviderImpl + *baseDataProvider logger zerolog.Logger api access.API @@ -44,7 +44,7 @@ func NewBlockHeadersDataProvider( } subCtx, cancel := context.WithCancel(ctx) - p.BaseDataProviderImpl = NewBaseDataProviderImpl( + p.baseDataProvider = newBaseDataProvider( topic, cancel, send, diff --git a/engine/access/rest/websockets/data_providers/blocks_provider.go b/engine/access/rest/websockets/data_providers/blocks_provider.go index cccf8b5bed3..794cd8e63f8 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider.go @@ -24,7 +24,7 @@ type BlocksArguments struct { // BlocksDataProvider is responsible for providing blocks type BlocksDataProvider struct { - *BaseDataProviderImpl + *baseDataProvider logger zerolog.Logger api access.API @@ -53,7 +53,7 @@ func NewBlocksDataProvider( } subCtx, cancel := context.WithCancel(ctx) - p.BaseDataProviderImpl = NewBaseDataProviderImpl( + p.baseDataProvider = newBaseDataProvider( topic, cancel, send, diff --git a/engine/access/rest/websockets/data_providers/data_provider.go b/engine/access/rest/websockets/data_providers/data_provider.go index 6a3fcc8991b..9d1d8855f21 100644 --- a/engine/access/rest/websockets/data_providers/data_provider.go +++ b/engine/access/rest/websockets/data_providers/data_provider.go @@ -1,9 +1,20 @@ package data_providers +import ( + "github.com/google/uuid" +) + // The DataProvider is the interface abstracts of the actual data provider used by the WebSocketCollector. +// It provides methods for retrieving the provider's unique ID, topic, and a methods to close and run the provider. type DataProvider interface { - BaseDataProvider - + // ID returns the unique identifier of the data provider. 
+ ID() uuid.UUID + // Topic returns the topic associated with the data provider. + Topic() string + // Close terminates the data provider. + // + // No errors are expected during normal operations. + Close() error // Run starts processing the subscription and handles responses. // // No errors are expected during normal operations. From f8d711f75a9219c44f4b9831f5c713a80c9d28bc Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Mon, 2 Dec 2024 14:53:31 +0200 Subject: [PATCH 32/64] Added happy case unit tests for providers --- .../block_digests_provider_test.go | 85 ++++++++- .../block_headers_provider_test.go | 83 ++++++++- .../data_providers/blocks_provider_test.go | 167 +++++++++++++++++- .../websockets/data_providers/factory_test.go | 2 - 4 files changed, 326 insertions(+), 11 deletions(-) diff --git a/engine/access/rest/websockets/data_providers/block_digests_provider_test.go b/engine/access/rest/websockets/data_providers/block_digests_provider_test.go index 57fdabd7994..476edf77111 100644 --- a/engine/access/rest/websockets/data_providers/block_digests_provider_test.go +++ b/engine/access/rest/websockets/data_providers/block_digests_provider_test.go @@ -2,9 +2,17 @@ package data_providers import ( "context" + "strconv" "testing" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/engine/access/rest/websockets/models" + statestreamsmock "github.com/onflow/flow-go/engine/access/state_stream/mock" + "github.com/onflow/flow-go/model/flow" ) type BlockDigestsProviderSuite struct { @@ -43,4 +51,79 @@ func (s *BlockDigestsProviderSuite) TestBlockDigestsDataProvider_InvalidArgument } } -// TODO: add tests for responses after the WebsocketController is ready +// validBlockDigestsArgumentsTestCases defines test happy cases for block digests data providers. 
+// Each test case specifies input arguments, and setup functions for the mock API used in the test. +func (s *BlockDigestsProviderSuite) validBlockDigestsArgumentsTestCases() []testType { + return []testType{ + { + name: "happy path with start_block_id argument", + arguments: models.Arguments{ + "start_block_id": s.rootBlock.ID().String(), + "block_status": parser.Finalized, + }, + setupBackend: func(sub *statestreamsmock.Subscription) { + s.api.On( + "SubscribeBlockDigestsFromStartBlockID", + mock.Anything, + s.rootBlock.ID(), + flow.BlockStatusFinalized, + ).Return(sub).Once() + }, + }, + { + name: "happy path with start_block_height argument", + arguments: models.Arguments{ + "start_block_height": strconv.FormatUint(s.rootBlock.Header.Height, 10), + "block_status": parser.Finalized, + }, + setupBackend: func(sub *statestreamsmock.Subscription) { + s.api.On( + "SubscribeBlockDigestsFromStartHeight", + mock.Anything, + s.rootBlock.Header.Height, + flow.BlockStatusFinalized, + ).Return(sub).Once() + }, + }, + { + name: "happy path without any start argument", + arguments: models.Arguments{ + "block_status": parser.Finalized, + }, + setupBackend: func(sub *statestreamsmock.Subscription) { + s.api.On( + "SubscribeBlockDigestsFromLatest", + mock.Anything, + flow.BlockStatusFinalized, + ).Return(sub).Once() + }, + }, + } +} + +// TestBlockDigestsDataProvider_HappyPath tests the behavior of the block digests data provider +// when it is configured correctly and operating under normal conditions. It +// validates that block digests are correctly streamed to the channel and ensures +// no unexpected errors occur. 
+func (s *BlockDigestsProviderSuite) TestBlockDigestsDataProvider_HappyPath() { + s.testHappyPath( + BlockDigestsTopic, + s.validBlockDigestsArgumentsTestCases(), + func(dataChan chan interface{}, blocks []*flow.Block) { + for _, block := range blocks { + dataChan <- flow.NewBlockDigest(block.Header.ID(), block.Header.Height, block.Header.Timestamp) + } + }, + s.requireBlockDigests, + ) +} + +// requireBlockHeaders ensures that the received block header information matches the expected data. +func (s *BlocksProviderSuite) requireBlockDigests(v interface{}, expectedBlock *flow.Block) { + actualResponse, ok := v.(*models.BlockDigestMessageResponse) + require.True(s.T(), ok, "unexpected response type: %T", v) + + s.Require().Equal(expectedBlock.Header.ID(), actualResponse.Block.ID()) + s.Require().Equal(expectedBlock.Header.Height, actualResponse.Block.Height) + s.Require().Equal(expectedBlock.Header.Timestamp, actualResponse.Block.Timestamp) +} diff --git a/engine/access/rest/websockets/data_providers/block_headers_provider_test.go b/engine/access/rest/websockets/data_providers/block_headers_provider_test.go index efd94916e92..57c262d8795 100644 --- a/engine/access/rest/websockets/data_providers/block_headers_provider_test.go +++ b/engine/access/rest/websockets/data_providers/block_headers_provider_test.go @@ -2,9 +2,17 @@ package data_providers import ( "context" + "strconv" "testing" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/engine/access/rest/common/parser" + "github.com/onflow/flow-go/engine/access/rest/websockets/models" + statestreamsmock "github.com/onflow/flow-go/engine/access/state_stream/mock" + "github.com/onflow/flow-go/model/flow" ) type BlockHeadersProviderSuite struct { @@ -43,4 +51,77 @@ func (s *BlockHeadersProviderSuite) TestBlockHeadersDataProvider_InvalidArgument } } -// TODO: add tests for responses after the WebsocketController is ready +// 
validBlockHeadersArgumentsTestCases defines test happy cases for block headers data providers. +// Each test case specifies input arguments, and setup functions for the mock API used in the test. +func (s *BlockHeadersProviderSuite) validBlockHeadersArgumentsTestCases() []testType { + return []testType{ + { + name: "happy path with start_block_id argument", + arguments: models.Arguments{ + "start_block_id": s.rootBlock.ID().String(), + "block_status": parser.Finalized, + }, + setupBackend: func(sub *statestreamsmock.Subscription) { + s.api.On( + "SubscribeBlockHeadersFromStartBlockID", + mock.Anything, + s.rootBlock.ID(), + flow.BlockStatusFinalized, + ).Return(sub).Once() + }, + }, + { + name: "happy path with start_block_height argument", + arguments: models.Arguments{ + "start_block_height": strconv.FormatUint(s.rootBlock.Header.Height, 10), + "block_status": parser.Finalized, + }, + setupBackend: func(sub *statestreamsmock.Subscription) { + s.api.On( + "SubscribeBlockHeadersFromStartHeight", + mock.Anything, + s.rootBlock.Header.Height, + flow.BlockStatusFinalized, + ).Return(sub).Once() + }, + }, + { + name: "happy path without any start argument", + arguments: models.Arguments{ + "block_status": parser.Finalized, + }, + setupBackend: func(sub *statestreamsmock.Subscription) { + s.api.On( + "SubscribeBlockHeadersFromLatest", + mock.Anything, + flow.BlockStatusFinalized, + ).Return(sub).Once() + }, + }, + } +} + +// TestBlockHeadersDataProvider_HappyPath tests the behavior of the block headers data provider +// when it is configured correctly and operating under normal conditions. It +// validates that block headers are correctly streamed to the channel and ensures +// no unexpected errors occur. 
+func (s *BlockHeadersProviderSuite) TestBlockHeadersDataProvider_HappyPath() { + s.testHappyPath( + BlockHeadersTopic, + s.validBlockHeadersArgumentsTestCases(), + func(dataChan chan interface{}, blocks []*flow.Block) { + for _, block := range blocks { + dataChan <- block.Header + } + }, + s.requireBlockHeaders, + ) +} + +// requireBlockHeaders ensures that the received block header information matches the expected data. +func (s *BlockHeadersProviderSuite) requireBlockHeaders(v interface{}, expectedBlock *flow.Block) { + actualResponse, ok := v.(*models.BlockHeaderMessageResponse) + require.True(s.T(), ok, "unexpected response type: %T", v) + + s.Require().Equal(expectedBlock.Header, actualResponse.Header) +} diff --git a/engine/access/rest/websockets/data_providers/blocks_provider_test.go b/engine/access/rest/websockets/data_providers/blocks_provider_test.go index 8d3e984bd71..83fe16bbae6 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider_test.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider_test.go @@ -3,14 +3,19 @@ package data_providers import ( "context" "fmt" + "strconv" "testing" + "time" "github.com/rs/zerolog" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" accessmock "github.com/onflow/flow-go/access/mock" "github.com/onflow/flow-go/engine/access/rest/common/parser" "github.com/onflow/flow-go/engine/access/rest/websockets/models" + statestreamsmock "github.com/onflow/flow-go/engine/access/state_stream/mock" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/utils/unittest" ) @@ -23,6 +28,13 @@ type testErrType struct { expectedErrorMsg string } +// testType represents a valid test scenario for subscribing +type testType struct { + name string + arguments models.Arguments + setupBackend func(sub *statestreamsmock.Subscription) +} + // BlocksProviderSuite is a test suite for testing the block providers functionality. 
type BlocksProviderSuite struct { suite.Suite @@ -30,9 +42,11 @@ type BlocksProviderSuite struct { log zerolog.Logger api *accessmock.API - blockMap map[uint64]*flow.Block + blocks []*flow.Block rootBlock flow.Block finalizedBlock *flow.Header + + factory *DataProviderFactoryImpl } func TestBlocksProviderSuite(t *testing.T) { @@ -44,7 +58,8 @@ func (s *BlocksProviderSuite) SetupTest() { s.api = accessmock.NewAPI(s.T()) blockCount := 5 - s.blockMap = make(map[uint64]*flow.Block, blockCount) + s.blocks = make([]*flow.Block, 0, blockCount) + s.rootBlock = unittest.BlockFixture() s.rootBlock.Header.Height = 0 parent := s.rootBlock.Header @@ -53,9 +68,13 @@ func (s *BlocksProviderSuite) SetupTest() { block := unittest.BlockWithParentFixture(parent) // update for next iteration parent = block.Header - s.blockMap[block.Header.Height] = block + s.blocks = append(s.blocks, block) + } s.finalizedBlock = parent + + s.factory = NewDataProviderFactory(s.log, nil, s.api) + s.Require().NotNil(s.factory) } // invalidArgumentsTestCases returns a list of test cases with invalid argument combinations @@ -105,11 +124,9 @@ func (s *BlocksProviderSuite) TestBlocksDataProvider_InvalidArguments() { ctx := context.Background() send := make(chan interface{}) - topic := BlocksTopic - for _, test := range s.invalidArgumentsTestCases() { s.Run(test.name, func() { - provider, err := NewBlocksDataProvider(ctx, s.log, s.api, topic, test.arguments, send) + provider, err := NewBlocksDataProvider(ctx, s.log, s.api, BlocksTopic, test.arguments, send) s.Require().Nil(provider) s.Require().Error(err) s.Require().Contains(err.Error(), test.expectedErrorMsg) @@ -117,4 +134,140 @@ func (s *BlocksProviderSuite) TestBlocksDataProvider_InvalidArguments() { } } -// TODO: add tests for responses after the WebsocketController is ready +// validBlockArgumentsTestCases defines test happy cases for block data providers. 
+// Each test case specifies input arguments, and setup functions for the mock API used in the test. +func (s *BlocksProviderSuite) validBlockArgumentsTestCases() []testType { + return []testType{ + { + name: "happy path with start_block_id argument", + arguments: models.Arguments{ + "start_block_id": s.rootBlock.ID().String(), + "block_status": parser.Finalized, + }, + setupBackend: func(sub *statestreamsmock.Subscription) { + s.api.On( + "SubscribeBlocksFromStartBlockID", + mock.Anything, + s.rootBlock.ID(), + flow.BlockStatusFinalized, + ).Return(sub).Once() + }, + }, + { + name: "happy path with start_block_height argument", + arguments: models.Arguments{ + "start_block_height": strconv.FormatUint(s.rootBlock.Header.Height, 10), + "block_status": parser.Finalized, + }, + setupBackend: func(sub *statestreamsmock.Subscription) { + s.api.On( + "SubscribeBlocksFromStartHeight", + mock.Anything, + s.rootBlock.Header.Height, + flow.BlockStatusFinalized, + ).Return(sub).Once() + }, + }, + { + name: "happy path without any start argument", + arguments: models.Arguments{ + "block_status": parser.Finalized, + }, + setupBackend: func(sub *statestreamsmock.Subscription) { + s.api.On( + "SubscribeBlocksFromLatest", + mock.Anything, + flow.BlockStatusFinalized, + ).Return(sub).Once() + }, + }, + } +} + +// TestBlocksDataProvider_HappyPath tests the behavior of the block data provider +// when it is configured correctly and operating under normal conditions. It +// validates that blocks are correctly streamed to the channel and ensures +// no unexpected errors occur. +func (s *BlocksProviderSuite) TestBlocksDataProvider_HappyPath() { + s.testHappyPath( + BlocksTopic, + s.validBlockArgumentsTestCases(), + func(dataChan chan interface{}, blocks []*flow.Block) { + for _, block := range blocks { + dataChan <- block + } + }, + s.requireBlock, + ) +} + +// requireBlocks ensures that the received block information matches the expected data. 
+func (s *BlocksProviderSuite) requireBlock(v interface{}, expectedBlock *flow.Block) { + actualResponse, ok := v.(*models.BlockMessageResponse) + require.True(s.T(), ok, "unexpected response type: %T", v) + + s.Require().Equal(expectedBlock, actualResponse.Block) +} + +// testHappyPath tests a variety of scenarios for data providers in +// happy path scenarios. This function runs parameterized test cases that +// simulate various configurations and verifies that the data provider operates +// as expected without encountering errors. +// +// Arguments: +// - topic: The topic associated with the data provider. +// - tests: A slice of test cases to run, each specifying setup and validation logic. +// - sendData: A function to simulate emitting data into the data channel. +// - requireFn: A function to validate the output received in the send channel. +func (s *BlocksProviderSuite) testHappyPath( + topic string, + tests []testType, + sendData func(chan interface{}, []*flow.Block), + requireFn func(interface{}, *flow.Block), +) { + for _, test := range tests { + s.Run(test.name, func() { + ctx := context.Background() + send := make(chan interface{}, 10) + + // Create a channel to simulate the subscription's data channel + dataChan := make(chan interface{}) + + // // Create a mock subscription and mock the channel + sub := statestreamsmock.NewSubscription(s.T()) + sub.On("Channel").Return((<-chan interface{})(dataChan)) + sub.On("Err").Return(nil) + test.setupBackend(sub) + + // Create the data provider instance + provider, err := s.factory.NewDataProvider(ctx, topic, test.arguments, send) + s.Require().NotNil(provider) + s.Require().NoError(err) + + // Run the provider in a separate goroutine + go func() { + err = provider.Run() + s.Require().NoError(err) + }() + + // Simulate emitting data to the data channel + go func() { + defer close(dataChan) + sendData(dataChan, s.blocks) + }() + + // Collect responses + for _, b := range s.blocks { + 
unittest.RequireReturnsBefore(s.T(), func() { + v, ok := <-send + s.Require().True(ok, "channel closed while waiting for block %x %v: err: %v", b.Header.Height, b.ID(), sub.Err()) + + requireFn(v, b) + }, time.Second, fmt.Sprintf("timed out waiting for block %d %v", b.Header.Height, b.ID())) + } + + // Ensure the provider is properly closed after the test + provider.Close() + }) + } +} diff --git a/engine/access/rest/websockets/data_providers/factory_test.go b/engine/access/rest/websockets/data_providers/factory_test.go index 1b33d892573..2ed2b075d0c 100644 --- a/engine/access/rest/websockets/data_providers/factory_test.go +++ b/engine/access/rest/websockets/data_providers/factory_test.go @@ -58,8 +58,6 @@ func (s *DataProviderFactorySuite) setupSubscription(apiCall *mock.Call) { // TestSupportedTopics verifies that supported topics return a valid provider and no errors. // Each test case includes a topic and arguments for which a data provider should be created. func (s *DataProviderFactorySuite) TestSupportedTopics() { - s.T().Parallel() - // Define supported topics and check if each returns the correct provider without errors testCases := []struct { name string From 437af4f6cdb52aaddc8b07cb80943b93c88dceed Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Mon, 2 Dec 2024 14:57:31 +0200 Subject: [PATCH 33/64] Added additional description for websocket data providers Run method --- .../rest/websockets/data_providers/data_provider.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/engine/access/rest/websockets/data_providers/data_provider.go b/engine/access/rest/websockets/data_providers/data_provider.go index 9d1d8855f21..08dc497808b 100644 --- a/engine/access/rest/websockets/data_providers/data_provider.go +++ b/engine/access/rest/websockets/data_providers/data_provider.go @@ -17,6 +17,17 @@ type DataProvider interface { Close() error // Run starts processing the subscription and handles responses. 
// + // The separation of the data provider's creation and its Run() method + // allows for better control over the subscription lifecycle. By doing so, + // a confirmation message can be sent to the client immediately upon + // successful subscription creation or failure. This ensures any required + // setup or preparation steps can be handled prior to initiating the + // subscription and data streaming process. + // + // Run() begins the actual processing of the subscription. At this point, + // the context used for provider creation is no longer needed, as all + // necessary preparation steps should have been completed. + // // No errors are expected during normal operations. Run() error } From 1f2f0aea0dfb1e999ab71c2813772d585a941df4 Mon Sep 17 00:00:00 2001 From: UlyanaAndrukhiv Date: Mon, 2 Dec 2024 16:56:29 +0200 Subject: [PATCH 34/64] Added generic HandleResponse, updated providers --- .../data_providers/block_digests_provider.go | 22 +++++++------------ .../data_providers/block_headers_provider.go | 22 +++++++------------ .../data_providers/blocks_provider.go | 22 +++++++------------ .../data_providers/blocks_provider_test.go | 2 +- engine/access/subscription/util.go | 22 +++++++++++++++++++ 5 files changed, 47 insertions(+), 43 deletions(-) diff --git a/engine/access/rest/websockets/data_providers/block_digests_provider.go b/engine/access/rest/websockets/data_providers/block_digests_provider.go index b2f5d3496b3..1fa3f7a6dc7 100644 --- a/engine/access/rest/websockets/data_providers/block_digests_provider.go +++ b/engine/access/rest/websockets/data_providers/block_digests_provider.go @@ -58,7 +58,14 @@ func NewBlockDigestsDataProvider( // // No errors are expected during normal operations. 
func (p *BlockDigestsDataProvider) Run() error { - return subscription.HandleSubscription(p.subscription, p.handleResponse(p.send)) + return subscription.HandleSubscription( + p.subscription, + subscription.HandleResponse(p.send, func(block *flow.BlockDigest) (interface{}, error) { + return &models.BlockDigestMessageResponse{ + Block: block, + }, nil + }), + ) } // createSubscription creates a new subscription using the specified input arguments. @@ -73,16 +80,3 @@ func (p *BlockDigestsDataProvider) createSubscription(ctx context.Context, args return p.api.SubscribeBlockDigestsFromLatest(ctx, args.BlockStatus) } - -// handleResponse processes a block digest and sends the formatted response. -// -// No errors are expected during normal operations. -func (p *BlockDigestsDataProvider) handleResponse(send chan<- interface{}) func(block *flow.BlockDigest) error { - return func(block *flow.BlockDigest) error { - send <- &models.BlockDigestMessageResponse{ - Block: block, - } - - return nil - } -} diff --git a/engine/access/rest/websockets/data_providers/block_headers_provider.go b/engine/access/rest/websockets/data_providers/block_headers_provider.go index 7cd91fa4a38..4f9e29e2428 100644 --- a/engine/access/rest/websockets/data_providers/block_headers_provider.go +++ b/engine/access/rest/websockets/data_providers/block_headers_provider.go @@ -58,7 +58,14 @@ func NewBlockHeadersDataProvider( // // No errors are expected during normal operations. func (p *BlockHeadersDataProvider) Run() error { - return subscription.HandleSubscription(p.subscription, p.handleResponse(p.send)) + return subscription.HandleSubscription( + p.subscription, + subscription.HandleResponse(p.send, func(header *flow.Header) (interface{}, error) { + return &models.BlockHeaderMessageResponse{ + Header: header, + }, nil + }), + ) } // createSubscription creates a new subscription using the specified input arguments. 
@@ -73,16 +80,3 @@ func (p *BlockHeadersDataProvider) createSubscription(ctx context.Context, args return p.api.SubscribeBlockHeadersFromLatest(ctx, args.BlockStatus) } - -// handleResponse processes a block header and sends the formatted response. -// -// No errors are expected during normal operations. -func (p *BlockHeadersDataProvider) handleResponse(send chan<- interface{}) func(header *flow.Header) error { - return func(header *flow.Header) error { - send <- &models.BlockHeaderMessageResponse{ - Header: header, - } - - return nil - } -} diff --git a/engine/access/rest/websockets/data_providers/blocks_provider.go b/engine/access/rest/websockets/data_providers/blocks_provider.go index 794cd8e63f8..72cfaa6f554 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider.go @@ -67,7 +67,14 @@ func NewBlocksDataProvider( // // No errors are expected during normal operations. func (p *BlocksDataProvider) Run() error { - return subscription.HandleSubscription(p.subscription, p.handleResponse(p.send)) + return subscription.HandleSubscription( + p.subscription, + subscription.HandleResponse(p.send, func(block *flow.Block) (interface{}, error) { + return &models.BlockMessageResponse{ + Block: block, + }, nil + }), + ) } // createSubscription creates a new subscription using the specified input arguments. @@ -83,19 +90,6 @@ func (p *BlocksDataProvider) createSubscription(ctx context.Context, args Blocks return p.api.SubscribeBlocksFromLatest(ctx, args.BlockStatus) } -// handleResponse processes a block and sends the formatted response. -// -// No errors are expected during normal operations. -func (p *BlocksDataProvider) handleResponse(send chan<- interface{}) func(*flow.Block) error { - return func(block *flow.Block) error { - send <- &models.BlockMessageResponse{ - Block: block, - } - - return nil - } -} - // ParseBlocksArguments validates and initializes the blocks arguments. 
func ParseBlocksArguments(arguments models.Arguments) (BlocksArguments, error) { var args BlocksArguments diff --git a/engine/access/rest/websockets/data_providers/blocks_provider_test.go b/engine/access/rest/websockets/data_providers/blocks_provider_test.go index 83fe16bbae6..6f46d27ccfe 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider_test.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider_test.go @@ -217,7 +217,7 @@ func (s *BlocksProviderSuite) requireBlock(v interface{}, expectedBlock *flow.Bl // Arguments: // - topic: The topic associated with the data provider. // - tests: A slice of test cases to run, each specifying setup and validation logic. -// - sendData: A function to simulate emitting data into the data channel. +// - sendData: A function to simulate emitting data into the subscription's data channel. // - requireFn: A function to validate the output received in the send channel. func (s *BlocksProviderSuite) testHappyPath( topic string, diff --git a/engine/access/subscription/util.go b/engine/access/subscription/util.go index 2dadac441cf..9ef98044bb8 100644 --- a/engine/access/subscription/util.go +++ b/engine/access/subscription/util.go @@ -54,3 +54,25 @@ func HandleRPCSubscription[T any](sub Subscription, handleResponse func(resp T) return nil } + +// HandleResponse processes a generic response of type and sends it to the provided channel. +// +// Parameters: +// - send: The channel to which the processed response is sent. +// - transform: A function to transform the response into the expected interface{} type. +// +// No errors are expected during normal operations. 
+func HandleResponse[T any](send chan<- interface{}, transform func(resp T) (interface{}, error)) func(resp T) error { + return func(response T) error { + // Transform the response + resp, err := transform(response) + if err != nil { + return fmt.Errorf("failed to transform response: %w", err) + } + + // send to the channel + send <- resp + + return nil + } +} From 373bd67963dbe4db81b1795978f74f0c24fbadf8 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 19 Nov 2024 15:22:49 -0800 Subject: [PATCH 35/64] add verify execution result cmd --- cmd/util/cmd/root.go | 2 + cmd/util/cmd/verify_execution_result/cmd.go | 98 +++++++++ .../execution_verification_test.go | 28 +-- engine/verification/fetcher/engine.go | 74 +------ engine/verification/fetcher/engine_test.go | 23 +- engine/verification/verifier/verifiers.go | 204 ++++++++++++++++++ model/verification/convert/convert.go | 81 +++++++ 7 files changed, 421 insertions(+), 89 deletions(-) create mode 100644 cmd/util/cmd/verify_execution_result/cmd.go create mode 100644 engine/verification/verifier/verifiers.go create mode 100644 model/verification/convert/convert.go diff --git a/cmd/util/cmd/root.go b/cmd/util/cmd/root.go index cefd8db691d..902ba16cb46 100644 --- a/cmd/util/cmd/root.go +++ b/cmd/util/cmd/root.go @@ -41,6 +41,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/snapshot" system_addresses "github.com/onflow/flow-go/cmd/util/cmd/system-addresses" truncate_database "github.com/onflow/flow-go/cmd/util/cmd/truncate-database" + verify_execution_result "github.com/onflow/flow-go/cmd/util/cmd/verify_execution_result" "github.com/onflow/flow-go/cmd/util/cmd/version" "github.com/onflow/flow-go/module/profiler" ) @@ -126,6 +127,7 @@ func addCommands() { rootCmd.AddCommand(debug_script.Cmd) rootCmd.AddCommand(generate_authorization_fixes.Cmd) rootCmd.AddCommand(evm_state_exporter.Cmd) + rootCmd.AddCommand(verify_execution_result.Cmd) } func initConfig() { diff --git 
a/cmd/util/cmd/verify_execution_result/cmd.go b/cmd/util/cmd/verify_execution_result/cmd.go new file mode 100644 index 00000000000..6aa7e135483 --- /dev/null +++ b/cmd/util/cmd/verify_execution_result/cmd.go @@ -0,0 +1,98 @@ +package verify + +import ( + "fmt" + "strconv" + "strings" + + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/engine/verification/verifier" + "github.com/onflow/flow-go/model/flow" +) + +var ( + flagLastK uint64 + flagDatadir string + flagChunkDataPackDir string + flagChain string + flagFromTo string +) + +// # verify the last 100 sealed blocks +// ./util verify_execution_result --chain flow-testnet --datadir /var/flow/data/protocol --chunk_data_pack_dir /var/flow/data/chunk_data_pack --lastk 100 +// # verify the blocks from height 2000 to 3000 +// ./util verify_execution_result --chain flow-testnet --datadir /var/flow/data/protocol --chunk_data_pack_dir /var/flow/data/chunk_data_pack --from_to 2000-3000 +var Cmd = &cobra.Command{ + Use: "verify-execution-result", + Short: "verify block execution by verifying all chunks in the result", + Run: run, +} + +func init() { + Cmd.Flags().StringVar(&flagChain, "chain", "", "Chain name") + _ = Cmd.MarkFlagRequired("chain") + + Cmd.Flags().StringVar(&flagDatadir, "datadir", "/var/flow/data/protocol", + "directory that stores the protocol state") + + Cmd.Flags().StringVar(&flagChunkDataPackDir, "chunk_data_pack_dir", "/var/flow/data/chunk_data_pack", + "directory that stores the chunk data packs") + + Cmd.Flags().Uint64Var(&flagLastK, "lastk", 1, + "last k sealed blocks to verify") + + Cmd.Flags().StringVar(&flagFromTo, "from_to", "", + "the height range to verify blocks, i.e, 1-1000, 1000-2000, 2000-3000, etc.") +} + +func run(*cobra.Command, []string) { + _ = flow.ChainID(flagChain).Chain() + + if flagFromTo != "" { + from, to, err := parseFromTo(flagFromTo) + if err != nil { + log.Fatal().Err(err).Msg("could not parse from_to") + } + + log.Info().Msgf("verifying 
range from %d to %d", from, to) + err = verifier.VerifyRange(from, to, flow.Testnet, flagDatadir, flagChunkDataPackDir) + if err != nil { + log.Fatal().Err(err).Msg("could not verify last k height") + } + log.Info().Msgf("successfully verified range from %d to %d", from, to) + + } else { + log.Info().Msgf("verifying last %d sealed blocks", flagLastK) + err := verifier.VerifyLastKHeight(flagLastK, flow.Testnet, flagDatadir, flagChunkDataPackDir) + if err != nil { + log.Fatal().Err(err).Msg("could not verify last k height") + } + + log.Info().Msgf("successfully verified last %d sealed blocks", flagLastK) + } +} + +func parseFromTo(fromTo string) (from, to uint64, err error) { + parts := strings.Split(fromTo, "-") + if len(parts) != 2 { + return 0, 0, fmt.Errorf("invalid format: expected 'from-to', got '%s'", fromTo) + } + + from, err = strconv.ParseUint(strings.TrimSpace(parts[0]), 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("invalid 'from' value: %w", err) + } + + to, err = strconv.ParseUint(strings.TrimSpace(parts[1]), 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("invalid 'to' value: %w", err) + } + + if from > to { + return 0, 0, fmt.Errorf("'from' value (%d) must be less than or equal to 'to' value (%d)", from, to) + } + + return from, to, nil +} diff --git a/engine/execution/computation/execution_verification_test.go b/engine/execution/computation/execution_verification_test.go index c949b378df4..bcdadd2a0ad 100644 --- a/engine/execution/computation/execution_verification_test.go +++ b/engine/execution/computation/execution_verification_test.go @@ -25,7 +25,6 @@ import ( "github.com/onflow/flow-go/engine/execution/testutil" "github.com/onflow/flow-go/engine/execution/utils" "github.com/onflow/flow-go/engine/testutil/mocklocal" - "github.com/onflow/flow-go/engine/verification/fetcher" "github.com/onflow/flow-go/fvm" "github.com/onflow/flow-go/fvm/blueprints" "github.com/onflow/flow-go/fvm/environment" @@ -36,6 +35,7 @@ import ( 
"github.com/onflow/flow-go/ledger/complete/wal/fixtures" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/verification" + "github.com/onflow/flow-go/model/verification/convert" "github.com/onflow/flow-go/module/chunks" "github.com/onflow/flow-go/module/executiondatasync/execution_data" exedataprovider "github.com/onflow/flow-go/module/executiondatasync/provider" @@ -69,7 +69,7 @@ func Test_ExecutionMatchesVerification(t *testing.T) { `access(all) contract Foo { access(all) event FooEvent(x: Int, y: Int) - access(all) fun emitEvent() { + access(all) fun emitEvent() { emit FooEvent(x: 2, y: 1) } }`), "Foo") @@ -113,7 +113,7 @@ func Test_ExecutionMatchesVerification(t *testing.T) { `access(all) contract Foo { access(all) event FooEvent(x: Int, y: Int) - access(all) fun emitEvent() { + access(all) fun emitEvent() { emit FooEvent(x: 2, y: 1) } }`), "Foo") @@ -585,34 +585,34 @@ func TestTransactionFeeDeduction(t *testing.T) { // // The withdraw amount and the account from getAccount // would be the parameters to the transaction - + import FungibleToken from 0x%s import FlowToken from 0x%s - + transaction(amount: UFix64, to: Address) { - + // The Vault resource that holds the tokens that are being transferred let sentVault: @{FungibleToken.Vault} - + prepare(signer: auth(BorrowValue) &Account) { - + // Get a reference to the signer's stored vault let vaultRef = signer.storage.borrow(from: /storage/flowTokenVault) ?? panic("Could not borrow reference to the owner's Vault!") - + // Withdraw tokens from the signer's stored vault self.sentVault <- vaultRef.withdraw(amount: amount) } - + execute { - + // Get the recipient's public account object let recipient = getAccount(to) - + // Get a reference to the recipient's Receiver let receiverRef = recipient.capabilities.borrow<&{FungibleToken.Receiver}>(/public/flowTokenReceiver) ?? 
panic("Could not borrow receiver reference to the recipient's Vault") - + // Deposit the withdrawn tokens in the recipient's receiver receiverRef.deposit(from: <-self.sentVault) } @@ -840,7 +840,7 @@ func executeBlockAndVerifyWithParameters(t *testing.T, for i, chunk := range er.Chunks { isSystemChunk := i == er.Chunks.Len()-1 - offsetForChunk, err := fetcher.TransactionOffsetForChunk(er.Chunks, chunk.Index) + offsetForChunk, err := convert.TransactionOffsetForChunk(er.Chunks, chunk.Index) require.NoError(t, err) vcds[i] = &verification.VerifiableChunkData{ diff --git a/engine/verification/fetcher/engine.go b/engine/verification/fetcher/engine.go index 20afad04021..551b0571526 100644 --- a/engine/verification/fetcher/engine.go +++ b/engine/verification/fetcher/engine.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/model/verification" + "github.com/onflow/flow-go/model/verification/convert" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/mempool" "github.com/onflow/flow-go/module/trace" @@ -259,7 +260,7 @@ func (e *Engine) HandleChunkDataPack(originID flow.Identifier, response *verific Uint64("block_height", status.BlockHeight). Hex("result_id", logging.ID(resultID)). Uint64("chunk_index", status.ChunkIndex). - Bool("system_chunk", IsSystemChunk(status.ChunkIndex, status.ExecutionResult)). + Bool("system_chunk", convert.IsSystemChunk(status.ChunkIndex, status.ExecutionResult)). 
Logger() span, ctx := e.tracer.StartBlockSpan(context.Background(), status.ExecutionResult.BlockID, trace.VERFetcherHandleChunkDataPack) @@ -413,7 +414,7 @@ func (e Engine) validateCollectionID( result *flow.ExecutionResult, chunk *flow.Chunk) error { - if IsSystemChunk(chunk.Index, result) { + if convert.IsSystemChunk(chunk.Index, result) { return e.validateSystemChunkCollection(chunkDataPack) } @@ -550,29 +551,13 @@ func (e *Engine) makeVerifiableChunkData(chunk *flow.Chunk, chunkDataPack *flow.ChunkDataPack, ) (*verification.VerifiableChunkData, error) { - // system chunk is the last chunk - isSystemChunk := IsSystemChunk(chunk.Index, result) - - endState, err := EndStateCommitment(result, chunk.Index, isSystemChunk) - if err != nil { - return nil, fmt.Errorf("could not compute end state of chunk: %w", err) - } - - transactionOffset, err := TransactionOffsetForChunk(result.Chunks, chunk.Index) - if err != nil { - return nil, fmt.Errorf("cannot compute transaction offset for chunk: %w", err) - } - - return &verification.VerifiableChunkData{ - IsSystemChunk: isSystemChunk, - Chunk: chunk, - Header: header, - Snapshot: snapshot, - Result: result, - ChunkDataPack: chunkDataPack, - EndState: endState, - TransactionOffset: transactionOffset, - }, nil + return convert.FromChunkDataPack( + chunk, + chunkDataPack, + header, + snapshot, + result, + ) } // requestChunkDataPack creates and dispatches a chunk data pack request to the requester engine. @@ -661,42 +646,3 @@ func executorsOf(receipts []*flow.ExecutionReceipt, resultID flow.Identifier) (f return agrees, disagrees } - -// EndStateCommitment computes the end state of the given chunk. 
-func EndStateCommitment(result *flow.ExecutionResult, chunkIndex uint64, systemChunk bool) (flow.StateCommitment, error) { - var endState flow.StateCommitment - if systemChunk { - var err error - // last chunk in a result is the system chunk and takes final state commitment - endState, err = result.FinalStateCommitment() - if err != nil { - return flow.DummyStateCommitment, fmt.Errorf("can not read final state commitment, likely a bug:%w", err) - } - } else { - // any chunk except last takes the subsequent chunk's start state - endState = result.Chunks[chunkIndex+1].StartState - } - - return endState, nil -} - -// TransactionOffsetForChunk calculates transaction offset for a given chunk which is the index of the first -// transaction of this chunk within the whole block -func TransactionOffsetForChunk(chunks flow.ChunkList, chunkIndex uint64) (uint32, error) { - if int(chunkIndex) > len(chunks)-1 { - return 0, fmt.Errorf("chunk list out of bounds, len %d asked for chunk %d", len(chunks), chunkIndex) - } - var offset uint32 = 0 - for i := 0; i < int(chunkIndex); i++ { - offset += uint32(chunks[i].NumberOfTransactions) - } - return offset, nil -} - -// IsSystemChunk returns true if `chunkIndex` points to a system chunk in `result`. -// Otherwise, it returns false. -// In the current version, a chunk is a system chunk if it is the last chunk of the -// execution result. 
-func IsSystemChunk(chunkIndex uint64, result *flow.ExecutionResult) bool { - return chunkIndex == uint64(len(result.Chunks)-1) -} diff --git a/engine/verification/fetcher/engine_test.go b/engine/verification/fetcher/engine_test.go index b2fb94a94cb..273a76ac73f 100644 --- a/engine/verification/fetcher/engine_test.go +++ b/engine/verification/fetcher/engine_test.go @@ -18,6 +18,7 @@ import ( "github.com/onflow/flow-go/model/chunks" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/verification" + "github.com/onflow/flow-go/model/verification/convert" mempool "github.com/onflow/flow-go/module/mempool/mock" module "github.com/onflow/flow-go/module/mock" "github.com/onflow/flow-go/module/trace" @@ -757,10 +758,10 @@ func mockVerifierEngine(t *testing.T, require.Equal(t, expected.Result.ID(), vc.Result.ID()) require.Equal(t, expected.Header.ID(), vc.Header.ID()) - isSystemChunk := fetcher.IsSystemChunk(vc.Chunk.Index, vc.Result) + isSystemChunk := convert.IsSystemChunk(vc.Chunk.Index, vc.Result) require.Equal(t, isSystemChunk, vc.IsSystemChunk) - endState, err := fetcher.EndStateCommitment(vc.Result, vc.Chunk.Index, isSystemChunk) + endState, err := convert.EndStateCommitment(vc.Result, vc.Chunk.Index, isSystemChunk) require.NoError(t, err) require.Equal(t, endState, vc.EndState) @@ -872,7 +873,7 @@ func chunkDataPackResponseFixture(t *testing.T, collection *flow.Collection, result *flow.ExecutionResult) *verification.ChunkDataPackResponse { - require.Equal(t, collection != nil, !fetcher.IsSystemChunk(chunk.Index, result), "only non-system chunks must have a collection") + require.Equal(t, collection != nil, !convert.IsSystemChunk(chunk.Index, result), "only non-system chunks must have a collection") return &verification.ChunkDataPackResponse{ Locator: chunks.Locator{ @@ -917,7 +918,7 @@ func verifiableChunkFixture(t *testing.T, result *flow.ExecutionResult, chunkDataPack *flow.ChunkDataPack) *verification.VerifiableChunkData { - offsetForChunk, 
err := fetcher.TransactionOffsetForChunk(result.Chunks, chunk.Index) + offsetForChunk, err := convert.TransactionOffsetForChunk(result.Chunks, chunk.Index) require.NoError(t, err) // TODO: add end state @@ -1000,7 +1001,7 @@ func completeChunkStatusListFixture(t *testing.T, chunkCount int, statusCount in locators := unittest.ChunkStatusListToChunkLocatorFixture(statuses) for _, status := range statuses { - if fetcher.IsSystemChunk(status.ChunkIndex, result) { + if convert.IsSystemChunk(status.ChunkIndex, result) { // system-chunk should have a nil collection continue } @@ -1012,7 +1013,7 @@ func completeChunkStatusListFixture(t *testing.T, chunkCount int, statusCount in func TestTransactionOffsetForChunk(t *testing.T) { t.Run("first chunk index always returns zero offset", func(t *testing.T) { - offsetForChunk, err := fetcher.TransactionOffsetForChunk([]*flow.Chunk{nil}, 0) + offsetForChunk, err := convert.TransactionOffsetForChunk([]*flow.Chunk{nil}, 0) require.NoError(t, err) assert.Equal(t, uint32(0), offsetForChunk) }) @@ -1042,19 +1043,19 @@ func TestTransactionOffsetForChunk(t *testing.T) { }, } - offsetForChunk, err := fetcher.TransactionOffsetForChunk(chunksList, 0) + offsetForChunk, err := convert.TransactionOffsetForChunk(chunksList, 0) require.NoError(t, err) assert.Equal(t, uint32(0), offsetForChunk) - offsetForChunk, err = fetcher.TransactionOffsetForChunk(chunksList, 1) + offsetForChunk, err = convert.TransactionOffsetForChunk(chunksList, 1) require.NoError(t, err) assert.Equal(t, uint32(1), offsetForChunk) - offsetForChunk, err = fetcher.TransactionOffsetForChunk(chunksList, 2) + offsetForChunk, err = convert.TransactionOffsetForChunk(chunksList, 2) require.NoError(t, err) assert.Equal(t, uint32(3), offsetForChunk) - offsetForChunk, err = fetcher.TransactionOffsetForChunk(chunksList, 3) + offsetForChunk, err = convert.TransactionOffsetForChunk(chunksList, 3) require.NoError(t, err) assert.Equal(t, uint32(6), offsetForChunk) }) @@ -1063,7 +1064,7 @@ 
func TestTransactionOffsetForChunk(t *testing.T) { chunksList := make([]*flow.Chunk, 2) - _, err := fetcher.TransactionOffsetForChunk(chunksList, 2) + _, err := convert.TransactionOffsetForChunk(chunksList, 2) require.Error(t, err) }) } diff --git a/engine/verification/verifier/verifiers.go b/engine/verification/verifier/verifiers.go new file mode 100644 index 00000000000..1bc1d11652a --- /dev/null +++ b/engine/verification/verifier/verifiers.go @@ -0,0 +1,204 @@ +package verifier + +import ( + "fmt" + + "github.com/dgraph-io/badger/v2" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/engine/execution/computation" + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/verification/convert" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/chunks" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" + storagepebble "github.com/onflow/flow-go/storage/pebble" +) + +// VerifyLastKHeight verifies the last k sealed blocks by verifying all chunks in the results. +// It assumes the latest sealed block has been executed, and the chunk data packs have not been +// pruned. +func VerifyLastKHeight(k uint64, chainID flow.ChainID, protocolDataDir string, chunkDataPackDir string) error { + db, storages, chunkDataPacks, state, verifier, err := initStorages(chainID, protocolDataDir, chunkDataPackDir) + if err != nil { + return fmt.Errorf("could not init storages: %w", err) + } + defer db.Close() + + lastSealed, err := state.Sealed().Head() + if err != nil { + return fmt.Errorf("could not get last sealed height: %w", err) + } + + root := state.Params().SealedRoot().Height + from := lastSealed.Height - k + 1 + + // root block is not verifiable, because it's sealed already. 
+ // the first verifiable is the next block of the root block + firstVerifiable := root + 1 + + if from < firstVerifiable { + from = firstVerifiable + } + to := lastSealed.Height + + for height := from; height <= to; height++ { + log.Info().Uint64("height", height).Msg("verifying height") + err := verifyHeight(height, storages.Headers, chunkDataPacks, storages.Results, state, verifier) + if err != nil { + return fmt.Errorf("could not verify height %d: %w", height, err) + } + } + + return nil +} + +// VerifyRange verifies all chunks in the results of the blocks in the given range. +func VerifyRange( + from, to uint64, + chainID flow.ChainID, + protocolDataDir string, chunkDataPackDir string, +) error { + db, storages, chunkDataPacks, state, verifier, err := initStorages(chainID, protocolDataDir, chunkDataPackDir) + if err != nil { + return fmt.Errorf("could not init storages: %w", err) + } + defer db.Close() + + for height := from; height <= to; height++ { + log.Info().Uint64("height", height).Msg("verifying height") + err := verifyHeight(height, storages.Headers, chunkDataPacks, storages.Results, state, verifier) + if err != nil { + return fmt.Errorf("could not verify height %d: %w", height, err) + } + } + + return nil +} + +func initStorages(chainID flow.ChainID, dataDir string, chunkDataPackDir string) ( + *badger.DB, + *storage.All, + storage.ChunkDataPacks, + protocol.State, + module.ChunkVerifier, + error, +) { + db := common.InitStorage(dataDir) + + storages := common.InitStorages(db) + state, err := common.InitProtocolState(db, storages) + if err != nil { + return nil, nil, nil, nil, nil, fmt.Errorf("could not init protocol state: %w", err) + } + + chunkDataPackDB, err := storagepebble.OpenDefaultPebbleDB(chunkDataPackDir) + if err != nil { + return nil, nil, nil, nil, nil, fmt.Errorf("could not open chunk data pack DB: %w", err) + } + chunkDataPacks := storagepebble.NewChunkDataPacks(metrics.NewNoopCollector(), + chunkDataPackDB, storages.Collections, 1000) 
+ + verifier := makeVerifier(log.Logger, chainID, storages.Headers) + return db, storages, chunkDataPacks, state, verifier, nil +} + +func verifyHeight( + height uint64, + headers storage.Headers, + chunkDataPacks storage.ChunkDataPacks, + results storage.ExecutionResults, + state protocol.State, + verifier module.ChunkVerifier, +) error { + header, err := headers.ByHeight(height) + if err != nil { + return fmt.Errorf("could not get block header by height %d: %w", height, err) + } + + blockID := header.ID() + + if err != nil { + return fmt.Errorf("could not get block ID by height %d: %w", height, err) + } + + result, err := results.ByBlockID(blockID) + if err != nil { + return fmt.Errorf("could not get execution result by block ID %s: %w", blockID, err) + } + snapshot := state.AtBlockID(blockID) + + for i, chunk := range result.Chunks { + chunkDataPack, err := chunkDataPacks.ByChunkID(chunk.ID()) + if err != nil { + return fmt.Errorf("could not get chunk data pack by chunk ID %s: %w", chunk.ID(), err) + } + + vcd, err := convert.FromChunkDataPack(chunk, chunkDataPack, header, snapshot, result) + if err != nil { + return err + } + + _, err = verifier.Verify(vcd) + if err != nil { + return fmt.Errorf("could not verify %d-th chunk: %w", i, err) + } + } + return nil +} + +func makeVerifier( + logger zerolog.Logger, + chainID flow.ChainID, + headers storage.Headers, +) module.ChunkVerifier { + + vm := fvm.NewVirtualMachine() + fvmOptions := initFvmOptions(chainID, headers) + fvmOptions = append( + []fvm.Option{fvm.WithLogger(logger)}, + fvmOptions..., + ) + + // TODO(JanezP): cleanup creation of fvm context github.com/onflow/flow-go/issues/5249 + fvmOptions = append(fvmOptions, computation.DefaultFVMOptions(chainID, false, false)...) + vmCtx := fvm.NewContext(fvmOptions...) 
+ + chunkVerifier := chunks.NewChunkVerifier(vm, vmCtx, logger) + return chunkVerifier +} + +func initFvmOptions(chainID flow.ChainID, headers storage.Headers) []fvm.Option { + blockFinder := environment.NewBlockFinder(headers) + vmOpts := []fvm.Option{ + fvm.WithChain(chainID.Chain()), + fvm.WithBlocks(blockFinder), + fvm.WithAccountStorageLimit(true), + } + switch chainID { + case flow.Testnet, + flow.Sandboxnet, + flow.Previewnet, + flow.Mainnet: + vmOpts = append(vmOpts, + fvm.WithTransactionFeesEnabled(true), + ) + } + switch chainID { + case flow.Testnet, + flow.Sandboxnet, + flow.Previewnet, + flow.Localnet, + flow.Benchnet: + vmOpts = append(vmOpts, + fvm.WithContractDeploymentRestricted(false), + ) + } + return vmOpts +} diff --git a/model/verification/convert/convert.go b/model/verification/convert/convert.go new file mode 100644 index 00000000000..4e62e4d446c --- /dev/null +++ b/model/verification/convert/convert.go @@ -0,0 +1,81 @@ +package convert + +import ( + "fmt" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/verification" + "github.com/onflow/flow-go/state/protocol" +) + +func FromChunkDataPack( + chunk *flow.Chunk, + chunkDataPack *flow.ChunkDataPack, + header *flow.Header, + snapshot protocol.Snapshot, + result *flow.ExecutionResult, +) (*verification.VerifiableChunkData, error) { + + // system chunk is the last chunk + isSystemChunk := IsSystemChunk(chunk.Index, result) + + endState, err := EndStateCommitment(result, chunk.Index, isSystemChunk) + if err != nil { + return nil, fmt.Errorf("could not compute end state of chunk: %w", err) + } + + transactionOffset, err := TransactionOffsetForChunk(result.Chunks, chunk.Index) + if err != nil { + return nil, fmt.Errorf("cannot compute transaction offset for chunk: %w", err) + } + + return &verification.VerifiableChunkData{ + IsSystemChunk: isSystemChunk, + Chunk: chunk, + Header: header, + Snapshot: snapshot, + Result: result, + ChunkDataPack: chunkDataPack, + EndState: 
endState, + TransactionOffset: transactionOffset, + }, nil +} + +// EndStateCommitment computes the end state of the given chunk. +func EndStateCommitment(result *flow.ExecutionResult, chunkIndex uint64, systemChunk bool) (flow.StateCommitment, error) { + var endState flow.StateCommitment + if systemChunk { + var err error + // last chunk in a result is the system chunk and takes final state commitment + endState, err = result.FinalStateCommitment() + if err != nil { + return flow.DummyStateCommitment, fmt.Errorf("can not read final state commitment, likely a bug:%w", err) + } + } else { + // any chunk except last takes the subsequent chunk's start state + endState = result.Chunks[chunkIndex+1].StartState + } + + return endState, nil +} + +// TransactionOffsetForChunk calculates transaction offset for a given chunk which is the index of the first +// transaction of this chunk within the whole block +func TransactionOffsetForChunk(chunks flow.ChunkList, chunkIndex uint64) (uint32, error) { + if int(chunkIndex) > len(chunks)-1 { + return 0, fmt.Errorf("chunk list out of bounds, len %d asked for chunk %d", len(chunks), chunkIndex) + } + var offset uint32 = 0 + for i := 0; i < int(chunkIndex); i++ { + offset += uint32(chunks[i].NumberOfTransactions) + } + return offset, nil +} + +// IsSystemChunk returns true if `chunkIndex` points to a system chunk in `result`. +// Otherwise, it returns false. +// In the current version, a chunk is a system chunk if it is the last chunk of the +// execution result. 
+func IsSystemChunk(chunkIndex uint64, result *flow.ExecutionResult) bool { + return chunkIndex == uint64(len(result.Chunks)-1) +} From a240e54ab0dd792ec0df1a8d21bbfdbb119dbd6f Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 2 Dec 2024 08:44:09 -0800 Subject: [PATCH 36/64] add review comments --- cmd/scaffold.go | 32 +++-------------- cmd/util/cmd/verify_execution_result/cmd.go | 2 +- engine/verification/verifier/verifiers.go | 37 ++----------------- fvm/initialize/options.go | 40 +++++++++++++++++++++ 4 files changed, 47 insertions(+), 64 deletions(-) create mode 100644 fvm/initialize/options.go diff --git a/cmd/scaffold.go b/cmd/scaffold.go index a3a95cfff15..be175a0fd12 100644 --- a/cmd/scaffold.go +++ b/cmd/scaffold.go @@ -33,8 +33,7 @@ import ( "github.com/onflow/flow-go/cmd/build" "github.com/onflow/flow-go/config" "github.com/onflow/flow-go/consensus/hotstuff/persister" - "github.com/onflow/flow-go/fvm" - "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/initialize" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" @@ -1522,32 +1521,9 @@ func (fnb *FlowNodeBuilder) initLocal() error { } func (fnb *FlowNodeBuilder) initFvmOptions() { - blockFinder := environment.NewBlockFinder(fnb.Storage.Headers) - vmOpts := []fvm.Option{ - fvm.WithChain(fnb.RootChainID.Chain()), - fvm.WithBlocks(blockFinder), - fvm.WithAccountStorageLimit(true), - } - switch fnb.RootChainID { - case flow.Testnet, - flow.Sandboxnet, - flow.Previewnet, - flow.Mainnet: - vmOpts = append(vmOpts, - fvm.WithTransactionFeesEnabled(true), - ) - } - switch fnb.RootChainID { - case flow.Testnet, - flow.Sandboxnet, - flow.Previewnet, - flow.Localnet, - flow.Benchnet: - vmOpts = append(vmOpts, - fvm.WithContractDeploymentRestricted(false), - ) - } - fnb.FvmOptions = vmOpts + fnb.FvmOptions = initialize.InitFvmOptions( + fnb.RootChainID, fnb.Storage.Headers, + ) } // handleModules 
initializes the given module. diff --git a/cmd/util/cmd/verify_execution_result/cmd.go b/cmd/util/cmd/verify_execution_result/cmd.go index 6aa7e135483..4fbbdc58e21 100644 --- a/cmd/util/cmd/verify_execution_result/cmd.go +++ b/cmd/util/cmd/verify_execution_result/cmd.go @@ -44,7 +44,7 @@ func init() { "last k sealed blocks to verify") Cmd.Flags().StringVar(&flagFromTo, "from_to", "", - "the height range to verify blocks, i.e, 1-1000, 1000-2000, 2000-3000, etc.") + "the height range to verify blocks (inclusive), i.e, 1-1000, 1000-2000, 2000-3000, etc.") } func run(*cobra.Command, []string) { diff --git a/engine/verification/verifier/verifiers.go b/engine/verification/verifier/verifiers.go index 1bc1d11652a..05fba45e245 100644 --- a/engine/verification/verifier/verifiers.go +++ b/engine/verification/verifier/verifiers.go @@ -10,7 +10,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/engine/execution/computation" "github.com/onflow/flow-go/fvm" - "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/initialize" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/model/verification/convert" "github.com/onflow/flow-go/module" @@ -124,10 +124,6 @@ func verifyHeight( blockID := header.ID() - if err != nil { - return fmt.Errorf("could not get block ID by height %d: %w", height, err) - } - result, err := results.ByBlockID(blockID) if err != nil { return fmt.Errorf("could not get execution result by block ID %s: %w", blockID, err) @@ -160,7 +156,7 @@ func makeVerifier( ) module.ChunkVerifier { vm := fvm.NewVirtualMachine() - fvmOptions := initFvmOptions(chainID, headers) + fvmOptions := initialize.InitFvmOptions(chainID, headers) fvmOptions = append( []fvm.Option{fvm.WithLogger(logger)}, fvmOptions..., @@ -173,32 +169,3 @@ func makeVerifier( chunkVerifier := chunks.NewChunkVerifier(vm, vmCtx, logger) return chunkVerifier } - -func initFvmOptions(chainID flow.ChainID, headers storage.Headers) 
[]fvm.Option { - blockFinder := environment.NewBlockFinder(headers) - vmOpts := []fvm.Option{ - fvm.WithChain(chainID.Chain()), - fvm.WithBlocks(blockFinder), - fvm.WithAccountStorageLimit(true), - } - switch chainID { - case flow.Testnet, - flow.Sandboxnet, - flow.Previewnet, - flow.Mainnet: - vmOpts = append(vmOpts, - fvm.WithTransactionFeesEnabled(true), - ) - } - switch chainID { - case flow.Testnet, - flow.Sandboxnet, - flow.Previewnet, - flow.Localnet, - flow.Benchnet: - vmOpts = append(vmOpts, - fvm.WithContractDeploymentRestricted(false), - ) - } - return vmOpts -} diff --git a/fvm/initialize/options.go b/fvm/initialize/options.go new file mode 100644 index 00000000000..fcfce074601 --- /dev/null +++ b/fvm/initialize/options.go @@ -0,0 +1,40 @@ +package initialize + +import ( + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/storage" +) + +// InitFvmOptions initializes the FVM options based on the chain ID and headers. +// This function is extracted so that it can be reused in multiple places, +// and ensure that the FVM options are consistent across different components. 
+func InitFvmOptions(chainID flow.ChainID, headers storage.Headers) []fvm.Option { + blockFinder := environment.NewBlockFinder(headers) + vmOpts := []fvm.Option{ + fvm.WithChain(chainID.Chain()), + fvm.WithBlocks(blockFinder), + fvm.WithAccountStorageLimit(true), + } + switch chainID { + case flow.Testnet, + flow.Sandboxnet, + flow.Previewnet, + flow.Mainnet: + vmOpts = append(vmOpts, + fvm.WithTransactionFeesEnabled(true), + ) + } + switch chainID { + case flow.Testnet, + flow.Sandboxnet, + flow.Previewnet, + flow.Localnet, + flow.Benchnet: + vmOpts = append(vmOpts, + fvm.WithContractDeploymentRestricted(false), + ) + } + return vmOpts +} From 9e924e814453ecb0dc5dad3eac66ece65e40efbb Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 2 Dec 2024 08:48:38 -0800 Subject: [PATCH 37/64] apply review comments --- cmd/util/cmd/verify_execution_result/cmd.go | 8 ++++---- engine/verification/verifier/verifiers.go | 21 ++++++++++++++++----- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/cmd/util/cmd/verify_execution_result/cmd.go b/cmd/util/cmd/verify_execution_result/cmd.go index 4fbbdc58e21..08081c1bcd8 100644 --- a/cmd/util/cmd/verify_execution_result/cmd.go +++ b/cmd/util/cmd/verify_execution_result/cmd.go @@ -48,7 +48,7 @@ func init() { } func run(*cobra.Command, []string) { - _ = flow.ChainID(flagChain).Chain() + chainID := flow.ChainID(flagChain) if flagFromTo != "" { from, to, err := parseFromTo(flagFromTo) @@ -57,15 +57,15 @@ func run(*cobra.Command, []string) { } log.Info().Msgf("verifying range from %d to %d", from, to) - err = verifier.VerifyRange(from, to, flow.Testnet, flagDatadir, flagChunkDataPackDir) + err = verifier.VerifyRange(from, to, chainID, flagDatadir, flagChunkDataPackDir) if err != nil { - log.Fatal().Err(err).Msg("could not verify last k height") + log.Fatal().Err(err).Msg("could not verify range from %d to %d") } log.Info().Msgf("successfully verified range from %d to %d", from, to) } else { 
log.Info().Msgf("verifying last %d sealed blocks", flagLastK) - err := verifier.VerifyLastKHeight(flagLastK, flow.Testnet, flagDatadir, flagChunkDataPackDir) + err := verifier.VerifyLastKHeight(flagLastK, chainID, flagDatadir, flagChunkDataPackDir) if err != nil { log.Fatal().Err(err).Msg("could not verify last k height") } diff --git a/engine/verification/verifier/verifiers.go b/engine/verification/verifier/verifiers.go index 05fba45e245..c692de1f855 100644 --- a/engine/verification/verifier/verifiers.go +++ b/engine/verification/verifier/verifiers.go @@ -3,6 +3,7 @@ package verifier import ( "fmt" + "github.com/cockroachdb/pebble" "github.com/dgraph-io/badger/v2" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -25,11 +26,20 @@ import ( // It assumes the latest sealed block has been executed, and the chunk data packs have not been // pruned. func VerifyLastKHeight(k uint64, chainID flow.ChainID, protocolDataDir string, chunkDataPackDir string) error { - db, storages, chunkDataPacks, state, verifier, err := initStorages(chainID, protocolDataDir, chunkDataPackDir) + db, storages, cdpDB, chunkDataPacks, state, verifier, err := initStorages(chainID, protocolDataDir, chunkDataPackDir) if err != nil { return fmt.Errorf("could not init storages: %w", err) } - defer db.Close() + defer func() { + err := db.Close() + if err != nil { + log.Error().Err(err).Msg("failed to close db") + } + err = cdpDB.Close() + if err != nil { + log.Error().Err(err).Msg("failed to close chunk data pack db") + } + }() lastSealed, err := state.Sealed().Head() if err != nil { @@ -85,6 +95,7 @@ func VerifyRange( func initStorages(chainID flow.ChainID, dataDir string, chunkDataPackDir string) ( *badger.DB, *storage.All, + *pebble.DB, storage.ChunkDataPacks, protocol.State, module.ChunkVerifier, @@ -95,18 +106,18 @@ func initStorages(chainID flow.ChainID, dataDir string, chunkDataPackDir string) storages := common.InitStorages(db) state, err := common.InitProtocolState(db, storages) if err != 
nil { - return nil, nil, nil, nil, nil, fmt.Errorf("could not init protocol state: %w", err) + return nil, nil, nil, nil, nil, nil, fmt.Errorf("could not init protocol state: %w", err) } chunkDataPackDB, err := storagepebble.OpenDefaultPebbleDB(chunkDataPackDir) if err != nil { - return nil, nil, nil, nil, nil, fmt.Errorf("could not open chunk data pack DB: %w", err) + return nil, nil, nil, nil, nil, nil, fmt.Errorf("could not open chunk data pack DB: %w", err) } chunkDataPacks := storagepebble.NewChunkDataPacks(metrics.NewNoopCollector(), chunkDataPackDB, storages.Collections, 1000) verifier := makeVerifier(log.Logger, chainID, storages.Headers) - return db, storages, chunkDataPacks, state, verifier, nil + return db, storages, chunkDataPackDB, chunkDataPacks, state, verifier, nil } func verifyHeight( From f75569e4c312576995f6f2d4b2001fb2b981c372 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 2 Dec 2024 08:57:23 -0800 Subject: [PATCH 38/64] apply review comments --- cmd/util/cmd/verify_execution_result/cmd.go | 2 + engine/verification/verifier/verifiers.go | 51 ++++++++++++++------- 2 files changed, 37 insertions(+), 16 deletions(-) diff --git a/cmd/util/cmd/verify_execution_result/cmd.go b/cmd/util/cmd/verify_execution_result/cmd.go index 08081c1bcd8..5cfd907b28d 100644 --- a/cmd/util/cmd/verify_execution_result/cmd.go +++ b/cmd/util/cmd/verify_execution_result/cmd.go @@ -36,9 +36,11 @@ func init() { Cmd.Flags().StringVar(&flagDatadir, "datadir", "/var/flow/data/protocol", "directory that stores the protocol state") + _ = Cmd.MarkFlagRequired("datadir") Cmd.Flags().StringVar(&flagChunkDataPackDir, "chunk_data_pack_dir", "/var/flow/data/chunk_data_pack", "directory that stores the protocol state") + _ = Cmd.MarkFlagRequired("chunk_data_pack_dir") Cmd.Flags().Uint64Var(&flagLastK, "lastk", 1, "last k sealed blocks to verify") diff --git a/engine/verification/verifier/verifiers.go b/engine/verification/verifier/verifiers.go index 
c692de1f855..81e6a5e3cdb 100644 --- a/engine/verification/verifier/verifiers.go +++ b/engine/verification/verifier/verifiers.go @@ -3,8 +3,6 @@ package verifier import ( "fmt" - "github.com/cockroachdb/pebble" - "github.com/dgraph-io/badger/v2" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -26,18 +24,14 @@ import ( // It assumes the latest sealed block has been executed, and the chunk data packs have not been // pruned. func VerifyLastKHeight(k uint64, chainID flow.ChainID, protocolDataDir string, chunkDataPackDir string) error { - db, storages, cdpDB, chunkDataPacks, state, verifier, err := initStorages(chainID, protocolDataDir, chunkDataPackDir) + closer, storages, chunkDataPacks, state, verifier, err := initStorages(chainID, protocolDataDir, chunkDataPackDir) if err != nil { return fmt.Errorf("could not init storages: %w", err) } defer func() { - err := db.Close() - if err != nil { - log.Error().Err(err).Msg("failed to close db") - } - err = cdpDB.Close() + err := closer() if err != nil { - log.Error().Err(err).Msg("failed to close chunk data pack db") + log.Error().Err(err).Msg("failed to close storages") } }() @@ -47,6 +41,12 @@ func VerifyLastKHeight(k uint64, chainID flow.ChainID, protocolDataDir string, c } root := state.Params().SealedRoot().Height + + // preventing overflow + if k > lastSealed.Height+1 { + return fmt.Errorf("k is greater than the number of sealed blocks, k: %d, last sealed height: %d", k, lastSealed.Height) + } + from := lastSealed.Height - k + 1 // root block is not verifiable, because it's sealed already. 
@@ -58,6 +58,8 @@ func VerifyLastKHeight(k uint64, chainID flow.ChainID, protocolDataDir string, c } to := lastSealed.Height + log.Info().Msgf("verifying blocks from %d to %d", from, to) + for height := from; height <= to; height++ { log.Info().Uint64("height", height).Msg("verifying height") err := verifyHeight(height, storages.Headers, chunkDataPacks, storages.Results, state, verifier) @@ -75,11 +77,18 @@ func VerifyRange( chainID flow.ChainID, protocolDataDir string, chunkDataPackDir string, ) error { - db, storages, chunkDataPacks, state, verifier, err := initStorages(chainID, protocolDataDir, chunkDataPackDir) + closer, storages, chunkDataPacks, state, verifier, err := initStorages(chainID, protocolDataDir, chunkDataPackDir) if err != nil { return fmt.Errorf("could not init storages: %w", err) } - defer db.Close() + defer func() { + err := closer() + if err != nil { + log.Error().Err(err).Msg("failed to close storages") + } + }() + + log.Info().Msgf("verifying blocks from %d to %d", from, to) for height := from; height <= to; height++ { log.Info().Uint64("height", height).Msg("verifying height") @@ -93,9 +102,8 @@ func VerifyRange( } func initStorages(chainID flow.ChainID, dataDir string, chunkDataPackDir string) ( - *badger.DB, + func() error, *storage.All, - *pebble.DB, storage.ChunkDataPacks, protocol.State, module.ChunkVerifier, @@ -106,18 +114,29 @@ func initStorages(chainID flow.ChainID, dataDir string, chunkDataPackDir string) storages := common.InitStorages(db) state, err := common.InitProtocolState(db, storages) if err != nil { - return nil, nil, nil, nil, nil, nil, fmt.Errorf("could not init protocol state: %w", err) + return nil, nil, nil, nil, nil, fmt.Errorf("could not init protocol state: %w", err) } chunkDataPackDB, err := storagepebble.OpenDefaultPebbleDB(chunkDataPackDir) if err != nil { - return nil, nil, nil, nil, nil, nil, fmt.Errorf("could not open chunk data pack DB: %w", err) + return nil, nil, nil, nil, nil, fmt.Errorf("could not open 
chunk data pack DB: %w", err) } chunkDataPacks := storagepebble.NewChunkDataPacks(metrics.NewNoopCollector(), chunkDataPackDB, storages.Collections, 1000) verifier := makeVerifier(log.Logger, chainID, storages.Headers) - return db, storages, chunkDataPackDB, chunkDataPacks, state, verifier, nil + closer := func() error { + err := db.Close() + if err != nil { + return fmt.Errorf("failed to close protocol db: %w", err) + } + err = chunkDataPackDB.Close() + if err != nil { + return fmt.Errorf("failed to close chunk data pack db: %w", err) + } + return nil + } + return closer, storages, chunkDataPacks, state, verifier, nil } func verifyHeight( From 9d6531913bef3bd3c7f290dcf6b56db77c23b7f3 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 2 Dec 2024 09:10:43 -0800 Subject: [PATCH 39/64] check from against first verifiable height --- engine/verification/verifier/verifiers.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/engine/verification/verifier/verifiers.go b/engine/verification/verifier/verifiers.go index 81e6a5e3cdb..76546c340f4 100644 --- a/engine/verification/verifier/verifiers.go +++ b/engine/verification/verifier/verifiers.go @@ -90,6 +90,12 @@ func VerifyRange( log.Info().Msgf("verifying blocks from %d to %d", from, to) + root := state.Params().SealedRoot().Height + + if from <= root { + return fmt.Errorf("cannot verify blocks before the root block, from: %d, root: %d", from, root) + } + for height := from; height <= to; height++ { log.Info().Uint64("height", height).Msg("verifying height") err := verifyHeight(height, storages.Headers, chunkDataPacks, storages.Results, state, verifier) From 3cfb408629b11a504b2c3bdf023d0e23a8b37605 Mon Sep 17 00:00:00 2001 From: Leo Zhang Date: Mon, 2 Dec 2024 13:00:05 -0800 Subject: [PATCH 40/64] Apply suggestions from code review Co-authored-by: Faye Amacker <33205765+fxamacker@users.noreply.github.com> --- cmd/util/cmd/verify_execution_result/cmd.go | 3 ++- 
engine/verification/verifier/verifiers.go | 23 +++++++++++---------- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/cmd/util/cmd/verify_execution_result/cmd.go b/cmd/util/cmd/verify_execution_result/cmd.go index 5cfd907b28d..5db87eb9dc5 100644 --- a/cmd/util/cmd/verify_execution_result/cmd.go +++ b/cmd/util/cmd/verify_execution_result/cmd.go @@ -51,6 +51,7 @@ func init() { func run(*cobra.Command, []string) { chainID := flow.ChainID(flagChain) + _ = chainID.Chain() if flagFromTo != "" { from, to, err := parseFromTo(flagFromTo) @@ -61,7 +62,7 @@ func run(*cobra.Command, []string) { log.Info().Msgf("verifying range from %d to %d", from, to) err = verifier.VerifyRange(from, to, chainID, flagDatadir, flagChunkDataPackDir) if err != nil { - log.Fatal().Err(err).Msg("could not verify range from %d to %d") + log.Fatal().Err(err).Msgf("could not verify range from %d to %d", from, to) } log.Info().Msgf("successfully verified range from %d to %d", from, to) diff --git a/engine/verification/verifier/verifiers.go b/engine/verification/verifier/verifiers.go index 76546c340f4..d1f59cb4c59 100644 --- a/engine/verification/verifier/verifiers.go +++ b/engine/verification/verifier/verifiers.go @@ -23,15 +23,15 @@ import ( // VerifyLastKHeight verifies the last k sealed blocks by verifying all chunks in the results. // It assumes the latest sealed block has been executed, and the chunk data packs have not been // pruned. 
-func VerifyLastKHeight(k uint64, chainID flow.ChainID, protocolDataDir string, chunkDataPackDir string) error { +func VerifyLastKHeight(k uint64, chainID flow.ChainID, protocolDataDir string, chunkDataPackDir string) (err error) { closer, storages, chunkDataPacks, state, verifier, err := initStorages(chainID, protocolDataDir, chunkDataPackDir) if err != nil { return fmt.Errorf("could not init storages: %w", err) } defer func() { - err := closer() - if err != nil { - log.Error().Err(err).Msg("failed to close storages") + closerErr := closer() + if closerErr != nil { + err = errors.Join(err, closerErr) } }() @@ -132,15 +132,16 @@ func initStorages(chainID flow.ChainID, dataDir string, chunkDataPackDir string) verifier := makeVerifier(log.Logger, chainID, storages.Headers) closer := func() error { - err := db.Close() - if err != nil { - return fmt.Errorf("failed to close protocol db: %w", err) + var dbErr, chunkDataPackDBErr error + + if err := db.Close(); err != nil { + dbErr = fmt.Errorf("failed to close protocol db: %w", err) } - err = chunkDataPackDB.Close() - if err != nil { - return fmt.Errorf("failed to close chunk data pack db: %w", err) + + if err := chunkDataPackDB.Close(); err != nil { + chunkDataPackDBErr = fmt.Errorf("failed to close chunk data pack db: %w", err) } - return nil + return errors.Join(dbErr, chunkDataPackDBErr) } return closer, storages, chunkDataPacks, state, verifier, nil } From df4891d92a5de8273d2b4527cba0823b0260ca81 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Mon, 2 Dec 2024 13:01:34 -0800 Subject: [PATCH 41/64] fix lint --- engine/verification/verifier/verifiers.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/engine/verification/verifier/verifiers.go b/engine/verification/verifier/verifiers.go index d1f59cb4c59..7afbb6a4b92 100644 --- a/engine/verification/verifier/verifiers.go +++ b/engine/verification/verifier/verifiers.go @@ -1,6 +1,7 @@ package verifier import ( + "errors" 
"fmt" "github.com/rs/zerolog" @@ -76,15 +77,15 @@ func VerifyRange( from, to uint64, chainID flow.ChainID, protocolDataDir string, chunkDataPackDir string, -) error { +) (err error) { closer, storages, chunkDataPacks, state, verifier, err := initStorages(chainID, protocolDataDir, chunkDataPackDir) if err != nil { return fmt.Errorf("could not init storages: %w", err) } defer func() { - err := closer() - if err != nil { - log.Error().Err(err).Msg("failed to close storages") + closerErr := closer() + if closerErr != nil { + err = errors.Join(err, closerErr) } }() @@ -132,12 +133,12 @@ func initStorages(chainID flow.ChainID, dataDir string, chunkDataPackDir string) verifier := makeVerifier(log.Logger, chainID, storages.Headers) closer := func() error { - var dbErr, chunkDataPackDBErr error - + var dbErr, chunkDataPackDBErr error + if err := db.Close(); err != nil { dbErr = fmt.Errorf("failed to close protocol db: %w", err) } - + if err := chunkDataPackDB.Close(); err != nil { chunkDataPackDBErr = fmt.Errorf("failed to close chunk data pack db: %w", err) } From 6c49ee4850fd7c0f8487e33a3511e8ee279099b5 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 21 Nov 2024 11:42:55 -0800 Subject: [PATCH 42/64] add verify evm offchain replay util cmd --- cmd/util/cmd/root.go | 2 + .../cmd/verify-evm-offchain-replay/main.go | 87 +++++++++ .../cmd/verify-evm-offchain-replay/verify.go | 93 ++++++++++ fvm/evm/offchain/utils/collection_test.go | 48 ++--- fvm/evm/offchain/utils/verify.go | 168 ++++++++++++++++++ 5 files changed, 374 insertions(+), 24 deletions(-) create mode 100644 cmd/util/cmd/verify-evm-offchain-replay/main.go create mode 100644 cmd/util/cmd/verify-evm-offchain-replay/verify.go create mode 100644 fvm/evm/offchain/utils/verify.go diff --git a/cmd/util/cmd/root.go b/cmd/util/cmd/root.go index 902ba16cb46..b152c28f3e5 100644 --- a/cmd/util/cmd/root.go +++ b/cmd/util/cmd/root.go @@ -41,6 +41,7 @@ import ( "github.com/onflow/flow-go/cmd/util/cmd/snapshot" 
system_addresses "github.com/onflow/flow-go/cmd/util/cmd/system-addresses" truncate_database "github.com/onflow/flow-go/cmd/util/cmd/truncate-database" + verify_evm_offchain_replay "github.com/onflow/flow-go/cmd/util/cmd/verify-evm-offchain-replay" verify_execution_result "github.com/onflow/flow-go/cmd/util/cmd/verify_execution_result" "github.com/onflow/flow-go/cmd/util/cmd/version" "github.com/onflow/flow-go/module/profiler" @@ -128,6 +129,7 @@ func addCommands() { rootCmd.AddCommand(generate_authorization_fixes.Cmd) rootCmd.AddCommand(evm_state_exporter.Cmd) rootCmd.AddCommand(verify_execution_result.Cmd) + rootCmd.AddCommand(verify_evm_offchain_replay.Cmd) } func initConfig() { diff --git a/cmd/util/cmd/verify-evm-offchain-replay/main.go b/cmd/util/cmd/verify-evm-offchain-replay/main.go new file mode 100644 index 00000000000..9f56587306e --- /dev/null +++ b/cmd/util/cmd/verify-evm-offchain-replay/main.go @@ -0,0 +1,87 @@ +package verify + +import ( + "fmt" + "strconv" + "strings" + + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" + + "github.com/onflow/flow-go/model/flow" +) + +var ( + flagDatadir string + flagExecutionDataDir string + flagEVMStateGobDir string + flagChain string + flagFromTo string +) + +// usage example +// +// ./util verify-evm-offchain-replay --chain flow-testnet --from-to 211176671-211177000 +// --datadir /var/flow/data/protocol --execution_data_dir /var/flow/data/execution_data +var Cmd = &cobra.Command{ + Use: "verify-evm-offchain-replay", + Short: "verify evm offchain replay with execution data", + Run: run, +} + +func init() { + Cmd.Flags().StringVar(&flagChain, "chain", "", "Chain name") + _ = Cmd.MarkFlagRequired("chain") + + Cmd.Flags().StringVar(&flagDatadir, "datadir", "/var/flow/data/protocol", + "directory that stores the protocol state") + + Cmd.Flags().StringVar(&flagExecutionDataDir, "execution_data_dir", "/var/flow/data/execution_data", + "directory that stores the execution state") + + 
Cmd.Flags().StringVar(&flagFromTo, "from_to", "", + "the flow height range to verify blocks, i.e, 1-1000, 1000-2000, 2000-3000, etc.") + + Cmd.Flags().StringVar(&flagEVMStateGobDir, "evm_state_gob_dir", "/var/flow/data/evm_state_gob", + "directory that stores the evm state gob files as checkpoint") +} + +func run(*cobra.Command, []string) { + _ = flow.ChainID(flagChain).Chain() + + from, to, err := parseFromTo(flagFromTo) + if err != nil { + log.Fatal().Err(err).Msg("could not parse from_to") + } + + log.Info().Msgf("verifying range from %d to %d", from, to) + err = Verify(from, to, flow.Testnet, flagDatadir, flagExecutionDataDir) + if err != nil { + log.Fatal().Err(err).Msg("could not verify last k height") + } + log.Info().Msgf("successfully verified range from %d to %d", from, to) + +} + +func parseFromTo(fromTo string) (from, to uint64, err error) { + parts := strings.Split(fromTo, "-") + if len(parts) != 2 { + return 0, 0, fmt.Errorf("invalid format: expected 'from-to', got '%s'", fromTo) + } + + from, err = strconv.ParseUint(strings.TrimSpace(parts[0]), 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("invalid 'from' value: %w", err) + } + + to, err = strconv.ParseUint(strings.TrimSpace(parts[1]), 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("invalid 'to' value: %w", err) + } + + if from > to { + return 0, 0, fmt.Errorf("'from' value (%d) must be less than or equal to 'to' value (%d)", from, to) + } + + return from, to, nil +} diff --git a/cmd/util/cmd/verify-evm-offchain-replay/verify.go b/cmd/util/cmd/verify-evm-offchain-replay/verify.go new file mode 100644 index 00000000000..1a907be669a --- /dev/null +++ b/cmd/util/cmd/verify-evm-offchain-replay/verify.go @@ -0,0 +1,93 @@ +package verify + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/dgraph-io/badger/v2" + badgerds "github.com/ipfs/go-ds-badger2" + + "github.com/onflow/flow-go/cmd/util/cmd/common" + "github.com/onflow/flow-go/fvm/environment" + 
"github.com/onflow/flow-go/fvm/evm" + "github.com/onflow/flow-go/fvm/evm/offchain/utils" + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/blobs" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/storage" +) + +func Verify(from uint64, to uint64, chainID flow.ChainID, dataDir string, executionDataDir string, evmStateGobDir string) error { + db, storages, executionDataStore, dsStore, err := initStorages(chainID, dataDir, executionDataDir) + if err != nil { + return fmt.Errorf("could not initialize storages: %w", err) + } + + defer db.Close() + defer dsStore.Close() + + var store *testutils.TestValueStore + isRoot := isEVMRootHeight(chainID, from) + if isRoot { + store = testutils.GetSimpleValueStore() + as := environment.NewAccountStatus() + rootAddr := evm.StorageAccountAddress(chainID) + err = store.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), as.ToBytes()) + if err != nil { + return err + } + } else { + // TODO: recover from gob + } + + return utils.OffchainReplayBackwardCompatibilityTest( + chainID, + from, + to, + storages.Headers, + storages.Results, + executionDataStore, + store, + ) +} + +func initStorages(chainID flow.ChainID, dataDir string, executionDataDir string) ( + *badger.DB, + *storage.All, + execution_data.ExecutionDataGetter, + io.Closer, + error, +) { + db := common.InitStorage(dataDir) + + storages := common.InitStorages(db) + + datastoreDir := filepath.Join(executionDataDir, "blobstore") + err := os.MkdirAll(datastoreDir, 0700) + if err != nil { + return nil, nil, nil, nil, err + } + dsOpts := &badgerds.DefaultOptions + ds, err := badgerds.NewDatastore(datastoreDir, dsOpts) + if err != nil { + return nil, nil, nil, nil, err + } + + executionDataBlobstore := blobs.NewBlobstore(ds) + executionDataStore := execution_data.NewExecutionDataStore(executionDataBlobstore, execution_data.DefaultSerializer) + + return db, 
storages, executionDataStore, ds, nil +} + +// EVM Root Height is the first block that has EVM Block Event where the EVM block height is 1 +func isEVMRootHeight(chainID flow.ChainID, flowHeight uint64) bool { + if chainID == flow.Testnet { + return flowHeight == 211176671 + } else if chainID == flow.Mainnet { + return flowHeight == 85981136 + } + return flowHeight == 1 +} diff --git a/fvm/evm/offchain/utils/collection_test.go b/fvm/evm/offchain/utils/collection_test.go index a18ce4a81ac..827bb918601 100644 --- a/fvm/evm/offchain/utils/collection_test.go +++ b/fvm/evm/offchain/utils/collection_test.go @@ -41,7 +41,7 @@ func TestTestnetBackwardCompatibility(t *testing.T) { // > ~/Downloads/events_devnet51_1.jsonl // ... // - // 2) comment the above t.Skip, and update the events file paths and checkpoint dir + // 2) comment the above t.Skip, and update the events file paths and evmStateGob dir // to run the tests BackwardCompatibleSinceEVMGenesisBlock( t, flow.Testnet, []string{ @@ -65,47 +65,47 @@ func TestTestnetBackwardCompatibility(t *testing.T) { // --start 211176670 --end 211176770 --network testnet --host access-001.devnet51.nodes.onflow.org:9000 // // During the replay process, it will generate `values_.gob` and -// `allocators_.gob` checkpoint files for each height. If these checkpoint files exist, +// `allocators_.gob` checkpoint files for each height. If these checkpoint gob files exist, // the corresponding event JSON files will be skipped to optimize replay. 
func BackwardCompatibleSinceEVMGenesisBlock( t *testing.T, chainID flow.ChainID, eventsFilePaths []string, // ordered EVM events in JSONL format - checkpointDir string, - checkpointEndHeight uint64, // EVM height of an EVM state that a checkpoint was created for + evmStateGob string, + evmStateEndHeight uint64, // EVM height of an EVM state that a evmStateGob file was created for ) { // ensure that event files is not an empty array require.True(t, len(eventsFilePaths) > 0) - log.Info().Msgf("replaying EVM events from %v to %v, with checkpoints in %s, and checkpointEndHeight: %v", + log.Info().Msgf("replaying EVM events from %v to %v, with evmStateGob file in %s, and evmStateEndHeight: %v", eventsFilePaths[0], eventsFilePaths[len(eventsFilePaths)-1], - checkpointDir, checkpointEndHeight) + evmStateGob, evmStateEndHeight) - store, checkpointEndHeightOrZero := initStorageWithCheckpoints(t, chainID, checkpointDir, checkpointEndHeight) + store, evmStateEndHeightOrZero := initStorageWithEVMStateGob(t, chainID, evmStateGob, evmStateEndHeight) // the events to replay - nextHeight := checkpointEndHeightOrZero + 1 + nextHeight := evmStateEndHeightOrZero + 1 // replay each event files for _, eventsFilePath := range eventsFilePaths { log.Info().Msgf("replaying events from %v, nextHeight: %v", eventsFilePath, nextHeight) - checkpointEndHeight := replayEvents(t, chainID, store, eventsFilePath, checkpointDir, nextHeight) - nextHeight = checkpointEndHeight + 1 + evmStateEndHeight := replayEvents(t, chainID, store, eventsFilePath, evmStateGob, nextHeight) + nextHeight = evmStateEndHeight + 1 } log.Info(). Msgf("succhessfully replayed all events and state changes are consistent with onchain state change. 
nextHeight: %v", nextHeight) } -func initStorageWithCheckpoints(t *testing.T, chainID flow.ChainID, checkpointDir string, checkpointEndHeight uint64) ( +func initStorageWithEVMStateGob(t *testing.T, chainID flow.ChainID, evmStateGob string, evmStateEndHeight uint64) ( *TestValueStore, uint64, ) { rootAddr := evm.StorageAccountAddress(chainID) - // if there is no checkpoint, create a empty store and initialize the account status, + // if there is no evmStateGob file, create a empty store and initialize the account status, // return 0 as the genesis height - if checkpointEndHeight == 0 { + if evmStateEndHeight == 0 { store := GetSimpleValueStore() as := environment.NewAccountStatus() require.NoError(t, store.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), as.ToBytes())) @@ -113,19 +113,19 @@ func initStorageWithCheckpoints(t *testing.T, chainID flow.ChainID, checkpointDi return store, 0 } - valueFileName, allocatorFileName := checkpointFileNamesByEndHeight(checkpointDir, checkpointEndHeight) + valueFileName, allocatorFileName := evmStateGobFileNamesByEndHeight(evmStateGob, evmStateEndHeight) values, err := deserialize(valueFileName) require.NoError(t, err) allocators, err := deserializeAllocator(allocatorFileName) require.NoError(t, err) store := GetSimpleValueStorePopulated(values, allocators) - return store, checkpointEndHeight + return store, evmStateEndHeight } func replayEvents( t *testing.T, chainID flow.ChainID, - store *TestValueStore, eventsFilePath string, checkpointDir string, initialNextHeight uint64) uint64 { + store *TestValueStore, eventsFilePath string, evmStateGob string, initialNextHeight uint64) uint64 { rootAddr := evm.StorageAccountAddress(chainID) @@ -172,22 +172,22 @@ func replayEvents( return nil }) - checkpointEndHeight := nextHeight - 1 + evmStateEndHeight := nextHeight - 1 - log.Info().Msgf("finished replaying events from %v to %v, creating checkpoint", initialNextHeight, checkpointEndHeight) - valuesFile, allocatorsFile := 
dumpCheckpoint(t, store, checkpointDir, checkpointEndHeight) - log.Info().Msgf("checkpoint created: %v, %v", valuesFile, allocatorsFile) + log.Info().Msgf("finished replaying events from %v to %v, creating evm state gobs", initialNextHeight, evmStateEndHeight) + valuesFile, allocatorsFile := dumpEVMStateToGobFiles(t, store, evmStateGob, evmStateEndHeight) + log.Info().Msgf("evm state gobs created: %v, %v", valuesFile, allocatorsFile) - return checkpointEndHeight + return evmStateEndHeight } -func checkpointFileNamesByEndHeight(dir string, endHeight uint64) (string, string) { +func evmStateGobFileNamesByEndHeight(dir string, endHeight uint64) (string, string) { return filepath.Join(dir, fmt.Sprintf("values_%d.gob", endHeight)), filepath.Join(dir, fmt.Sprintf("allocators_%d.gob", endHeight)) } -func dumpCheckpoint(t *testing.T, store *TestValueStore, dir string, checkpointEndHeight uint64) (string, string) { - valuesFileName, allocatorsFileName := checkpointFileNamesByEndHeight(dir, checkpointEndHeight) +func dumpEVMStateToGobFiles(t *testing.T, store *TestValueStore, dir string, evmStateEndHeight uint64) (string, string) { + valuesFileName, allocatorsFileName := evmStateGobFileNamesByEndHeight(dir, evmStateEndHeight) values, allocators := store.Dump() require.NoError(t, serialize(valuesFileName, values)) diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go new file mode 100644 index 00000000000..bf3ed506adf --- /dev/null +++ b/fvm/evm/offchain/utils/verify.go @@ -0,0 +1,168 @@ +package utils + +import ( + "context" + "errors" + "strings" + + "github.com/rs/zerolog/log" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/encoding/ccf" + + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm" + "github.com/onflow/flow-go/fvm/evm/events" + "github.com/onflow/flow-go/fvm/evm/offchain/blocks" + evmStorage "github.com/onflow/flow-go/fvm/evm/offchain/storage" + 
"github.com/onflow/flow-go/fvm/evm/offchain/sync" + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/ledger" + "github.com/onflow/flow-go/ledger/common/convert" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module/executiondatasync/execution_data" + "github.com/onflow/flow-go/storage" +) + +func OffchainReplayBackwardCompatibilityTest( + chainID flow.ChainID, + flowStartHeight uint64, + flowEndHeight uint64, + headers storage.Headers, + results storage.ExecutionResults, + executionDataStore execution_data.ExecutionDataGetter, + store environment.ValueStore, +) error { + rootAddr := evm.StorageAccountAddress(chainID) + rootAddrStr := string(rootAddr.Bytes()) + + bpStorage := evmStorage.NewEphemeralStorage(store) + bp, err := blocks.NewBasicProvider(chainID, bpStorage, rootAddr) + if err != nil { + return err + } + + for height := flowStartHeight; height <= flowEndHeight; height++ { + blockID, err := headers.BlockIDByHeight(height) + if err != nil { + return err + } + + result, err := results.ByBlockID(blockID) + if err != nil { + return err + } + + executionData, err := executionDataStore.Get(context.Background(), result.ExecutionDataID) + if err != nil { + return err + } + + events := flow.EventsList{} + payloads := []*ledger.Payload{} + + for _, chunkData := range executionData.ChunkExecutionDatas { + events = append(events, chunkData.Events...) + payloads = append(payloads, chunkData.TrieUpdate.Payloads...) 
+ } + + updates := make(map[flow.RegisterID]flow.RegisterValue, len(payloads)) + for i := len(payloads) - 1; i >= 0; i-- { + regID, regVal, err := convert.PayloadToRegister(payloads[i]) + if err != nil { + return err + } + + // skip non-evm-account registers + if regID.Owner != rootAddrStr { + continue + } + + // when iterating backwards, duplicated register updates are stale updates, + // so skipping them + if _, ok := updates[regID]; !ok { + updates[regID] = regVal + } + } + + // parse events + evmBlockEvent, evmTxEvents, err := parseEVMEvents(events) + if err != nil { + return err + } + + err = bp.OnBlockReceived(evmBlockEvent) + if err != nil { + return err + } + + sp := testutils.NewTestStorageProvider(store, evmBlockEvent.Height) + cr := sync.NewReplayer(chainID, rootAddr, sp, bp, log.Logger, nil, true) + res, err := cr.ReplayBlock(evmTxEvents, evmBlockEvent) + if err != nil { + return err + } + + // commit all changes + for k, v := range res.StorageRegisterUpdates() { + err = store.SetValue([]byte(k.Owner), []byte(k.Key), v) + if err != nil { + return err + } + } + + err = bp.OnBlockExecuted(evmBlockEvent.Height, res) + if err != nil { + return err + } + + // verify and commit all block hash list changes + for k, v := range bpStorage.StorageRegisterUpdates() { + // verify the block hash list changes are included in the trie update + + err = store.SetValue([]byte(k.Owner), []byte(k.Key), v) + if err != nil { + return err + } + } + } + + return nil +} + +func parseEVMEvents(evts flow.EventsList) (*events.BlockEventPayload, []events.TransactionEventPayload, error) { + var blockEvent *events.BlockEventPayload + txEvents := make([]events.TransactionEventPayload, 0) + + for _, e := range evts { + evtType := string(e.Type) + if strings.Contains(evtType, "BlockExecuted") { + if blockEvent != nil { + return nil, nil, errors.New("multiple block events in a single block") + } + + ev, err := ccf.Decode(nil, e.Payload) + if err != nil { + return nil, nil, err + } + + 
blockEventPayload, err := events.DecodeBlockEventPayload(ev.(cadence.Event)) + if err != nil { + return nil, nil, err + } + blockEvent = blockEventPayload + } else if strings.Contains(evtType, "TransactionExecuted") { + ev, err := ccf.Decode(nil, e.Payload) + if err != nil { + return nil, nil, err + } + txEv, err := events.DecodeTransactionEventPayload(ev.(cadence.Event)) + if err != nil { + return nil, nil, err + } + txEvents = append(txEvents, *txEv) + } + } + + return blockEvent, txEvents, nil +} From ed67f542935079965e9a160b958b13e53ec6cb08 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 21 Nov 2024 14:49:32 -0800 Subject: [PATCH 43/64] refactor serialization with gob --- .../cmd/verify-evm-offchain-replay/main.go | 2 +- .../cmd/verify-evm-offchain-replay/verify.go | 38 +++++++- fvm/evm/offchain/utils/collection_test.go | 91 +------------------ fvm/evm/testutils/gob.go | 88 ++++++++++++++++++ 4 files changed, 129 insertions(+), 90 deletions(-) create mode 100644 fvm/evm/testutils/gob.go diff --git a/cmd/util/cmd/verify-evm-offchain-replay/main.go b/cmd/util/cmd/verify-evm-offchain-replay/main.go index 9f56587306e..76581e8a471 100644 --- a/cmd/util/cmd/verify-evm-offchain-replay/main.go +++ b/cmd/util/cmd/verify-evm-offchain-replay/main.go @@ -55,7 +55,7 @@ func run(*cobra.Command, []string) { } log.Info().Msgf("verifying range from %d to %d", from, to) - err = Verify(from, to, flow.Testnet, flagDatadir, flagExecutionDataDir) + err = Verify(from, to, flow.Testnet, flagDatadir, flagExecutionDataDir, flagEVMStateGobDir) if err != nil { log.Fatal().Err(err).Msg("could not verify last k height") } diff --git a/cmd/util/cmd/verify-evm-offchain-replay/verify.go b/cmd/util/cmd/verify-evm-offchain-replay/verify.go index 1a907be669a..bbdd9911c21 100644 --- a/cmd/util/cmd/verify-evm-offchain-replay/verify.go +++ b/cmd/util/cmd/verify-evm-offchain-replay/verify.go @@ -40,10 +40,21 @@ func Verify(from uint64, to uint64, chainID flow.ChainID, dataDir 
string, execut return err } } else { - // TODO: recover from gob + prev := from - 1 + valueFileName, allocatorFileName := evmStateGobFileNamesByEndHeight(evmStateGobDir, prev) + values, err := testutils.DeserializeState(valueFileName) + if err != nil { + return err + } + + allocators, err := testutils.DeserializeAllocator(allocatorFileName) + if err != nil { + return err + } + store = testutils.GetSimpleValueStorePopulated(values, allocators) } - return utils.OffchainReplayBackwardCompatibilityTest( + err = utils.OffchainReplayBackwardCompatibilityTest( chainID, from, to, @@ -52,6 +63,23 @@ func Verify(from uint64, to uint64, chainID flow.ChainID, dataDir string, execut executionDataStore, store, ) + + if err != nil { + return err + } + + valueFileName, allocatorFileName := evmStateGobFileNamesByEndHeight(evmStateGobDir, to) + values, allocators := store.Dump() + err = testutils.SerializeState(valueFileName, values) + if err != nil { + return err + } + err = testutils.SerializeAllocator(allocatorFileName, allocators) + if err != nil { + return err + } + + return nil } func initStorages(chainID flow.ChainID, dataDir string, executionDataDir string) ( @@ -91,3 +119,9 @@ func isEVMRootHeight(chainID flow.ChainID, flowHeight uint64) bool { } return flowHeight == 1 } + +func evmStateGobFileNamesByEndHeight(evmStateGobDir string, endHeight uint64) (string, string) { + valueFileName := filepath.Join(evmStateGobDir, fmt.Sprintf("values-%d.gob", endHeight)) + allocatorFileName := filepath.Join(evmStateGobDir, fmt.Sprintf("allocators-%d.gob", endHeight)) + return valueFileName, allocatorFileName +} diff --git a/fvm/evm/offchain/utils/collection_test.go b/fvm/evm/offchain/utils/collection_test.go index 827bb918601..e5b3059661b 100644 --- a/fvm/evm/offchain/utils/collection_test.go +++ b/fvm/evm/offchain/utils/collection_test.go @@ -2,7 +2,6 @@ package utils_test import ( "bufio" - "encoding/gob" "encoding/hex" "encoding/json" "fmt" @@ -114,9 +113,9 @@ func 
initStorageWithEVMStateGob(t *testing.T, chainID flow.ChainID, evmStateGob } valueFileName, allocatorFileName := evmStateGobFileNamesByEndHeight(evmStateGob, evmStateEndHeight) - values, err := deserialize(valueFileName) + values, err := DeserializeState(valueFileName) require.NoError(t, err) - allocators, err := deserializeAllocator(allocatorFileName) + allocators, err := DeserializeAllocator(allocatorFileName) require.NoError(t, err) store := GetSimpleValueStorePopulated(values, allocators) return store, evmStateEndHeight @@ -190,8 +189,8 @@ func dumpEVMStateToGobFiles(t *testing.T, store *TestValueStore, dir string, evm valuesFileName, allocatorsFileName := evmStateGobFileNamesByEndHeight(dir, evmStateEndHeight) values, allocators := store.Dump() - require.NoError(t, serialize(valuesFileName, values)) - require.NoError(t, serializeAllocator(allocatorsFileName, allocators)) + require.NoError(t, SerializeState(valuesFileName, values)) + require.NoError(t, SerializeAllocator(allocatorsFileName, allocators)) return valuesFileName, allocatorsFileName } @@ -244,85 +243,3 @@ func scanEventFilesAndRun( t.Fatal(err) } } - -// Serialize function: saves map data to a file -func serialize(filename string, data map[string][]byte) error { - // Create a file to save data - file, err := os.Create(filename) - if err != nil { - return err - } - defer file.Close() - - // Use gob to encode data - encoder := gob.NewEncoder(file) - err = encoder.Encode(data) - if err != nil { - return err - } - - return nil -} - -// Deserialize function: reads map data from a file -func deserialize(filename string) (map[string][]byte, error) { - // Open the file for reading - file, err := os.Open(filename) - if err != nil { - return nil, err - } - defer file.Close() - - // Prepare the map to store decoded data - var data map[string][]byte - - // Use gob to decode data - decoder := gob.NewDecoder(file) - err = decoder.Decode(&data) - if err != nil { - return nil, err - } - - return data, nil -} - -// 
Serialize function: saves map data to a file -func serializeAllocator(filename string, data map[string]uint64) error { - // Create a file to save data - file, err := os.Create(filename) - if err != nil { - return err - } - defer file.Close() - - // Use gob to encode data - encoder := gob.NewEncoder(file) - err = encoder.Encode(data) - if err != nil { - return err - } - - return nil -} - -// Deserialize function: reads map data from a file -func deserializeAllocator(filename string) (map[string]uint64, error) { - // Open the file for reading - file, err := os.Open(filename) - if err != nil { - return nil, err - } - defer file.Close() - - // Prepare the map to store decoded data - var data map[string]uint64 - - // Use gob to decode data - decoder := gob.NewDecoder(file) - err = decoder.Decode(&data) - if err != nil { - return nil, err - } - - return data, nil -} diff --git a/fvm/evm/testutils/gob.go b/fvm/evm/testutils/gob.go new file mode 100644 index 00000000000..1c944a1e9e3 --- /dev/null +++ b/fvm/evm/testutils/gob.go @@ -0,0 +1,88 @@ +package testutils + +import ( + "encoding/gob" + "os" +) + +// Serialize function: saves map data to a file +func SerializeState(filename string, data map[string][]byte) error { + // Create a file to save data + file, err := os.Create(filename) + if err != nil { + return err + } + defer file.Close() + + // Use gob to encode data + encoder := gob.NewEncoder(file) + err = encoder.Encode(data) + if err != nil { + return err + } + + return nil +} + +// Deserialize function: reads map data from a file +func DeserializeState(filename string) (map[string][]byte, error) { + // Open the file for reading + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + // Prepare the map to store decoded data + var data map[string][]byte + + // Use gob to decode data + decoder := gob.NewDecoder(file) + err = decoder.Decode(&data) + if err != nil { + return nil, err + } + + return data, nil +} + +// Serialize 
function: saves map data to a file +func SerializeAllocator(filename string, data map[string]uint64) error { + // Create a file to save data + file, err := os.Create(filename) + if err != nil { + return err + } + defer file.Close() + + // Use gob to encode data + encoder := gob.NewEncoder(file) + err = encoder.Encode(data) + if err != nil { + return err + } + + return nil +} + +// Deserialize function: reads map data from a file +func DeserializeAllocator(filename string) (map[string]uint64, error) { + // Open the file for reading + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + // Prepare the map to store decoded data + var data map[string]uint64 + + // Use gob to decode data + decoder := gob.NewDecoder(file) + err = decoder.Decode(&data) + if err != nil { + return nil, err + } + + return data, nil +} From 21511fbecfb8e30873566703fa8665922307d4b0 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Thu, 21 Nov 2024 15:20:56 -0800 Subject: [PATCH 44/64] add logging --- .../cmd/verify-evm-offchain-replay/main.go | 2 +- .../cmd/verify-evm-offchain-replay/verify.go | 23 ++++++++++++++++--- fvm/evm/offchain/utils/verify.go | 8 ++++--- 3 files changed, 26 insertions(+), 7 deletions(-) diff --git a/cmd/util/cmd/verify-evm-offchain-replay/main.go b/cmd/util/cmd/verify-evm-offchain-replay/main.go index 76581e8a471..2459a35cd59 100644 --- a/cmd/util/cmd/verify-evm-offchain-replay/main.go +++ b/cmd/util/cmd/verify-evm-offchain-replay/main.go @@ -55,7 +55,7 @@ func run(*cobra.Command, []string) { } log.Info().Msgf("verifying range from %d to %d", from, to) - err = Verify(from, to, flow.Testnet, flagDatadir, flagExecutionDataDir, flagEVMStateGobDir) + err = Verify(log.Logger, from, to, flow.Testnet, flagDatadir, flagExecutionDataDir, flagEVMStateGobDir) if err != nil { log.Fatal().Err(err).Msg("could not verify last k height") } diff --git a/cmd/util/cmd/verify-evm-offchain-replay/verify.go 
b/cmd/util/cmd/verify-evm-offchain-replay/verify.go index bbdd9911c21..f75cd8278b6 100644 --- a/cmd/util/cmd/verify-evm-offchain-replay/verify.go +++ b/cmd/util/cmd/verify-evm-offchain-replay/verify.go @@ -8,6 +8,7 @@ import ( "github.com/dgraph-io/badger/v2" badgerds "github.com/ipfs/go-ds-badger2" + "github.com/rs/zerolog" "github.com/onflow/flow-go/cmd/util/cmd/common" "github.com/onflow/flow-go/fvm/environment" @@ -20,7 +21,16 @@ import ( "github.com/onflow/flow-go/storage" ) -func Verify(from uint64, to uint64, chainID flow.ChainID, dataDir string, executionDataDir string, evmStateGobDir string) error { +// Verify verifies the offchain replay of EVM blocks from the given height range +// and updates the EVM state gob files with the latest state +func Verify(log zerolog.Logger, from uint64, to uint64, chainID flow.ChainID, dataDir string, executionDataDir string, evmStateGobDir string) error { + log.Info(). + Str("chain", chainID.String()). + Str("dataDir", dataDir). + Str("executionDataDir", executionDataDir). + Str("evmStateGobDir", evmStateGobDir). 
+ Msgf("verifying range from %d to %d", from, to) + db, storages, executionDataStore, dsStore, err := initStorages(chainID, dataDir, executionDataDir) if err != nil { return fmt.Errorf("could not initialize storages: %w", err) @@ -32,6 +42,8 @@ func Verify(from uint64, to uint64, chainID flow.ChainID, dataDir string, execut var store *testutils.TestValueStore isRoot := isEVMRootHeight(chainID, from) if isRoot { + log.Info().Msgf("initializing EVM state for root height %d", from) + store = testutils.GetSimpleValueStore() as := environment.NewAccountStatus() rootAddr := evm.StorageAccountAddress(chainID) @@ -41,20 +53,23 @@ func Verify(from uint64, to uint64, chainID flow.ChainID, dataDir string, execut } } else { prev := from - 1 + log.Info().Msgf("loading EVM state from previous height %d", prev) + valueFileName, allocatorFileName := evmStateGobFileNamesByEndHeight(evmStateGobDir, prev) values, err := testutils.DeserializeState(valueFileName) if err != nil { - return err + return fmt.Errorf("could not deserialize state %v: %w", valueFileName, err) } allocators, err := testutils.DeserializeAllocator(allocatorFileName) if err != nil { - return err + return fmt.Errorf("could not deserialize allocator %v: %w", allocatorFileName, err) } store = testutils.GetSimpleValueStorePopulated(values, allocators) } err = utils.OffchainReplayBackwardCompatibilityTest( + log, chainID, from, to, @@ -79,6 +94,8 @@ func Verify(from uint64, to uint64, chainID flow.ChainID, dataDir string, execut return err } + log.Info().Msgf("saved EVM state to %s and %s", valueFileName, allocatorFileName) + return nil } diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go index bf3ed506adf..ae99e827acb 100644 --- a/fvm/evm/offchain/utils/verify.go +++ b/fvm/evm/offchain/utils/verify.go @@ -5,10 +5,9 @@ import ( "errors" "strings" - "github.com/rs/zerolog/log" - "github.com/onflow/cadence" "github.com/onflow/cadence/encoding/ccf" + "github.com/rs/zerolog" 
"github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/evm" @@ -25,6 +24,7 @@ import ( ) func OffchainReplayBackwardCompatibilityTest( + log zerolog.Logger, chainID flow.ChainID, flowStartHeight uint64, flowEndHeight uint64, @@ -97,7 +97,7 @@ func OffchainReplayBackwardCompatibilityTest( } sp := testutils.NewTestStorageProvider(store, evmBlockEvent.Height) - cr := sync.NewReplayer(chainID, rootAddr, sp, bp, log.Logger, nil, true) + cr := sync.NewReplayer(chainID, rootAddr, sp, bp, log, nil, true) res, err := cr.ReplayBlock(evmTxEvents, evmBlockEvent) if err != nil { return err @@ -125,6 +125,8 @@ func OffchainReplayBackwardCompatibilityTest( return err } } + + log.Info().Msgf("verified block %d", height) } return nil From ac7c24699ce8048e1ec46deab8acfacf9fd76e4b Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 22 Nov 2024 06:28:14 -0800 Subject: [PATCH 45/64] update error message --- cmd/util/cmd/verify-evm-offchain-replay/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/util/cmd/verify-evm-offchain-replay/main.go b/cmd/util/cmd/verify-evm-offchain-replay/main.go index 2459a35cd59..0bc6eef8187 100644 --- a/cmd/util/cmd/verify-evm-offchain-replay/main.go +++ b/cmd/util/cmd/verify-evm-offchain-replay/main.go @@ -57,7 +57,7 @@ func run(*cobra.Command, []string) { log.Info().Msgf("verifying range from %d to %d", from, to) err = Verify(log.Logger, from, to, flow.Testnet, flagDatadir, flagExecutionDataDir, flagEVMStateGobDir) if err != nil { - log.Fatal().Err(err).Msg("could not verify last k height") + log.Fatal().Err(err).Msg("could not verify height") } log.Info().Msgf("successfully verified range from %d to %d", from, to) From 2ff295d1f114c9785daa7946a173b186580e01f7 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 22 Nov 2024 08:44:36 -0800 Subject: [PATCH 46/64] add register checks --- fvm/evm/offchain/utils/verify.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff 
--git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go index ae99e827acb..2045de36f22 100644 --- a/fvm/evm/offchain/utils/verify.go +++ b/fvm/evm/offchain/utils/verify.go @@ -1,8 +1,10 @@ package utils import ( + "bytes" "context" "errors" + "fmt" "strings" "github.com/onflow/cadence" @@ -124,6 +126,21 @@ func OffchainReplayBackwardCompatibilityTest( if err != nil { return err } + + expectedUpdate, ok := updates[k] + if !ok { + return fmt.Errorf("missing update for register %v, %v", k, expectedUpdate) + } + + if !bytes.Equal(expectedUpdate, v) { + return fmt.Errorf("unexpected update for register %v, expected %v, got %v", k, expectedUpdate, v) + } + + delete(updates, k) + } + + if len(updates) > 0 { + return fmt.Errorf("missing updates for registers %v", updates) } log.Info().Msgf("verified block %d", height) From 0431f802911cfef1b993dd3c00cf442d230c8e84 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 22 Nov 2024 10:30:11 -0800 Subject: [PATCH 47/64] store block proposal in replay --- fvm/evm/offchain/blocks/provider.go | 50 +++++++++++++++++++++-- fvm/evm/offchain/sync/replay.go | 37 +++++++++-------- fvm/evm/offchain/sync/replayer.go | 18 ++++---- fvm/evm/offchain/sync/replayer_test.go | 4 +- fvm/evm/offchain/utils/collection_test.go | 2 +- fvm/evm/offchain/utils/verify.go | 39 +++++++++++++++++- 6 files changed, 119 insertions(+), 31 deletions(-) diff --git a/fvm/evm/offchain/blocks/provider.go b/fvm/evm/offchain/blocks/provider.go index 9111be4ac64..b9da39bd468 100644 --- a/fvm/evm/offchain/blocks/provider.go +++ b/fvm/evm/offchain/blocks/provider.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/onflow/flow-go/fvm/evm/events" + "github.com/onflow/flow-go/fvm/evm/handler" "github.com/onflow/flow-go/fvm/evm/types" "github.com/onflow/flow-go/model/flow" ) @@ -13,7 +14,10 @@ import ( // a OnBlockReceived call before block execution and // a follow up OnBlockExecuted call after block execution. 
type BasicProvider struct { + chainID flow.ChainID blks *Blocks + rootAddr flow.Address + storage types.BackendStorage latestBlockPayload *events.BlockEventPayload } @@ -28,7 +32,12 @@ func NewBasicProvider( if err != nil { return nil, err } - return &BasicProvider{blks: blks}, nil + return &BasicProvider{ + chainID: chainID, + blks: blks, + rootAddr: rootAddr, + storage: storage, + }, nil } // GetSnapshotAt returns a block snapshot at the given height @@ -61,14 +70,49 @@ func (p *BasicProvider) OnBlockReceived(blockEvent *events.BlockEventPayload) er // OnBlockExecuted should be called after executing blocks. func (p *BasicProvider) OnBlockExecuted( height uint64, - resCol types.ReplayResultCollector) error { + resCol types.ReplayResultCollector, + blockProposal *types.BlockProposal, +) error { // we push the block hash after execution, so the behaviour of the blockhash is // identical to the evm.handler. if p.latestBlockPayload.Height != height { return fmt.Errorf("active block height doesn't match expected: %d, got: %d", p.latestBlockPayload.Height, height) } + + blockBytes, err := blockProposal.Block.ToBytes() + if err != nil { + return types.NewFatalError(err) + } + + // do the same as handler.CommitBlockProposal + err = p.storage.SetValue( + p.rootAddr[:], + []byte(handler.BlockStoreLatestBlockKey), + blockBytes, + ) + if err != nil { + return err + } + + blockProposalBytes, err := blockProposal.ToBytes() + if err != nil { + return types.NewFatalError(err) + } + + hash := p.latestBlockPayload.Hash + // update block proposal + err = p.storage.SetValue( + p.rootAddr[:], + []byte(handler.BlockStoreLatestBlockProposalKey), + blockProposalBytes, + ) + if err != nil { + return err + } + + // update block hash list return p.blks.PushBlockHash( p.latestBlockPayload.Height, - p.latestBlockPayload.Hash, + hash, ) } diff --git a/fvm/evm/offchain/sync/replay.go b/fvm/evm/offchain/sync/replay.go index 4516f37007d..e85fc21658c 100644 --- a/fvm/evm/offchain/sync/replay.go 
+++ b/fvm/evm/offchain/sync/replay.go @@ -30,25 +30,26 @@ func ReplayBlockExecution( transactionEvents []events.TransactionEventPayload, blockEvent *events.BlockEventPayload, validateResults bool, -) error { +) ([]*types.Result, error) { // check the passed block event if blockEvent == nil { - return fmt.Errorf("nil block event has been passed") + return nil, fmt.Errorf("nil block event has been passed") } // create a base block context for all transactions // tx related context values will be replaced during execution ctx, err := blockSnapshot.BlockContext() if err != nil { - return err + return nil, err } // update the tracer ctx.Tracer = tracer gasConsumedSoFar := uint64(0) txHashes := make(types.TransactionHashes, len(transactionEvents)) + results := make([]*types.Result, 0, len(transactionEvents)) for idx, tx := range transactionEvents { - err = replayTransactionExecution( + result, err := replayTransactionExecution( rootAddr, ctx, uint(idx), @@ -58,28 +59,30 @@ func ReplayBlockExecution( validateResults, ) if err != nil { - return fmt.Errorf("transaction execution failed, txIndex: %d, err: %w", idx, err) + return nil, fmt.Errorf("transaction execution failed, txIndex: %d, err: %w", idx, err) } gasConsumedSoFar += tx.GasConsumed txHashes[idx] = tx.Hash + + results = append(results, result) } if validateResults { // check transaction inclusion txHashRoot := gethTypes.DeriveSha(txHashes, gethTrie.NewStackTrie(nil)) if txHashRoot != blockEvent.TransactionHashRoot { - return fmt.Errorf("transaction root hash doesn't match [%x] != [%x]", txHashRoot, blockEvent.TransactionHashRoot) + return nil, fmt.Errorf("transaction root hash doesn't match [%x] != [%x]", txHashRoot, blockEvent.TransactionHashRoot) } // check total gas used if blockEvent.TotalGasUsed != gasConsumedSoFar { - return fmt.Errorf("total gas used doesn't match [%d] != [%d]", gasConsumedSoFar, blockEvent.TotalGasUsed) + return nil, fmt.Errorf("total gas used doesn't match [%d] != [%d]", gasConsumedSoFar, 
blockEvent.TotalGasUsed) } // no need to check the receipt root hash given we have checked the logs and other // values during tx execution. } - return nil + return results, nil } func replayTransactionExecution( @@ -90,7 +93,7 @@ func replayTransactionExecution( ledger atree.Ledger, txEvent *events.TransactionEventPayload, validate bool, -) error { +) (*types.Result, error) { // create emulator em := emulator.NewEmulator(ledger, rootAddr) @@ -102,7 +105,7 @@ func replayTransactionExecution( if len(txEvent.PrecompiledCalls) > 0 { pcs, err := types.AggregatedPrecompileCallsFromEncoded(txEvent.PrecompiledCalls) if err != nil { - return fmt.Errorf("error decoding precompiled calls [%x]: %w", txEvent.Payload, err) + return nil, fmt.Errorf("error decoding precompiled calls [%x]: %w", txEvent.Payload, err) } ctx.ExtraPrecompiledContracts = precompiles.AggregatedPrecompiledCallsToPrecompiledContracts(pcs) } @@ -110,7 +113,7 @@ func replayTransactionExecution( // create a new block view bv, err := em.NewBlockView(ctx) if err != nil { - return err + return nil, err } var res *types.Result @@ -119,31 +122,31 @@ func replayTransactionExecution( if txEvent.TransactionType == types.DirectCallTxType { call, err := types.DirectCallFromEncoded(txEvent.Payload) if err != nil { - return fmt.Errorf("failed to RLP-decode direct call [%x]: %w", txEvent.Payload, err) + return nil, fmt.Errorf("failed to RLP-decode direct call [%x]: %w", txEvent.Payload, err) } res, err = bv.DirectCall(call) if err != nil { - return fmt.Errorf("failed to execute direct call [%x]: %w", txEvent.Hash, err) + return nil, fmt.Errorf("failed to execute direct call [%x]: %w", txEvent.Hash, err) } } else { gethTx := &gethTypes.Transaction{} if err := gethTx.UnmarshalBinary(txEvent.Payload); err != nil { - return fmt.Errorf("failed to RLP-decode transaction [%x]: %w", txEvent.Payload, err) + return nil, fmt.Errorf("failed to RLP-decode transaction [%x]: %w", txEvent.Payload, err) } res, err = 
bv.RunTransaction(gethTx) if err != nil { - return fmt.Errorf("failed to run transaction [%x]: %w", txEvent.Hash, err) + return nil, fmt.Errorf("failed to run transaction [%x]: %w", txEvent.Hash, err) } } // validate results if validate { if err := ValidateResult(res, txEvent); err != nil { - return fmt.Errorf("transaction replay failed (txHash %x): %w", txEvent.Hash, err) + return nil, fmt.Errorf("transaction replay failed (txHash %x): %w", txEvent.Hash, err) } } - return nil + return res, nil } func ValidateResult( diff --git a/fvm/evm/offchain/sync/replayer.go b/fvm/evm/offchain/sync/replayer.go index 25ccdc10cbf..33411b7c133 100644 --- a/fvm/evm/offchain/sync/replayer.go +++ b/fvm/evm/offchain/sync/replayer.go @@ -46,7 +46,11 @@ func NewReplayer( // ReplayBlock replays the execution of the transactions of an EVM block // using the provided transactionEvents and blockEvents, -// which include all the context data for re-executing the transactions, and returns the replay result. +// which include all the context data for re-executing the transactions, and returns +// the replay result and the result of each transaction. +// the replay result contains the register updates, and the result of each transaction +// contains the execution result of each transaction, which is useful for reconstructing +// the EVM block proposal. +// this method can be called concurrently if underlying storage // tracer and block snapshot provider support concurrency. 
// @@ -56,11 +60,11 @@ func NewReplayer( func (cr *Replayer) ReplayBlock( transactionEvents []events.TransactionEventPayload, blockEvent *events.BlockEventPayload, -) (types.ReplayResultCollector, error) { +) (types.ReplayResultCollector, []*types.Result, error) { // prepare storage st, err := cr.storageProvider.GetSnapshotAt(blockEvent.Height) if err != nil { - return nil, err + return nil, nil, err } // create storage @@ -69,11 +73,11 @@ func (cr *Replayer) ReplayBlock( // get block snapshot bs, err := cr.blockProvider.GetSnapshotAt(blockEvent.Height) if err != nil { - return nil, err + return nil, nil, err } // replay transactions - err = ReplayBlockExecution( + results, err := ReplayBlockExecution( cr.chainID, cr.rootAddr, state, @@ -84,8 +88,8 @@ func (cr *Replayer) ReplayBlock( cr.validateResults, ) if err != nil { - return nil, err + return nil, nil, err } - return state, nil + return state, results, nil } diff --git a/fvm/evm/offchain/sync/replayer_test.go b/fvm/evm/offchain/sync/replayer_test.go index f7c05ab63b5..d193163283b 100644 --- a/fvm/evm/offchain/sync/replayer_test.go +++ b/fvm/evm/offchain/sync/replayer_test.go @@ -162,9 +162,11 @@ func TestChainReplay(t *testing.T) { sp := NewTestStorageProvider(snapshot, 1) cr := sync.NewReplayer(chainID, rootAddr, sp, bp, zerolog.Logger{}, nil, true) - res, err := cr.ReplayBlock(txEventPayloads, blockEventPayload) + res, results, err := cr.ReplayBlock(txEventPayloads, blockEventPayload) require.NoError(t, err) + require.Len(t, results, totalTxCount) + err = bp.OnBlockExecuted(blockEventPayload.Height, res) require.NoError(t, err) diff --git a/fvm/evm/offchain/utils/collection_test.go b/fvm/evm/offchain/utils/collection_test.go index e5b3059661b..ae8b10a0e59 100644 --- a/fvm/evm/offchain/utils/collection_test.go +++ b/fvm/evm/offchain/utils/collection_test.go @@ -147,7 +147,7 @@ func replayEvents( sp := NewTestStorageProvider(store, blockEventPayload.Height) cr := sync.NewReplayer(chainID, rootAddr, sp, bp, 
zerolog.Logger{}, nil, true) - res, err := cr.ReplayBlock(txEvents, blockEventPayload) + res, _, err := cr.ReplayBlock(txEvents, blockEventPayload) require.NoError(t, err) // commit all changes diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go index 2045de36f22..a3f3e871f13 100644 --- a/fvm/evm/offchain/utils/verify.go +++ b/fvm/evm/offchain/utils/verify.go @@ -18,6 +18,7 @@ import ( evmStorage "github.com/onflow/flow-go/fvm/evm/offchain/storage" "github.com/onflow/flow-go/fvm/evm/offchain/sync" "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/fvm/evm/types" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/convert" "github.com/onflow/flow-go/model/flow" @@ -100,7 +101,7 @@ func OffchainReplayBackwardCompatibilityTest( sp := testutils.NewTestStorageProvider(store, evmBlockEvent.Height) cr := sync.NewReplayer(chainID, rootAddr, sp, bp, log, nil, true) - res, err := cr.ReplayBlock(evmTxEvents, evmBlockEvent) + res, results, err := cr.ReplayBlock(evmTxEvents, evmBlockEvent) if err != nil { return err } @@ -113,7 +114,9 @@ func OffchainReplayBackwardCompatibilityTest( } } - err = bp.OnBlockExecuted(evmBlockEvent.Height, res) + blockProposal := reconstructProposal(evmBlockEvent, evmTxEvents, results) + + err = bp.OnBlockExecuted(evmBlockEvent.Height, res, blockProposal) if err != nil { return err } @@ -185,3 +188,35 @@ func parseEVMEvents(evts flow.EventsList) (*events.BlockEventPayload, []events.T return blockEvent, txEvents, nil } + +func reconstructProposal( + blockEvent *events.BlockEventPayload, + txEvents []events.TransactionEventPayload, + results []*types.Result, +) *types.BlockProposal { + receipts := make([]types.LightReceipt, 0, len(results)) + + for _, result := range results { + receipts = append(receipts, *result.LightReceipt()) + } + + txHashes := make(types.TransactionHashes, 0, len(txEvents)) + for _, tx := range txEvents { + txHashes = append(txHashes, tx.Hash) + } + + 
return &types.BlockProposal{ + Block: types.Block{ + ParentBlockHash: blockEvent.ParentBlockHash, + Height: blockEvent.Height, + Timestamp: blockEvent.Timestamp, + TotalSupply: blockEvent.TotalSupply.Big(), + ReceiptRoot: blockEvent.ReceiptRoot, + TransactionHashRoot: blockEvent.TransactionHashRoot, + TotalGasUsed: blockEvent.TotalGasUsed, + PrevRandao: blockEvent.PrevRandao, + }, + Receipts: receipts, + TxHashes: txHashes, + } +} From 30b3c3f39e3582a5b749266893035f725a7995f7 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 22 Nov 2024 11:18:53 -0800 Subject: [PATCH 48/64] fix tests --- fvm/evm/offchain/blocks/block_proposal.go | 38 +++++++++++++++++++++++ fvm/evm/offchain/sync/replayer_test.go | 14 ++++----- fvm/evm/offchain/utils/collection_test.go | 6 ++-- fvm/evm/offchain/utils/verify.go | 35 +-------------------- 4 files changed, 50 insertions(+), 43 deletions(-) create mode 100644 fvm/evm/offchain/blocks/block_proposal.go diff --git a/fvm/evm/offchain/blocks/block_proposal.go b/fvm/evm/offchain/blocks/block_proposal.go new file mode 100644 index 00000000000..877ba3303fe --- /dev/null +++ b/fvm/evm/offchain/blocks/block_proposal.go @@ -0,0 +1,38 @@ +package blocks + +import ( + "github.com/onflow/flow-go/fvm/evm/events" + "github.com/onflow/flow-go/fvm/evm/types" +) + +func ReconstructProposal( + blockEvent *events.BlockEventPayload, + txEvents []events.TransactionEventPayload, + results []*types.Result, +) *types.BlockProposal { + receipts := make([]types.LightReceipt, 0, len(results)) + + for _, result := range results { + receipts = append(receipts, *result.LightReceipt()) + } + + txHashes := make(types.TransactionHashes, 0, len(txEvents)) + for _, tx := range txEvents { + txHashes = append(txHashes, tx.Hash) + } + + return &types.BlockProposal{ + Block: types.Block{ + ParentBlockHash: blockEvent.ParentBlockHash, + Height: blockEvent.Height, + Timestamp: blockEvent.Timestamp, + TotalSupply: blockEvent.TotalSupply.Big(), + ReceiptRoot: 
blockEvent.ReceiptRoot, + TransactionHashRoot: blockEvent.TransactionHashRoot, + TotalGasUsed: blockEvent.TotalGasUsed, + PrevRandao: blockEvent.PrevRandao, + }, + Receipts: receipts, + TxHashes: txHashes, + } +} diff --git a/fvm/evm/offchain/sync/replayer_test.go b/fvm/evm/offchain/sync/replayer_test.go index d193163283b..2da1a5ba76b 100644 --- a/fvm/evm/offchain/sync/replayer_test.go +++ b/fvm/evm/offchain/sync/replayer_test.go @@ -12,6 +12,7 @@ import ( "github.com/onflow/flow-go/fvm/evm" "github.com/onflow/flow-go/fvm/evm/events" "github.com/onflow/flow-go/fvm/evm/offchain/blocks" + "github.com/onflow/flow-go/fvm/evm/offchain/storage" "github.com/onflow/flow-go/fvm/evm/offchain/sync" . "github.com/onflow/flow-go/fvm/evm/testutils" "github.com/onflow/flow-go/fvm/evm/types" @@ -154,7 +155,8 @@ func TestChainReplay(t *testing.T) { // check replay - bp, err := blocks.NewBasicProvider(chainID, snapshot, rootAddr) + bpStorage := storage.NewEphemeralStorage(snapshot) + bp, err := blocks.NewBasicProvider(chainID, bpStorage, rootAddr) require.NoError(t, err) err = bp.OnBlockReceived(blockEventPayload) @@ -167,14 +169,12 @@ func TestChainReplay(t *testing.T) { require.Len(t, results, totalTxCount) - err = bp.OnBlockExecuted(blockEventPayload.Height, res) + proposal := blocks.ReconstructProposal(blockEventPayload, txEventPayloads, results) + + err = bp.OnBlockExecuted(blockEventPayload.Height, res, proposal) require.NoError(t, err) - // TODO: verify the state delta - // currently the backend storage doesn't work well with this - // changes needed to make this work, which is left for future PRs - // - // for k, v := range result.StorageRegisterUpdates() { + // for k, v := range bpStorage.StorageRegisterUpdates() { // ret, err := backend.GetValue([]byte(k.Owner), []byte(k.Key)) // require.NoError(t, err) // require.Equal(t, ret[:], v[:]) diff --git a/fvm/evm/offchain/utils/collection_test.go b/fvm/evm/offchain/utils/collection_test.go index ae8b10a0e59..a4385c7f664 100644 
--- a/fvm/evm/offchain/utils/collection_test.go +++ b/fvm/evm/offchain/utils/collection_test.go @@ -147,7 +147,7 @@ func replayEvents( sp := NewTestStorageProvider(store, blockEventPayload.Height) cr := sync.NewReplayer(chainID, rootAddr, sp, bp, zerolog.Logger{}, nil, true) - res, _, err := cr.ReplayBlock(txEvents, blockEventPayload) + res, results, err := cr.ReplayBlock(txEvents, blockEventPayload) require.NoError(t, err) // commit all changes @@ -156,7 +156,9 @@ func replayEvents( require.NoError(t, err) } - err = bp.OnBlockExecuted(blockEventPayload.Height, res) + proposal := blocks.ReconstructProposal(blockEventPayload, txEvents, results) + + err = bp.OnBlockExecuted(blockEventPayload.Height, res, proposal) require.NoError(t, err) // commit all block hash list changes diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go index a3f3e871f13..3a3d9d9b9ce 100644 --- a/fvm/evm/offchain/utils/verify.go +++ b/fvm/evm/offchain/utils/verify.go @@ -18,7 +18,6 @@ import ( evmStorage "github.com/onflow/flow-go/fvm/evm/offchain/storage" "github.com/onflow/flow-go/fvm/evm/offchain/sync" "github.com/onflow/flow-go/fvm/evm/testutils" - "github.com/onflow/flow-go/fvm/evm/types" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/convert" "github.com/onflow/flow-go/model/flow" @@ -114,7 +113,7 @@ func OffchainReplayBackwardCompatibilityTest( } } - blockProposal := reconstructProposal(evmBlockEvent, evmTxEvents, results) + blockProposal := blocks.ReconstructProposal(evmBlockEvent, evmTxEvents, results) err = bp.OnBlockExecuted(evmBlockEvent.Height, res, blockProposal) if err != nil { @@ -188,35 +187,3 @@ func parseEVMEvents(evts flow.EventsList) (*events.BlockEventPayload, []events.T return blockEvent, txEvents, nil } - -func reconstructProposal( - blockEvent *events.BlockEventPayload, - txEvents []events.TransactionEventPayload, - results []*types.Result, -) *types.BlockProposal { - receipts := make([]types.LightReceipt, 0, 
len(results)) - - for _, result := range results { - receipts = append(receipts, *result.LightReceipt()) - } - - txHashes := make(types.TransactionHashes, 0, len(txEvents)) - for _, tx := range txEvents { - txHashes = append(txHashes, tx.Hash) - } - - return &types.BlockProposal{ - Block: types.Block{ - ParentBlockHash: blockEvent.ParentBlockHash, - Height: blockEvent.Height, - Timestamp: blockEvent.Timestamp, - TotalSupply: blockEvent.TotalSupply.Big(), - ReceiptRoot: blockEvent.ReceiptRoot, - TransactionHashRoot: blockEvent.TransactionHashRoot, - TotalGasUsed: blockEvent.TotalGasUsed, - PrevRandao: blockEvent.PrevRandao, - }, - Receipts: receipts, - TxHashes: txHashes, - } -} From 4655a149e0a794511a678c18cce66baa59d75899 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 22 Nov 2024 11:43:45 -0800 Subject: [PATCH 49/64] update error message --- fvm/evm/offchain/utils/verify.go | 76 +++++++++++++++++++++++++------- 1 file changed, 61 insertions(+), 15 deletions(-) diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go index 3a3d9d9b9ce..64c50fce3b7 100644 --- a/fvm/evm/offchain/utils/verify.go +++ b/fvm/evm/offchain/utils/verify.go @@ -68,7 +68,7 @@ func OffchainReplayBackwardCompatibilityTest( payloads = append(payloads, chunkData.TrieUpdate.Payloads...) 
} - updates := make(map[flow.RegisterID]flow.RegisterValue, len(payloads)) + expectedUpdates := make(map[flow.RegisterID]flow.RegisterValue, len(payloads)) for i := len(payloads) - 1; i >= 0; i-- { regID, regVal, err := convert.PayloadToRegister(payloads[i]) if err != nil { @@ -82,8 +82,8 @@ func OffchainReplayBackwardCompatibilityTest( // when iterating backwards, duplicated register updates are stale updates, // so skipping them - if _, ok := updates[regID]; !ok { - updates[regID] = regVal + if _, ok := expectedUpdates[regID]; !ok { + expectedUpdates[regID] = regVal } } @@ -105,12 +105,16 @@ func OffchainReplayBackwardCompatibilityTest( return err } + actualUpdates := make(map[flow.RegisterID]flow.RegisterValue, len(expectedUpdates)) + // commit all changes for k, v := range res.StorageRegisterUpdates() { err = store.SetValue([]byte(k.Owner), []byte(k.Key), v) if err != nil { return err } + + actualUpdates[k] = v } blockProposal := blocks.ReconstructProposal(evmBlockEvent, evmTxEvents, results) @@ -129,20 +133,12 @@ func OffchainReplayBackwardCompatibilityTest( return err } - expectedUpdate, ok := updates[k] - if !ok { - return fmt.Errorf("missing update for register %v, %v", k, expectedUpdate) - } - - if !bytes.Equal(expectedUpdate, v) { - return fmt.Errorf("unexpected update for register %v, expected %v, got %v", k, expectedUpdate, v) - } - - delete(updates, k) + actualUpdates[k] = v } - if len(updates) > 0 { - return fmt.Errorf("missing updates for registers %v", updates) + err = verifyRegisterUpdates(expectedUpdates, actualUpdates) + if err != nil { + return err } log.Info().Msgf("verified block %d", height) @@ -187,3 +183,53 @@ func parseEVMEvents(evts flow.EventsList) (*events.BlockEventPayload, []events.T return blockEvent, txEvents, nil } + +func verifyRegisterUpdates(expectedUpdates map[flow.RegisterID]flow.RegisterValue, actualUpdates map[flow.RegisterID]flow.RegisterValue) error { + missingUpdates := make(map[flow.RegisterID]flow.RegisterValue) + 
additionalUpdates := make(map[flow.RegisterID]flow.RegisterValue) + mismatchingUpdates := make(map[flow.RegisterID][2]flow.RegisterValue) + + for k, v := range expectedUpdates { + if actualVal, ok := actualUpdates[k]; !ok { + missingUpdates[k] = v + } else if !bytes.Equal(v, actualVal) { + mismatchingUpdates[k] = [2]flow.RegisterValue{v, actualVal} + } + + delete(actualUpdates, k) + } + + for k, v := range actualUpdates { + additionalUpdates[k] = v + } + + // Build a combined error message + var errorMessage strings.Builder + + if len(missingUpdates) > 0 { + errorMessage.WriteString("Missing register updates:\n") + for id, value := range missingUpdates { + errorMessage.WriteString(fmt.Sprintf(" RegisterID: %v, ExpectedValue: %v\n", id, value)) + } + } + + if len(additionalUpdates) > 0 { + errorMessage.WriteString("Additional register updates:\n") + for id, value := range additionalUpdates { + errorMessage.WriteString(fmt.Sprintf(" RegisterID: %v, ActualValue: %v\n", id, value)) + } + } + + if len(mismatchingUpdates) > 0 { + errorMessage.WriteString("Mismatching register updates:\n") + for id, values := range mismatchingUpdates { + errorMessage.WriteString(fmt.Sprintf(" RegisterID: %v, ExpectedValue: %v, ActualValue: %v\n", id, values[0], values[1])) + } + } + + if errorMessage.Len() > 0 { + return errors.New(errorMessage.String()) + } + + return nil +} From d4c1d33425ea87ea4733196ce45b9afe7e7cdca2 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 22 Nov 2024 11:45:59 -0800 Subject: [PATCH 50/64] update error message --- fvm/evm/offchain/utils/verify.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go index 64c50fce3b7..9afb272acec 100644 --- a/fvm/evm/offchain/utils/verify.go +++ b/fvm/evm/offchain/utils/verify.go @@ -209,21 +209,21 @@ func verifyRegisterUpdates(expectedUpdates map[flow.RegisterID]flow.RegisterValu if len(missingUpdates) > 0 { 
errorMessage.WriteString("Missing register updates:\n") for id, value := range missingUpdates { - errorMessage.WriteString(fmt.Sprintf(" RegisterID: %v, ExpectedValue: %v\n", id, value)) + errorMessage.WriteString(fmt.Sprintf(" RegisterKey: %v, ExpectedValue: %x\n", id.Key, value)) } } if len(additionalUpdates) > 0 { errorMessage.WriteString("Additional register updates:\n") for id, value := range additionalUpdates { - errorMessage.WriteString(fmt.Sprintf(" RegisterID: %v, ActualValue: %v\n", id, value)) + errorMessage.WriteString(fmt.Sprintf(" RegisterKey: %v, ActualValue: %x\n", id.Key, value)) } } if len(mismatchingUpdates) > 0 { errorMessage.WriteString("Mismatching register updates:\n") for id, values := range mismatchingUpdates { - errorMessage.WriteString(fmt.Sprintf(" RegisterID: %v, ExpectedValue: %v, ActualValue: %v\n", id, values[0], values[1])) + errorMessage.WriteString(fmt.Sprintf(" RegisterKey: %v, ExpectedValue: %x, ActualValue: %x\n", id.Key, values[0], values[1])) } } From c7a8627ae96355bdffe922bf95269b5a7cd0f550 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 22 Nov 2024 12:06:47 -0800 Subject: [PATCH 51/64] add account status updates --- fvm/evm/offchain/blocks/block_proposal.go | 8 ++----- fvm/evm/offchain/sync/replayer_test.go | 2 +- fvm/evm/offchain/utils/collection_test.go | 2 +- fvm/evm/offchain/utils/verify.go | 27 +++++++++++++++++++---- 4 files changed, 27 insertions(+), 12 deletions(-) diff --git a/fvm/evm/offchain/blocks/block_proposal.go b/fvm/evm/offchain/blocks/block_proposal.go index 877ba3303fe..cd1d68ed517 100644 --- a/fvm/evm/offchain/blocks/block_proposal.go +++ b/fvm/evm/offchain/blocks/block_proposal.go @@ -7,18 +7,14 @@ import ( func ReconstructProposal( blockEvent *events.BlockEventPayload, - txEvents []events.TransactionEventPayload, results []*types.Result, ) *types.BlockProposal { receipts := make([]types.LightReceipt, 0, len(results)) + txHashes := make(types.TransactionHashes, 0, len(results)) for 
_, result := range results { receipts = append(receipts, *result.LightReceipt()) - } - - txHashes := make(types.TransactionHashes, 0, len(txEvents)) - for _, tx := range txEvents { - txHashes = append(txHashes, tx.Hash) + txHashes = append(txHashes, result.TxHash) } return &types.BlockProposal{ diff --git a/fvm/evm/offchain/sync/replayer_test.go b/fvm/evm/offchain/sync/replayer_test.go index 2da1a5ba76b..3668e445c84 100644 --- a/fvm/evm/offchain/sync/replayer_test.go +++ b/fvm/evm/offchain/sync/replayer_test.go @@ -169,7 +169,7 @@ func TestChainReplay(t *testing.T) { require.Len(t, results, totalTxCount) - proposal := blocks.ReconstructProposal(blockEventPayload, txEventPayloads, results) + proposal := blocks.ReconstructProposal(blockEventPayload, results) err = bp.OnBlockExecuted(blockEventPayload.Height, res, proposal) require.NoError(t, err) diff --git a/fvm/evm/offchain/utils/collection_test.go b/fvm/evm/offchain/utils/collection_test.go index a4385c7f664..8e292530534 100644 --- a/fvm/evm/offchain/utils/collection_test.go +++ b/fvm/evm/offchain/utils/collection_test.go @@ -156,7 +156,7 @@ func replayEvents( require.NoError(t, err) } - proposal := blocks.ReconstructProposal(blockEventPayload, txEvents, results) + proposal := blocks.ReconstructProposal(blockEventPayload, results) err = bp.OnBlockExecuted(blockEventPayload.Height, res, proposal) require.NoError(t, err) diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go index 9afb272acec..740989eac23 100644 --- a/fvm/evm/offchain/utils/verify.go +++ b/fvm/evm/offchain/utils/verify.go @@ -25,6 +25,16 @@ import ( "github.com/onflow/flow-go/storage" ) +// EVM Root Height is the first block that has EVM Block Event where the EVM block height is 1 +func isEVMRootHeight(chainID flow.ChainID, flowHeight uint64) bool { + if chainID == flow.Testnet { + return flowHeight == 211176671 + } else if chainID == flow.Mainnet { + return flowHeight == 85981136 + } + return flowHeight == 1 +} + func 
OffchainReplayBackwardCompatibilityTest( log zerolog.Logger, chainID flow.ChainID, @@ -44,6 +54,14 @@ func OffchainReplayBackwardCompatibilityTest( return err } + // setup account status at EVM root block + if isEVMRootHeight(chainID, flowStartHeight) { + err = bpStorage.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), environment.NewAccountStatus().ToBytes()) + if err != nil { + return err + } + } + for height := flowStartHeight; height <= flowEndHeight; height++ { blockID, err := headers.BlockIDByHeight(height) if err != nil { @@ -87,7 +105,7 @@ func OffchainReplayBackwardCompatibilityTest( } } - // parse events + // parse EVM events evmBlockEvent, evmTxEvents, err := parseEVMEvents(events) if err != nil { return err @@ -107,7 +125,7 @@ func OffchainReplayBackwardCompatibilityTest( actualUpdates := make(map[flow.RegisterID]flow.RegisterValue, len(expectedUpdates)) - // commit all changes + // commit all register changes from the EVM state transition for k, v := range res.StorageRegisterUpdates() { err = store.SetValue([]byte(k.Owner), []byte(k.Key), v) if err != nil { @@ -117,14 +135,15 @@ func OffchainReplayBackwardCompatibilityTest( actualUpdates[k] = v } - blockProposal := blocks.ReconstructProposal(evmBlockEvent, evmTxEvents, results) + blockProposal := blocks.ReconstructProposal(evmBlockEvent, results) err = bp.OnBlockExecuted(evmBlockEvent.Height, res, blockProposal) if err != nil { return err } - // verify and commit all block hash list changes + // commit all register changes from non-EVM state transition, such + // as block hash list changes for k, v := range bpStorage.StorageRegisterUpdates() { // verify the block hash list changes are included in the trie update From 33b75b369e84c9209ab26df34e8aa3c7cef3a79a Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 22 Nov 2024 16:32:42 -0800 Subject: [PATCH 52/64] update provider --- fvm/evm/offchain/utils/verify.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 
deletions(-) diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go index 740989eac23..f059874bbb9 100644 --- a/fvm/evm/offchain/utils/verify.go +++ b/fvm/evm/offchain/utils/verify.go @@ -48,21 +48,21 @@ func OffchainReplayBackwardCompatibilityTest( rootAddr := evm.StorageAccountAddress(chainID) rootAddrStr := string(rootAddr.Bytes()) - bpStorage := evmStorage.NewEphemeralStorage(store) - bp, err := blocks.NewBasicProvider(chainID, bpStorage, rootAddr) - if err != nil { - return err - } - - // setup account status at EVM root block - if isEVMRootHeight(chainID, flowStartHeight) { - err = bpStorage.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), environment.NewAccountStatus().ToBytes()) + for height := flowStartHeight; height <= flowEndHeight; height++ { + bpStorage := evmStorage.NewEphemeralStorage(store) + bp, err := blocks.NewBasicProvider(chainID, bpStorage, rootAddr) if err != nil { return err } - } - for height := flowStartHeight; height <= flowEndHeight; height++ { + // setup account status at EVM root block + if isEVMRootHeight(chainID, flowStartHeight) { + err = bpStorage.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), environment.NewAccountStatus().ToBytes()) + if err != nil { + return err + } + } + blockID, err := headers.BlockIDByHeight(height) if err != nil { return err From 190a1209c4c80ac91de7ba8a8809bcc31f5b829b Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 22 Nov 2024 16:39:28 -0800 Subject: [PATCH 53/64] update verifable keys --- fvm/evm/handler/blockHashList.go | 9 +++++++++ fvm/evm/offchain/utils/verify.go | 14 ++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/fvm/evm/handler/blockHashList.go b/fvm/evm/handler/blockHashList.go index 91eefded24e..0db2aff73f9 100644 --- a/fvm/evm/handler/blockHashList.go +++ b/fvm/evm/handler/blockHashList.go @@ -3,6 +3,7 @@ package handler import ( "encoding/binary" "fmt" + "strings" gethCommon "github.com/onflow/go-ethereum/common" @@ -26,6 
+27,14 @@ const ( heightEncodingSize ) +func IsBlockHashListBucketKeyFormat(id flow.RegisterID) bool { + return strings.HasPrefix(id.Key, "BlockHashListBucket") +} + +func IsBlockHashListMetaKey(id flow.RegisterID) bool { + return id.Key == blockHashListMetaKey +} + // BlockHashList stores the last `capacity` number of block hashes // // Under the hood it breaks the list of hashes into diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go index f059874bbb9..cfa48d39f9d 100644 --- a/fvm/evm/offchain/utils/verify.go +++ b/fvm/evm/offchain/utils/verify.go @@ -14,6 +14,7 @@ import ( "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/evm" "github.com/onflow/flow-go/fvm/evm/events" + "github.com/onflow/flow-go/fvm/evm/handler" "github.com/onflow/flow-go/fvm/evm/offchain/blocks" evmStorage "github.com/onflow/flow-go/fvm/evm/offchain/storage" "github.com/onflow/flow-go/fvm/evm/offchain/sync" @@ -98,6 +99,10 @@ func OffchainReplayBackwardCompatibilityTest( continue } + if !verifiableKeys(regID) { + continue + } + // when iterating backwards, duplicated register updates are stale updates, // so skipping them if _, ok := expectedUpdates[regID]; !ok { @@ -152,7 +157,12 @@ func OffchainReplayBackwardCompatibilityTest( return err } + if !verifiableKeys(k) { + continue + } + actualUpdates[k] = v + } err = verifyRegisterUpdates(expectedUpdates, actualUpdates) @@ -166,6 +176,10 @@ func OffchainReplayBackwardCompatibilityTest( return nil } +func verifiableKeys(key flow.RegisterID) bool { + return handler.IsBlockHashListBucketKeyFormat(key) || handler.IsBlockHashListMetaKey(key) +} + func parseEVMEvents(evts flow.EventsList) (*events.BlockEventPayload, []events.TransactionEventPayload, error) { var blockEvent *events.BlockEventPayload txEvents := make([]events.TransactionEventPayload, 0) From 9c0bebb7dedd6c72953d76b0dff81f142f397354 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 22 Nov 2024 16:42:00 -0800 
Subject: [PATCH 54/64] update verifable keys --- fvm/evm/offchain/utils/verify.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go index cfa48d39f9d..a269e81ec1b 100644 --- a/fvm/evm/offchain/utils/verify.go +++ b/fvm/evm/offchain/utils/verify.go @@ -137,6 +137,10 @@ func OffchainReplayBackwardCompatibilityTest( return err } + if !verifiableKeys(k) { + continue + } + actualUpdates[k] = v } From 6ac47bcee3b402ad4c3dace3cee8e612764553a9 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Fri, 22 Nov 2024 20:37:34 -0800 Subject: [PATCH 55/64] skip register verification --- fvm/evm/offchain/utils/verify.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go index a269e81ec1b..9a6f6a45d87 100644 --- a/fvm/evm/offchain/utils/verify.go +++ b/fvm/evm/offchain/utils/verify.go @@ -14,7 +14,6 @@ import ( "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/evm" "github.com/onflow/flow-go/fvm/evm/events" - "github.com/onflow/flow-go/fvm/evm/handler" "github.com/onflow/flow-go/fvm/evm/offchain/blocks" evmStorage "github.com/onflow/flow-go/fvm/evm/offchain/storage" "github.com/onflow/flow-go/fvm/evm/offchain/sync" @@ -58,7 +57,8 @@ func OffchainReplayBackwardCompatibilityTest( // setup account status at EVM root block if isEVMRootHeight(chainID, flowStartHeight) { - err = bpStorage.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), environment.NewAccountStatus().ToBytes()) + err = bpStorage.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), + environment.NewAccountStatus().ToBytes()) if err != nil { return err } @@ -166,7 +166,6 @@ func OffchainReplayBackwardCompatibilityTest( } actualUpdates[k] = v - } err = verifyRegisterUpdates(expectedUpdates, actualUpdates) @@ -181,7 +180,8 @@ func OffchainReplayBackwardCompatibilityTest( } func verifiableKeys(key flow.RegisterID) bool { - 
return handler.IsBlockHashListBucketKeyFormat(key) || handler.IsBlockHashListMetaKey(key) + return false + // return handler.IsBlockHashListBucketKeyFormat(key) || handler.IsBlockHashListMetaKey(key) } func parseEVMEvents(evts flow.EventsList) (*events.BlockEventPayload, []events.TransactionEventPayload, error) { From 0ed057379fcf967002a034e77e266dabaf832e4f Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 26 Nov 2024 09:42:21 -0800 Subject: [PATCH 56/64] refactor verify.go --- fvm/evm/offchain/utils/verify.go | 61 +++++++++++++++++++++++--------- 1 file changed, 45 insertions(+), 16 deletions(-) diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go index 9a6f6a45d87..f09f392b6dd 100644 --- a/fvm/evm/offchain/utils/verify.go +++ b/fvm/evm/offchain/utils/verify.go @@ -26,7 +26,7 @@ import ( ) // EVM Root Height is the first block that has EVM Block Event where the EVM block height is 1 -func isEVMRootHeight(chainID flow.ChainID, flowHeight uint64) bool { +func IsEVMRootHeight(chainID flow.ChainID, flowHeight uint64) bool { if chainID == flow.Testnet { return flowHeight == 211176671 } else if chainID == flow.Mainnet { @@ -44,10 +44,27 @@ func OffchainReplayBackwardCompatibilityTest( results storage.ExecutionResults, executionDataStore execution_data.ExecutionDataGetter, store environment.ValueStore, + onHeightReplayed func(uint64) error, ) error { rootAddr := evm.StorageAccountAddress(chainID) rootAddrStr := string(rootAddr.Bytes()) + if IsEVMRootHeight(chainID, flowStartHeight) { + log.Info().Msgf("initializing EVM state for root height %d", flowStartHeight) + + as := environment.NewAccountStatus() + rootAddr := evm.StorageAccountAddress(chainID) + err := store.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), as.ToBytes()) + if err != nil { + return err + } + } + + // pendingEVMTxEvents are tx events that are executed block included in a flow block that + // didn't emit EVM block event, which is caused when the 
system tx to emit EVM block fails. + // we accumulate these pending txs, and replay them when we encounter a block with EVM block event. + pendingEVMTxEvents := make([]events.TransactionEventPayload, 0) + for height := flowStartHeight; height <= flowEndHeight; height++ { bpStorage := evmStorage.NewEphemeralStorage(store) bp, err := blocks.NewBasicProvider(chainID, bpStorage, rootAddr) @@ -55,15 +72,6 @@ func OffchainReplayBackwardCompatibilityTest( return err } - // setup account status at EVM root block - if isEVMRootHeight(chainID, flowStartHeight) { - err = bpStorage.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), - environment.NewAccountStatus().ToBytes()) - if err != nil { - return err - } - } - blockID, err := headers.BlockIDByHeight(height) if err != nil { return err @@ -76,14 +84,14 @@ func OffchainReplayBackwardCompatibilityTest( executionData, err := executionDataStore.Get(context.Background(), result.ExecutionDataID) if err != nil { - return err + return fmt.Errorf("could not get execution data %v for block %d: %w", result.ExecutionDataID, height, err) } - events := flow.EventsList{} + evts := flow.EventsList{} payloads := []*ledger.Payload{} for _, chunkData := range executionData.ChunkExecutionDatas { - events = append(events, chunkData.Events...) + evts = append(evts, chunkData.Events...) payloads = append(payloads, chunkData.TrieUpdate.Payloads...) } @@ -111,11 +119,29 @@ func OffchainReplayBackwardCompatibilityTest( } // parse EVM events - evmBlockEvent, evmTxEvents, err := parseEVMEvents(events) + evmBlockEvent, evmTxEvents, err := parseEVMEvents(evts) if err != nil { return err } + pendingEVMTxEvents = append(pendingEVMTxEvents, evmTxEvents...) 
+ + if evmBlockEvent == nil { + log.Info().Msgf("block has no EVM block, height :%v, txEvents: %v", height, len(evmTxEvents)) + + err = onHeightReplayed(height) + if err != nil { + return err + } + continue + } + + // when we encounter a block with EVM block event, we replay the pending txs accumulated + // from previous blocks that had no EVM block event. + evmTxEventsIncludedInBlock := pendingEVMTxEvents + // reset pendingEVMTxEvents + pendingEVMTxEvents = make([]events.TransactionEventPayload, 0) + err = bp.OnBlockReceived(evmBlockEvent) if err != nil { return err @@ -123,7 +149,7 @@ func OffchainReplayBackwardCompatibilityTest( sp := testutils.NewTestStorageProvider(store, evmBlockEvent.Height) cr := sync.NewReplayer(chainID, rootAddr, sp, bp, log, nil, true) - res, results, err := cr.ReplayBlock(evmTxEvents, evmBlockEvent) + res, results, err := cr.ReplayBlock(evmTxEventsIncludedInBlock, evmBlockEvent) if err != nil { return err } @@ -173,7 +199,10 @@ func OffchainReplayBackwardCompatibilityTest( return err } - log.Info().Msgf("verified block %d", height) + err = onHeightReplayed(height) + if err != nil { + return err + } } return nil From 892922e22982b5d793a2ee9726f34e46d82e07f9 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 26 Nov 2024 11:24:31 -0800 Subject: [PATCH 57/64] refactor block replay verification --- .../cmd/verify-evm-offchain-replay/verify.go | 1 + fvm/evm/offchain/utils/replay.go | 101 +++++++ fvm/evm/offchain/utils/verify.go | 246 +++++++++--------- 3 files changed, 220 insertions(+), 128 deletions(-) create mode 100644 fvm/evm/offchain/utils/replay.go diff --git a/cmd/util/cmd/verify-evm-offchain-replay/verify.go b/cmd/util/cmd/verify-evm-offchain-replay/verify.go index f75cd8278b6..b7bb0ab0e87 100644 --- a/cmd/util/cmd/verify-evm-offchain-replay/verify.go +++ b/cmd/util/cmd/verify-evm-offchain-replay/verify.go @@ -77,6 +77,7 @@ func Verify(log zerolog.Logger, from uint64, to uint64, chainID flow.ChainID, da 
storages.Results, executionDataStore, store, + func(uint64) error { return nil }, ) if err != nil { diff --git a/fvm/evm/offchain/utils/replay.go b/fvm/evm/offchain/utils/replay.go new file mode 100644 index 00000000000..1c556f82d19 --- /dev/null +++ b/fvm/evm/offchain/utils/replay.go @@ -0,0 +1,101 @@ +package utils + +import ( + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/fvm/environment" + "github.com/onflow/flow-go/fvm/evm/events" + "github.com/onflow/flow-go/fvm/evm/offchain/blocks" + evmStorage "github.com/onflow/flow-go/fvm/evm/offchain/storage" + "github.com/onflow/flow-go/fvm/evm/offchain/sync" + "github.com/onflow/flow-go/fvm/evm/testutils" + "github.com/onflow/flow-go/model/flow" +) + +func ReplayEVMEventsToStore( + log zerolog.Logger, + store environment.ValueStore, + chainID flow.ChainID, + rootAddr flow.Address, + evmBlockEvent *events.BlockEventPayload, // EVM block event + evmTxEvents []events.TransactionEventPayload, // EVM transaction event +) ( + map[flow.RegisterID]flow.RegisterValue, // EVM state transition updates + map[flow.RegisterID]flow.RegisterValue, // block provider updates + error, +) { + + bpStorage := evmStorage.NewEphemeralStorage(store) + bp, err := blocks.NewBasicProvider(chainID, bpStorage, rootAddr) + if err != nil { + return nil, nil, err + } + + err = bp.OnBlockReceived(evmBlockEvent) + if err != nil { + return nil, nil, err + } + + sp := testutils.NewTestStorageProvider(store, evmBlockEvent.Height) + cr := sync.NewReplayer(chainID, rootAddr, sp, bp, log, nil, true) + res, results, err := cr.ReplayBlock(evmTxEvents, evmBlockEvent) + if err != nil { + return nil, nil, err + } + + // commit all register changes from the EVM state transition + for k, v := range res.StorageRegisterUpdates() { + err = store.SetValue([]byte(k.Owner), []byte(k.Key), v) + if err != nil { + return nil, nil, err + } + } + + blockProposal := blocks.ReconstructProposal(evmBlockEvent, results) + + err = bp.OnBlockExecuted(evmBlockEvent.Height, 
res, blockProposal) + if err != nil { + return nil, nil, err + } + + // commit all register changes from non-EVM state transition, such + // as block hash list changes + for k, v := range bpStorage.StorageRegisterUpdates() { + // verify the block hash list changes are included in the trie update + + err = store.SetValue([]byte(k.Owner), []byte(k.Key), v) + if err != nil { + return nil, nil, err + } + } + + return res.StorageRegisterUpdates(), bpStorage.StorageRegisterUpdates(), nil +} + +type EVMEventsAccumulator struct { + pendingEVMTxEvents []events.TransactionEventPayload +} + +func NewEVMEventsAccumulator() *EVMEventsAccumulator { + return &EVMEventsAccumulator{ + pendingEVMTxEvents: make([]events.TransactionEventPayload, 0), + } +} + +func (a *EVMEventsAccumulator) HasBlockEvent( + evmBlockEvent *events.BlockEventPayload, + evmTxEvents []events.TransactionEventPayload) ( + *events.BlockEventPayload, + []events.TransactionEventPayload, + bool, // true if there is an EVM block event +) { + a.pendingEVMTxEvents = append(a.pendingEVMTxEvents, evmTxEvents...) 
+ + // if there is no EVM block event, we will accumulate the pending txs + if evmBlockEvent == nil { + return evmBlockEvent, a.pendingEVMTxEvents, false + } + + // if there is an EVM block event, we return the EVM block and the accumulated tx events + return evmBlockEvent, a.pendingEVMTxEvents, true +} diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go index f09f392b6dd..c007e4976bc 100644 --- a/fvm/evm/offchain/utils/verify.go +++ b/fvm/evm/offchain/utils/verify.go @@ -14,10 +14,6 @@ import ( "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/evm" "github.com/onflow/flow-go/fvm/evm/events" - "github.com/onflow/flow-go/fvm/evm/offchain/blocks" - evmStorage "github.com/onflow/flow-go/fvm/evm/offchain/storage" - "github.com/onflow/flow-go/fvm/evm/offchain/sync" - "github.com/onflow/flow-go/fvm/evm/testutils" "github.com/onflow/flow-go/ledger" "github.com/onflow/flow-go/ledger/common/convert" "github.com/onflow/flow-go/model/flow" @@ -35,6 +31,24 @@ func IsEVMRootHeight(chainID flow.ChainID, flowHeight uint64) bool { return flowHeight == 1 } +// IsSporkHeight returns true if the given flow height is a spork height for the given chainID +// At spork height, there is no EVM events +func IsSporkHeight(chainID flow.ChainID, flowHeight uint64) bool { + if IsEVMRootHeight(chainID, flowHeight) { + return true + } + + if chainID == flow.Testnet { + return flowHeight == 218215349 // Testnet 52 + } else if chainID == flow.Mainnet { + return flowHeight == 88226267 // Mainnet 26 + } + return false +} + +// OffchainReplayBackwardCompatibilityTest replays the offchain EVM state transition for a given range of flow blocks, +// the replay will also verify the StateUpdateChecksum of the EVM state transition from each transaction execution. +// the updated register values will be saved to the given value store. 
func OffchainReplayBackwardCompatibilityTest( log zerolog.Logger, chainID flow.ChainID, @@ -49,84 +63,40 @@ func OffchainReplayBackwardCompatibilityTest( rootAddr := evm.StorageAccountAddress(chainID) rootAddrStr := string(rootAddr.Bytes()) - if IsEVMRootHeight(chainID, flowStartHeight) { - log.Info().Msgf("initializing EVM state for root height %d", flowStartHeight) - - as := environment.NewAccountStatus() - rootAddr := evm.StorageAccountAddress(chainID) - err := store.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), as.ToBytes()) - if err != nil { - return err - } - } - // pendingEVMTxEvents are tx events that are executed block included in a flow block that // didn't emit EVM block event, which is caused when the system tx to emit EVM block fails. // we accumulate these pending txs, and replay them when we encounter a block with EVM block event. - pendingEVMTxEvents := make([]events.TransactionEventPayload, 0) + pendingEVMEvents := NewEVMEventsAccumulator() for height := flowStartHeight; height <= flowEndHeight; height++ { - bpStorage := evmStorage.NewEphemeralStorage(store) - bp, err := blocks.NewBasicProvider(chainID, bpStorage, rootAddr) - if err != nil { - return err - } - - blockID, err := headers.BlockIDByHeight(height) - if err != nil { - return err - } - - result, err := results.ByBlockID(blockID) - if err != nil { - return err - } - - executionData, err := executionDataStore.Get(context.Background(), result.ExecutionDataID) - if err != nil { - return fmt.Errorf("could not get execution data %v for block %d: %w", result.ExecutionDataID, height, err) - } - - evts := flow.EventsList{} - payloads := []*ledger.Payload{} - - for _, chunkData := range executionData.ChunkExecutionDatas { - evts = append(evts, chunkData.Events...) - payloads = append(payloads, chunkData.TrieUpdate.Payloads...) 
- } + // account status initialization for the root account at the EVM root height + if IsEVMRootHeight(chainID, height) { + log.Info().Msgf("initializing EVM state for root height %d", flowStartHeight) - expectedUpdates := make(map[flow.RegisterID]flow.RegisterValue, len(payloads)) - for i := len(payloads) - 1; i >= 0; i-- { - regID, regVal, err := convert.PayloadToRegister(payloads[i]) + as := environment.NewAccountStatus() + rootAddr := evm.StorageAccountAddress(chainID) + err := store.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), as.ToBytes()) if err != nil { return err } + } - // skip non-evm-account registers - if regID.Owner != rootAddrStr { - continue - } - - if !verifiableKeys(regID) { - continue - } - - // when iterating backwards, duplicated register updates are stale updates, - // so skipping them - if _, ok := expectedUpdates[regID]; !ok { - expectedUpdates[regID] = regVal - } + if IsSporkHeight(chainID, height) { + // spork root block has no EVM events + continue } - // parse EVM events - evmBlockEvent, evmTxEvents, err := parseEVMEvents(evts) + // get EVM events and register updates at the flow height + evmBlockEvent, evmTxEvents, registerUpdates, err := evmEventsAndRegisterUpdatesAtFlowHeight( + height, + headers, results, executionDataStore, rootAddrStr) if err != nil { - return err + return fmt.Errorf("failed to get EVM events and register updates at height %d: %w", height, err) } - pendingEVMTxEvents = append(pendingEVMTxEvents, evmTxEvents...) + blockEvent, txEvents, hasBlockEvent := pendingEVMEvents.HasBlockEvent(evmBlockEvent, evmTxEvents) - if evmBlockEvent == nil { + if !hasBlockEvent { log.Info().Msgf("block has no EVM block, height :%v, txEvents: %v", height, len(evmTxEvents)) err = onHeightReplayed(height) @@ -136,65 +106,19 @@ func OffchainReplayBackwardCompatibilityTest( continue } - // when we encounter a block with EVM block event, we replay the pending txs accumulated - // from previous blocks that had no EVM block event. 
- evmTxEventsIncludedInBlock := pendingEVMTxEvents - // reset pendingEVMTxEvents - pendingEVMTxEvents = make([]events.TransactionEventPayload, 0) - - err = bp.OnBlockReceived(evmBlockEvent) - if err != nil { - return err - } - - sp := testutils.NewTestStorageProvider(store, evmBlockEvent.Height) - cr := sync.NewReplayer(chainID, rootAddr, sp, bp, log, nil, true) - res, results, err := cr.ReplayBlock(evmTxEventsIncludedInBlock, evmBlockEvent) - if err != nil { - return err - } - - actualUpdates := make(map[flow.RegisterID]flow.RegisterValue, len(expectedUpdates)) - - // commit all register changes from the EVM state transition - for k, v := range res.StorageRegisterUpdates() { - err = store.SetValue([]byte(k.Owner), []byte(k.Key), v) - if err != nil { - return err - } - - if !verifiableKeys(k) { - continue - } - - actualUpdates[k] = v - } - - blockProposal := blocks.ReconstructProposal(evmBlockEvent, results) - - err = bp.OnBlockExecuted(evmBlockEvent.Height, res, blockProposal) + evmUpdates, blockProviderUpdates, err := ReplayEVMEventsToStore( + log, + store, + chainID, + rootAddr, + blockEvent, + txEvents, + ) if err != nil { - return err - } - - // commit all register changes from non-EVM state transition, such - // as block hash list changes - for k, v := range bpStorage.StorageRegisterUpdates() { - // verify the block hash list changes are included in the trie update - - err = store.SetValue([]byte(k.Owner), []byte(k.Key), v) - if err != nil { - return err - } - - if !verifiableKeys(k) { - continue - } - - actualUpdates[k] = v + return fmt.Errorf("fail to replay events: %w", err) } - err = verifyRegisterUpdates(expectedUpdates, actualUpdates) + err = verifyEVMRegisterUpdates(registerUpdates, evmUpdates, blockProviderUpdates) if err != nil { return err } @@ -208,11 +132,6 @@ func OffchainReplayBackwardCompatibilityTest( return nil } -func verifiableKeys(key flow.RegisterID) bool { - return false - // return handler.IsBlockHashListBucketKeyFormat(key) || 
handler.IsBlockHashListMetaKey(key) -} - func parseEVMEvents(evts flow.EventsList) (*events.BlockEventPayload, []events.TransactionEventPayload, error) { var blockEvent *events.BlockEventPayload txEvents := make([]events.TransactionEventPayload, 0) @@ -250,7 +169,78 @@ func parseEVMEvents(evts flow.EventsList) (*events.BlockEventPayload, []events.T return blockEvent, txEvents, nil } -func verifyRegisterUpdates(expectedUpdates map[flow.RegisterID]flow.RegisterValue, actualUpdates map[flow.RegisterID]flow.RegisterValue) error { +func evmEventsAndRegisterUpdatesAtFlowHeight( + flowHeight uint64, + headers storage.Headers, + results storage.ExecutionResults, + executionDataStore execution_data.ExecutionDataGetter, + rootAddr string, +) ( + *events.BlockEventPayload, // EVM block event, might be nil if there is no block Event at this height + []events.TransactionEventPayload, // EVM transaction event + map[flow.RegisterID]flow.RegisterValue, // update registers + error, +) { + + blockID, err := headers.BlockIDByHeight(flowHeight) + if err != nil { + return nil, nil, nil, err + } + + result, err := results.ByBlockID(blockID) + if err != nil { + return nil, nil, nil, err + } + + executionData, err := executionDataStore.Get(context.Background(), result.ExecutionDataID) + if err != nil { + return nil, nil, nil, + fmt.Errorf("could not get execution data %v for block %d: %w", + result.ExecutionDataID, flowHeight, err) + } + + evts := flow.EventsList{} + payloads := []*ledger.Payload{} + + for _, chunkData := range executionData.ChunkExecutionDatas { + evts = append(evts, chunkData.Events...) + payloads = append(payloads, chunkData.TrieUpdate.Payloads...) 
+ } + + updates := make(map[flow.RegisterID]flow.RegisterValue, len(payloads)) + for i := len(payloads) - 1; i >= 0; i-- { + regID, regVal, err := convert.PayloadToRegister(payloads[i]) + if err != nil { + return nil, nil, nil, err + } + + // find the register updates for the root account + if regID.Owner == rootAddr { + updates[regID] = regVal + } + } + + // parse EVM events + evmBlockEvent, evmTxEvents, err := parseEVMEvents(evts) + if err != nil { + return nil, nil, nil, err + } + return evmBlockEvent, evmTxEvents, updates, nil +} + +func verifyEVMRegisterUpdates( + registerUpdates map[flow.RegisterID]flow.RegisterValue, + evmUpdates map[flow.RegisterID]flow.RegisterValue, + blockProviderUpdates map[flow.RegisterID]flow.RegisterValue, +) error { + // skip the register level validation + // since the register is not stored at the same slab id as the on-chain EVM + // instead, we will compare by exporting the logic EVM state, which contains + // accounts, codes and slots. + return nil +} + +func VerifyRegisterUpdates(expectedUpdates map[flow.RegisterID]flow.RegisterValue, actualUpdates map[flow.RegisterID]flow.RegisterValue) error { missingUpdates := make(map[flow.RegisterID]flow.RegisterValue) additionalUpdates := make(map[flow.RegisterID]flow.RegisterValue) mismatchingUpdates := make(map[flow.RegisterID][2]flow.RegisterValue) From e08cad88b12cc5c5949d1e2d411853629ebb885e Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 26 Nov 2024 11:34:55 -0800 Subject: [PATCH 58/64] refactor util tests --- fvm/evm/offchain/utils/collection_test.go | 42 ++++++----------------- 1 file changed, 10 insertions(+), 32 deletions(-) diff --git a/fvm/evm/offchain/utils/collection_test.go b/fvm/evm/offchain/utils/collection_test.go index 8e292530534..5dad9b86658 100644 --- a/fvm/evm/offchain/utils/collection_test.go +++ b/fvm/evm/offchain/utils/collection_test.go @@ -10,7 +10,6 @@ import ( "strings" "testing" - "github.com/rs/zerolog" "github.com/rs/zerolog/log" 
"github.com/stretchr/testify/require" @@ -20,9 +19,6 @@ import ( "github.com/onflow/flow-go/fvm/environment" "github.com/onflow/flow-go/fvm/evm" "github.com/onflow/flow-go/fvm/evm/events" - "github.com/onflow/flow-go/fvm/evm/offchain/blocks" - "github.com/onflow/flow-go/fvm/evm/offchain/storage" - "github.com/onflow/flow-go/fvm/evm/offchain/sync" "github.com/onflow/flow-go/fvm/evm/offchain/utils" . "github.com/onflow/flow-go/fvm/evm/testutils" "github.com/onflow/flow-go/model/flow" @@ -128,10 +124,6 @@ func replayEvents( rootAddr := evm.StorageAccountAddress(chainID) - bpStorage := storage.NewEphemeralStorage(store) - bp, err := blocks.NewBasicProvider(chainID, bpStorage, rootAddr) - require.NoError(t, err) - nextHeight := initialNextHeight scanEventFilesAndRun(t, eventsFilePath, @@ -142,31 +134,17 @@ func replayEvents( nextHeight, blockEventPayload.Height) } - err = bp.OnBlockReceived(blockEventPayload) - require.NoError(t, err) - - sp := NewTestStorageProvider(store, blockEventPayload.Height) - cr := sync.NewReplayer(chainID, rootAddr, sp, bp, zerolog.Logger{}, nil, true) - res, results, err := cr.ReplayBlock(txEvents, blockEventPayload) - require.NoError(t, err) - - // commit all changes - for k, v := range res.StorageRegisterUpdates() { - err = store.SetValue([]byte(k.Owner), []byte(k.Key), v) - require.NoError(t, err) - } - - proposal := blocks.ReconstructProposal(blockEventPayload, results) - - err = bp.OnBlockExecuted(blockEventPayload.Height, res, proposal) - require.NoError(t, err) - - // commit all block hash list changes - for k, v := range bpStorage.StorageRegisterUpdates() { - err = store.SetValue([]byte(k.Owner), []byte(k.Key), v) - require.NoError(t, err) + _, _, err := utils.ReplayEVMEventsToStore( + log.Logger, + store, + chainID, + rootAddr, + blockEventPayload, + txEvents, + ) + if err != nil { + return fmt.Errorf("fail to replay events: %w", err) } - // verify the block height is sequential without gap nextHeight++ From 
822f127e27c48f81f915d5a077d3bb92ac73604c Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 26 Nov 2024 15:30:43 -0800 Subject: [PATCH 59/64] remove unused chainID arg --- cmd/util/cmd/verify-evm-offchain-replay/verify.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/util/cmd/verify-evm-offchain-replay/verify.go b/cmd/util/cmd/verify-evm-offchain-replay/verify.go index b7bb0ab0e87..d3e0b5c6d1d 100644 --- a/cmd/util/cmd/verify-evm-offchain-replay/verify.go +++ b/cmd/util/cmd/verify-evm-offchain-replay/verify.go @@ -31,7 +31,7 @@ func Verify(log zerolog.Logger, from uint64, to uint64, chainID flow.ChainID, da Str("evmStateGobDir", evmStateGobDir). Msgf("verifying range from %d to %d", from, to) - db, storages, executionDataStore, dsStore, err := initStorages(chainID, dataDir, executionDataDir) + db, storages, executionDataStore, dsStore, err := initStorages(dataDir, executionDataDir) if err != nil { return fmt.Errorf("could not initialize storages: %w", err) } @@ -100,7 +100,7 @@ func Verify(log zerolog.Logger, from uint64, to uint64, chainID flow.ChainID, da return nil } -func initStorages(chainID flow.ChainID, dataDir string, executionDataDir string) ( +func initStorages(dataDir string, executionDataDir string) ( *badger.DB, *storage.All, execution_data.ExecutionDataGetter, From 3fcd79287d667cc6ff41c21ee0287ad5b298fa7c Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 26 Nov 2024 16:53:13 -0800 Subject: [PATCH 60/64] refactor verify --- .../cmd/verify-evm-offchain-replay/main.go | 13 +-- .../cmd/verify-evm-offchain-replay/verify.go | 100 +++++++++++------- 2 files changed, 71 insertions(+), 42 deletions(-) diff --git a/cmd/util/cmd/verify-evm-offchain-replay/main.go b/cmd/util/cmd/verify-evm-offchain-replay/main.go index 0bc6eef8187..d1027fb8a74 100644 --- a/cmd/util/cmd/verify-evm-offchain-replay/main.go +++ b/cmd/util/cmd/verify-evm-offchain-replay/main.go @@ -17,11 +17,12 @@ var ( 
flagEVMStateGobDir string flagChain string flagFromTo string + flagSaveEveryNBlocks uint64 ) // usage example // -// ./util verify-evm-offchain-replay --chain flow-testnet --from-to 211176671-211177000 +// ./util verify-evm-offchain-replay --chain flow-testnet --from_to 211176671-211177000 // --datadir /var/flow/data/protocol --execution_data_dir /var/flow/data/execution_data var Cmd = &cobra.Command{ Use: "verify-evm-offchain-replay", @@ -44,23 +45,23 @@ func init() { Cmd.Flags().StringVar(&flagEVMStateGobDir, "evm_state_gob_dir", "/var/flow/data/evm_state_gob", "directory that stores the evm state gob files as checkpoint") + + Cmd.Flags().Uint64Var(&flagSaveEveryNBlocks, "save_every", uint64(1_000_000), + "save the evm state gob files every N blocks") } func run(*cobra.Command, []string) { - _ = flow.ChainID(flagChain).Chain() + chainID := flow.ChainID(flagChain) from, to, err := parseFromTo(flagFromTo) if err != nil { log.Fatal().Err(err).Msg("could not parse from_to") } - log.Info().Msgf("verifying range from %d to %d", from, to) - err = Verify(log.Logger, from, to, flow.Testnet, flagDatadir, flagExecutionDataDir, flagEVMStateGobDir) + err = Verify(log.Logger, from, to, chainID, flagDatadir, flagExecutionDataDir, flagEVMStateGobDir, flagSaveEveryNBlocks) if err != nil { log.Fatal().Err(err).Msg("could not verify height") } - log.Info().Msgf("successfully verified range from %d to %d", from, to) - } func parseFromTo(fromTo string) (from, to uint64, err error) { diff --git a/cmd/util/cmd/verify-evm-offchain-replay/verify.go b/cmd/util/cmd/verify-evm-offchain-replay/verify.go index d3e0b5c6d1d..47b34c72afa 100644 --- a/cmd/util/cmd/verify-evm-offchain-replay/verify.go +++ b/cmd/util/cmd/verify-evm-offchain-replay/verify.go @@ -9,10 +9,9 @@ import ( "github.com/dgraph-io/badger/v2" badgerds "github.com/ipfs/go-ds-badger2" "github.com/rs/zerolog" + "github.com/rs/zerolog/log" "github.com/onflow/flow-go/cmd/util/cmd/common" - 
"github.com/onflow/flow-go/fvm/environment" - "github.com/onflow/flow-go/fvm/evm" "github.com/onflow/flow-go/fvm/evm/offchain/utils" "github.com/onflow/flow-go/fvm/evm/testutils" "github.com/onflow/flow-go/model/flow" @@ -23,13 +22,26 @@ import ( // Verify verifies the offchain replay of EVM blocks from the given height range // and updates the EVM state gob files with the latest state -func Verify(log zerolog.Logger, from uint64, to uint64, chainID flow.ChainID, dataDir string, executionDataDir string, evmStateGobDir string) error { - log.Info(). +func Verify( + log zerolog.Logger, + from uint64, + to uint64, + chainID flow.ChainID, + dataDir string, + executionDataDir string, + evmStateGobDir string, + saveEveryNBlocks uint64, +) error { + lg := log.With(). + Uint64("from", from).Uint64("to", to). Str("chain", chainID.String()). Str("dataDir", dataDir). Str("executionDataDir", executionDataDir). Str("evmStateGobDir", evmStateGobDir). - Msgf("verifying range from %d to %d", from, to) + Uint64("saveEveryNBlocks", saveEveryNBlocks). 
+ Logger() + + lg.Info().Msgf("verifying range from %d to %d", from, to) db, storages, executionDataStore, dsStore, err := initStorages(dataDir, executionDataDir) if err != nil { @@ -40,34 +52,32 @@ func Verify(log zerolog.Logger, from uint64, to uint64, chainID flow.ChainID, da defer dsStore.Close() var store *testutils.TestValueStore - isRoot := isEVMRootHeight(chainID, from) - if isRoot { - log.Info().Msgf("initializing EVM state for root height %d", from) + // root block require the account status registers to be saved + isRoot := utils.IsEVMRootHeight(chainID, from) + if isRoot { store = testutils.GetSimpleValueStore() - as := environment.NewAccountStatus() - rootAddr := evm.StorageAccountAddress(chainID) - err = store.SetValue(rootAddr[:], []byte(flow.AccountStatusKey), as.ToBytes()) - if err != nil { - return err - } } else { prev := from - 1 - log.Info().Msgf("loading EVM state from previous height %d", prev) - - valueFileName, allocatorFileName := evmStateGobFileNamesByEndHeight(evmStateGobDir, prev) - values, err := testutils.DeserializeState(valueFileName) + store, err = loadState(prev, evmStateGobDir) if err != nil { - return fmt.Errorf("could not deserialize state %v: %w", valueFileName, err) + return fmt.Errorf("could not load EVM state from previous height %d: %w", prev, err) } + } - allocators, err := testutils.DeserializeAllocator(allocatorFileName) - if err != nil { - return fmt.Errorf("could not deserialize allocator %v: %w", allocatorFileName, err) + // save state every N blocks + onHeightReplayed := func(height uint64) error { + log.Info().Msgf("replayed height %d", height) + if height%saveEveryNBlocks == 0 { + err := saveState(store, height, evmStateGobDir) + if err != nil { + return err + } } - store = testutils.GetSimpleValueStorePopulated(values, allocators) + return nil } + // replay blocks err = utils.OffchainReplayBackwardCompatibilityTest( log, chainID, @@ -77,16 +87,27 @@ func Verify(log zerolog.Logger, from uint64, to uint64, chainID 
flow.ChainID, da storages.Results, executionDataStore, store, - func(uint64) error { return nil }, + onHeightReplayed, ) if err != nil { return err } - valueFileName, allocatorFileName := evmStateGobFileNamesByEndHeight(evmStateGobDir, to) + err = saveState(store, to, evmStateGobDir) + if err != nil { + return err + } + + lg.Info().Msgf("successfully verified range from %d to %d", from, to) + + return nil +} + +func saveState(store *testutils.TestValueStore, height uint64, gobDir string) error { + valueFileName, allocatorFileName := evmStateGobFileNamesByEndHeight(gobDir, height) values, allocators := store.Dump() - err = testutils.SerializeState(valueFileName, values) + err := testutils.SerializeState(valueFileName, values) if err != nil { return err } @@ -100,6 +121,23 @@ func Verify(log zerolog.Logger, from uint64, to uint64, chainID flow.ChainID, da return nil } +func loadState(height uint64, gobDir string) (*testutils.TestValueStore, error) { + valueFileName, allocatorFileName := evmStateGobFileNamesByEndHeight(gobDir, height) + values, err := testutils.DeserializeState(valueFileName) + if err != nil { + return nil, fmt.Errorf("could not deserialize state %v: %w", valueFileName, err) + } + + allocators, err := testutils.DeserializeAllocator(allocatorFileName) + if err != nil { + return nil, fmt.Errorf("could not deserialize allocator %v: %w", allocatorFileName, err) + } + store := testutils.GetSimpleValueStorePopulated(values, allocators) + + log.Info().Msgf("loaded EVM state for height %d from gob file %v", height, valueFileName) + return store, nil +} + func initStorages(dataDir string, executionDataDir string) ( *badger.DB, *storage.All, @@ -128,16 +166,6 @@ func initStorages(dataDir string, executionDataDir string) ( return db, storages, executionDataStore, ds, nil } -// EVM Root Height is the first block that has EVM Block Event where the EVM block height is 1 -func isEVMRootHeight(chainID flow.ChainID, flowHeight uint64) bool { - if chainID == 
flow.Testnet { - return flowHeight == 211176671 - } else if chainID == flow.Mainnet { - return flowHeight == 85981136 - } - return flowHeight == 1 -} - func evmStateGobFileNamesByEndHeight(evmStateGobDir string, endHeight uint64) (string, string) { valueFileName := filepath.Join(evmStateGobDir, fmt.Sprintf("values-%d.gob", endHeight)) allocatorFileName := filepath.Join(evmStateGobDir, fmt.Sprintf("allocators-%d.gob", endHeight)) From a9e90040064967b73b3a50b4f725949ccc3e7b1b Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 26 Nov 2024 22:50:17 -0800 Subject: [PATCH 61/64] fix spork heights --- cmd/util/cmd/verify-evm-offchain-replay/main.go | 2 +- fvm/evm/offchain/utils/verify.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/util/cmd/verify-evm-offchain-replay/main.go b/cmd/util/cmd/verify-evm-offchain-replay/main.go index d1027fb8a74..d42c9841435 100644 --- a/cmd/util/cmd/verify-evm-offchain-replay/main.go +++ b/cmd/util/cmd/verify-evm-offchain-replay/main.go @@ -22,7 +22,7 @@ var ( // usage example // -// ./util verify-evm-offchain-replay --chain flow-testnet --from_to 211176671-211177000 +// ./util verify-evm-offchain-replay --chain flow-testnet --from_to 211176670-211177000 // --datadir /var/flow/data/protocol --execution_data_dir /var/flow/data/execution_data var Cmd = &cobra.Command{ Use: "verify-evm-offchain-replay", diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go index c007e4976bc..3cfe410f315 100644 --- a/fvm/evm/offchain/utils/verify.go +++ b/fvm/evm/offchain/utils/verify.go @@ -24,9 +24,9 @@ import ( // EVM Root Height is the first block that has EVM Block Event where the EVM block height is 1 func IsEVMRootHeight(chainID flow.ChainID, flowHeight uint64) bool { if chainID == flow.Testnet { - return flowHeight == 211176671 + return flowHeight == 211176670 } else if chainID == flow.Mainnet { - return flowHeight == 85981136 + return flowHeight == 85981135 } return flowHeight == 
1 } From 52c08c65a7ce53cbb42ab29f22557a5d2ed43764 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Tue, 26 Nov 2024 23:27:33 -0800 Subject: [PATCH 62/64] fix pendingEVMTxEvents --- fvm/evm/offchain/utils/replay.go | 5 ++++- fvm/evm/offchain/utils/verify.go | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/fvm/evm/offchain/utils/replay.go b/fvm/evm/offchain/utils/replay.go index 1c556f82d19..d6cb222fa73 100644 --- a/fvm/evm/offchain/utils/replay.go +++ b/fvm/evm/offchain/utils/replay.go @@ -96,6 +96,9 @@ func (a *EVMEventsAccumulator) HasBlockEvent( return evmBlockEvent, a.pendingEVMTxEvents, false } + pendingEVMTxEvents := a.pendingEVMTxEvents + // reset pending events + a.pendingEVMTxEvents = make([]events.TransactionEventPayload, 0) // if there is an EVM block event, we return the EVM block and the accumulated tx events - return evmBlockEvent, a.pendingEVMTxEvents, true + return evmBlockEvent, pendingEVMTxEvents, true } diff --git a/fvm/evm/offchain/utils/verify.go b/fvm/evm/offchain/utils/verify.go index 3cfe410f315..9335beb6230 100644 --- a/fvm/evm/offchain/utils/verify.go +++ b/fvm/evm/offchain/utils/verify.go @@ -79,6 +79,8 @@ func OffchainReplayBackwardCompatibilityTest( if err != nil { return err } + + continue } if IsSporkHeight(chainID, height) { From 1bdc486a83e94b74889e25a01ad4b1eb36972d84 Mon Sep 17 00:00:00 2001 From: "Leo Zhang (zhangchiqing)" Date: Wed, 27 Nov 2024 06:43:35 -0800 Subject: [PATCH 63/64] keep ReplayBlock unchanged --- fvm/evm/offchain/sync/replayer.go | 11 ++++++++++- fvm/evm/offchain/sync/replayer_test.go | 8 +------- fvm/evm/offchain/utils/replay.go | 2 +- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/fvm/evm/offchain/sync/replayer.go b/fvm/evm/offchain/sync/replayer.go index 33411b7c133..96df01d58a0 100644 --- a/fvm/evm/offchain/sync/replayer.go +++ b/fvm/evm/offchain/sync/replayer.go @@ -45,6 +45,15 @@ func NewReplayer( } // ReplayBlock replays the execution of the transactions of 
an EVM block +func (cr *Replayer) ReplayBlock( + transactionEvents []events.TransactionEventPayload, + blockEvent *events.BlockEventPayload, +) (types.ReplayResultCollector, error) { + res, _, err := cr.ReplayBlockEvents(transactionEvents, blockEvent) + return res, err +} + +// ReplayBlockEvents replays the execution of the transactions of an EVM block // using the provided transactionEvents and blockEvents, // which include all the context data for re-executing the transactions, and returns // the replay result and the result of each transaction. @@ -57,7 +66,7 @@ func NewReplayer( // Warning! the list of transaction events has to be sorted based on their // execution, sometimes the access node might return events out of order // it needs to be sorted by txIndex and eventIndex respectively. -func (cr *Replayer) ReplayBlock( +func (cr *Replayer) ReplayBlockEvents( transactionEvents []events.TransactionEventPayload, blockEvent *events.BlockEventPayload, ) (types.ReplayResultCollector, []*types.Result, error) { diff --git a/fvm/evm/offchain/sync/replayer_test.go b/fvm/evm/offchain/sync/replayer_test.go index 3668e445c84..06262b5811e 100644 --- a/fvm/evm/offchain/sync/replayer_test.go +++ b/fvm/evm/offchain/sync/replayer_test.go @@ -164,7 +164,7 @@ func TestChainReplay(t *testing.T) { sp := NewTestStorageProvider(snapshot, 1) cr := sync.NewReplayer(chainID, rootAddr, sp, bp, zerolog.Logger{}, nil, true) - res, results, err := cr.ReplayBlock(txEventPayloads, blockEventPayload) + res, results, err := cr.ReplayBlockEvents(txEventPayloads, blockEventPayload) require.NoError(t, err) require.Len(t, results, totalTxCount) @@ -173,12 +173,6 @@ func TestChainReplay(t *testing.T) { err = bp.OnBlockExecuted(blockEventPayload.Height, res, proposal) require.NoError(t, err) - - // for k, v := range bpStorage.StorageRegisterUpdates() { - // ret, err := backend.GetValue([]byte(k.Owner), []byte(k.Key)) - // require.NoError(t, err) - // require.Equal(t, ret[:], v[:]) - // } }) }) }) 
diff --git a/fvm/evm/offchain/utils/replay.go b/fvm/evm/offchain/utils/replay.go index d6cb222fa73..5aba8affcd1 100644 --- a/fvm/evm/offchain/utils/replay.go +++ b/fvm/evm/offchain/utils/replay.go @@ -38,7 +38,7 @@ func ReplayEVMEventsToStore( sp := testutils.NewTestStorageProvider(store, evmBlockEvent.Height) cr := sync.NewReplayer(chainID, rootAddr, sp, bp, log, nil, true) - res, results, err := cr.ReplayBlock(evmTxEvents, evmBlockEvent) + res, results, err := cr.ReplayBlockEvents(evmTxEvents, evmBlockEvent) if err != nil { return nil, nil, err } From f8668ef4840fe743cdbd86839224f747adb5c8e2 Mon Sep 17 00:00:00 2001 From: Uliana Andrukhiv Date: Tue, 3 Dec 2024 17:05:14 +0200 Subject: [PATCH 64/64] Update block data provider test Co-authored-by: Peter Argue <89119817+peterargue@users.noreply.github.com> --- .../rest/websockets/data_providers/blocks_provider_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/access/rest/websockets/data_providers/blocks_provider_test.go b/engine/access/rest/websockets/data_providers/blocks_provider_test.go index 6f46d27ccfe..9e07f9459e9 100644 --- a/engine/access/rest/websockets/data_providers/blocks_provider_test.go +++ b/engine/access/rest/websockets/data_providers/blocks_provider_test.go @@ -233,7 +233,7 @@ func (s *BlocksProviderSuite) testHappyPath( // Create a channel to simulate the subscription's data channel dataChan := make(chan interface{}) - // // Create a mock subscription and mock the channel + // Create a mock subscription and mock the channel sub := statestreamsmock.NewSubscription(s.T()) sub.On("Channel").Return((<-chan interface{})(dataChan)) sub.On("Err").Return(nil)