diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6186061ffc..d0cb8f53be 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -103,7 +103,7 @@ jobs: - name: Generate coverage report run: | go test ./... -coverprofile=coverage.out -covermode=atomic -timeout=20m - - uses: codecov/codecov-action@v5.0.2 + - uses: codecov/codecov-action@v5.0.7 with: files: ./coverage.out flags: unit-tests diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index ab9853e324..8d4c761c37 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -89,7 +89,7 @@ jobs: - name: Test - Race run: make test-using-race-detector - - uses: codecov/codecov-action@v5.0.2 + - uses: codecov/codecov-action@v5.0.7 with: if_ci_failed: success informational: true diff --git a/docs/docs/repo/labels.md b/docs/docs/repo/labels.md index 40ecd1190f..757b965e64 100644 --- a/docs/docs/repo/labels.md +++ b/docs/docs/repo/labels.md @@ -23,7 +23,7 @@ Below is the list of labels and their descriptions used in Gossamer repository. - **T-research** - this issue/pr is a research type issue. - **T-investigation** - this issue/pr is an investigation, probably related to some bug with unknown causes. - **`C-`** Complexity label. We operate only 3 complexity levels. - - **C-simple** - Minor changes changes, no additional research needed. Good first issue/review. + - **C-simple** - Minor changes, no additional research needed. Good first issue/review. - **C-complex** - Complex changes across multiple modules. Possibly will require additional research. - **C-chaotic** - Unpredictable nature of this task/changes makes its chaotic. - **`P-`** Priority level. We only have 3 priority levels, everything else is average by default. @@ -35,7 +35,7 @@ Below is the list of labels and their descriptions used in Gossamer repository. - **S-tests** - issue related to adding new tests. - **S-doc** - documentation related. - **S-cli** - issue related to Gossamer CLI. - - **S-ci** - issue related to continuous integration tasks or piplelines. + - **S-ci** - issue related to continuous integration tasks or pipelines. - **S-crypto** - issues related to the lib/crypto package. - **S-grandpa** - issues related to block finality. - **S-babe** - issues related to block production functionality. @@ -52,4 +52,4 @@ Below is the list of labels and their descriptions used in Gossamer repository. - **S-subsystems-availability** - issues related to polkadot host availability subsystem functionality. - **S-subsystems-disputes** - issues related to polkadot host disputes subsystem functionality. - **S-infrastructure** - issues related to infrastructure and DevOps. - - **S-dependencies** - issues related to dependencies changes. Used by dependabot. \ No newline at end of file + - **S-dependencies** - issues related to dependencies changes. Used by dependabot. 
diff --git a/docs/docs/usage/command-line.md b/docs/docs/usage/command-line.md index 58a12908e7..d69f034c65 100644 --- a/docs/docs/usage/command-line.md +++ b/docs/docs/usage/command-line.md @@ -142,14 +142,14 @@ To run more than two nodes, repeat steps for bob with a new `port` and `base-pat Available built-in keys: ``` -./bin/gossmer --key alice -./bin/gossmer --key bob -./bin/gossmer --key charlie -./bin/gossmer --key dave -./bin/gossmer --key eve -./bin/gossmer --key ferdie -./bin/gossmer --key george -./bin/gossmer --key heather +./bin/gossamer --key alice +./bin/gossamer --key bob +./bin/gossamer --key charlie +./bin/gossamer --key dave +./bin/gossamer --key eve +./bin/gossamer --key ferdie +./bin/gossamer --key george +./bin/gossamer --key heather ``` ## Initialising Nodes diff --git a/docs/docs/usage/import-state.md b/docs/docs/usage/import-state.md index 99177e1bf9..22de126583 100644 --- a/docs/docs/usage/import-state.md +++ b/docs/docs/usage/import-state.md @@ -1,6 +1,6 @@ --- layout: default -title: Import Sate +title: Import State permalink: /usage/import-state/ --- diff --git a/dot/network/host.go b/dot/network/host.go index a84eff48e8..f3fa3343c8 100644 --- a/dot/network/host.go +++ b/dot/network/host.go @@ -17,7 +17,7 @@ import ( "github.com/ChainSafe/gossamer/dot/network/messages" "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/internal/pubip" - "github.com/dgraph-io/ristretto" + "github.com/dgraph-io/ristretto/v2" badger "github.com/ipfs/go-ds-badger4" "github.com/libp2p/go-libp2p" libp2phost "github.com/libp2p/go-libp2p/core/host" diff --git a/dot/network/message_cache.go b/dot/network/message_cache.go index 3d62bc2433..a5d77642c9 100644 --- a/dot/network/message_cache.go +++ b/dot/network/message_cache.go @@ -8,7 +8,7 @@ import ( "time" "github.com/ChainSafe/gossamer/lib/common" - "github.com/dgraph-io/ristretto" + "github.com/dgraph-io/ristretto/v2" "github.com/libp2p/go-libp2p/core/peer" ) diff --git a/dot/network/message_cache_integration_test.go b/dot/network/message_cache_integration_test.go index 3eba7e3659..a42cb85d2a 100644 --- a/dot/network/message_cache_integration_test.go +++ b/dot/network/message_cache_integration_test.go @@ -11,7 +11,7 @@ import ( "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/lib/common" - "github.com/dgraph-io/ristretto" + "github.com/dgraph-io/ristretto/v2" "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" ) diff --git a/dot/sync/fullsync.go b/dot/sync/fullsync.go index 79db8b4a29..862e74327e 100644 --- a/dot/sync/fullsync.go +++ b/dot/sync/fullsync.go @@ -15,6 +15,7 @@ import ( "github.com/ChainSafe/gossamer/dot/peerset" "github.com/ChainSafe/gossamer/dot/types" "github.com/ChainSafe/gossamer/internal/database" + "github.com/ChainSafe/gossamer/lib/common" "github.com/libp2p/go-libp2p/core/peer" ) @@ -157,28 +158,33 @@ func (f *FullSyncStrategy) Process(results []*SyncTaskResult) ( return false, nil, nil, fmt.Errorf("getting highest finalized header") } - readyBlocks := make([][]*types.BlockData, 0, len(validResp)) + readyBlocks := make([]*Fragment, 0, len(validResp)) for _, reqRespData := range validResp { + responseFragment := NewFragment(reqRespData.responseData) + // if Gossamer requested the header, then the response data should contains // the full blocks to be imported. 
If Gossamer didn't request the header, // then the response should only contain the missing parts that will complete // the unreadyBlocks and then with the blocks completed we should be able to import them if reqRespData.req.RequestField(messages.RequestedDataHeader) { - updatedFragment, ok := f.unreadyBlocks.updateDisjointFragments(reqRespData.responseData) + updatedFragment, ok := f.unreadyBlocks.updateDisjointFragments(responseFragment) if ok { - validBlocks := validBlocksUnderFragment(highestFinalized.Number, updatedFragment) - if len(validBlocks) > 0 { - readyBlocks = append(readyBlocks, validBlocks) + validFragment := updatedFragment.Filter(func(bd *types.BlockData) bool { + return bd.Header.Number > highestFinalized.Number + }) + + if validFragment.Len() > 0 { + readyBlocks = append(readyBlocks, validFragment) } } else { - readyBlocks = append(readyBlocks, reqRespData.responseData) + readyBlocks = append(readyBlocks, responseFragment) } continue } - completedBlocks := f.unreadyBlocks.updateIncompleteBlocks(reqRespData.responseData) - readyBlocks = append(readyBlocks, completedBlocks) + completedBlocks := f.unreadyBlocks.updateIncompleteBlocks(responseFragment) + readyBlocks = append(readyBlocks, completedBlocks...) } // disjoint fragments are pieces of the chain that could not be imported right now @@ -186,17 +192,17 @@ func (f *FullSyncStrategy) Process(results []*SyncTaskResult) ( sortFragmentsOfChain(readyBlocks) orderedFragments := mergeFragmentsOfChain(readyBlocks) - nextBlocksToImport := make([]*types.BlockData, 0) - disjointFragments := make([][]*types.BlockData, 0) + nextBlocksToImport := new(Fragment) + disjointFragments := make([]*Fragment, 0) for _, fragment := range orderedFragments { - ok, err := f.blockState.HasHeader(fragment[0].Header.ParentHash) + ok, err := f.blockState.HasHeader(fragment.First().Header.ParentHash) if err != nil && !errors.Is(err, database.ErrNotFound) { return false, nil, nil, fmt.Errorf("checking block parent header: %w", err) } if ok { - nextBlocksToImport = append(nextBlocksToImport, fragment...) 
+ nextBlocksToImport = nextBlocksToImport.Concat(fragment) continue } @@ -204,8 +210,8 @@ func (f *FullSyncStrategy) Process(results []*SyncTaskResult) ( } // this loop goal is to import ready blocks as well as update the highestFinalized header - for len(nextBlocksToImport) > 0 || len(disjointFragments) > 0 { - for _, blockToImport := range nextBlocksToImport { + for nextBlocksToImport.Len() > 0 || len(disjointFragments) > 0 { + for blockToImport := range nextBlocksToImport.Iter() { imported, err := f.blockImporter.importBlock(blockToImport, networkInitialSync) if err != nil { return false, nil, nil, fmt.Errorf("while handling ready block: %w", err) @@ -216,7 +222,7 @@ func (f *FullSyncStrategy) Process(results []*SyncTaskResult) ( } } - nextBlocksToImport = make([]*types.BlockData, 0) + nextBlocksToImport = new(Fragment) highestFinalized, err = f.blockState.GetHighestFinalisedHeader() if err != nil { return false, nil, nil, fmt.Errorf("getting highest finalized header") @@ -226,45 +232,54 @@ func (f *FullSyncStrategy) Process(results []*SyncTaskResult) ( // given that fragment contains chains and these chains contains blocks // check if the first block in the chain contains a parent known by us for _, fragment := range disjointFragments { - validFragment := validBlocksUnderFragment(highestFinalized.Number, fragment) - if len(validFragment) == 0 { + validFragment := fragment.Filter(func(bd *types.BlockData) bool { + return bd.Header.Number > highestFinalized.Number + }) + + if validFragment.Len() == 0 { continue } - ok, err := f.blockState.HasHeader(validFragment[0].Header.ParentHash) + ok, err := f.blockState.HasHeader(validFragment.First().Header.ParentHash) if err != nil && !errors.Is(err, database.ErrNotFound) { return false, nil, nil, err } if !ok { + firstFragmentBlock := validFragment.First() // if the parent of this valid fragment is behind our latest finalized number // then we can discard the whole fragment since it is a invalid fork - if (validFragment[0].Header.Number - 1) <= highestFinalized.Number { + if (firstFragmentBlock.Header.Number - 1) <= highestFinalized.Number { continue } logger.Infof("starting an acestor search from %s parent of #%d (%s)", - validFragment[0].Header.ParentHash, - validFragment[0].Header.Number, - validFragment[0].Header.Hash(), + firstFragmentBlock.Header.ParentHash, + firstFragmentBlock.Header.Number, + firstFragmentBlock.Header.Hash(), ) f.unreadyBlocks.newDisjointFragment(validFragment) request := messages.NewBlockRequest( - *messages.NewFromBlock(validFragment[0].Header.ParentHash), + *messages.NewFromBlock(firstFragmentBlock.Header.ParentHash), messages.MaxBlocksInResponse, messages.BootstrapRequestData, messages.Descending) f.requestQueue.PushBack(request) } else { // inserting them in the queue to be processed after the main chain - nextBlocksToImport = append(nextBlocksToImport, validFragment...) 
+ nextBlocksToImport = nextBlocksToImport.Concat(validFragment) } } disjointFragments = nil } - f.unreadyBlocks.removeIrrelevantFragments(highestFinalized.Number) + // update unready blocks based on the highest finalized block + f.unreadyBlocks.pruneDisjointFragments(LowerThanOrEq(highestFinalized.Number)) + f.unreadyBlocks.removeIncompleteBlocks(func(_ common.Hash, value *types.BlockData) bool { + return value.Header.Number <= highestFinalized.Number + }) + return false, repChanges, peersToIgnore, nil } @@ -486,19 +501,24 @@ resultLoop: // note that we have fragments with single blocks, fragments with fork (in case of 8) // after sorting these fragments we end up with: // [ {1, 2, 3, 4, 5} {6, 7, 8, 9, 10} {8} {11, 12, 13, 14, 15, 16} {17} ] -func sortFragmentsOfChain(fragments [][]*types.BlockData) { +func sortFragmentsOfChain(fragments []*Fragment) { if len(fragments) == 0 { return } - slices.SortFunc(fragments, func(a, b []*types.BlockData) int { - if a[0].Header.Number < b[0].Header.Number { - return -1 - } - if a[0].Header.Number == b[0].Header.Number { - return 0 + slices.SortFunc(fragments, func(fragA, fragB *Fragment) int { + if fragA.First() != nil && fragB.First() != nil { + switch { + case fragA.First().Header.Number < fragB.First().Header.Number: + return -1 + case fragA.First().Header.Number == fragB.First().Header.Number: + return 0 + default: + return 1 + } } - return 1 + + return 0 }) } @@ -510,20 +530,21 @@ func sortFragmentsOfChain(fragments [][]*types.BlockData) { // [ {1, 2, 3, 4, 5} {6, 7, 8, 9, 10} {8} {11, 12, 13, 14, 15, 16} {17} ] // merge will transform it to the following slice: // [ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17} {8} ] -func mergeFragmentsOfChain(fragments [][]*types.BlockData) [][]*types.BlockData { +func mergeFragmentsOfChain(fragments []*Fragment) []*Fragment { if len(fragments) == 0 { return nil } - mergedFragments := [][]*types.BlockData{fragments[0]} + mergedFragments := []*Fragment{fragments[0]} + for i := 1; i < len(fragments); i++ { lastMergedFragment := mergedFragments[len(mergedFragments)-1] currentFragment := fragments[i] - lastBlock := lastMergedFragment[len(lastMergedFragment)-1] + lastBlock := lastMergedFragment.Last() - if lastBlock.IsParent(currentFragment[0]) { - mergedFragments[len(mergedFragments)-1] = append(lastMergedFragment, currentFragment...) 
+ if lastBlock.IsParent(currentFragment.First()) { + mergedFragments[len(mergedFragments)-1] = lastMergedFragment.Concat(currentFragment) } else { mergedFragments = append(mergedFragments, currentFragment) } @@ -532,23 +553,6 @@ func mergeFragmentsOfChain(fragments [][]*types.BlockData) [][]*types.BlockData return mergedFragments } -// validBlocksUnderFragment ignore all blocks prior to the given last finalized number -func validBlocksUnderFragment(highestFinalizedNumber uint, fragmentBlocks []*types.BlockData) []*types.BlockData { - startFragmentFrom := -1 - for idx, block := range fragmentBlocks { - if block.Header.Number > highestFinalizedNumber { - startFragmentFrom = idx - break - } - } - - if startFragmentFrom < 0 { - return nil - } - - return fragmentBlocks[startFragmentFrom:] -} - // validateResponseFields checks that the expected fields are in the block data func validateResponseFields(req *messages.BlockRequestMessage, blocks []*types.BlockData) error { for _, bd := range blocks { diff --git a/dot/sync/fullsync_test.go b/dot/sync/fullsync_test.go index 0c9bbd4122..04536f5125 100644 --- a/dot/sync/fullsync_test.go +++ b/dot/sync/fullsync_test.go @@ -254,8 +254,8 @@ func TestFullSyncProcess(t *testing.T) { require.Equal(t, fs.requestQueue.Len(), 1) require.Len(t, fs.unreadyBlocks.incompleteBlocks, 0) require.Len(t, fs.unreadyBlocks.disjointFragments, 1) - require.Equal(t, fs.unreadyBlocks.disjointFragments[0], sndTaskBlockResponse.BlockData) - require.Equal(t, len(fs.unreadyBlocks.disjointFragments[0]), len(sndTaskBlockResponse.BlockData)) + require.Equal(t, fs.unreadyBlocks.disjointFragments[0], NewFragment(sndTaskBlockResponse.BlockData)) + require.Equal(t, fs.unreadyBlocks.disjointFragments[0].Len(), len(sndTaskBlockResponse.BlockData)) expectedAncestorRequest := messages.NewBlockRequest( *messages.NewFromBlock(sndTaskBlockResponse.BlockData[0].Header.ParentHash), diff --git a/dot/sync/unready_blocks.go b/dot/sync/unready_blocks.go index 58f477ff52..d08d69ee16 100644 --- a/dot/sync/unready_blocks.go +++ b/dot/sync/unready_blocks.go @@ -4,6 +4,7 @@ package sync import ( + "iter" "maps" "slices" "sync" @@ -12,16 +13,90 @@ import ( "github.com/ChainSafe/gossamer/lib/common" ) +type Fragment struct { + chain []*types.BlockData +} + +func NewFragment(chain []*types.BlockData) *Fragment { + return &Fragment{chain} +} + +// Filter returns a new fragment with the blocks that satisfy the predicate p +func (f *Fragment) Filter(p func(*types.BlockData) bool) *Fragment { + filtered := make([]*types.BlockData, 0, len(f.chain)) + for _, bd := range f.chain { + if p(bd) { + filtered = append(filtered, bd) + } + } + return NewFragment(filtered) +} + +// Find returns the first occurrence of a types.BlockData that +// satisfies the predicate p +func (f *Fragment) Find(p func(*types.BlockData) bool) *types.BlockData { + for _, bd := range f.chain { + if p(bd) { + return bd + } + } + + return nil +} + +// First returns the first block in the fragment, or nil if the fragment is empty +func (f *Fragment) First() *types.BlockData { + if len(f.chain) > 0 { + return f.chain[0] + } + + return nil +} + +// Last returns the last block in the fragment, or nil if the fragment is empty +func (f *Fragment) Last() *types.BlockData { + if len(f.chain) > 0 { + return f.chain[len(f.chain)-1] + } + + return nil +} + +// Len returns the number of blocks in the fragment +func (f *Fragment) Len() int { + return len(f.chain) +} + +// Iter returns an iterator over the blocks in the fragment, +// enabling the caller to use the range keyword on a Fragment
instance +func (f *Fragment) Iter() iter.Seq[*types.BlockData] { + return func(yield func(*types.BlockData) bool) { + for _, bd := range f.chain { + if !yield(bd) { + return + } + } + } +} + +// Concat returns a new fragment containing the concatenation +// of this fragment and the fragment given as argument +func (f *Fragment) Concat(snd *Fragment) *Fragment { + return &Fragment{ + chain: slices.Concat(f.chain, snd.chain), + } +} + type unreadyBlocks struct { mtx sync.RWMutex incompleteBlocks map[common.Hash]*types.BlockData - disjointFragments [][]*types.BlockData + disjointFragments []*Fragment } func newUnreadyBlocks() *unreadyBlocks { return &unreadyBlocks{ incompleteBlocks: make(map[common.Hash]*types.BlockData), - disjointFragments: make([][]*types.BlockData, 0), + disjointFragments: make([]*Fragment, 0), } } @@ -36,34 +111,32 @@ func (u *unreadyBlocks) newIncompleteBlock(blockHeader *types.Header) { } } -func (u *unreadyBlocks) newDisjointFragment(frag []*types.BlockData) { +func (u *unreadyBlocks) newDisjointFragment(frag *Fragment) { u.mtx.Lock() defer u.mtx.Unlock() u.disjointFragments = append(u.disjointFragments, frag) } // updateDisjointFragments given a set of blocks check if it -// connects to a disjoint fragment, if so we remove the fragment from the -// disjoint set and return the fragment concatenated with the chain argument -func (u *unreadyBlocks) updateDisjointFragments(chain []*types.BlockData) ([]*types.BlockData, bool) { +// connects to a disjoint fragment; if so, removes it from the set and returns the concatenated fragment +func (u *unreadyBlocks) updateDisjointFragments(chain *Fragment) (*Fragment, bool) { u.mtx.Lock() defer u.mtx.Unlock() - indexToChange := -1 for idx, disjointChain := range u.disjointFragments { - lastBlockArriving := chain[len(chain)-1] - firstDisjointBlock := disjointChain[0] + var outFragment *Fragment + if chain.Last().IsParent(disjointChain.First()) { + outFragment = chain.Concat(disjointChain) + } - if lastBlockArriving.IsParent(firstDisjointBlock) { - indexToChange = idx - break + if disjointChain.Last().IsParent(chain.First()) { + outFragment = disjointChain.Concat(chain) } - } - if indexToChange >= 0 { - disjointChain := u.disjointFragments[indexToChange] - u.disjointFragments = append(u.disjointFragments[:indexToChange], u.disjointFragments[indexToChange+1:]...)
- return append(chain, disjointChain...), true + if outFragment != nil { + u.disjointFragments = slices.Delete(u.disjointFragments, idx, idx+1) + return outFragment, true + } } return nil, false @@ -72,12 +145,13 @@ func (u *unreadyBlocks) updateDisjointFragments(chain []*types.BlockData) ([]*ty // updateIncompleteBlocks given a set of blocks check if they can fullfil // incomplete blocks, the blocks that can be completed will be removed from // the incompleteBlocks map and returned -func (u *unreadyBlocks) updateIncompleteBlocks(chain []*types.BlockData) []*types.BlockData { +func (u *unreadyBlocks) updateIncompleteBlocks(chain *Fragment) []*Fragment { u.mtx.Lock() defer u.mtx.Unlock() - completeBlocks := make([]*types.BlockData, 0) - for _, blockData := range chain { + completeBlocks := make([]*Fragment, 0) + + for blockData := range chain.Iter() { incomplete, ok := u.incompleteBlocks[blockData.Hash] if !ok { continue @@ -87,7 +161,7 @@ func (u *unreadyBlocks) updateIncompleteBlocks(chain []*types.BlockData) []*type incomplete.Justification = blockData.Justification delete(u.incompleteBlocks, blockData.Hash) - completeBlocks = append(completeBlocks, incomplete) + completeBlocks = append(completeBlocks, NewFragment([]*types.BlockData{incomplete})) } return completeBlocks @@ -102,26 +176,17 @@ func (u *unreadyBlocks) isIncomplete(blockHash common.Hash) bool { } // inDisjointFragment iterate through the disjoint fragments and -// check if the block hash an number already exists in one of them +// check if the block hash and number already exists in one of them func (u *unreadyBlocks) inDisjointFragment(blockHash common.Hash, blockNumber uint) bool { u.mtx.RLock() defer u.mtx.RUnlock() for _, frag := range u.disjointFragments { - target := &types.BlockData{Header: &types.Header{Number: blockNumber}} - idx, found := slices.BinarySearchFunc(frag, target, - func(a, b *types.BlockData) int { - switch { - case a.Header.Number == b.Header.Number: - return 0 - case a.Header.Number < b.Header.Number: - return -1 - default: - return 1 - } - }) - - if found && frag[idx].Hash == blockHash { + bd := frag.Find(func(bd *types.BlockData) bool { + return bd.Header.Number == blockNumber && bd.Hash == blockHash + }) + + if bd != nil { return true } } @@ -129,35 +194,28 @@ func (u *unreadyBlocks) inDisjointFragment(blockHash common.Hash, blockNumber ui return false } -// removeIrrelevantFragments checks if there is blocks in the fragments that can be pruned -// given the finalised block number -func (u *unreadyBlocks) removeIrrelevantFragments(finalisedNumber uint) { +func (u *unreadyBlocks) removeIncompleteBlocks(del func(key common.Hash, value *types.BlockData) bool) { u.mtx.Lock() defer u.mtx.Unlock() - maps.DeleteFunc(u.incompleteBlocks, func(_ common.Hash, value *types.BlockData) bool { - return value.Header.Number <= finalisedNumber - }) - - fragmentIdx := 0 - for _, fragment := range u.disjointFragments { - // the fragments are sorted in ascending order - // starting from the latest item and going backwards - // we have a higher chance to find the idx that has - // a block with number lower or equal the finalised one - idx := len(fragment) - 1 - for ; idx >= 0; idx-- { - if fragment[idx].Header.Number <= finalisedNumber { - break - } - } + maps.DeleteFunc(u.incompleteBlocks, del) +} - updatedFragment := fragment[idx+1:] - if len(updatedFragment) != 0 { - u.disjointFragments[fragmentIdx] = updatedFragment - fragmentIdx++ - } - } +// pruneFragments will iterate over the disjoint fragments and check if 
they +// can be removed based on the del param +func (u *unreadyBlocks) pruneDisjointFragments(del func(*Fragment) bool) { + u.mtx.Lock() + defer u.mtx.Unlock() - u.disjointFragments = u.disjointFragments[:fragmentIdx] + u.disjointFragments = slices.DeleteFunc(u.disjointFragments, del) +} + +// LowerThanOrEq returns true if the fragment contains +// a block that has a number lower than highest finalized number +func LowerThanOrEq(blockNumber uint) func(*Fragment) bool { + return func(f *Fragment) bool { + return f.Find(func(bd *types.BlockData) bool { + return bd.Header.Number <= blockNumber + }) != nil + } } diff --git a/dot/sync/unready_blocks_test.go b/dot/sync/unready_blocks_test.go index b15019c362..1f313f4d90 100644 --- a/dot/sync/unready_blocks_test.go +++ b/dot/sync/unready_blocks_test.go @@ -13,38 +13,32 @@ import ( func TestUnreadyBlocks_removeIrrelevantFragments(t *testing.T) { t.Run("removing_all_disjoint_fragment", func(t *testing.T) { ub := newUnreadyBlocks() - ub.disjointFragments = [][]*types.BlockData{ - { - { - Header: &types.Header{ - Number: 100, - }, - }, - }, - { + ub.disjointFragments = []*Fragment{ + NewFragment([]*types.BlockData{ { Header: &types.Header{ Number: 99, }, }, - }, - { + }), + NewFragment([]*types.BlockData{ { Header: &types.Header{ Number: 92, }, }, - }, + }), } - ub.removeIrrelevantFragments(100) + + ub.pruneDisjointFragments(LowerThanOrEq(100)) require.Empty(t, ub.disjointFragments) }) t.Run("removing_irrelevant_fragments", func(t *testing.T) { ub := newUnreadyBlocks() - ub.disjointFragments = [][]*types.BlockData{ + ub.disjointFragments = []*Fragment{ // first fragment - { + NewFragment([]*types.BlockData{ { Header: &types.Header{ Number: 192, @@ -62,10 +56,10 @@ func TestUnreadyBlocks_removeIrrelevantFragments(t *testing.T) { Number: 190, }, }, - }, + }), // second fragment - { + NewFragment([]*types.BlockData{ { Header: &types.Header{ Number: 253, @@ -83,10 +77,10 @@ func TestUnreadyBlocks_removeIrrelevantFragments(t *testing.T) { Number: 255, }, }, - }, + }), // third fragment - { + NewFragment([]*types.BlockData{ { Header: &types.Header{ Number: 1022, @@ -104,30 +98,16 @@ func TestUnreadyBlocks_removeIrrelevantFragments(t *testing.T) { Number: 1024, }, }, - }, + }), } // the first fragment should be removed // the second fragment should have only 2 items // the third frament shold not be affected - ub.removeIrrelevantFragments(253) - require.Len(t, ub.disjointFragments, 2) - - expectedSecondFrag := []*types.BlockData{ - { - Header: &types.Header{ - Number: 254, - }, - }, - - { - Header: &types.Header{ - Number: 255, - }, - }, - } + ub.pruneDisjointFragments(LowerThanOrEq(253)) + require.Len(t, ub.disjointFragments, 1) - expectedThirdFragment := []*types.BlockData{ + expectedThirdFragment := NewFragment([]*types.BlockData{ { Header: &types.Header{ Number: 1022, @@ -145,37 +125,37 @@ func TestUnreadyBlocks_removeIrrelevantFragments(t *testing.T) { Number: 1024, }, }, - } - require.Equal(t, ub.disjointFragments[0], expectedSecondFrag) - require.Equal(t, ub.disjointFragments[1], expectedThirdFragment) + }) + + require.Equal(t, ub.disjointFragments[0], expectedThirdFragment) }) t.Run("keep_all_fragments", func(t *testing.T) { ub := newUnreadyBlocks() - ub.disjointFragments = [][]*types.BlockData{ - { + ub.disjointFragments = []*Fragment{ + NewFragment([]*types.BlockData{ { Header: &types.Header{ Number: 101, }, }, - }, - { + }), + NewFragment([]*types.BlockData{ { Header: &types.Header{ Number: 103, }, }, - }, - { + }), + 
NewFragment([]*types.BlockData{ { Header: &types.Header{ Number: 104, }, }, - }, + }), } - ub.removeIrrelevantFragments(100) + ub.pruneDisjointFragments(LowerThanOrEq(100)) require.Len(t, ub.disjointFragments, 3) }) } diff --git a/go.mod b/go.mod index f24bba31f5..884eee8b56 100644 --- a/go.mod +++ b/go.mod @@ -9,10 +9,10 @@ require ( github.com/centrifuge/go-substrate-rpc-client/v4 v4.1.0 github.com/cockroachdb/pebble v1.1.2 github.com/cosmos/go-bip39 v1.0.0 - github.com/dgraph-io/badger/v4 v4.4.0 - github.com/dgraph-io/ristretto v1.0.0 + github.com/dgraph-io/badger/v4 v4.5.0 + github.com/dgraph-io/ristretto/v2 v2.0.0 github.com/disiqueira/gotree v1.0.0 - github.com/ethereum/go-ethereum v1.14.11 + github.com/ethereum/go-ethereum v1.14.12 github.com/fatih/color v1.18.0 github.com/gammazero/deque v1.0.0 github.com/go-playground/validator/v10 v10.23.0 @@ -37,7 +37,7 @@ require ( github.com/qdm12/gotree v0.3.0 github.com/spf13/cobra v1.8.1 github.com/spf13/viper v1.19.0 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/tetratelabs/wazero v1.1.0 github.com/tidwall/btree v1.7.0 github.com/tyler-smith/go-bip39 v1.1.0 @@ -55,7 +55,6 @@ require ( github.com/StackExchange/wmi v1.2.1 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cockroachdb/errors v1.11.3 // indirect github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect @@ -70,7 +69,6 @@ require ( github.com/decred/base58 v1.0.5 // indirect github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect - github.com/dgraph-io/ristretto/v2 v2.0.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/elastic/gosigar v0.14.3 // indirect @@ -88,7 +86,6 @@ require ( github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/flatbuffers v24.3.25+incompatible // indirect github.com/google/gopacket v1.1.19 // indirect @@ -208,7 +205,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/mod v0.21.0 // indirect - golang.org/x/net v0.30.0 // indirect + golang.org/x/net v0.31.0 // indirect golang.org/x/sync v0.9.0 // indirect golang.org/x/sys v0.27.0 // indirect golang.org/x/text v0.20.0 // indirect diff --git a/go.sum b/go.sum index f95bfd4b4f..9e1256a291 100644 --- a/go.sum +++ b/go.sum @@ -37,12 +37,9 @@ github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBT github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= -github.com/btcsuite/btcd v0.24.2 h1:aLmxPguqxza+4ag8R1I2nnJjSu2iFn/kqtHTIImswcY= github.com/btcsuite/btcd v0.24.2/go.mod h1:5C8ChTkl5ejr3WHj8tkQSCmydiMEPB0ZhQhehpq7Dgg= github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod 
h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= -github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= -github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00= @@ -50,7 +47,6 @@ github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/ github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= -github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= @@ -117,10 +113,8 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeC github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= -github.com/dgraph-io/badger/v4 v4.4.0 h1:rA48XiDynZLyMdlaJl67p9+lqfqwxlgKtCpYLAio7Zk= -github.com/dgraph-io/badger/v4 v4.4.0/go.mod h1:sONMmPPfbnj9FPwS/etCqky/ULth6CQJuAZSuWCmixE= -github.com/dgraph-io/ristretto v1.0.0 h1:SYG07bONKMlFDUYu5pEu3DGAh8c2OFNzKm6G9J4Si84= -github.com/dgraph-io/ristretto v1.0.0/go.mod h1:jTi2FiYEhQ1NsMmA7DeBykizjOuY88NhKBkepyu1jPc= +github.com/dgraph-io/badger/v4 v4.5.0 h1:TeJE3I1pIWLBjYhIYCA1+uxrjWEoJXImFBMEBVSm16g= +github.com/dgraph-io/badger/v4 v4.5.0/go.mod h1:ysgYmIeG8dS/E8kwxT7xHyc7MkmwNYLRoYnFbr7387A= github.com/dgraph-io/ristretto/v2 v2.0.0 h1:l0yiSOtlJvc0otkqyMaDNysg8E9/F/TYZwMbxscNOAQ= github.com/dgraph-io/ristretto/v2 v2.0.0/go.mod h1:FVFokF2dRqXyPyeMnK1YDy8Fc6aTe0IKgbcd03CYeEk= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= @@ -140,8 +134,8 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.14.11 h1:8nFDCUUE67rPc6AKxFj7JKaOa2W/W1Rse3oS6LvvxEY= -github.com/ethereum/go-ethereum v1.14.11/go.mod h1:+l/fr42Mma+xBnhefL/+z11/hcmJ2egl+ScIVPjhc7E= +github.com/ethereum/go-ethereum v1.14.12 h1:8hl57x77HSUo+cXExrURjU/w1VhL+ShCTJrTwcCQSe4= +github.com/ethereum/go-ethereum v1.14.12/go.mod h1:RAC2gVMWJ6FkxSPESfbshrcKpIokgQKsVKmAuqdekDY= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod 
h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -211,8 +205,6 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -621,8 +613,9 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= @@ -750,8 +743,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= diff --git a/lib/primitives/math_test.go b/lib/primitives/math_test.go index 52cfb78db2..7332e333dc 100644 --- a/lib/primitives/math_test.go +++ b/lib/primitives/math_test.go @@ -4,9 +4,9 @@ package primitives import ( + "math" "testing" - "github.com/ethereum/go-ethereum/common/math" "github.com/stretchr/testify/require" )
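A note on the ristretto bump in dot/network: the import moves from github.com/dgraph-io/ristretto to the generics-based github.com/dgraph-io/ristretto/v2. Below is a minimal sketch of the v2 shape, assuming ristretto v2's generic Config/NewCache API; the key/value types, sizing numbers and the key string are illustrative, not necessarily what messageCache uses.

```go
package main

import (
	"fmt"

	"github.com/dgraph-io/ristretto/v2"
)

func main() {
	// v2 caches are generic over key and value types, so the interface{}
	// assertions needed with v1 disappear. Sizing values are illustrative.
	cache, err := ristretto.NewCache(&ristretto.Config[string, bool]{
		NumCounters: 1e5, // number of keys to track frequency for
		MaxCost:     1e6, // total cost allowed in the cache
		BufferItems: 64,  // buffer size recommended by the ristretto docs
	})
	if err != nil {
		panic(err)
	}
	defer cache.Close()

	// Set is buffered asynchronously; Wait flushes the buffer so the
	// Get below can observe the entry.
	cache.Set("peerID+messageHash", true, 1)
	cache.Wait()

	seen, ok := cache.Get("peerID+messageHash")
	fmt.Println(seen, ok) // true true
}
```

Judging from the hunks above, only the import path changes in these files, so the cache construction elsewhere presumably already matches the v2 signatures.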
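The dot/sync refactor replaces [][]*types.BlockData with the Fragment type added in unready_blocks.go. The sketch below exercises the helpers FullSyncStrategy.Process now relies on (Filter in place of validBlocksUnderFragment, First/Last, Concat, and the iter.Seq-based Iter); it assumes a Header with only Number set is enough for these helpers and that the package imports as shown.

```go
package main

import (
	"fmt"

	dotsync "github.com/ChainSafe/gossamer/dot/sync"
	"github.com/ChainSafe/gossamer/dot/types"
)

func block(n uint) *types.BlockData {
	return &types.BlockData{Header: &types.Header{Number: n}}
}

func main() {
	frag := dotsync.NewFragment([]*types.BlockData{block(8), block(9), block(10), block(11)})

	// Keep only blocks above an (illustrative) highest finalized number,
	// mirroring the Filter call that replaced validBlocksUnderFragment.
	const highestFinalized = 9
	valid := frag.Filter(func(bd *types.BlockData) bool {
		return bd.Header.Number > highestFinalized
	})

	fmt.Println(valid.Len(), valid.First().Header.Number, valid.Last().Header.Number) // 2 10 11

	// Concat joins fragments; Iter lets callers range over blocks directly,
	// as the import loop in FullSyncStrategy.Process now does.
	longer := valid.Concat(dotsync.NewFragment([]*types.BlockData{block(12)}))
	for bd := range longer.Iter() {
		fmt.Println(bd.Header.Number)
	}
}
```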
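sortFragmentsOfChain and mergeFragmentsOfChain now take []*Fragment. Both are unexported, so an example has to live in package sync itself; the hypothetical test below (the name and helper are made up) shows the sorting contract: fragments end up ordered by the number of their first block. Merging additionally needs real parent-hash links via IsParent, which this sketch does not construct.

```go
package sync

import (
	"testing"

	"github.com/ChainSafe/gossamer/dot/types"
	"github.com/stretchr/testify/require"
)

func TestSortFragmentsOfChain_sketch(t *testing.T) {
	// fragment builds a Fragment whose blocks carry only header numbers.
	fragment := func(numbers ...uint) *Fragment {
		blocks := make([]*types.BlockData, len(numbers))
		for i, n := range numbers {
			blocks[i] = &types.BlockData{Header: &types.Header{Number: n}}
		}
		return NewFragment(blocks)
	}

	// Fragments arrive out of order from different sync tasks.
	fragments := []*Fragment{
		fragment(11, 12, 13),
		fragment(1, 2, 3),
		fragment(6, 7, 8),
	}

	// Sorting orders fragments by the number of their first block.
	sortFragmentsOfChain(fragments)

	require.Equal(t, uint(1), fragments[0].First().Header.Number)
	require.Equal(t, uint(6), fragments[1].First().Header.Number)
	require.Equal(t, uint(11), fragments[2].First().Header.Number)
}
```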
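removeIrrelevantFragments is replaced by predicate-driven cleanup: pruneDisjointFragments(LowerThanOrEq(n)) drops any fragment containing a block at or below n, and removeIncompleteBlocks receives the deletion predicate directly, mirroring the calls at the end of FullSyncStrategy.Process. A hypothetical in-package test sketch of that call shape:

```go
package sync

import (
	"testing"

	"github.com/ChainSafe/gossamer/dot/types"
	"github.com/ChainSafe/gossamer/lib/common"
	"github.com/stretchr/testify/require"
)

func TestPruneByPredicate_sketch(t *testing.T) {
	ub := newUnreadyBlocks()
	ub.disjointFragments = []*Fragment{
		NewFragment([]*types.BlockData{{Header: &types.Header{Number: 90}}}),
		NewFragment([]*types.BlockData{{Header: &types.Header{Number: 120}}}),
	}
	ub.incompleteBlocks[common.Hash{0x01}] = &types.BlockData{
		Header: &types.Header{Number: 95},
	}

	// Mirrors the cleanup at the end of FullSyncStrategy.Process.
	const highestFinalized = 100
	ub.pruneDisjointFragments(LowerThanOrEq(highestFinalized))
	ub.removeIncompleteBlocks(func(_ common.Hash, bd *types.BlockData) bool {
		return bd.Header.Number <= highestFinalized
	})

	// The fragment holding block 90 and the incomplete block 95 are gone;
	// the fragment above the finalized number survives.
	require.Len(t, ub.disjointFragments, 1)
	require.Empty(t, ub.incompleteBlocks)
}
```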
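When a surviving fragment's parent is still unknown, Process queues a descending ancestor-search request. The sketch below mirrors the request construction shown in the fullsync.go hunk; only the hash is a placeholder, since Process derives it from the fragment's first block.

```go
package main

import (
	"fmt"

	"github.com/ChainSafe/gossamer/dot/network/messages"
	"github.com/ChainSafe/gossamer/lib/common"
)

func main() {
	// Placeholder for the unknown parent hash of a disjoint fragment.
	unknownParent := common.Hash{0xaa}

	// Walk backwards from the unknown parent, asking for full block data,
	// so the gap between the fragment and the known chain can be filled.
	request := messages.NewBlockRequest(
		*messages.NewFromBlock(unknownParent),
		messages.MaxBlocksInResponse,
		messages.BootstrapRequestData,
		messages.Descending)

	fmt.Printf("%+v\n", request)
}
```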
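Finally, lib/primitives/math_test.go swaps go-ethereum's common/math for the standard library math package. The diff does not show which identifiers the test needs, but presumably only the integer bounds used by the saturating-arithmetic tests, which the stdlib already provides as untyped constants; a small illustration under that assumption:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// The standard library defines the integer bounds directly, so a test
	// that only asserts on overflow limits does not need go-ethereum's
	// common/math package.
	fmt.Println(uint8(math.MaxUint8))   // 255
	fmt.Println(uint32(math.MaxUint32)) // 4294967295
	fmt.Println(uint64(math.MaxUint64)) // 18446744073709551615
}
```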