Skip to content

Commit

Permalink
Merge branch 'development' into dependabot/go_modules/github.com/libp…
Browse files Browse the repository at this point in the history
…2p/go-libp2p-kad-dht-0.28.1
  • Loading branch information
haikoschol authored Dec 3, 2024
2 parents 24c9858 + 77bb7ab commit b65267d
Show file tree
Hide file tree
Showing 15 changed files with 240 additions and 208 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ jobs:
- name: Generate coverage report
run: |
go test ./... -coverprofile=coverage.out -covermode=atomic -timeout=20m
- uses: codecov/[email protected].2
- uses: codecov/[email protected].7
with:
files: ./coverage.out
flags: unit-tests
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/unit-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ jobs:
- name: Test - Race
run: make test-using-race-detector

- uses: codecov/[email protected].2
- uses: codecov/[email protected].7
with:
if_ci_failed: success
informational: true
Expand Down
6 changes: 3 additions & 3 deletions docs/docs/repo/labels.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ Below is the list of labels and their descriptions used in Gossamer repository.
- **T-research** - this issue/pr is a research type issue.
- **T-investigation** - this issue/pr is an investigation, probably related to some bug with unknown causes.
- **`C-`** Complexity label. We operate only 3 complexity levels.
- **C-simple** - Minor changes changes, no additional research needed. Good first issue/review.
- **C-simple** - Minor changes, no additional research needed. Good first issue/review.
- **C-complex** - Complex changes across multiple modules. Possibly will require additional research.
- **C-chaotic** - The unpredictable nature of this task/changes makes it chaotic.
- **`P-`** Priority level. We only have 3 priority levels, everything else is average by default.
Expand All @@ -35,7 +35,7 @@ Below is the list of labels and their descriptions used in Gossamer repository.
- **S-tests** - issue related to adding new tests.
- **S-doc** - documentation related.
- **S-cli** - issue related to Gossamer CLI.
- **S-ci** - issue related to continuous integration tasks or piplelines.
- **S-ci** - issue related to continuous integration tasks or pipelines.
- **S-crypto** - issues related to the lib/crypto package.
- **S-grandpa** - issues related to block finality.
- **S-babe** - issues related to block production functionality.
Expand All @@ -52,4 +52,4 @@ Below is the list of labels and their descriptions used in Gossamer repository.
- **S-subsystems-availability** - issues related to polkadot host availability subsystem functionality.
- **S-subsystems-disputes** - issues related to polkadot host disputes subsystem functionality.
- **S-infrastructure** - issues related to infrastructure and DevOps.
- **S-dependencies** - issues related to dependencies changes. Used by dependabot.
- **S-dependencies** - issues related to dependencies changes. Used by dependabot.
16 changes: 8 additions & 8 deletions docs/docs/usage/command-line.md
Original file line number Diff line number Diff line change
Expand Up @@ -142,14 +142,14 @@ To run more than two nodes, repeat steps for bob with a new `port` and `base-pat

Available built-in keys:
```
./bin/gossmer --key alice
./bin/gossmer --key bob
./bin/gossmer --key charlie
./bin/gossmer --key dave
./bin/gossmer --key eve
./bin/gossmer --key ferdie
./bin/gossmer --key george
./bin/gossmer --key heather
./bin/gossamer --key alice
./bin/gossamer --key bob
./bin/gossamer --key charlie
./bin/gossamer --key dave
./bin/gossamer --key eve
./bin/gossamer --key ferdie
./bin/gossamer --key george
./bin/gossamer --key heather
```

## Initialising Nodes
Expand Down
2 changes: 1 addition & 1 deletion docs/docs/usage/import-state.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
---
layout: default
title: Import Sate
title: Import State
permalink: /usage/import-state/
---

Expand Down
2 changes: 1 addition & 1 deletion dot/network/host.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ import (
"github.com/ChainSafe/gossamer/dot/network/messages"
"github.com/ChainSafe/gossamer/dot/peerset"
"github.com/ChainSafe/gossamer/internal/pubip"
"github.com/dgraph-io/ristretto"
"github.com/dgraph-io/ristretto/v2"
badger "github.com/ipfs/go-ds-badger4"
"github.com/libp2p/go-libp2p"
libp2phost "github.com/libp2p/go-libp2p/core/host"
Expand Down
2 changes: 1 addition & 1 deletion dot/network/message_cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ import (
"time"

"github.com/ChainSafe/gossamer/lib/common"
"github.com/dgraph-io/ristretto"
"github.com/dgraph-io/ristretto/v2"
"github.com/libp2p/go-libp2p/core/peer"
)

Expand Down
2 changes: 1 addition & 1 deletion dot/network/message_cache_integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ import (

"github.com/ChainSafe/gossamer/dot/types"
"github.com/ChainSafe/gossamer/lib/common"
"github.com/dgraph-io/ristretto"
"github.com/dgraph-io/ristretto/v2"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/stretchr/testify/require"
)
Expand Down
114 changes: 59 additions & 55 deletions dot/sync/fullsync.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ import (
"github.com/ChainSafe/gossamer/dot/peerset"
"github.com/ChainSafe/gossamer/dot/types"
"github.com/ChainSafe/gossamer/internal/database"
"github.com/ChainSafe/gossamer/lib/common"

"github.com/libp2p/go-libp2p/core/peer"
)
Expand Down Expand Up @@ -157,55 +158,60 @@ func (f *FullSyncStrategy) Process(results []*SyncTaskResult) (
return false, nil, nil, fmt.Errorf("getting highest finalized header")
}

readyBlocks := make([][]*types.BlockData, 0, len(validResp))
readyBlocks := make([]*Fragment, 0, len(validResp))
for _, reqRespData := range validResp {
responseFragment := NewFragment(reqRespData.responseData)

// if Gossamer requested the header, then the response data should contain
// the full blocks to be imported. If Gossamer didn't request the header,
// then the response should only contain the missing parts that will complete
// the unreadyBlocks and then with the blocks completed we should be able to import them
if reqRespData.req.RequestField(messages.RequestedDataHeader) {
updatedFragment, ok := f.unreadyBlocks.updateDisjointFragments(reqRespData.responseData)
updatedFragment, ok := f.unreadyBlocks.updateDisjointFragments(responseFragment)
if ok {
validBlocks := validBlocksUnderFragment(highestFinalized.Number, updatedFragment)
if len(validBlocks) > 0 {
readyBlocks = append(readyBlocks, validBlocks)
validFragment := updatedFragment.Filter(func(bd *types.BlockData) bool {
return bd.Header.Number > highestFinalized.Number
})

if validFragment.Len() > 0 {
readyBlocks = append(readyBlocks, validFragment)
}
} else {
readyBlocks = append(readyBlocks, reqRespData.responseData)
readyBlocks = append(readyBlocks, responseFragment)
}

continue
}

completedBlocks := f.unreadyBlocks.updateIncompleteBlocks(reqRespData.responseData)
readyBlocks = append(readyBlocks, completedBlocks)
completedBlocks := f.unreadyBlocks.updateIncompleteBlocks(responseFragment)
readyBlocks = append(readyBlocks, completedBlocks...)
}

// disjoint fragments are pieces of the chain that could not be imported right now
// because the blocks are too far ahead or belong to forks
sortFragmentsOfChain(readyBlocks)
orderedFragments := mergeFragmentsOfChain(readyBlocks)

nextBlocksToImport := make([]*types.BlockData, 0)
disjointFragments := make([][]*types.BlockData, 0)
nextBlocksToImport := new(Fragment)
disjointFragments := make([]*Fragment, 0)

for _, fragment := range orderedFragments {
ok, err := f.blockState.HasHeader(fragment[0].Header.ParentHash)
ok, err := f.blockState.HasHeader(fragment.First().Header.ParentHash)
if err != nil && !errors.Is(err, database.ErrNotFound) {
return false, nil, nil, fmt.Errorf("checking block parent header: %w", err)
}

if ok {
nextBlocksToImport = append(nextBlocksToImport, fragment...)
nextBlocksToImport = nextBlocksToImport.Concat(fragment)
continue
}

disjointFragments = append(disjointFragments, fragment)
}

// this loop goal is to import ready blocks as well as update the highestFinalized header
for len(nextBlocksToImport) > 0 || len(disjointFragments) > 0 {
for _, blockToImport := range nextBlocksToImport {
for nextBlocksToImport.Len() > 0 || len(disjointFragments) > 0 {
for blockToImport := range nextBlocksToImport.Iter() {
imported, err := f.blockImporter.importBlock(blockToImport, networkInitialSync)
if err != nil {
return false, nil, nil, fmt.Errorf("while handling ready block: %w", err)
Expand All @@ -216,7 +222,7 @@ func (f *FullSyncStrategy) Process(results []*SyncTaskResult) (
}
}

nextBlocksToImport = make([]*types.BlockData, 0)
nextBlocksToImport = new(Fragment)
highestFinalized, err = f.blockState.GetHighestFinalisedHeader()
if err != nil {
return false, nil, nil, fmt.Errorf("getting highest finalized header")
Expand All @@ -226,45 +232,54 @@ func (f *FullSyncStrategy) Process(results []*SyncTaskResult) (
// given that fragment contains chains and these chains contains blocks
// check if the first block in the chain contains a parent known by us
for _, fragment := range disjointFragments {
validFragment := validBlocksUnderFragment(highestFinalized.Number, fragment)
if len(validFragment) == 0 {
validFragment := fragment.Filter(func(bd *types.BlockData) bool {
return bd.Header.Number > highestFinalized.Number
})

if validFragment.Len() == 0 {
continue
}

ok, err := f.blockState.HasHeader(validFragment[0].Header.ParentHash)
ok, err := f.blockState.HasHeader(validFragment.First().Header.ParentHash)
if err != nil && !errors.Is(err, database.ErrNotFound) {
return false, nil, nil, err
}

if !ok {
firstFragmentBlock := validFragment.First()
// if the parent of this valid fragment is behind our latest finalized number
// then we can discard the whole fragment since it is a invalid fork
if (validFragment[0].Header.Number - 1) <= highestFinalized.Number {
if (firstFragmentBlock.Header.Number - 1) <= highestFinalized.Number {
continue
}

logger.Infof("starting an ancestor search from %s parent of #%d (%s)",
validFragment[0].Header.ParentHash,
validFragment[0].Header.Number,
validFragment[0].Header.Hash(),
firstFragmentBlock.Header.ParentHash,
firstFragmentBlock.Header.Number,
firstFragmentBlock.Header.Hash(),
)

f.unreadyBlocks.newDisjointFragment(validFragment)
request := messages.NewBlockRequest(
*messages.NewFromBlock(validFragment[0].Header.ParentHash),
*messages.NewFromBlock(firstFragmentBlock.Header.ParentHash),
messages.MaxBlocksInResponse,
messages.BootstrapRequestData, messages.Descending)
f.requestQueue.PushBack(request)
} else {
// inserting them in the queue to be processed after the main chain
nextBlocksToImport = append(nextBlocksToImport, validFragment...)
nextBlocksToImport = nextBlocksToImport.Concat(validFragment)
}
}

disjointFragments = nil
}

f.unreadyBlocks.removeIrrelevantFragments(highestFinalized.Number)
// update unready blocks based on the highest finalized block
f.unreadyBlocks.pruneDisjointFragments(LowerThanOrEq(highestFinalized.Number))
f.unreadyBlocks.removeIncompleteBlocks(func(_ common.Hash, value *types.BlockData) bool {
return value.Header.Number <= highestFinalized.Number
})

return false, repChanges, peersToIgnore, nil
}

Expand Down Expand Up @@ -486,19 +501,24 @@ resultLoop:
// note that we have fragments with single blocks, fragments with fork (in case of 8)
// after sorting these fragments we end up with:
// [ {1, 2, 3, 4, 5} {6, 7, 8, 9, 10} {8} {11, 12, 13, 14, 15, 16} {17} ]
func sortFragmentsOfChain(fragments [][]*types.BlockData) {
func sortFragmentsOfChain(fragments []*Fragment) {
if len(fragments) == 0 {
return
}

slices.SortFunc(fragments, func(a, b []*types.BlockData) int {
if a[0].Header.Number < b[0].Header.Number {
return -1
}
if a[0].Header.Number == b[0].Header.Number {
return 0
slices.SortFunc(fragments, func(fragA, fragB *Fragment) int {
if fragA.First() != nil && fragB.First() != nil {
switch {
case fragA.First().Header.Number < fragB.First().Header.Number:
return -1
case fragA.First().Header.Number == fragB.First().Header.Number:
return 0
default:
return 1
}
}
return 1

return 0
})
}

Expand All @@ -510,20 +530,21 @@ func sortFragmentsOfChain(fragments [][]*types.BlockData) {
// [ {1, 2, 3, 4, 5} {6, 7, 8, 9, 10} {8} {11, 12, 13, 14, 15, 16} {17} ]
// merge will transform it to the following slice:
// [ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17} {8} ]
func mergeFragmentsOfChain(fragments [][]*types.BlockData) [][]*types.BlockData {
func mergeFragmentsOfChain(fragments []*Fragment) []*Fragment {
if len(fragments) == 0 {
return nil
}

mergedFragments := [][]*types.BlockData{fragments[0]}
mergedFragments := []*Fragment{fragments[0]}

for i := 1; i < len(fragments); i++ {
lastMergedFragment := mergedFragments[len(mergedFragments)-1]
currentFragment := fragments[i]

lastBlock := lastMergedFragment[len(lastMergedFragment)-1]
lastBlock := lastMergedFragment.Last()

if lastBlock.IsParent(currentFragment[0]) {
mergedFragments[len(mergedFragments)-1] = append(lastMergedFragment, currentFragment...)
if lastBlock.IsParent(currentFragment.First()) {
mergedFragments[len(mergedFragments)-1] = lastMergedFragment.Concat(currentFragment)
} else {
mergedFragments = append(mergedFragments, currentFragment)
}
Expand All @@ -532,23 +553,6 @@ func mergeFragmentsOfChain(fragments [][]*types.BlockData) [][]*types.BlockData
return mergedFragments
}

// validBlocksUnderFragment ignore all blocks prior to the given last finalized number
func validBlocksUnderFragment(highestFinalizedNumber uint, fragmentBlocks []*types.BlockData) []*types.BlockData {
startFragmentFrom := -1
for idx, block := range fragmentBlocks {
if block.Header.Number > highestFinalizedNumber {
startFragmentFrom = idx
break
}
}

if startFragmentFrom < 0 {
return nil
}

return fragmentBlocks[startFragmentFrom:]
}

// validateResponseFields checks that the expected fields are in the block data
func validateResponseFields(req *messages.BlockRequestMessage, blocks []*types.BlockData) error {
for _, bd := range blocks {
Expand Down
4 changes: 2 additions & 2 deletions dot/sync/fullsync_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -254,8 +254,8 @@ func TestFullSyncProcess(t *testing.T) {
require.Equal(t, fs.requestQueue.Len(), 1)
require.Len(t, fs.unreadyBlocks.incompleteBlocks, 0)
require.Len(t, fs.unreadyBlocks.disjointFragments, 1)
require.Equal(t, fs.unreadyBlocks.disjointFragments[0], sndTaskBlockResponse.BlockData)
require.Equal(t, len(fs.unreadyBlocks.disjointFragments[0]), len(sndTaskBlockResponse.BlockData))
require.Equal(t, fs.unreadyBlocks.disjointFragments[0], NewFragment(sndTaskBlockResponse.BlockData))
require.Equal(t, fs.unreadyBlocks.disjointFragments[0].Len(), len(sndTaskBlockResponse.BlockData))

expectedAncestorRequest := messages.NewBlockRequest(
*messages.NewFromBlock(sndTaskBlockResponse.BlockData[0].Header.ParentHash),
Expand Down
Loading

0 comments on commit b65267d

Please sign in to comment.