Skip to content

Commit

Permalink
Merge branch 'development' into eclesio/block-origin
Browse files Browse the repository at this point in the history
  • Loading branch information
EclesioMeloJunior committed Nov 29, 2023
2 parents 7950d87 + a9c1f8f commit cfff017
Show file tree
Hide file tree
Showing 11 changed files with 241 additions and 164 deletions.
48 changes: 37 additions & 11 deletions chain/kusama/genesis.json
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,14 @@
"id": "ksmcc3",
"chainType": "Live",
"bootNodes": [
"/dns/p2p.cc3-0.kusama.network/tcp/30100/p2p/12D3KooWDgtynm4S9M3m6ZZhXYu2RrWKdvkCSScc25xKDVSg1Sjd",
"/dns/p2p.cc3-1.kusama.network/tcp/30100/p2p/12D3KooWNpGriWPmf621Lza9UWU9eLLBdCFaErf6d4HSK7Bcqnv4",
"/dns/p2p.cc3-2.kusama.network/tcp/30100/p2p/12D3KooWLmLiB4AenmN2g2mHbhNXbUcNiGi99sAkSk1kAQedp8uE",
"/dns/p2p.cc3-3.kusama.network/tcp/30100/p2p/12D3KooWEGHw84b4hfvXEfyq4XWEmWCbRGuHMHQMpby4BAtZ4xJf",
"/dns/p2p.cc3-4.kusama.network/tcp/30100/p2p/12D3KooWF9KDPRMN8WpeyXhEeURZGP8Dmo7go1tDqi7hTYpxV9uW",
"/dns/p2p.cc3-5.kusama.network/tcp/30100/p2p/12D3KooWDiwMeqzvgWNreS9sV1HW3pZv1PA7QGA7HUCo7FzN5gcA",
"/dns/kusama-connect-0.parity.io/tcp/443/wss/p2p/12D3KooWBjxpFhVNM9poSsMEfdnXJaSWSZQ7otK9aV1SPA9zJp5W",
"/dns/kusama-connect-1.parity.io/tcp/443/wss/p2p/12D3KooWAJRVca93jLm4zft4rtTLLxNV4ZrHPMBkbGy5XkXooBFt",
"/dns/kusama-connect-2.parity.io/tcp/443/wss/p2p/12D3KooWLn22TSPR3HXMRSSmWoK4pkDtspdCVi5j86QyyUNViDeL",
"/dns/kusama-connect-3.parity.io/tcp/443/wss/p2p/12D3KooWSwnJSP3QJ6cnFCTpcXq4EEFotVEiQuCWVprzCnWj5e4G",
"/dns/kusama-connect-4.parity.io/tcp/443/wss/p2p/12D3KooWHi7zHUev7n1zs9kSQwh4KMPJcS8Jky2JN58cNabcXGvK",
"/dns/kusama-connect-5.parity.io/tcp/443/wss/p2p/12D3KooWMBF6DXADrNLg6kNt1A1zmKzw478gJw79NmTQhSDxuZvR",
"/dns/kusama-connect-6.parity.io/tcp/443/wss/p2p/12D3KooWNnG7YqYB9eEoACRuSEax8qhuPQzRn878AWKN4vUUtQXd",
"/dns/kusama-connect-7.parity.io/tcp/443/wss/p2p/12D3KooWMmtoLnkVCGyuCpsWw4zoNtWPH4nsVLn92mutvjQknEqR",
"/dns/p2p.0.kusama.network/tcp/30333/p2p/12D3KooWJDohybWd7FvRmyeGjgi56yy36mRWLHmgRprFdUadUt6b",
"/dns/p2p.1.kusama.network/tcp/30333/p2p/12D3KooWC7dnTvDY97afoLrvQSBrh7dDFEkWniTwyxAsBjfpaZk6",
"/dns/p2p.2.kusama.network/tcp/30333/p2p/12D3KooWGGK6Mj1pWF1bk4R1HjBQ4E7bgkfSJ5gmEfVRuwRZapT5",
Expand All @@ -17,7 +19,31 @@
"/dns/p2p.5.kusama.network/tcp/30333/p2p/12D3KooWBsJKGJFuv83ixryzMsUS53A8JzEVeTA8PGi4U6T2dnif",
"/dns/kusama-bootnode-0.paritytech.net/tcp/30333/p2p/12D3KooWSueCPH3puP2PcvqPJdNaDNF3jMZjtJtDiSy35pWrbt5h",
"/dns/kusama-bootnode-0.paritytech.net/tcp/30334/ws/p2p/12D3KooWSueCPH3puP2PcvqPJdNaDNF3jMZjtJtDiSy35pWrbt5h",
"/dns/kusama-bootnode-1.paritytech.net/tcp/30333/p2p/12D3KooWQKqane1SqWJNWMQkbia9qiMWXkcHtAdfW5eVF8hbwEDw"
"/dns/kusama-bootnode-1.paritytech.net/tcp/30333/p2p/12D3KooWQKqane1SqWJNWMQkbia9qiMWXkcHtAdfW5eVF8hbwEDw",
"/dns/kusama-boot.dwellir.com/tcp/30333/ws/p2p/12D3KooWFj2ndawdYyk2spc42Y2arYwb2TUoHLHFAsKuHRzWXwoJ",
"/dns/kusama-boot.dwellir.com/tcp/443/wss/p2p/12D3KooWFj2ndawdYyk2spc42Y2arYwb2TUoHLHFAsKuHRzWXwoJ",
"/dns/boot.stake.plus/tcp/31333/p2p/12D3KooWLa1UyG5xLPds2GbiRBCTJjpsVwRWHWN7Dff14yiNJRpR",
"/dns/boot.stake.plus/tcp/31334/wss/p2p/12D3KooWLa1UyG5xLPds2GbiRBCTJjpsVwRWHWN7Dff14yiNJRpR",
"/dns/boot-node.helikon.io/tcp/7060/p2p/12D3KooWL4KPqfAsPE2aY1g5Zo1CxsDwcdJ7mmAghK7cg6M2fdbD",
"/dns/boot-node.helikon.io/tcp/7062/wss/p2p/12D3KooWL4KPqfAsPE2aY1g5Zo1CxsDwcdJ7mmAghK7cg6M2fdbD",
"/dns/kusama.bootnode.amforc.com/tcp/30333/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9",
"/dns/kusama.bootnode.amforc.com/tcp/30334/wss/p2p/12D3KooWLx6nsj6Fpd8biP1VDyuCUjazvRiGWyBam8PsqRJkbUb9",
"/dns/kusama-bootnode.polkadotters.com/tcp/30333/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG",
"/dns/kusama-bootnode.polkadotters.com/tcp/30334/wss/p2p/12D3KooWHB5rTeNkQdXNJ9ynvGz8Lpnmsctt7Tvp7mrYv6bcwbPG",
"/dns/boot-cr.gatotech.network/tcp/33200/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD",
"/dns/boot-cr.gatotech.network/tcp/35200/wss/p2p/12D3KooWRNZXf99BfzQDE1C8YhuBbuy7Sj18UEf7FNpD8egbURYD",
"/dns/boot-kusama.metaspan.io/tcp/23012/p2p/12D3KooWE1tq9ZL9AAxMiUBBqy1ENmh5pwfWabnoBPMo8gFPXhn6",
"/dns/boot-kusama.metaspan.io/tcp/23015/ws/p2p/12D3KooWE1tq9ZL9AAxMiUBBqy1ENmh5pwfWabnoBPMo8gFPXhn6",
"/dns/boot-kusama.metaspan.io/tcp/23016/wss/p2p/12D3KooWE1tq9ZL9AAxMiUBBqy1ENmh5pwfWabnoBPMo8gFPXhn6",
"/dns/kusama-bootnode.turboflakes.io/tcp/30305/p2p/12D3KooWR6cMhCYRhbJdqYZfzWZT6bcck3unpRLk8GBQGmHBgPwu",
"/dns/kusama-bootnode.turboflakes.io/tcp/30405/wss/p2p/12D3KooWR6cMhCYRhbJdqYZfzWZT6bcck3unpRLk8GBQGmHBgPwu",
"/dns/kusama-boot-ng.dwellir.com/tcp/443/wss/p2p/12D3KooWLswepVYVdCNduvWRTyNTaDMXEBcmvJdZ9Bhw3u2Jhad2",
"/dns/kusama-boot-ng.dwellir.com/tcp/30334/p2p/12D3KooWLswepVYVdCNduvWRTyNTaDMXEBcmvJdZ9Bhw3u2Jhad2",
"/dns/kusama-bootnode.radiumblock.com/tcp/30335/wss/p2p/12D3KooWGzKffWe7JSXeKMQeSQC5xfBafZtgBDCuBVxmwe2TJRuc",
"/dns/kusama-bootnode.radiumblock.com/tcp/30333/p2p/12D3KooWGzKffWe7JSXeKMQeSQC5xfBafZtgBDCuBVxmwe2TJRuc",
"/dns/ksm-bootnode.stakeworld.io/tcp/30300/p2p/12D3KooWFRin7WWVS6RgUsSpkfUHSv4tfGKnr2zJPmf1pbMv118H",
"/dns/ksm-bootnode.stakeworld.io/tcp/30301/ws/p2p/12D3KooWFRin7WWVS6RgUsSpkfUHSv4tfGKnr2zJPmf1pbMv118H",
"/dns/ksm-bootnode.stakeworld.io/tcp/30302/wss/p2p/12D3KooWFRin7WWVS6RgUsSpkfUHSv4tfGKnr2zJPmf1pbMv118H"
],
"telemetryEndpoints": [
[
Expand All @@ -34,12 +60,12 @@
"consensusEngine": null,
"forkBlocks": null,
"badBlocks": [
"0x15b1b925b0aa5cfe43c88cd024f74258cb5cfe3af424882c901014e8acd0d241",
"0x2563260209012232649ab9dc003f62e274c684037de499a23062f8e0e816c605"
"0x15b1b925b0aa5cfe43c88cd024f74258cb5cfe3af424882c901014e8acd0d241",
"0x2563260209012232649ab9dc003f62e274c684037de499a23062f8e0e816c605"
],
"genesis": {
"raw": {
"top": {
"top": {
"0x9c5d795d0297be56027a4b2464e333979c5d795d0297be56027a4b2464e33397974a8f6e094002e424b603628718939b060c4c6305a73d36a014468c29b8b7d7": "0x00c0e1d0612100000000000000000000",
"0x9c5d795d0297be56027a4b2464e333979c5d795d0297be56027a4b2464e33397997f7003f78328f30c57e6ce10b1956c77d2187fe08441845cc0c18273852039": "0x00703874580800000000000000000000",
"0xc2261276cc9d1f8598ea4b6a74b15c2f6482b9ade7bc6657aaca787ba1add3b41a7b36634518c4bd258451d3afca781ef41c43e2cc13767ade6d58216bb4b54e": "0x0000c52ebca2b1000000000000000000",
Expand Down Expand Up @@ -3460,7 +3486,7 @@
"0x9c5d795d0297be56027a4b2464e333979c5d795d0297be56027a4b2464e33397a95fc10899f9939fe9f376f90b678d436e6cbd6cfbad752e1d16e1bd207970fd": "0x00aa8af681571e000000000000000000",
"0x9c5d795d0297be56027a4b2464e333979c5d795d0297be56027a4b2464e33397be0013089e49d5187af79a902767da35cf39652bf8ee558f7780d185a484dd3c": "0x007202ee615f09000000000000000000"
},
"childrenDefault":{}
"childrenDefault": {}
}
}
}
2 changes: 1 addition & 1 deletion dot/interfaces.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ type ServiceRegisterer interface {

// BlockJustificationVerifier has a verification method for block justifications.
type BlockJustificationVerifier interface {
VerifyBlockJustification(common.Hash, []byte) error
VerifyBlockJustification(common.Hash, []byte) (round uint64, setID uint64, err error)
}

// Telemetry is the telemetry client to send telemetry messages.
Expand Down
2 changes: 1 addition & 1 deletion dot/network/message.go
Original file line number Diff line number Diff line change
Expand Up @@ -388,7 +388,7 @@ func NewAscendingBlockRequests(startNumber, targetNumber uint, requestedData byt
numRequests := diff / MaxBlocksInResponse
// we should check if the diff is in the maxResponseSize bounds
// otherwise we should increase the numRequests by one, take this
// example, we want to sync from 0 to 259, the diff is 259
// example, we want to sync from 1 to 259, the diff is 259
// then the num of requests is 2 (uint(259)/uint(128)) however two requests will
// retrieve only 256 blocks (each request can retrieve a max of 128 blocks), so we should
// create one more request to retrieve those missing blocks, 3 in this example.
Expand Down
114 changes: 38 additions & 76 deletions dot/sync/chain_sync.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ import (
"github.com/ChainSafe/gossamer/dot/telemetry"
"github.com/ChainSafe/gossamer/dot/types"
"github.com/ChainSafe/gossamer/internal/database"
"github.com/ChainSafe/gossamer/lib/blocktree"
"github.com/ChainSafe/gossamer/lib/common"
"github.com/ChainSafe/gossamer/lib/common/variadic"
"github.com/ChainSafe/gossamer/lib/trie"
Expand Down Expand Up @@ -818,94 +817,51 @@ func (cs *chainSync) handleReadyBlock(bd *types.BlockData, origin blockOrigin) e
// returns the index of the last BlockData it handled on success,
// or the index of the block data that errored on failure.
func (cs *chainSync) processBlockData(blockData types.BlockData, origin blockOrigin) error {
headerInState, err := cs.blockState.HasHeader(blockData.Hash)
if err != nil {
return fmt.Errorf("checking if block state has header: %w", err)
}

bodyInState, err := cs.blockState.HasBlockBody(blockData.Hash)
if err != nil {
return fmt.Errorf("checking if block state has body: %w", err)
}

// while in bootstrap mode we don't need to broadcast block announcements
announceImportedBlock := cs.getSyncMode() == tip
if headerInState && bodyInState {
err = cs.processBlockDataWithStateHeaderAndBody(blockData, announceImportedBlock)
if err != nil {
return fmt.Errorf("processing block data with header and "+
"body in block state: %w", err)
}
return nil
var blockDataJustification []byte
if blockData.Justification != nil {
blockDataJustification = *blockData.Justification
}

if blockData.Header != nil {
round, setID, err := cs.verifyJustification(blockData.Header.Hash(), blockDataJustification)
if err != nil {
return err
}

if blockData.Body != nil {
err = cs.processBlockDataWithHeaderAndBody(blockData, origin, announceImportedBlock)
if err != nil {
return fmt.Errorf("processing block data with header and body: %w", err)
}
}

if blockData.Justification != nil && len(*blockData.Justification) > 0 {
logger.Infof("handling justification for block %s (#%d)", blockData.Hash.Short(), blockData.Number())
err = cs.handleJustification(blockData.Header, *blockData.Justification)
if err != nil {
return fmt.Errorf("handling justification: %w", err)
}
err = cs.finalizeAndSetJustification(
blockData.Header,
round, setID,
blockDataJustification)
if err != nil {
return fmt.Errorf("while setting justification: %w", err)
}
}

err = cs.blockState.CompareAndSetBlockData(&blockData)
err := cs.blockState.CompareAndSetBlockData(&blockData)
if err != nil {
return fmt.Errorf("comparing and setting block data: %w", err)
}

return nil
}

func (cs *chainSync) processBlockDataWithStateHeaderAndBody(blockData types.BlockData,
announceImportedBlock bool) (err error) {
// TODO: fix this; sometimes when the node shuts down the "best block" isn't stored properly,
// so when the node restarts it has blocks higher than what it thinks is the best, causing it not to sync
// if we update the node to only store finalised blocks in the database, this should be fixed and the entire
// code block can be removed (#1784)
block, err := cs.blockState.GetBlockByHash(blockData.Hash)
if err != nil {
return fmt.Errorf("getting block by hash: %w", err)
}

err = cs.blockState.AddBlockToBlockTree(block)
if errors.Is(err, blocktree.ErrBlockExists) {
logger.Debugf(
"block number %d with hash %s already exists in block tree, skipping it.",
block.Header.Number, blockData.Hash)
return nil
} else if err != nil {
return fmt.Errorf("adding block to blocktree: %w", err)
func (cs *chainSync) verifyJustification(headerHash common.Hash, justification []byte) (
round uint64, setID uint64, err error) {
if len(justification) > 0 {
round, setID, err = cs.finalityGadget.VerifyBlockJustification(headerHash, justification)
return round, setID, err
}

if blockData.Justification != nil && len(*blockData.Justification) > 0 {
err = cs.handleJustification(&block.Header, *blockData.Justification)
if err != nil {
return fmt.Errorf("handling justification: %w", err)
}
}

// TODO: this is probably unnecessary, since the state is already in the database
// however, this case shouldn't be hit often, since it's only hit if the node state
// is rewinded or if the node shuts down unexpectedly (#1784)
state, err := cs.storageState.TrieState(&block.Header.StateRoot)
if err != nil {
return fmt.Errorf("loading trie state: %w", err)
}

err = cs.blockImportHandler.HandleBlockImport(block, state, announceImportedBlock)
if err != nil {
return fmt.Errorf("handling block import: %w", err)
}

return nil
return 0, 0, nil
}

func (cs *chainSync) processBlockDataWithHeaderAndBody(blockData types.BlockData,
Expand Down Expand Up @@ -944,21 +900,27 @@ func (cs *chainSync) handleBody(body *types.Body) {
blockSizeGauge.Set(float64(acc))
}

func (cs *chainSync) handleJustification(header *types.Header, justification []byte) (err error) {
logger.Debugf("handling justification for block %d...", header.Number)
func (cs *chainSync) finalizeAndSetJustification(header *types.Header,
round, setID uint64, justification []byte) (err error) {
if len(justification) > 0 {
err = cs.blockState.SetFinalisedHash(header.Hash(), round, setID)
if err != nil {
return fmt.Errorf("setting finalised hash: %w", err)
}

headerHash := header.Hash()
err = cs.finalityGadget.VerifyBlockJustification(headerHash, justification)
if err != nil {
return fmt.Errorf("verifying block number %d justification: %w", header.Number, err)
}
logger.Debugf(
"finalised block with hash #%d (%s), round %d and set id %d",
header.Number, header.Hash(), round, setID)

err = cs.blockState.SetJustification(headerHash, justification)
if err != nil {
return fmt.Errorf("setting justification for block number %d: %w", header.Number, err)
err = cs.blockState.SetJustification(header.Hash(), justification)
if err != nil {
return fmt.Errorf("setting justification for block number %d: %w",
header.Number, err)
}

logger.Infof("🔨 finalised block number #%d (%s)", header.Number, header.Hash())
}

logger.Infof("🔨 finalised block number %d with hash %s", header.Number, headerHash)
return nil
}

Expand Down
92 changes: 90 additions & 2 deletions dot/sync/chain_sync_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1288,8 +1288,6 @@ func ensureSuccessfulBlockImportFlow(t *testing.T, parentHeader *types.Header,
t.Helper()

for idx, blockData := range blocksReceived {
mockBlockState.EXPECT().HasHeader(blockData.Header.Hash()).Return(false, nil)
mockBlockState.EXPECT().HasBlockBody(blockData.Header.Hash()).Return(false, nil)
if origin != networkInitialSync {
mockBabeVerifier.EXPECT().VerifyBlock(blockData.Header).Return(nil)
}
Expand Down Expand Up @@ -1678,3 +1676,93 @@ func TestChainSync_getHighestBlock(t *testing.T) {
})
}
}

// TestChainSync_BootstrapSync_SuccessfulSync_WithInvalidJusticationBlock checks that
// when a block carries a justification that fails verification, the bootstrap sync
// surfaces the verification error and the offending peer remains tracked by the
// worker pool (see the final assertion).
func TestChainSync_BootstrapSync_SuccessfulSync_WithInvalidJusticationBlock(t *testing.T) {
	t.Parallel()

	ctrl := gomock.NewController(t)
	mockBlockState := NewMockBlockState(ctrl)
	mockBlockState.EXPECT().GetFinalisedNotifierChannel().Return(make(chan *types.FinalisationInfo))
	mockedGenesisHeader := types.NewHeader(common.NewHash([]byte{0}), trie.EmptyHash,
		trie.EmptyHash, 0, types.NewDigest())

	mockNetwork := NewMockNetwork(ctrl)
	mockRequestMaker := NewMockRequestMaker(ctrl)

	mockBabeVerifier := NewMockBabeVerifier(ctrl)
	mockStorageState := NewMockStorageState(ctrl)
	mockImportHandler := NewMockBlockImportHandler(ctrl)
	mockTelemetry := NewMockTelemetry(ctrl)
	mockFinalityGadget := NewMockFinalityGadget(ctrl)

	// a single worker responds with blocks 1..128; block at index 90 carries an
	// invalid justification, so only blocks before it are expected to import
	blockResponse := createSuccesfullBlockResponse(t, mockedGenesisHeader.Hash(), 1, 129)
	const announceBlock = false

	invalidJustificationBlock := blockResponse.BlockData[90]
	invalidJustification := &[]byte{0x01, 0x01, 0x01, 0x02}
	invalidJustificationBlock.Justification = invalidJustification

	worker1Response := &network.BlockResponseMessage{
		BlockData: blockResponse.BlockData[:128],
	}

	// import expectations only cover blocks before the invalid-justification block,
	// since processing stops when justification verification fails
	ensureSuccessfulBlockImportFlow(t, mockedGenesisHeader, worker1Response.BlockData[:90], mockBlockState,
		mockBabeVerifier, mockStorageState, mockImportHandler, mockTelemetry, networkInitialSync, announceBlock)

	errVerifyBlockJustification := errors.New("VerifyBlockJustification mock error")
	mockFinalityGadget.EXPECT().
		VerifyBlockJustification(
			invalidJustificationBlock.Header.Hash(),
			*invalidJustification).
		Return(uint64(0), uint64(0), errVerifyBlockJustification)

	// any peer that picks up the request receives the same single response
	mockRequestMaker.EXPECT().
		Do(gomock.Any(), gomock.Any(), &network.BlockResponseMessage{}).
		DoAndReturn(func(_, _, response any) any {
			responsePtr := response.(*network.BlockResponseMessage)
			*responsePtr = *worker1Response
			return nil
		})

	// start with the genesis block as our best block and peers announcing
	// block 128 as their best, so we are far enough behind to trigger a
	// bootstrap sync request for those blocks
	const blocksAhead = 128
	cs := setupChainSyncToBootstrapMode(t, blocksAhead,
		mockBlockState, mockNetwork, mockRequestMaker, mockBabeVerifier,
		mockStorageState, mockImportHandler, mockTelemetry)

	cs.finalityGadget = mockFinalityGadget

	target, err := cs.getTarget()
	require.NoError(t, err)
	require.Equal(t, uint(blocksAhead), target)

	// include a new worker in the worker pool set; this worker
	// should be an available peer that will receive the block request
	cs.workerPool.fromBlockAnnounce(peer.ID("alice"))

	err = cs.requestMaxBlocksFrom(mockedGenesisHeader, networkInitialSync)
	require.ErrorIs(t, err, errVerifyBlockJustification)

	err = cs.workerPool.stop()
	require.NoError(t, err)

	// the peer stays in the worker pool even after the justification
	// verification failure — only "alice" was ever added
	require.Len(t, cs.workerPool.workers, 1)
}
4 changes: 2 additions & 2 deletions dot/sync/interfaces.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ type BlockState interface {
BestBlockHeader() (*types.Header, error)
BestBlockNumber() (number uint, err error)
CompareAndSetBlockData(bd *types.BlockData) error
HasBlockBody(hash common.Hash) (bool, error)
GetBlockBody(common.Hash) (*types.Body, error)
GetHeader(common.Hash) (*types.Header, error)
HasHeader(hash common.Hash) (bool, error)
Expand All @@ -40,6 +39,7 @@ type BlockState interface {
GetHeaderByNumber(num uint) (*types.Header, error)
GetAllBlocksAtNumber(num uint) ([]common.Hash, error)
IsDescendantOf(parent, child common.Hash) (bool, error)
SetFinalisedHash(common.Hash, uint64, uint64) error
}

// StorageState is the interface for the storage state
Expand All @@ -60,7 +60,7 @@ type BabeVerifier interface {

// FinalityGadget implements justification verification functionality
type FinalityGadget interface {
VerifyBlockJustification(common.Hash, []byte) error
VerifyBlockJustification(common.Hash, []byte) (round uint64, setID uint64, err error)
}

// BlockImportHandler is the interface for the handler of newly imported blocks
Expand Down
Loading

0 comments on commit cfff017

Please sign in to comment.