From 65230de10cc84f455d080c198747bc703ca1e200 Mon Sep 17 00:00:00 2001
From: Jakub Sztandera
Date: Wed, 4 Dec 2024 01:27:48 +0100
Subject: [PATCH] fix(sync): do not allow to expand checkpointed tipsets
 (#12747)

* fix(sync): do not allow to expand checkpointed tipsets

Resolves a bug in sync by preventing checkpoint expansion.

Signed-off-by: Jakub Sztandera

* Simplify checkpoint affecting checks, add a pre-check

Signed-off-by: Jakub Sztandera

* Test checkpoint which restricts tipset size

Signed-off-by: Jakub Sztandera

* fixup wording

Signed-off-by: Jakub Sztandera

* Improve comment

Signed-off-by: Jakub Sztandera

---------

Signed-off-by: Jakub Sztandera
---
 CHANGELOG.md       |   1 +
 chain/sync.go      |  26 ++++++++++++
 chain/sync_test.go | 100 +++++++++++++++++++++++++++++++++++++++++++--
 3 files changed, 124 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d6a9048d1eb..634a4057a9f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,7 @@
 # UNRELEASED

 - Add Market PendingProposals API / CLI. ([filecoin-project/lotus#12724](https://github.com/filecoin-project/lotus/pull/12724))
+- Fix checkpointed tipsets being expanded. ([filecoin-project/lotus#12747](https://github.com/filecoin-project/lotus/pull/12747))

 ## Improvements

diff --git a/chain/sync.go b/chain/sync.go
index bfc2d01214d..0529928e6aa 100644
--- a/chain/sync.go
+++ b/chain/sync.go
@@ -520,6 +520,7 @@ func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {

 	hts := syncer.store.GetHeaviestTipSet()

+	// noop pre-checks
 	if hts.ParentWeight().GreaterThan(maybeHead.ParentWeight()) {
 		return nil
 	}
@@ -527,6 +528,24 @@ func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {
 		return nil
 	}

+	if maybeHead.Height() == hts.Height() {
+		// check if maybeHead is fully contained in headTipSet,
+		// meaning we already synced all the blocks that are a part of maybeHead.
+		// if that is the case, there is nothing for us to do;
+		// we need to exit out early, otherwise checkpoint-fork logic might wrongly reject it
+		fullyContained := true
+		for _, c := range maybeHead.Cids() {
+			if !hts.Contains(c) {
+				fullyContained = false
+				break
+			}
+		}
+		if fullyContained {
+			return nil
+		}
+	}
+	// end of noop pre-checks
+
 	if err := syncer.collectChain(ctx, maybeHead, hts, false); err != nil {
 		span.AddAttributes(trace.StringAttribute("col_error", err.Error()))
 		span.SetStatus(trace.Status{
@@ -845,6 +864,13 @@ loop:
 		if err != nil {
 			return nil, xerrors.Errorf("failed to load next local tipset: %w", err)
 		}
+
+		if !ignoreCheckpoint {
+			if chkpt := syncer.store.GetCheckpoint(); chkpt != nil && base.Height() <= chkpt.Height() {
+				return nil, xerrors.Errorf("merge point affecting the checkpoint: %w", ErrForkCheckpoint)
+			}
+		}
+
 		if base.IsChildOf(knownParent) {
 			// common case: receiving a block that's potentially part of the same tipset as our best block
 			return blockSet, nil
diff --git a/chain/sync_test.go b/chain/sync_test.go
index 4cff5767ed5..1b72bf5a9c9 100644
--- a/chain/sync_test.go
+++ b/chain/sync_test.go
@@ -110,7 +110,7 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil {
 	//tu.checkHeight("source", source, h)

 	// separate logs
-	fmt.Println("\x1b[31m///////////////////////////////////////////////////\x1b[39b")
+	fmt.Println("///////////////////////////////////////////////////")

 	return tu
 }
@@ -175,7 +175,7 @@ func prepSyncTestWithV5Height(t testing.TB, h int, v5height abi.ChainEpoch) *syn
 	//tu.checkHeight("source", source, h)

 	// separate logs
-	fmt.Println("\x1b[31m///////////////////////////////////////////////////\x1b[39b")
+	fmt.Println("///////////////////////////////////////////////////")

 	return tu
 }
@@ -208,6 +208,7 @@ func (tu *syncTestUtil) pushFtsAndWait(to int, fts *store.FullTipSet, wait bool)
 		require.NoError(tu.t, err)

 		if time.Since(start) > time.Second*10 {
+			tu.t.Helper()
 			tu.t.Fatal("took too long waiting for block to be accepted")
 		}
 	}
@@ -219,7 +220,6 @@ func (tu *syncTestUtil) pushTsExpectErr(to int, fts *store.FullTipSet, experr bo
 	for _, fb := range fts.Blocks {
 		var b types.BlockMsg

-		// -1 to match block.Height
 		b.Header = fb.Header
 		for _, msg := range fb.SecpkMessages {
 			c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(ctx, msg)
@@ -1026,6 +1026,100 @@ func TestSyncCheckpointHead(t *testing.T) {
 	require.True(tu.t, p1Head.Equals(b.TipSet()))
 }

+func TestSyncCheckpointPartial(t *testing.T) {
+	H := 10
+	tu := prepSyncTest(t, H)
+
+	p1 := tu.addClientNode()
+	p2 := tu.addClientNode()
+
+	fmt.Println("GENESIS: ", tu.g.Genesis().Cid())
+	tu.loadChainToNode(p1)
+	tu.loadChainToNode(p2)
+
+	base := tu.g.CurTipset
+	fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())
+
+	last := base
+	a := base
+	for {
+		a = tu.mineOnBlock(last, p1, []int{0, 1}, true, false, nil, 0, true)
+		if len(a.Blocks) == 2 {
+			// enforce tipset of two blocks
+			break
+		}
+		tu.pushTsExpectErr(p2, a, false) // push these to p2 as well
+		last = a
+	}
+	var aPartial *store.FullTipSet
+	var aPartial2 *store.FullTipSet
+	for _, b := range a.Blocks {
+		if b.Header.Miner == tu.g.Miners[1] {
+			// need to have miner two's block in the partial tipset,
+			// as otherwise it will be a parent grinding fault
+			aPartial = store.NewFullTipSet([]*types.FullBlock{b})
+		} else {
+			aPartial2 = store.NewFullTipSet([]*types.FullBlock{b})
+		}
+	}
+	tu.waitUntilSyncTarget(p1, a.TipSet())
+
+	tu.pushFtsAndWait(p2, a, true)
+	tu.checkpointTs(p2, aPartial.TipSet().Key())
+	t.Logf("p1 head: %v, p2 head: %v, a: %v", tu.getHead(p1), tu.getHead(p2), a.TipSet())
+	tu.pushTsExpectErr(p2, aPartial2, true)
+
+	b := tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true)
+	tu.pushTsExpectErr(p2, b, true)
+
+	require.NoError(t, tu.g.ResyncBankerNonce(b.TipSet())) // don't ask me why it has to be TS b
+	c := tu.mineOnBlock(aPartial, p2, []int{1}, true, false, nil, 0, true)
+
+	require.NoError(t, tu.mn.LinkAll())
+	tu.connect(p1, p2)
+
+	tu.pushFtsAndWait(p2, c, true)
+	tu.waitUntilNodeHasTs(p1, c.TipSet().Key())
+	tu.checkpointTs(p1, c.TipSet().Key())
+}
+
+func TestSyncCheckpointSubmitOneOfTheBlocks(t *testing.T) {
+	H := 10
+	tu := prepSyncTest(t, H)
+
+	p1 := tu.addClientNode()
+	p2 := tu.addClientNode()
+
+	fmt.Println("GENESIS: ", tu.g.Genesis().Cid())
+	tu.loadChainToNode(p1)
+	tu.loadChainToNode(p2)
+
+	base := tu.g.CurTipset
+	fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())
+
+	last := base
+	a := base
+	for {
+		a = tu.mineOnBlock(last, p1, []int{0, 1}, true, false, nil, 0, true)
+		if len(a.Blocks) == 2 {
+			// enforce tipset of two blocks
+			break
+		}
+		last = a
+	}
+	aPartial := store.NewFullTipSet([]*types.FullBlock{a.Blocks[0]})
+	tu.waitUntilSyncTarget(p1, a.TipSet())
+
+	tu.checkpointTs(p1, a.TipSet().Key())
+	t.Logf("p1 head: %v, p2 head: %v, a: %v", tu.getHead(p1), tu.getHead(p2), a.TipSet())
+	tu.pushTsExpectErr(p1, aPartial, false)
+
+	tu.mineOnBlock(a, p1, []int{0, 1}, true, false, nil, 0, true)
+	tu.pushTsExpectErr(p1, aPartial, false) // check that pushing an older partial tipset doesn't error
+}
+
 func TestSyncCheckpointEarlierThanHead(t *testing.T) {
 	//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
 	//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
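As an aside, here is a minimal, self-contained Go sketch of the two guards this patch introduces: the "noop pre-check" that treats an already-synced tipset as a no-op, and the checkpoint guard that rejects a merge point at or below the checkpoint height. blockID, TipSet, errForkCheckpoint and both helper functions below are simplified stand-ins invented for illustration; the real implementation lives in chain/sync.go and uses types.TipSet, cid.Cid and ErrForkCheckpoint.

package main

import (
	"errors"
	"fmt"
)

// blockID and TipSet are simplified stand-ins for cid.Cid and types.TipSet;
// the real Lotus types carry full block headers, weights and CIDs.
type blockID string

type TipSet struct {
	height int
	blocks []blockID
}

func (ts TipSet) Height() int { return ts.height }

// Contains reports whether the tipset includes the given block.
func (ts TipSet) Contains(b blockID) bool {
	for _, c := range ts.blocks {
		if c == b {
			return true
		}
	}
	return false
}

// errForkCheckpoint stands in for the syncer's ErrForkCheckpoint sentinel.
var errForkCheckpoint = errors.New("fork would revert a checkpointed tipset")

// fullyContained mirrors the "noop pre-check": an incoming tipset at the same
// height whose blocks are all already part of our head has nothing new to
// sync, so it must be treated as a no-op before any checkpoint-fork logic runs.
func fullyContained(head, incoming TipSet) bool {
	if head.Height() != incoming.Height() {
		return false
	}
	for _, c := range incoming.blocks {
		if !head.Contains(c) {
			return false
		}
	}
	return true
}

// checkMergeAgainstCheckpoint mirrors the second guard above: a merge point at
// or below the checkpoint height would rewrite a checkpointed tipset and is
// rejected with the fork-checkpoint error.
func checkMergeAgainstCheckpoint(checkpoint *TipSet, mergeBase TipSet) error {
	if checkpoint != nil && mergeBase.Height() <= checkpoint.Height() {
		return fmt.Errorf("merge point affecting the checkpoint: %w", errForkCheckpoint)
	}
	return nil
}

func main() {
	head := TipSet{height: 10, blocks: []blockID{"blkA", "blkB"}}
	subset := TipSet{height: 10, blocks: []blockID{"blkA"}}

	fmt.Println("fully contained:", fullyContained(head, subset)) // true: nothing to sync

	chkpt := head
	err := checkMergeAgainstCheckpoint(&chkpt, TipSet{height: 9})
	fmt.Println("merge below checkpoint:", err) // rejected via errForkCheckpoint
}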