fix(sync): do not allow expanding checkpointed tipsets (#12747)
* fix(sync): do not allow expanding checkpointed tipsets

Resolves a sync bug by preventing a checkpointed tipset from being expanded with additional sibling blocks.

Signed-off-by: Jakub Sztandera <[email protected]>

* Simplify checkpoint-affecting checks, add a pre-check

Signed-off-by: Jakub Sztandera <[email protected]>

* Test a checkpoint which restricts tipset size

Signed-off-by: Jakub Sztandera <[email protected]>

* fixup wording

Signed-off-by: Jakub Sztandera <[email protected]>

* Improve comment

Signed-off-by: Jakub Sztandera <[email protected]>

---------

Signed-off-by: Jakub Sztandera <[email protected]>
Kubuxu authored Dec 4, 2024
1 parent cf423e1 commit 65230de
Showing 3 changed files with 124 additions and 3 deletions.
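At a high level, the commit adds two guards to the sync path: a set of no-op pre-checks in `Syncer.Sync` (including a containment check so that a head we have already fully synced is accepted as a no-op instead of being wrongly rejected by the checkpoint-fork logic), and a rule in the header-collection loop that refuses to merge sibling blocks into a tipset at or below the checkpoint height. The following is a minimal, self-contained sketch of that logic; the `TipSet` type and both helper functions are illustrative stand-ins, not the lotus `types.TipSet` API:

```go
package main

import "fmt"

// Illustrative stand-in for a tipset: a height plus a set of block CIDs.
// The real code works with *types.TipSet and cid.Cid.
type TipSet struct {
	Height int
	Blocks map[string]bool
}

// fullyContained reports whether every block of incoming is already part of
// head; in that case syncing to incoming is a no-op and, crucially, must not
// be rejected by the checkpoint-fork logic.
func fullyContained(head, incoming TipSet) bool {
	for c := range incoming.Blocks {
		if !head.Blocks[c] {
			return false
		}
	}
	return true
}

// allowMerge mirrors the new rule in the header-collection loop: sibling
// blocks may never be merged into a tipset at or below the checkpoint height,
// because that would expand the checkpointed tipset.
func allowMerge(base TipSet, checkpoint *TipSet) bool {
	return checkpoint == nil || base.Height > checkpoint.Height
}

func main() {
	chk := TipSet{Height: 10, Blocks: map[string]bool{"blkA": true}}
	full := TipSet{Height: 10, Blocks: map[string]bool{"blkA": true, "blkB": true}}

	fmt.Println(fullyContained(chk, full)) // false: blkB would expand the checkpoint
	fmt.Println(fullyContained(full, chk)) // true: a subset push is a harmless no-op
	fmt.Println(allowMerge(chk, &chk))     // false: merging at checkpoint height is refused
}
```

In the real diff both guards live in chain/sync.go: the containment pre-check in `Sync`, and the merge-height check in the header-collection loop, gated by `ignoreCheckpoint`.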
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -3,6 +3,7 @@
# UNRELEASED

- Add Market PendingProposals API / CLI. ([filecoin-project/lotus#12724](https://github.com/filecoin-project/lotus/pull/12724))
- Fix checkpointed tipsets being expanded. ([filecoin-project/lotus#12747](https://github.com/filecoin-project/lotus/pull/12747))

## Improvements

26 changes: 26 additions & 0 deletions chain/sync.go
@@ -520,13 +520,32 @@ func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error {

hts := syncer.store.GetHeaviestTipSet()

// no-op pre-checks
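// the incoming head's parent weight is lower than our current head's,
// so it can never become the new head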
if hts.ParentWeight().GreaterThan(maybeHead.ParentWeight()) {
return nil
}
if syncer.Genesis.Equals(maybeHead) || hts.Equals(maybeHead) {
return nil
}

if maybeHead.Height() == hts.Height() {
// check if maybeHead is fully contained in hts,
// meaning we have already synced all the blocks that are part of maybeHead;
// if that is the case there is nothing for us to do,
// but we need to exit early here, otherwise the checkpoint-fork logic might wrongly reject it
fullyContained := true
for _, c := range maybeHead.Cids() {
if !hts.Contains(c) {
fullyContained = false
break
}
}
if fullyContained {
return nil
}
}
// end of no-op pre-checks

if err := syncer.collectChain(ctx, maybeHead, hts, false); err != nil {
span.AddAttributes(trace.StringAttribute("col_error", err.Error()))
span.SetStatus(trace.Status{
@@ -845,6 +864,13 @@ loop:
if err != nil {
return nil, xerrors.Errorf("failed to load next local tipset: %w", err)
}

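// refuse to merge sibling blocks into a tipset at or below the checkpoint
// height: doing so would expand the checkpointed tipset into a superset,
// effectively forking around the checkpoint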
if !ignoreCheckpoint {
if chkpt := syncer.store.GetCheckpoint(); chkpt != nil && base.Height() <= chkpt.Height() {
return nil, xerrors.Errorf("merge point affecting the checkpoint: %w", ErrForkCheckpoint)
}
}

if base.IsChildOf(knownParent) {
// common case: receiving a block that's potentially part of the same tipset as our best block
return blockSet, nil
100 changes: 97 additions & 3 deletions chain/sync_test.go
@@ -110,7 +110,7 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil {
//tu.checkHeight("source", source, h)

// separate logs
fmt.Println("\x1b[31m///////////////////////////////////////////////////\x1b[39b")
fmt.Println("///////////////////////////////////////////////////")

return tu
}
@@ -175,7 +175,7 @@ func prepSyncTestWithV5Height(t testing.TB, h int, v5height abi.ChainEpoch) *syncTestUtil {
//tu.checkHeight("source", source, h)

// separate logs
fmt.Println("\x1b[31m///////////////////////////////////////////////////\x1b[39b")
fmt.Println("///////////////////////////////////////////////////")
return tu
}

@@ -208,6 +208,7 @@ func (tu *syncTestUtil) pushFtsAndWait(to int, fts *store.FullTipSet, wait bool)
require.NoError(tu.t, err)

if time.Since(start) > time.Second*10 {
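// t.Helper() makes the Fatal below report the caller's line, not this helper's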
tu.t.Helper()
tu.t.Fatal("took too long waiting for block to be accepted")
}
}
@@ -219,7 +220,6 @@ func (tu *syncTestUtil) pushTsExpectErr(to int, fts *store.FullTipSet, experr bool) {
for _, fb := range fts.Blocks {
var b types.BlockMsg

- // -1 to match block.Height
b.Header = fb.Header
for _, msg := range fb.SecpkMessages {
c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(ctx, msg)
@@ -1026,6 +1026,100 @@ func TestSyncCheckpointHead(t *testing.T) {
require.True(tu.t, p1Head.Equals(b.TipSet()))
}

func TestSyncCheckpointPartial(t *testing.T) {
H := 10
tu := prepSyncTest(t, H)

p1 := tu.addClientNode()
p2 := tu.addClientNode()

fmt.Println("GENESIS: ", tu.g.Genesis().Cid())
tu.loadChainToNode(p1)
tu.loadChainToNode(p2)

base := tu.g.CurTipset
fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())

last := base
a := base
for {
a = tu.mineOnBlock(last, p1, []int{0, 1}, true, false, nil, 0, true)
if len(a.Blocks) == 2 {
// enforce a tipset of two blocks
break
}
tu.pushTsExpectErr(p2, a, false) // push these to p2 as well
last = a
}
var aPartial *store.FullTipSet
var aPartial2 *store.FullTipSet
for _, b := range a.Blocks {
if b.Header.Miner == tu.g.Miners[1] {
// we need miner two's block in the partial tipset,
// as otherwise it would be a parent-grinding fault
aPartial = store.NewFullTipSet([]*types.FullBlock{b})
} else {
aPartial2 = store.NewFullTipSet([]*types.FullBlock{b})
}
}
tu.waitUntilSyncTarget(p1, a.TipSet())

tu.pushFtsAndWait(p2, a, true)
tu.checkpointTs(p2, aPartial.TipSet().Key())
t.Logf("p1 head: %v, p2 head: %v, a: %v", tu.getHead(p1), tu.getHead(p2), a.TipSet())
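// pushing the sibling block must fail: accepting it would merge it into
// (and thereby expand) the checkpointed partial tipset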
tu.pushTsExpectErr(p2, aPartial2, true)

b := tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true)
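// b builds on the full two-block tipset a, which conflicts with the
// checkpointed single-block tipset, so p2 must reject it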
tu.pushTsExpectErr(p2, b, true)

require.NoError(t, tu.g.ResyncBankerNonce(b.TipSet())) // don't ask me why it has to be TS b
c := tu.mineOnBlock(aPartial, p2, []int{1}, true, false, nil, 0, true)

require.NoError(t, tu.mn.LinkAll())
tu.connect(p1, p2)

tu.pushFtsAndWait(p2, c, true)
tu.waitUntilNodeHasTs(p1, c.TipSet().Key())
tu.checkpointTs(p1, c.TipSet().Key())
}

func TestSyncCheckpointSubmitOneOfTheBlocks(t *testing.T) {
H := 10
tu := prepSyncTest(t, H)

p1 := tu.addClientNode()
p2 := tu.addClientNode()

fmt.Println("GENESIS: ", tu.g.Genesis().Cid())
tu.loadChainToNode(p1)
tu.loadChainToNode(p2)

base := tu.g.CurTipset
fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height())

last := base
a := base
for {
a = tu.mineOnBlock(last, p1, []int{0, 1}, true, false, nil, 0, true)
if len(a.Blocks) == 2 {
// enforce a tipset of two blocks
break
}
last = a
}
aPartial := store.NewFullTipSet([]*types.FullBlock{a.Blocks[0]})
tu.waitUntilSyncTarget(p1, a.TipSet())

tu.checkpointTs(p1, a.TipSet().Key())
t.Logf("p1 head: %v, p2 head: %v, a: %v", tu.getHead(p1), tu.getHead(p2), a.TipSet())
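// aPartial is fully contained in p1's checkpointed head, so the new
// no-op pre-check accepts it without error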
tu.pushTsExpectErr(p1, aPartial, false)

tu.mineOnBlock(a, p1, []int{0, 1}, true, false, nil, 0, true)
tu.pushTsExpectErr(p1, aPartial, false) // check that pushing an older partial tipset doesn't error
}

func TestSyncCheckpointEarlierThanHead(t *testing.T) {
//stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001
//stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001
