diff --git a/itests/kit/node_unmanaged.go b/itests/kit/node_unmanaged.go
index afaf3e9037d..23dcca37a3a 100644
--- a/itests/kit/node_unmanaged.go
+++ b/itests/kit/node_unmanaged.go
@@ -597,6 +597,10 @@ func (tm *TestUnmanagedMiner) mkStagedFileWithPieces(pt abi.RegisteredSealProof)
 	return publicPieces, unsealedSectorFile.Name(), nil
 }
 
+// waitForMutableDeadline will wait until we are not in the proving deadline for the given
+// sector, or the deadline after the proving deadline.
+// For safety, to avoid possible races with the window post loop, we will also avoid the
+// deadline before the proving deadline.
 func (tm *TestUnmanagedMiner) waitForMutableDeadline(sectorNum abi.SectorNumber) {
 	req := require.New(tm.t)
 
@@ -609,20 +613,19 @@ func (tm *TestUnmanagedMiner) waitForMutableDeadline(sectorNum abi.SectorNumber)
 	dlinfo, err := tm.FullNode.StateMinerProvingDeadline(tm.ctx, tm.ActorAddr, ts.Key())
 	req.NoError(err)
 
-	sectorDeadlineOpen := sl.Deadline == dlinfo.Index
-	sectorDeadlineNext := (dlinfo.Index+1)%dlinfo.WPoStPeriodDeadlines == sl.Deadline
-	immutable := sectorDeadlineOpen || sectorDeadlineNext
-
-	// Sleep for immutable epochs
-	if immutable {
-		dlineEpochsRemaining := dlinfo.NextOpen() - ts.Height()
-		var targetEpoch abi.ChainEpoch
-		if sectorDeadlineOpen {
-			// sleep for remainder of deadline
-			targetEpoch = ts.Height() + dlineEpochsRemaining
-		} else {
-			// sleep for remainder of deadline and next one
-			targetEpoch = ts.Height() + dlineEpochsRemaining + dlinfo.WPoStChallengeWindow
+	sectorDeadlineCurrent := sl.Deadline == dlinfo.Index // we are in the proving deadline
+	sectorDeadlineNext := (dlinfo.Index+1)%dlinfo.WPoStPeriodDeadlines == sl.Deadline // we are in the deadline after the proving deadline
+	sectorDeadlinePrev := (dlinfo.Index-1+dlinfo.WPoStPeriodDeadlines)%dlinfo.WPoStPeriodDeadlines == sl.Deadline // we are in the deadline before the proving deadline
+
+	if sectorDeadlineCurrent || sectorDeadlineNext || sectorDeadlinePrev {
+		// We are in a sensitive or immutable deadline, so we need to wait
+		targetEpoch := dlinfo.NextOpen() // end of current deadline
+		if sectorDeadlineCurrent {
+			// we are in the proving deadline, wait until the end of the next one
+			targetEpoch += dlinfo.WPoStChallengeWindow
+		} else if sectorDeadlinePrev {
+			// we are in the deadline before the proving deadline, wait an additional window
+			targetEpoch += dlinfo.WPoStChallengeWindow * 2
 		}
 		_, err := tm.FullNode.WaitTillChainOrError(tm.ctx, HeightAtLeast(targetEpoch+5))
 		req.NoError(err)
@@ -978,7 +981,6 @@ func (tm *TestUnmanagedMiner) submitWindowPost(sectorNumbers []abi.SectorNumber)
 	// sectors by partition and submit the PoSts in batches of PoStedPartitionsMax.
 	partitionMap := make(map[uint64][]sectorInfo)
-	challengeEpoch := abi.ChainEpoch(-1) // should be the same for all
 
 	head, err := tm.FullNode.ChainHead(tm.ctx)
 	if err != nil {
@@ -989,6 +991,7 @@ func (tm *TestUnmanagedMiner) submitWindowPost(sectorNumbers []abi.SectorNumber)
 	if err != nil {
 		return fmt.Errorf("Miner(%s): failed to get proving deadline: %w", tm.ActorAddr, err)
 	}
+	chainRandomnessEpoch := di.Challenge
 
 	for _, sectorNumber := range sectorNumbers {
 		sector, err := tm.getCommittedSector(sectorNumber)
@@ -1002,13 +1005,7 @@ func (tm *TestUnmanagedMiner) submitWindowPost(sectorNumbers []abi.SectorNumber)
 		}
 
 		if di.Index != sp.Deadline {
-			return fmt.Errorf("Miner(%s): sector %d is not in the deadline %d, but %d", tm.ActorAddr, sectorNumber, sp.Deadline, di.Index)
-		}
-
-		if challengeEpoch == abi.ChainEpoch(-1) {
-			challengeEpoch = di.Challenge
-		} else if di.Index != sp.Deadline {
-			return fmt.Errorf("Miner(%s): sector %d is in deadline %d, but expected %d", tm.ActorAddr, sectorNumber, sp.Deadline, di.Index)
+			return fmt.Errorf("Miner(%s): sector %d is not in the expected deadline %d, but %d", tm.ActorAddr, sectorNumber, sp.Deadline, di.Index)
 		}
 
 		if _, ok := partitionMap[sp.Partition]; !ok {
@@ -1017,7 +1014,6 @@ func (tm *TestUnmanagedMiner) submitWindowPost(sectorNumbers []abi.SectorNumber)
 		partitionMap[sp.Partition] = append(partitionMap[sp.Partition], sector)
 	}
 
-	chainRandomnessEpoch := challengeEpoch
 	chainRandomness, err := tm.FullNode.StateGetRandomnessFromTickets(tm.ctx, crypto.DomainSeparationTag_PoStChainCommit,
 		chainRandomnessEpoch, nil, head.Key())
 	if err != nil {
diff --git a/itests/niporep_manual_test.go b/itests/niporep_manual_test.go
index 385b8d58e39..a2fa949082a 100644
--- a/itests/niporep_manual_test.go
+++ b/itests/niporep_manual_test.go
@@ -94,7 +94,7 @@ func TestManualNISectorOnboarding(t *testing.T) {
 			},
 		},
 		{
-			name:       "mock proofs, miner with 65 sectors",
+			name:       "mock proofs, 1 miner with 65 sectors",
 			mockProofs: true,
 			miners: []testCaseMiner{
 				{
@@ -186,6 +186,19 @@
 				},
 			},
 		},
+		{
+			name:       "real proofs, 1 miner with 65 sectors",
+			mockProofs: false,
+			skip:       true, // uncomment if you want to run this test manually
+			miners: []testCaseMiner{
+				{
+					sectorsToOnboard: mkgood(65),
+					allOrNothing:     true,
+					expectPower:      uint64(defaultSectorSize * 65),
+					snapDeal:         true,
+				},
+			},
+		},
 	}
 
 	for _, tc := range testCases {
@@ -342,21 +355,45 @@
 				req.Equal(expectedEntries, recentEvents[0].Entries)
 			}
 		}
-
-		// Verify that ProveCommitSectorsNI rejects messages with invalid parameters
-		verifyProveCommitSectorsNIErrorConditions(ctx, t, miners[0], sealProofType)
 		})
 	}
 }
 
-// This should be run after we've successfully onboarded a sector so we are properly enrolled in
-// cron and the deadline tests work properly.
-func verifyProveCommitSectorsNIErrorConditions(ctx context.Context, t *testing.T, miner *kit.TestUnmanagedMiner, sealProofType abi.RegisteredSealProof) {
+func TestNISectorFailureCases(t *testing.T) {
 	req := require.New(t)
 
-	head, err := miner.FullNode.ChainHead(ctx)
+	const blocktime = 2 * time.Millisecond
+	const defaultSectorSize = abi.SectorSize(2 << 10) // 2KiB
+	sealProofType, err := miner.SealProofTypeFromSectorSize(defaultSectorSize, network.Version23, miner.SealProofVariant_NonInteractive)
 	req.NoError(err)
 
+	kit.QuietMiningLogs()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	var (
+		client       kit.TestFullNode
+		genesisMiner kit.TestMiner
+	)
+	ens := kit.NewEnsemble(t, kit.MockProofs(true)).
+		FullNode(&client, kit.SectorSize(defaultSectorSize)).
+		Miner(&genesisMiner, &client, kit.PresealSectors(5), kit.SectorSize(defaultSectorSize), kit.WithAllSubsystems()).
+		Start().
+		InterconnectAll()
+	_ = ens.BeginMining(blocktime)
+
+	miner, _ := ens.UnmanagedMiner(ctx, &client, kit.SectorSize(defaultSectorSize), kit.OwnerAddr(client.DefaultKey))
+	defer miner.Stop()
+
+	ens.Start()
+
+	build.Clock.Sleep(time.Second)
+
+	// We have to onboard a sector first to get the miner enrolled in cron; although we don't need to wait for it to prove
+	_ = miner.OnboardSectors(sealProofType, false, 1)
+
+	// Utility functions and variables for our failure cases
+	actorIdNum, err := address.IDFromAddress(miner.ActorAddr)
 	req.NoError(err)
 	actorId := abi.ActorID(actorIdNum)
@@ -370,6 +407,11 @@
 		provingDeadline = miner14.WPoStPeriodDeadlines - 1
 	}
 
+	head, err := miner.FullNode.ChainHead(ctx)
+	req.NoError(err)
+
+	sectorNumber := abi.SectorNumber(5000)
+
 	submitAndFail := func(params *miner14.ProveCommitSectorsNIParams, errMsg string, errCode int) {
 		t.Helper()
 		r, err := miner.SubmitMessage(params, 1, builtin.MethodsMiner.ProveCommitSectorsNI)
@@ -380,15 +422,13 @@
 		}
 		req.Nil(r)
 	}
-
-	sn := abi.SectorNumber(5000)
 	mkSai := func() miner14.SectorNIActivationInfo {
-		sn++
+		sectorNumber++ // unique per sector
 		return miner14.SectorNIActivationInfo{
-			SealingNumber: sn,
+			SealingNumber: sectorNumber,
 			SealerID:      actorId,
 			SealedCID:     cid.MustParse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkiekz"),
-			SectorNumber:  sn,
+			SectorNumber:  sectorNumber,
 			SealRandEpoch: head.Height() - 10,
 			Expiration:    2880 * 300,
 		}
 	}
@@ -404,68 +444,78 @@
 		}
 	}
-
-	// Test message rejection on no sectors
-	params := mkParams()
-	params.Sectors = []miner14.SectorNIActivationInfo{}
-	submitAndFail(&params, "too few sectors", 16)
+	// Failure cases
 
-	// Test message rejection on too many sectors
-	sectorInfos := make([]miner14.SectorNIActivationInfo, 66)
-	for i := range sectorInfos {
-		sectorInfos[i] = mkSai()
-	}
-	params = mkParams()
-	params.Sectors = sectorInfos
-	submitAndFail(&params, "too many sectors", 16)
-
-	// Test bad aggregation proof type
-	params = mkParams()
-	params.AggregateProofType = abi.RegisteredAggregationProof_SnarkPackV1
-	submitAndFail(&params, "aggregate proof type", 16)
-
-	// Test bad SealerID
-	params = mkParams()
-	params.Sectors[1].SealerID = 1234
-	submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16)
-
-	// Test bad SealingNumber
-	params = mkParams()
-	params.Sectors[1].SealingNumber = 1234
-	submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16)
-
-	// Test bad SealedCID
-	params = mkParams()
-	params.Sectors[1].SealedCID = cid.MustParse("baga6ea4seaqjtovkwk4myyzj56eztkh5pzsk5upksan6f5outesy62bsvl4dsha")
-	submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16)
-
-	// Test bad SealRandEpoch
-	head, err = miner.FullNode.ChainHead(ctx)
-	req.NoError(err)
-	params = mkParams()
-	params.Sectors[1].SealRandEpoch = head.Height() + builtin.EpochsInDay
-	submitAndFail(&params, fmt.Sprintf("seal challenge epoch %d must be before now", params.Sectors[1].SealRandEpoch), 16)
-	params.Sectors[1].SealRandEpoch = head.Height() - miner14.MaxProveCommitNiLookback - builtin.EpochsInDay
-	submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16)
-
-	// Immutable/bad deadlines
-	di, err = miner.FullNode.StateMinerProvingDeadline(ctx, miner.ActorAddr, head.Key())
-	req.NoError(err)
-	currentDeadlineIdx = kit.CurrentDeadlineIndex(di)
+	t.Run("message rejection on no sectors", func(t *testing.T) {
+		params := mkParams()
+		params.Sectors = []miner14.SectorNIActivationInfo{}
+		submitAndFail(&params, "too few sectors", 16)
+	})
 
-	t.Logf("Validating submission failure for current and next deadline. Current Deadline Info: %+v, calculated current deadline: %d.", di, currentDeadlineIdx)
-
-	params = mkParams()
-	params.ProvingDeadline = currentDeadlineIdx
-	submitAndFail(&params, fmt.Sprintf("proving deadline %d must not be the current or next deadline", currentDeadlineIdx), 18)
-	params.ProvingDeadline = currentDeadlineIdx + 1
-	if params.ProvingDeadline == di.WPoStPeriodDeadlines {
-		params.ProvingDeadline = 0
-	}
-	msgdline := currentDeadlineIdx + 1
-	if msgdline == di.WPoStPeriodDeadlines {
-		msgdline = 0
-	}
-	submitAndFail(&params, fmt.Sprintf("proving deadline %d must not be the current or next deadline", msgdline), 18)
-	params.ProvingDeadline = di.WPoStPeriodDeadlines // too big
-	submitAndFail(&params, fmt.Sprintf("proving deadline index %d invalid", di.WPoStPeriodDeadlines), 16)
+	t.Run("message rejection on too many sectors", func(t *testing.T) {
+		sectorInfos := make([]miner14.SectorNIActivationInfo, 66)
+		for i := range sectorInfos {
+			sectorInfos[i] = mkSai()
+		}
+		params := mkParams()
+		params.Sectors = sectorInfos
+		submitAndFail(&params, "too many sectors", 16)
+	})
+
+	t.Run("bad aggregation proof type", func(t *testing.T) {
+		params := mkParams()
+		params.AggregateProofType = abi.RegisteredAggregationProof_SnarkPackV1
+		submitAndFail(&params, "aggregate proof type", 16)
+	})
+
+	t.Run("bad SealerID", func(t *testing.T) {
+		params := mkParams()
+		params.Sectors[1].SealerID = 1234
+		submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16)
+	})
+
+	t.Run("bad SealingNumber", func(t *testing.T) {
+		params := mkParams()
+		params.Sectors[1].SealingNumber = 1234
+		submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16)
+	})
+
+	t.Run("bad SealedCID", func(t *testing.T) {
+		params := mkParams()
+		params.Sectors[1].SealedCID = cid.MustParse("baga6ea4seaqjtovkwk4myyzj56eztkh5pzsk5upksan6f5outesy62bsvl4dsha")
+		submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16)
+	})
+
+	t.Run("bad SealRandEpoch", func(t *testing.T) {
+		head, err = miner.FullNode.ChainHead(ctx)
+		req.NoError(err)
+		params := mkParams()
+		params.Sectors[1].SealRandEpoch = head.Height() + builtin.EpochsInDay
+		submitAndFail(&params, fmt.Sprintf("seal challenge epoch %d must be before now", params.Sectors[1].SealRandEpoch), 16)
+		params.Sectors[1].SealRandEpoch = head.Height() - miner14.MaxProveCommitNiLookback - builtin.EpochsInDay
+		submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16)
+	})
+
+	t.Run("immutable deadlines", func(t *testing.T) {
+		di, err = miner.FullNode.StateMinerProvingDeadline(ctx, miner.ActorAddr, head.Key())
+		req.NoError(err)
+		currentDeadlineIdx = kit.CurrentDeadlineIndex(di)
+
+		t.Logf("Validating submission failure for current and next deadline. Current Deadline Info: %+v, calculated current deadline: %d.", di, currentDeadlineIdx)
+
+		params := mkParams()
+		params.ProvingDeadline = currentDeadlineIdx
+		submitAndFail(&params, fmt.Sprintf("proving deadline %d must not be the current or next deadline", currentDeadlineIdx), 18)
+		params.ProvingDeadline = currentDeadlineIdx + 1
+		if params.ProvingDeadline == di.WPoStPeriodDeadlines {
+			params.ProvingDeadline = 0
+		}
+		msgdline := currentDeadlineIdx + 1
+		if msgdline == di.WPoStPeriodDeadlines {
+			msgdline = 0
+		}
+		submitAndFail(&params, fmt.Sprintf("proving deadline %d must not be the current or next deadline", msgdline), 18)
+		params.ProvingDeadline = di.WPoStPeriodDeadlines // too big
+		submitAndFail(&params, fmt.Sprintf("proving deadline index %d invalid", di.WPoStPeriodDeadlines), 16)
+	})
 }