diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go
index 5f8f60ccd35..5e36c77a069 100644
--- a/itests/kit/ensemble.go
+++ b/itests/kit/ensemble.go
@@ -323,11 +323,11 @@ func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeO
 	return n
 }
 
-func (n *Ensemble) UnmanagedMiner(full *TestFullNode, opts ...NodeOpt) (*TestUnmanagedMiner, *Ensemble) {
+func (n *Ensemble) UnmanagedMiner(ctx context.Context, full *TestFullNode, opts ...NodeOpt) (*TestUnmanagedMiner, *Ensemble) {
 	actorAddr, err := address.NewIDAddress(genesis2.MinerStart + n.minerCount())
 	require.NoError(n.t, err)
 
-	minerNode := NewTestUnmanagedMiner(n.t, full, actorAddr, n.options.mockProofs, opts...)
+	minerNode := NewTestUnmanagedMiner(ctx, n.t, full, actorAddr, n.options.mockProofs, opts...)
 	n.AddInactiveUnmanagedMiner(minerNode)
 	return minerNode, n
 }
diff --git a/itests/kit/node_unmanaged.go b/itests/kit/node_unmanaged.go
index 5dc3ed379ad..23dcca37a3a 100644
--- a/itests/kit/node_unmanaged.go
+++ b/itests/kit/node_unmanaged.go
@@ -6,16 +6,18 @@ import (
 	"crypto/rand"
 	"fmt"
 	"io"
-	"math"
 	"os"
 	"path/filepath"
+	"sync"
 	"testing"
+	"time"
 
 	"github.com/ipfs/go-cid"
 	libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
 	"github.com/libp2p/go-libp2p/core/peer"
 	"github.com/stretchr/testify/require"
 	cbg "github.com/whyrusleeping/cbor-gen"
+	"golang.org/x/sync/errgroup"
 
 	ffi "github.com/filecoin-project/filecoin-ffi"
 	"github.com/filecoin-project/go-address"
@@ -23,6 +25,8 @@ import (
 	"github.com/filecoin-project/go-state-types/builtin"
 	miner14 "github.com/filecoin-project/go-state-types/builtin/v14/miner"
 	"github.com/filecoin-project/go-state-types/crypto"
+	"github.com/filecoin-project/go-state-types/dline"
+	"github.com/filecoin-project/go-state-types/exitcode"
 	"github.com/filecoin-project/go-state-types/proof"
 
 	"github.com/filecoin-project/lotus/api"
@@ -32,9 +36,19 @@ import (
 	"github.com/filecoin-project/lotus/chain/wallet/key"
 )
 
+// TODO: this shouldn't be fixed, we should make a new one for each sector/piece
+var fixedPieceCid = cid.MustParse("baga6ea4seaqjtovkwk4myyzj56eztkh5pzsk5upksan6f5outesy62bsvl4dsha")
+
+// 32 bytes of 1's: this value is essentially ignored in NI-PoRep proofs, but all zeros is not
+// recommended. Regardless of what we submit to the chain, the actors will replace it with 32 1's
+// anyway, but we also call proof verification directly when we prepare an aggregate NI proof.
+var niPorepInteractiveRandomness = abi.InteractiveSealRandomness([]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})
+
 // TestUnmanagedMiner is a miner that's not managed by the storage/infrastructure; all tasks must be
 // manually executed, managed and scheduled by the test or test kit.
 // Note: `TestUnmanagedMiner` is not thread safe and assumes linear access of its methods
 type TestUnmanagedMiner struct {
+	ctx        context.Context
+	cancelFunc context.CancelFunc
 	t          *testing.T
 	options    nodeOpts
 	mockProofs bool
@@ -44,14 +58,12 @@ type TestUnmanagedMiner struct {
 	sealedSectorDir   string
 	currentSectorNum  abi.SectorNumber
 
-	cacheDirPaths       map[abi.SectorNumber]string
-	unsealedSectorPaths map[abi.SectorNumber]string
-	sealedSectorPaths   map[abi.SectorNumber]string
-	sealedCids          map[abi.SectorNumber]cid.Cid
-	unsealedCids        map[abi.SectorNumber]cid.Cid
-	sealTickets         map[abi.SectorNumber]abi.SealRandomness
+	committedSectorsLk sync.Mutex
+	committedSectors   map[abi.SectorNumber]sectorInfo
 
-	proofType map[abi.SectorNumber]abi.RegisteredSealProof
+	runningWdPostLoop bool
+	postsLk           sync.Mutex
+	posts             []windowPost
 
 	ActorAddr address.Address
 	OwnerKey  *key.Key
@@ -62,27 +74,50 @@ type TestUnmanagedMiner struct {
 	}
 }
 
-type WindowPostResp struct {
-	Posted bool
+// sectorInfo contains all of the info we need to manage the lifecycle of the sector. These
+// properties are private, but could be moved to OnboardedSector if they are useful to calling tests
+// or need to be modified by WithModifyNIActivationsBeforeSubmit.
+type sectorInfo struct {
+	sectorNumber        abi.SectorNumber
+	proofType           abi.RegisteredSealProof
+	cacheDirPath        string
+	unsealedSectorPath  string
+	sealedSectorPath    string
+	sealedCid           cid.Cid
+	unsealedCid         cid.Cid
+	sectorProof         []byte
+	pieces              []abi.PieceInfo
+	sealTickets         abi.SealRandomness
+	sealRandomnessEpoch abi.ChainEpoch
+}
+
+type windowPost struct {
+	Posted []abi.SectorNumber
+	Epoch  abi.ChainEpoch
 	Error  error
 }
 
-func NewTestUnmanagedMiner(t *testing.T, full *TestFullNode, actorAddr address.Address, mockProofs bool, opts ...NodeOpt) *TestUnmanagedMiner {
-	require.NotNil(t, full, "full node required when instantiating miner")
+func NewTestUnmanagedMiner(ctx context.Context, t *testing.T, full *TestFullNode, actorAddr address.Address, mockProofs bool, opts ...NodeOpt) *TestUnmanagedMiner {
+	req := require.New(t)
+
+	req.NotNil(full, "full node required when instantiating miner")
 
 	options := DefaultNodeOpts
 	for _, o := range opts {
 		err := o(&options)
-		require.NoError(t, err)
+		req.NoError(err)
 	}
 
+	actorIdNum, err := address.IDFromAddress(actorAddr)
+	req.NoError(err)
+
 	privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader)
-	require.NoError(t, err)
+	req.NoError(err)
 
-	require.NotNil(t, options.ownerKey, "owner key is required for initializing a miner")
+	req.NotNil(options.ownerKey, "owner key is required for initializing a miner")
 
 	peerId, err := peer.IDFromPrivateKey(privkey)
-	require.NoError(t, err)
+	req.NoError(err)
 
 	tmpDir := t.TempDir()
 
 	cacheDir := filepath.Join(tmpDir, fmt.Sprintf("cache-%s", actorAddr))
@@ -93,7 +128,11 @@ func NewTestUnmanagedMiner(t *testing.T, full *TestFullNode, actorAddr address.A
 	_ = os.Mkdir(unsealedSectorDir, 0755)
 	_ = os.Mkdir(sealedSectorDir, 0755)
 
+	ctx, cancel := context.WithCancel(ctx)
+
 	tm := TestUnmanagedMiner{
+		ctx:               ctx,
+		cancelFunc:        cancel,
 		t:                 t,
 		options:           options,
 		mockProofs:        mockProofs,
@@ -101,18 +140,12 @@ func NewTestUnmanagedMiner(t *testing.T, full *TestFullNode, actorAddr address.A
 		unsealedSectorDir: unsealedSectorDir,
 		sealedSectorDir:   sealedSectorDir,
 
-		unsealedSectorPaths: make(map[abi.SectorNumber]string),
-		cacheDirPaths:       make(map[abi.SectorNumber]string),
-		sealedSectorPaths:   make(map[abi.SectorNumber]string),
-		sealedCids:          make(map[abi.SectorNumber]cid.Cid),
-		unsealedCids:        make(map[abi.SectorNumber]cid.Cid),
-
-		sealTickets:         make(map[abi.SectorNumber]abi.SealRandomness),
+		committedSectors: make(map[abi.SectorNumber]sectorInfo),
 
 		ActorAddr: actorAddr,
 		OwnerKey:  options.ownerKey,
 		FullNode:  full,
-		currentSectorNum: 101,
-		proofType:        make(map[abi.SectorNumber]abi.RegisteredSealProof),
+		currentSectorNum: abi.SectorNumber(actorIdNum * 100), // include the actor id to make them unique across miners and easier to identify in logs
 	}
 	tm.Libp2p.PeerID = peerId
 	tm.Libp2p.PrivKey = privkey
@@ -120,187 +153,203 @@ func NewTestUnmanagedMiner(t *testing.T, full *TestFullNode, actorAddr address.A
 	return &tm
 }
 
-func (tm *TestUnmanagedMiner) AssertNoPower(ctx context.Context) {
-	p := tm.CurrentPower(ctx)
-	tm.t.Logf("Miner %s RBP: %v, QaP: %v", tm.ActorAddr, p.MinerPower.QualityAdjPower.String(), p.MinerPower.RawBytePower.String())
-	require.True(tm.t, p.MinerPower.RawBytePower.IsZero())
+func (tm *TestUnmanagedMiner) Stop() {
+	tm.cancelFunc()
+	tm.AssertNoWindowPostError()
 }
 
-func (tm *TestUnmanagedMiner) CurrentPower(ctx context.Context) *api.MinerPower {
-	head, err := tm.FullNode.ChainHead(ctx)
-	require.NoError(tm.t, err)
-
-	p, err := tm.FullNode.StateMinerPower(ctx, tm.ActorAddr, head.Key())
-	require.NoError(tm.t, err)
-
-	return p
+type OnboardOpt func(opts *onboardOpt) error
+
+type onboardOpt struct {
+	modifyNIActivationsBeforeSubmit func([]miner14.SectorNIActivationInfo) []miner14.SectorNIActivationInfo
+	requireActivationSuccess        bool
+	expectedExitCodes               []exitcode.ExitCode
 }
 
-func (tm *TestUnmanagedMiner) AssertPower(ctx context.Context, raw uint64, qa uint64) {
-	req := require.New(tm.t)
-	p := tm.CurrentPower(ctx)
-	tm.t.Logf("Miner %s RBP: %v, QaP: %v", p.MinerPower.QualityAdjPower.String(), tm.ActorAddr, p.MinerPower.RawBytePower.String())
-	req.Equal(raw, p.MinerPower.RawBytePower.Uint64())
-	req.Equal(qa, p.MinerPower.QualityAdjPower.Uint64())
+func WithModifyNIActivationsBeforeSubmit(f func([]miner14.SectorNIActivationInfo) []miner14.SectorNIActivationInfo) OnboardOpt {
+	return func(opts *onboardOpt) error {
+		opts.modifyNIActivationsBeforeSubmit = f
+		return nil
+	}
 }
 
-func (tm *TestUnmanagedMiner) mkAndSavePiecesToOnboard(_ context.Context, sectorNumber abi.SectorNumber, pt abi.RegisteredSealProof) []abi.PieceInfo {
-	paddedPieceSize := abi.PaddedPieceSize(tm.options.sectorSize)
-	unpaddedPieceSize := paddedPieceSize.Unpadded()
-
-	// Generate random bytes for the piece
-	randomBytes := make([]byte, unpaddedPieceSize)
-	_, err := io.ReadFull(rand.Reader, randomBytes)
-	require.NoError(tm.t, err)
+func WithRequireActivationSuccess(requireActivationSuccess bool) OnboardOpt {
+	return func(opts *onboardOpt) error {
+		opts.requireActivationSuccess = requireActivationSuccess
+		return nil
	}
 }
 
-	// Create a temporary file for the first piece
-	pieceFileA := requireTempFile(tm.t, bytes.NewReader(randomBytes), uint64(unpaddedPieceSize))
+func WithExpectedExitCodes(exitCodes []exitcode.ExitCode) OnboardOpt {
+	return func(opts *onboardOpt) error {
+		opts.expectedExitCodes = exitCodes
+		return nil
+	}
+}
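// [Editor's illustration — not part of the PR] A sketch of how a test might combine
// these onboarding options; `brokenCid` and the expected failure exit code are
// assumptions for the example:
//
//	sectorNumbers := miner.OnboardSectors(
//		abi.RegisteredSealProof_StackedDrg2KiBV1_2_Feat_NiPoRep,
//		false, // NI-PoRep sectors are CC, so no pieces
//		2,
//		kit.WithRequireActivationSuccess(false),
//		kit.WithModifyNIActivationsBeforeSubmit(func(infos []miner14.SectorNIActivationInfo) []miner14.SectorNIActivationInfo {
//			infos[1].SealedCID = brokenCid // sabotage the second activation
//			return infos
//		}),
//		kit.WithExpectedExitCodes([]exitcode.ExitCode{exitcode.Ok, exitcode.ErrIllegalArgument}),
//	)
//	require.Len(t, sectorNumbers, 1) // only the healthy sector is onboarded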
-	// Generate the piece CID from the file
-	pieceCIDA, err := ffi.GeneratePieceCIDFromFile(pt, pieceFileA, unpaddedPieceSize)
-	require.NoError(tm.t, err)
+// OnboardSectors onboards the specified number of sectors to the miner using the specified proof
+// type. If `withPieces` is true and the proof type supports it, the sectors will be onboarded with
+// pieces, otherwise they will be CC.
+//
+// This method is synchronous but each sector is prepared in a separate goroutine to speed up the
+// process with real proofs.
+func (tm *TestUnmanagedMiner) OnboardSectors(
+	proofType abi.RegisteredSealProof,
+	withPieces bool,
+	count int,
+	opts ...OnboardOpt,
+) []abi.SectorNumber {
 
-	// Reset file offset to the beginning after CID generation
-	_, err = pieceFileA.Seek(0, io.SeekStart)
-	require.NoError(tm.t, err)
+	req := require.New(tm.t)
 
-	unsealedSectorFile := requireTempFile(tm.t, bytes.NewReader([]byte{}), 0)
-	defer func() {
-		_ = unsealedSectorFile.Close()
-	}()
+	options := onboardOpt{}
+	for _, o := range opts {
+		req.NoError(o(&options))
+	}
 
-	// Write the piece to the staged sector file without alignment
-	writtenBytes, pieceCID, err := ffi.WriteWithoutAlignment(pt, pieceFileA, unpaddedPieceSize, unsealedSectorFile)
-	require.NoError(tm.t, err)
-	require.EqualValues(tm.t, unpaddedPieceSize, writtenBytes)
-	require.True(tm.t, pieceCID.Equals(pieceCIDA))
+	sectors := make([]sectorInfo, count)
 
-	// Create a struct for the piece info
-	publicPieces := []abi.PieceInfo{{
-		Size:     paddedPieceSize,
-		PieceCID: pieceCIDA,
-	}}
+	// Wait for the seal randomness to be available (we can only draw seal randomness from
+	// tipsets that have already achieved finality)
+	sealRandEpoch, err := tm.waitPreCommitSealRandomness(proofType)
+	req.NoError(err)
 
-	// Create a temporary file for the sealed sector
-	sealedSectorFile := requireTempFile(tm.t, bytes.NewReader([]byte{}), 0)
-	defer func() {
-		_ = sealedSectorFile.Close()
-	}()
+	var eg errgroup.Group
+
+	// For each sector, run PC1, PC2, C1 and C2, preparing for ProveCommit. If the proof type
+	// requires it, we will also submit a PreCommit for the sector.
+	for i := 0; i < count; i++ {
+		idx := i
+
+		// We hold on to `sector`, adding new properties to it as we go along until we're finished with
+		// this phase, then add it to `sectors`
+		sector := tm.nextSector(proofType)
+		sector.sealRandomnessEpoch = sealRandEpoch
+
+		eg.Go(func() error {
+			var err error // closure-local so concurrent goroutines don't race on the outer err
+			if withPieces {
+				// Build a sector with non-zero pieces to onboard
+				if tm.mockProofs {
+					sector.pieces = []abi.PieceInfo{{
+						Size:     abi.PaddedPieceSize(tm.options.sectorSize),
+						PieceCID: fixedPieceCid,
+					}}
+				} else {
+					sector, err = tm.mkAndSavePiecesToOnboard(sector)
+					if err != nil {
+						return fmt.Errorf("failed to create sector with pieces: %w", err)
+					}
+				}
+			} else {
+				// Build a sector with no pieces (CC) to onboard
+				if !tm.mockProofs {
+					sector, err = tm.makeAndSaveCCSector(sector)
+					if err != nil {
+						return fmt.Errorf("failed to create CC sector: %w", err)
+					}
+				}
+			}
 
-	// Update paths for the sector
-	tm.sealedSectorPaths[sectorNumber] = sealedSectorFile.Name()
-	tm.unsealedSectorPaths[sectorNumber] = unsealedSectorFile.Name()
-	tm.cacheDirPaths[sectorNumber] = filepath.Join(tm.cacheDir, fmt.Sprintf("%d", sectorNumber))
+			// Generate a PreCommit for the sector if required
+			sector, err = tm.generatePreCommit(sector, sealRandEpoch)
+			if err != nil {
+				return fmt.Errorf("failed to generate PreCommit for sector: %w", err)
+			}
 
-	// Ensure the cache directory exists
-	_ = os.Mkdir(tm.cacheDirPaths[sectorNumber], 0755)
+			// Submit the PreCommit to the network if required
+			// TODO: should do this for all sectors in the batch in one go
+			err = tm.preCommitSectors(sealRandEpoch, proofType, sector)
+			if err != nil {
+				return fmt.Errorf("failed to submit PreCommit for sector: %w", err)
+			}
 
-	return publicPieces
-}
+			// Generate a ProveCommit for the sector
+			sectorProof, err :=
 tm.generateSectorProof(sector)
+			if err != nil {
+				return fmt.Errorf("failed to generate ProveCommit for sector: %w", err)
+			}
+			sector.sectorProof = sectorProof
 
-func (tm *TestUnmanagedMiner) makeAndSaveCCSector(_ context.Context, sectorNumber abi.SectorNumber) {
-	requirements := require.New(tm.t)
+			sectors[idx] = sector
+			return nil
+		})
+	}
 
-	// Create cache directory
-	cacheDirPath := filepath.Join(tm.cacheDir, fmt.Sprintf("%d", sectorNumber))
-	requirements.NoError(os.Mkdir(cacheDirPath, 0755))
-	tm.t.Logf("Miner %s: Sector %d: created cache directory at %s", tm.ActorAddr, sectorNumber, cacheDirPath)
+	if err := eg.Wait(); err != nil {
+		tm.t.Fatal(err)
+	}
 
-	// Define paths for unsealed and sealed sectors
-	unsealedSectorPath := filepath.Join(tm.unsealedSectorDir, fmt.Sprintf("%d", sectorNumber))
-	sealedSectorPath := filepath.Join(tm.sealedSectorDir, fmt.Sprintf("%d", sectorNumber))
-	unsealedSize := abi.PaddedPieceSize(tm.options.sectorSize).Unpadded()
+	// Submit ProveCommit for all sectors
+	exitCodes := tm.submitProveCommit(proofType, sectors, options.requireActivationSuccess, options.modifyNIActivationsBeforeSubmit)
+
+	// ProveCommit may have succeeded overall, but some sectors may have failed if RequireActivationSuccess
+	// was set to false. We need to return the exit codes for each sector so the caller can determine
+	// which sectors failed.
+	onboarded := make([]abi.SectorNumber, 0)
+	for i, sector := range sectors {
+		// expect them all to pass unless the caller specified otherwise
+		if options.expectedExitCodes == nil {
+			req.Equal(exitcode.Ok, exitCodes[i], "sector %d failed with exit code %s", sector.sectorNumber, exitCodes[i])
+		} else {
+			req.Equal(options.expectedExitCodes[i], exitCodes[i], "sector %d failed with exit code %s", sector.sectorNumber, exitCodes[i])
		}
 
-	// Write unsealed sector file
-	requirements.NoError(os.WriteFile(unsealedSectorPath, make([]byte, unsealedSize), 0644))
-	tm.t.Logf("Miner %s: Sector %d: wrote unsealed CC sector to %s", tm.ActorAddr, sectorNumber, unsealedSectorPath)
+		if exitCodes[i].IsSuccess() {
+			// only save the sector if it was successfully committed
+			tm.setCommittedSector(sectors[i])
+			onboarded = append(onboarded, sector.sectorNumber)
+		}
+	}
 
-	// Write sealed sector file
-	requirements.NoError(os.WriteFile(sealedSectorPath, make([]byte, tm.options.sectorSize), 0644))
-	tm.t.Logf("Miner %s: Sector %d: wrote sealed CC sector to %s", tm.ActorAddr, sectorNumber, sealedSectorPath)
+	tm.wdPostLoop()
 
-	// Update paths in the struct
-	tm.unsealedSectorPaths[sectorNumber] = unsealedSectorPath
-	tm.sealedSectorPaths[sectorNumber] = sealedSectorPath
-	tm.cacheDirPaths[sectorNumber] = cacheDirPath
+	return onboarded
 }
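// [Editor's illustration — not part of the PR] Driving the new synchronous onboarding
// API from a test; the ensemble helpers, option list, and `sealProofType` are
// assumptions based on the kit's existing conventions:
//
//	client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs())
//	ens.InterconnectAll().BeginMining(50 * time.Millisecond)
//	miner, _ := ens.UnmanagedMiner(ctx, &client, kit.WithAllSubsystems()) // plus an owner-key option as the kit requires
//	ens.Start()
//
//	sectors := miner.OnboardSectors(sealProofType, true /* withPieces */, 3)
//	defer miner.Stop() // cancels the miner's context and asserts no window post errors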
-func (tm *TestUnmanagedMiner) mkStagedFileWithPieces(pt abi.RegisteredSealProof) ([]abi.PieceInfo, string) {
-	paddedPieceSize := abi.PaddedPieceSize(tm.options.sectorSize)
-	unpaddedPieceSize := paddedPieceSize.Unpadded()
-
-	// Generate random bytes for the piece
-	randomBytes := make([]byte, unpaddedPieceSize)
-	_, err := io.ReadFull(rand.Reader, randomBytes)
-	require.NoError(tm.t, err)
-
-	// Create a temporary file for the first piece
-	pieceFileA := requireTempFile(tm.t, bytes.NewReader(randomBytes), uint64(unpaddedPieceSize))
-
-	// Generate the piece CID from the file
-	pieceCIDA, err := ffi.GeneratePieceCIDFromFile(pt, pieceFileA, unpaddedPieceSize)
-	require.NoError(tm.t, err)
-
-	// Reset file offset to the beginning after CID generation
-	_, err = pieceFileA.Seek(0, io.SeekStart)
-	require.NoError(tm.t, err)
-
-	unsealedSectorFile := requireTempFile(tm.t, bytes.NewReader([]byte{}), 0)
-	defer func() {
-		_ = unsealedSectorFile.Close()
-	}()
+// SnapDeal snaps a deal into a sector, generating a new sealed sector and updating the sector's state.
+// WindowPoSt should continue to operate after this operation if required.
+func (tm *TestUnmanagedMiner) SnapDeal(sectorNumber abi.SectorNumber) []abi.PieceInfo {
+	req := require.New(tm.t)
 
-	// Write the piece to the staged sector file without alignment
-	writtenBytes, pieceCID, err := ffi.WriteWithoutAlignment(pt, pieceFileA, unpaddedPieceSize, unsealedSectorFile)
-	require.NoError(tm.t, err)
-	require.EqualValues(tm.t, unpaddedPieceSize, writtenBytes)
-	require.True(tm.t, pieceCID.Equals(pieceCIDA))
+	tm.log("Snapping a deal into sector %d ...", sectorNumber)
 
-	// Create a struct for the piece info
-	publicPieces := []abi.PieceInfo{{
-		Size:     paddedPieceSize,
-		PieceCID: pieceCIDA,
-	}}
-
-	return publicPieces, unsealedSectorFile.Name()
-}
+	si, err := tm.getCommittedSector(sectorNumber)
+	req.NoError(err)
 
-func (tm *TestUnmanagedMiner) SnapDeal(ctx context.Context, proofType abi.RegisteredSealProof, sectorNumber abi.SectorNumber) []abi.PieceInfo {
-	updateProofType := abi.SealProofInfos[proofType].UpdateProof
+	updateProofType := abi.SealProofInfos[si.proofType].UpdateProof
 	var pieces []abi.PieceInfo
 	var snapProof []byte
-	var newSealedCid cid.Cid
+	var newSealedCid, newUnsealedCid cid.Cid
 
 	if !tm.mockProofs {
-		// generate sector key
 		var unsealedPath string
-		pieces, unsealedPath = tm.mkStagedFileWithPieces(proofType)
+		var err error
+		pieces, unsealedPath, err = tm.mkStagedFileWithPieces(si.proofType)
+		req.NoError(err)
 
-		s, err := os.Stat(tm.sealedSectorPaths[sectorNumber])
-		require.NoError(tm.t, err)
+		s, err := os.Stat(si.sealedSectorPath)
+		req.NoError(err)
 
 		randomBytes := make([]byte, s.Size())
 		_, err = io.ReadFull(rand.Reader, randomBytes)
-		require.NoError(tm.t, err)
+		req.NoError(err)
 
-		updatePath := requireTempFile(tm.t, bytes.NewReader(randomBytes), uint64(s.Size()))
-		require.NoError(tm.t, updatePath.Close())
+		updatePath, err := mkTempFile(tm.t, bytes.NewReader(randomBytes), uint64(s.Size()))
+		req.NoError(err)
+		req.NoError(updatePath.Close())
 
 		updateDir := filepath.Join(tm.t.TempDir(), fmt.Sprintf("update-%d", sectorNumber))
-		require.NoError(tm.t, os.MkdirAll(updateDir, 0700))
+		req.NoError(os.MkdirAll(updateDir, 0700))
 
-		var newUnsealedCid cid.Cid
 		newSealedCid, newUnsealedCid, err = ffi.SectorUpdate.EncodeInto(updateProofType, updatePath.Name(), updateDir,
-			tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber], unsealedPath, pieces)
-		require.NoError(tm.t, err)
+			si.sealedSectorPath, si.cacheDirPath, unsealedPath, pieces)
+		req.NoError(err)
 
-		vp, err := ffi.SectorUpdate.GenerateUpdateVanillaProofs(updateProofType, tm.sealedCids[sectorNumber],
-			newSealedCid, newUnsealedCid, updatePath.Name(), updateDir, tm.sealedSectorPaths[sectorNumber],
-			tm.cacheDirPaths[sectorNumber])
-		require.NoError(tm.t, err)
+		vp, err := ffi.SectorUpdate.GenerateUpdateVanillaProofs(updateProofType, si.sealedCid,
+			newSealedCid, newUnsealedCid, updatePath.Name(), updateDir, si.sealedSectorPath, si.cacheDirPath)
+		req.NoError(err)
 
-		snapProof, err = ffi.SectorUpdate.GenerateUpdateProofWithVanilla(updateProofType, tm.sealedCids[sectorNumber],
-			newSealedCid, newUnsealedCid, vp)
-		require.NoError(tm.t, err)
+		snapProof, err = ffi.SectorUpdate.GenerateUpdateProofWithVanilla(updateProofType, si.sealedCid, newSealedCid, newUnsealedCid, vp)
+		req.NoError(err)
 	} else {
 		pieces = []abi.PieceInfo{{
 			Size:     abi.PaddedPieceSize(tm.options.sectorSize),
@@ -310,9 +359,8 @@ func (tm *TestUnmanagedMiner) SnapDeal(ctx context.Context, proofType abi.Regist
 		newSealedCid = cid.MustParse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkieka")
 	}
 
-	tm.waitForMutableDeadline(ctx, sectorNumber)
+	tm.waitForMutableDeadline(sectorNumber)
 
-	// submit proof
 	var manifest []miner14.PieceActivationManifest
 	for _, piece := range pieces {
 		manifest = append(manifest, miner14.PieceActivationManifest{
@@ -321,11 +369,13 @@ func (tm *TestUnmanagedMiner) SnapDeal(ctx context.Context, proofType abi.Regist
 		})
 	}
 
-	head, err := tm.FullNode.ChainHead(ctx)
-	require.NoError(tm.t, err)
+	head, err := tm.FullNode.ChainHead(tm.ctx)
+	req.NoError(err)
 
-	sl, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNumber, head.Key())
-	require.NoError(tm.t, err)
+	sl, err := tm.FullNode.StateSectorPartition(tm.ctx, tm.ActorAddr, sectorNumber, head.Key())
+	req.NoError(err)
+
+	tm.log("Submitting ProveReplicaUpdates3 for sector %d ...", sectorNumber)
 
 	params := &miner14.ProveReplicaUpdates3Params{
 		SectorUpdates: []miner14.SectorUpdateManifest{
@@ -342,454 +392,728 @@ func (tm *TestUnmanagedMiner) SnapDeal(ctx context.Context, proofType abi.Regist
 		RequireActivationSuccess:   true,
 		RequireNotificationSuccess: false,
 	}
-	r, err := tm.SubmitMessage(ctx, params, 1, builtin.MethodsMiner.ProveReplicaUpdates3)
-	require.NoError(tm.t, err)
-	require.True(tm.t, r.Receipt.ExitCode.IsSuccess())
+	r, err := tm.SubmitMessage(params, 1, builtin.MethodsMiner.ProveReplicaUpdates3)
+	req.NoError(err)
+	req.True(r.Receipt.ExitCode.IsSuccess())
+
+	si.pieces = pieces
+	si.sealedCid = newSealedCid
+	si.unsealedCid = newUnsealedCid
+	tm.setCommittedSector(si)
 
 	return pieces
 }
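// [Editor's illustration — not part of the PR] The intended SnapDeal call pattern
// now that per-sector state is tracked internally:
//
//	sectors := miner.OnboardSectors(sealProofType, false /* CC */, 1)
//	pieces := miner.SnapDeal(sectors[0])
//	// the sector's sealed/unsealed CIDs and pieces are updated in the miner's
//	// committed-sector map, so the window post loop proves the upgraded replica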
-func (tm *TestUnmanagedMiner) waitForMutableDeadline(ctx context.Context, sectorNum abi.SectorNumber) {
-	ts, err := tm.FullNode.ChainHead(ctx)
-	require.NoError(tm.t, err)
-
-	sl, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNum, ts.Key())
-	require.NoError(tm.t, err)
-
-	dlinfo, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, ts.Key())
-	require.NoError(tm.t, err)
+func (tm *TestUnmanagedMiner) log(msg string, args ...interface{}) {
+	tm.t.Logf(fmt.Sprintf("Miner %s: %s", tm.ActorAddr, msg), args...)
+}
 
-	sectorDeadlineOpen := sl.Deadline == dlinfo.Index
-	sectorDeadlineNext := (dlinfo.Index+1)%dlinfo.WPoStPeriodDeadlines == sl.Deadline
-	immutable := sectorDeadlineOpen || sectorDeadlineNext
-
-	// Sleep for immutable epochs
-	if immutable {
-		dlineEpochsRemaining := dlinfo.NextOpen() - ts.Height()
-		var targetEpoch abi.ChainEpoch
-		if sectorDeadlineOpen {
-			// sleep for remainder of deadline
-			targetEpoch = ts.Height() + dlineEpochsRemaining
-		} else {
-			// sleep for remainder of deadline and next one
-			targetEpoch = ts.Height() + dlineEpochsRemaining + dlinfo.WPoStChallengeWindow
-		}
-		_, err := tm.FullNode.WaitTillChainOrError(ctx, HeightAtLeast(targetEpoch+5))
-		require.NoError(tm.t, err)
+func (tm *TestUnmanagedMiner) getCommittedSector(sectorNumber abi.SectorNumber) (sectorInfo, error) {
+	tm.committedSectorsLk.Lock()
+	defer tm.committedSectorsLk.Unlock()
+	si, ok := tm.committedSectors[sectorNumber]
+	if !ok {
+		return sectorInfo{}, fmt.Errorf("sector %d not found", sectorNumber)
 	}
+	return si, nil
+}
+
+func (tm *TestUnmanagedMiner) setCommittedSector(sectorInfo sectorInfo) {
+	tm.committedSectorsLk.Lock()
+	tm.committedSectors[sectorInfo.sectorNumber] = sectorInfo
+	tm.committedSectorsLk.Unlock()
 }
 
-func (tm *TestUnmanagedMiner) NextSectorNumber() abi.SectorNumber {
-	sectorNumber := tm.currentSectorNum
+// nextSector creates a new sectorInfo{} with a new unique sector number for this miner,
+// but it doesn't persist it to the miner state so we don't accidentally try and wdpost
+// a sector that's either not ready or doesn't get committed.
+func (tm *TestUnmanagedMiner) nextSector(proofType abi.RegisteredSealProof) sectorInfo {
+	si := sectorInfo{
+		sectorNumber: tm.currentSectorNum,
+		proofType:    proofType,
+	}
 	tm.currentSectorNum++
-	return sectorNumber
+	return si
 }
 
-func (tm *TestUnmanagedMiner) PrepareSectorForProveCommit(
-	ctx context.Context,
-	proofType abi.RegisteredSealProof,
-	sectorNumber abi.SectorNumber,
-	pieces []abi.PieceInfo,
-) (seedEpoch abi.ChainEpoch, proveCommit []byte) {
+func (tm *TestUnmanagedMiner) mkAndSavePiecesToOnboard(sector sectorInfo) (sectorInfo, error) {
+	paddedPieceSize := abi.PaddedPieceSize(tm.options.sectorSize)
+	unpaddedPieceSize := paddedPieceSize.Unpadded()
 
-	req := require.New(tm.t)
+	// Generate random bytes for the piece
+	randomBytes := make([]byte, unpaddedPieceSize)
+	if _, err := io.ReadFull(rand.Reader, randomBytes); err != nil {
+		return sectorInfo{}, err
+	}
 
-	// Wait for the pre-commit seal randomness to be available (we can only draw seal randomness from tipsets that have already achieved finality)
-	preCommitSealRandEpoch := tm.waitPreCommitSealRandomness(ctx, sectorNumber, proofType)
+	// Create a temporary file for the first piece
+	pieceFileA, err := mkTempFile(tm.t, bytes.NewReader(randomBytes), uint64(unpaddedPieceSize))
+	if err != nil {
+		return sectorInfo{}, err
+	}
 
-	// Generate a Pre-Commit for the CC sector -> this persists the proof on the `TestUnmanagedMiner` Miner State
-	tm.generatePreCommit(ctx, sectorNumber, preCommitSealRandEpoch, proofType, pieces)
+	// Generate the piece CID from the file
+	pieceCIDA, err := ffi.GeneratePieceCIDFromFile(sector.proofType, pieceFileA, unpaddedPieceSize)
+	if err != nil {
+		return sectorInfo{}, err
+	}
 
-	// --------------------Create pre-commit for the CC sector -> we'll just pre-commit `sector size` worth of 0s for this CC sector
+	// Reset file offset to the beginning after CID generation
+	if _, err = pieceFileA.Seek(0, io.SeekStart); err != nil {
+		return sectorInfo{}, err
+	}
-	if !proofType.IsNonInteractive() {
-		// Submit the Pre-Commit to the network
-		var uc *cid.Cid
-		if len(pieces) > 0 {
-			unsealedCid := tm.unsealedCids[sectorNumber]
-			uc = &unsealedCid
-		}
-		r, err := tm.SubmitMessage(ctx, &miner14.PreCommitSectorBatchParams2{
-			Sectors: []miner14.SectorPreCommitInfo{{
-				Expiration:    2880 * 300,
-				SectorNumber:  sectorNumber,
-				SealProof:     proofType,
-				SealedCID:     tm.sealedCids[sectorNumber],
-				SealRandEpoch: preCommitSealRandEpoch,
-				UnsealedCid:   uc,
-			}},
-		}, 1, builtin.MethodsMiner.PreCommitSectorBatch2)
-		req.NoError(err)
-		req.True(r.Receipt.ExitCode.IsSuccess())
+	unsealedSectorFile, err := mkTempFile(tm.t, bytes.NewReader([]byte{}), 0)
+	if err != nil {
+		return sectorInfo{}, err
 	}
 
-	// Generate a ProveCommit for the CC sector
-	var seedRandomness abi.InteractiveSealRandomness
-	seedEpoch, seedRandomness = tm.proveCommitWaitSeed(ctx, sectorNumber, proofType)
+	defer func() {
+		_ = unsealedSectorFile.Close()
+	}()
 
-	proveCommit = []byte{0xde, 0xad, 0xbe, 0xef} // mock prove commit
-	if !tm.mockProofs {
-		proveCommit = tm.generateProveCommit(ctx, sectorNumber, proofType, seedRandomness, pieces)
+	// Write the piece to the staged sector file without alignment
+	writtenBytes, pieceCID, err := ffi.WriteWithoutAlignment(sector.proofType, pieceFileA, unpaddedPieceSize, unsealedSectorFile)
+	if err != nil {
+		return sectorInfo{}, err
+	}
+	if unpaddedPieceSize != writtenBytes {
+		return sectorInfo{}, fmt.Errorf("expected to write %d bytes, wrote %d", unpaddedPieceSize, writtenBytes)
+	}
+	if !pieceCID.Equals(pieceCIDA) {
+		return sectorInfo{}, fmt.Errorf("expected piece CID %s, got %s", pieceCIDA, pieceCID)
 	}
 
-	return seedEpoch, proveCommit
-}
+	// Create a struct for the piece info
+	sector.pieces = []abi.PieceInfo{{
+		Size:     paddedPieceSize,
+		PieceCID: pieceCIDA,
+	}}
 
-func (tm *TestUnmanagedMiner) SubmitProveCommit(
-	ctx context.Context,
-	proofType abi.RegisteredSealProof,
-	sectorNumber abi.SectorNumber,
-	seedEpoch abi.ChainEpoch,
-	proveCommit []byte,
-	pieceManifest []miner14.PieceActivationManifest,
-) {
+	// Create a temporary file for the sealed sector
+	sealedSectorFile, err := mkTempFile(tm.t, bytes.NewReader([]byte{}), 0)
+	if err != nil {
+		return sectorInfo{}, err
+	}
 
-	req := require.New(tm.t)
+	defer func() {
+		_ = sealedSectorFile.Close()
+	}()
 
-	if proofType.IsNonInteractive() {
-		req.Nil(pieceManifest, "piece manifest should be nil for Non-interactive PoRep")
-	}
+	// Update paths for the sector
+	sector.sealedSectorPath = sealedSectorFile.Name()
+	sector.unsealedSectorPath = unsealedSectorFile.Name()
+	sector.cacheDirPath = filepath.Join(tm.cacheDir, fmt.Sprintf("%d", sector.sectorNumber))
 
-	// Step 6: Submit the ProveCommit to the network
-	if proofType.IsNonInteractive() {
-		tm.t.Log("Submitting ProveCommitSector ...")
-
-		var provingDeadline uint64 = 7
-		if tm.IsImmutableDeadline(ctx, provingDeadline) {
-			// avoid immutable deadlines
-			provingDeadline = 5
-		}
+	// Ensure the cache directory exists
+	if err = os.Mkdir(sector.cacheDirPath, 0755); err != nil {
+		return sectorInfo{}, err
+	}
 
-		actorIdNum, err := address.IDFromAddress(tm.ActorAddr)
-		req.NoError(err)
-		actorId := abi.ActorID(actorIdNum)
+	return sector, nil
+}
 
+func (tm *TestUnmanagedMiner) makeAndSaveCCSector(sector sectorInfo) (sectorInfo, error) {
+	// Create cache directory
+	sector.cacheDirPath = filepath.Join(tm.cacheDir, fmt.Sprintf("%d", sector.sectorNumber))
+	if err := os.Mkdir(sector.cacheDirPath, 0755); err != nil {
+		return sectorInfo{}, err
+	}
+	tm.log("Sector %d: created cache directory at %s", sector.sectorNumber, sector.cacheDirPath)
 
-		r, err := tm.SubmitMessage(ctx, &miner14.ProveCommitSectorsNIParams{
-			Sectors: []miner14.SectorNIActivationInfo{{
-				SealingNumber: sectorNumber,
-				SealerID:      actorId,
-				SealedCID:     tm.sealedCids[sectorNumber],
-				SectorNumber:  sectorNumber,
-				SealRandEpoch: seedEpoch,
-				Expiration:    2880 * 300,
-			}},
-			AggregateProof:           proveCommit,
-			SealProofType:            proofType,
-			AggregateProofType:       abi.RegisteredAggregationProof_SnarkPackV2,
-			ProvingDeadline:          provingDeadline,
-			RequireActivationSuccess: true,
-		}, 1, builtin.MethodsMiner.ProveCommitSectorsNI)
-		req.NoError(err)
-		req.True(r.Receipt.ExitCode.IsSuccess())
+	// Define paths for unsealed and sealed sectors
+	sector.unsealedSectorPath = filepath.Join(tm.unsealedSectorDir, fmt.Sprintf("%d", sector.sectorNumber))
+	sector.sealedSectorPath = filepath.Join(tm.sealedSectorDir, fmt.Sprintf("%d", sector.sectorNumber))
+	unsealedSize := abi.PaddedPieceSize(tm.options.sectorSize).Unpadded()
 
-		// NI-PoRep lets us determine the deadline, so we can check that it's set correctly
-		sp, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNumber, r.TipSet)
-		req.NoError(err)
-		req.Equal(provingDeadline, sp.Deadline)
-	} else {
-		tm.t.Log("Submitting ProveCommitSector ...")
+	// Write unsealed sector file
+	if err := os.WriteFile(sector.unsealedSectorPath, make([]byte, unsealedSize), 0644); err != nil {
+		return sectorInfo{}, err
+	}
+	tm.log("Sector %d: wrote unsealed CC sector to %s", sector.sectorNumber, sector.unsealedSectorPath)
 
-		r, err := tm.SubmitMessage(ctx, &miner14.ProveCommitSectors3Params{
-			SectorActivations:        []miner14.SectorActivationManifest{{SectorNumber: sectorNumber, Pieces: pieceManifest}},
-			SectorProofs:             [][]byte{proveCommit},
-			RequireActivationSuccess: true,
-		}, 0, builtin.MethodsMiner.ProveCommitSectors3)
-		req.NoError(err)
-		req.True(r.Receipt.ExitCode.IsSuccess())
+	// Write sealed sector file
+	if err := os.WriteFile(sector.sealedSectorPath, make([]byte, tm.options.sectorSize), 0644); err != nil {
+		return sectorInfo{}, err
 	}
+	tm.log("Sector %d: wrote sealed CC sector to %s", sector.sectorNumber, sector.sealedSectorPath)
+
+	return sector, nil
 }
 
-func (tm *TestUnmanagedMiner) OnboardCCSector(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp, context.CancelFunc) {
-	sectorNumber := tm.NextSectorNumber()
+func (tm *TestUnmanagedMiner) mkStagedFileWithPieces(pt abi.RegisteredSealProof) ([]abi.PieceInfo, string, error) {
+	paddedPieceSize := abi.PaddedPieceSize(tm.options.sectorSize)
+	unpaddedPieceSize := paddedPieceSize.Unpadded()
 
-	if !tm.mockProofs {
-		// Write empty bytes that we want to seal i.e. create our CC sector
-		tm.makeAndSaveCCSector(ctx, sectorNumber)
+	// Generate random bytes for the piece
+	randomBytes := make([]byte, unpaddedPieceSize)
+	if _, err := io.ReadFull(rand.Reader, randomBytes); err != nil {
+		return nil, "", err
 	}
 
-	seedEpoch, proveCommit := tm.PrepareSectorForProveCommit(ctx, proofType, sectorNumber, []abi.PieceInfo{})
-
-	tm.SubmitProveCommit(ctx, proofType, sectorNumber, seedEpoch, proveCommit, nil)
+	// Create a temporary file for the first piece
+	pieceFileA, err := mkTempFile(tm.t, bytes.NewReader(randomBytes), uint64(unpaddedPieceSize))
+	if err != nil {
+		return nil, "", err
+	}
 
-	tm.proofType[sectorNumber] = proofType
-	respCh, cancelFn := tm.wdPostLoop(ctx, sectorNumber, tm.sealedCids[sectorNumber], tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber])
+	// Generate the piece CID from the file
+	pieceCIDA, err := ffi.GeneratePieceCIDFromFile(pt, pieceFileA, unpaddedPieceSize)
+	if err != nil {
+		return nil, "", err
+	}
 
-	return sectorNumber, respCh, cancelFn
-}
+	// Reset file offset to the beginning after CID generation
+	if _, err = pieceFileA.Seek(0, io.SeekStart); err != nil {
+		return nil, "", err
+	}
 
-func (tm *TestUnmanagedMiner) OnboardSectorWithPieces(ctx context.Context, proofType abi.RegisteredSealProof) (abi.SectorNumber, chan WindowPostResp, context.CancelFunc) {
-	sectorNumber := tm.NextSectorNumber()
+	unsealedSectorFile, err := mkTempFile(tm.t, bytes.NewReader([]byte{}), 0)
+	if err != nil {
+		return nil, "", err
+	}
+	defer func() {
+		_ = unsealedSectorFile.Close()
+	}()
 
-	// Build a sector with non 0 Pieces that we want to onboard
-	var pieces []abi.PieceInfo
-	if !tm.mockProofs {
-		pieces = tm.mkAndSavePiecesToOnboard(ctx, sectorNumber, proofType)
-	} else {
-		pieces = []abi.PieceInfo{{
-			Size:     abi.PaddedPieceSize(tm.options.sectorSize),
-			PieceCID: cid.MustParse("baga6ea4seaqjtovkwk4myyzj56eztkh5pzsk5upksan6f5outesy62bsvl4dsha"),
-		}}
+	// Write the piece to the staged sector file without alignment
+	writtenBytes, pieceCID, err := ffi.WriteWithoutAlignment(pt, pieceFileA, unpaddedPieceSize, unsealedSectorFile)
+	if err != nil {
+		return nil, "", err
+	}
+	if unpaddedPieceSize != writtenBytes {
+		return nil, "", fmt.Errorf("expected to write %d bytes, wrote %d", unpaddedPieceSize, writtenBytes)
+	}
+	if !pieceCID.Equals(pieceCIDA) {
+		return nil, "", fmt.Errorf("expected piece CID %s, got %s", pieceCIDA, pieceCID)
 	}
 
-	_, proveCommit := tm.PrepareSectorForProveCommit(ctx, proofType, sectorNumber, pieces)
+	// Create a struct for the piece info
+	publicPieces := []abi.PieceInfo{{
+		Size:     paddedPieceSize,
+		PieceCID: pieceCIDA,
+	}}
 
-	// Submit the ProveCommit to the network
-	tm.t.Log("Submitting ProveCommitSector ...")
+	return publicPieces, unsealedSectorFile.Name(), nil
+}
 
-	var manifest []miner14.PieceActivationManifest
-	for _, piece := range pieces {
-		manifest = append(manifest, miner14.PieceActivationManifest{
-			CID:  piece.PieceCID,
-			Size: piece.Size,
-		})
-	}
+// waitForMutableDeadline will wait until we are not in the proving deadline for the given
+// sector, or the deadline after the proving deadline.
+// For safety, to avoid possible races with the window post loop, we will also avoid the
+// deadline before the proving deadline.
+func (tm *TestUnmanagedMiner) waitForMutableDeadline(sectorNum abi.SectorNumber) {
+	req := require.New(tm.t)
 
-	tm.SubmitProveCommit(ctx, proofType, sectorNumber, 0, proveCommit, manifest)
+	ts, err := tm.FullNode.ChainHead(tm.ctx)
+	req.NoError(err)
 
-	tm.proofType[sectorNumber] = proofType
-	respCh, cancelFn := tm.wdPostLoop(ctx, sectorNumber, tm.sealedCids[sectorNumber], tm.sealedSectorPaths[sectorNumber], tm.cacheDirPaths[sectorNumber])
+	sl, err := tm.FullNode.StateSectorPartition(tm.ctx, tm.ActorAddr, sectorNum, ts.Key())
+	req.NoError(err)
 
-	return sectorNumber, respCh, cancelFn
-}
+	dlinfo, err := tm.FullNode.StateMinerProvingDeadline(tm.ctx, tm.ActorAddr, ts.Key())
+	req.NoError(err)
+
+	sectorDeadlineCurrent := sl.Deadline == dlinfo.Index                                                          // we are in the proving deadline
+	sectorDeadlineNext := (dlinfo.Index+1)%dlinfo.WPoStPeriodDeadlines == sl.Deadline                             // we are in the deadline before the proving deadline
+	sectorDeadlinePrev := (dlinfo.Index-1+dlinfo.WPoStPeriodDeadlines)%dlinfo.WPoStPeriodDeadlines == sl.Deadline // we are in the deadline after the proving deadline
+
+	if sectorDeadlineCurrent || sectorDeadlineNext || sectorDeadlinePrev {
+		// We are in a sensitive or immutable deadline, so we need to wait
+		targetEpoch := dlinfo.NextOpen() // end of the current deadline
+		if sectorDeadlineCurrent {
+			// we are in the proving deadline, wait until the end of the next one
+			targetEpoch += dlinfo.WPoStChallengeWindow
+		} else if sectorDeadlineNext {
+			// the proving deadline is next, wait until it has passed plus an additional window
+			targetEpoch += dlinfo.WPoStChallengeWindow * 2
+		}
+		_, err := tm.FullNode.WaitTillChainOrError(tm.ctx, HeightAtLeast(targetEpoch+5))
+		req.NoError(err)
+	}
+}
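// [Editor's illustration — not part of the PR] A worked example of the guard above,
// assuming mainnet parameters (WPoStPeriodDeadlines = 48, WPoStChallengeWindow = 30):
//
//	sector assigned to deadline 10 (sl.Deadline == 10):
//	  dlinfo.Index == 10 -> in the proving deadline:      wait to NextOpen()+30+5
//	  dlinfo.Index == 9  -> proving deadline opens next:  wait to NextOpen()+60+5
//	  dlinfo.Index == 11 -> proving deadline just closed: wait to NextOpen()+5
//	  any other index    -> the deadline is mutable, no waiting needed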
-// calculateNextPostEpoch calculates the first epoch of the deadline proving window
-// that is desired for the given sector for the specified miner.
-// This function returns the current epoch and the calculated proving epoch.
-func (tm *TestUnmanagedMiner) calculateNextPostEpoch(
-	ctx context.Context,
-	sectorNumber abi.SectorNumber,
-) (abi.ChainEpoch, abi.ChainEpoch, error) {
-	// Retrieve the current blockchain head
-	head, err := tm.FullNode.ChainHead(ctx)
-	if err != nil {
-		return 0, 0, fmt.Errorf("failed to get chain head: %w", err)
+func (tm *TestUnmanagedMiner) preCommitSectors(
+	sealRandEpoch abi.ChainEpoch,
+	proofType abi.RegisteredSealProof,
+	sector sectorInfo,
+) error {
+
+	if proofType.IsNonInteractive() {
+		return nil
 	}
 
-	// Obtain the proving deadline information for the miner
-	di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, head.Key())
+	// Submit the PreCommit to the network
+	if sector.proofType != proofType {
+		return fmt.Errorf("sector proof type does not match PreCommit proof type")
+	}
+	sealedCid := sector.sealedCid
+	var uc *cid.Cid
+	if len(sector.pieces) > 0 {
+		unsealedCid := sector.unsealedCid
+		uc = &unsealedCid
+	}
+	spci := []miner14.SectorPreCommitInfo{{
+		Expiration:    2880 * 300,
+		SectorNumber:  sector.sectorNumber,
+		SealProof:     proofType,
+		SealedCID:     sealedCid,
+		SealRandEpoch: sealRandEpoch,
+		UnsealedCid:   uc,
+	}}
+	r, err := tm.SubmitMessage(&miner14.PreCommitSectorBatchParams2{Sectors: spci}, 1, builtin.MethodsMiner.PreCommitSectorBatch2)
 	if err != nil {
-		return 0, 0, fmt.Errorf("failed to get proving deadline: %w", err)
+		return err
+	}
+	tm.log("PreCommitSectorBatch2 submitted at epoch %d: %+v", r.Height, spci[0])
+	if !r.Receipt.ExitCode.IsSuccess() {
+		return fmt.Errorf("PreCommit failed with exit code: %s", r.Receipt.ExitCode)
 	}
 
-	tm.t.Logf("Miner %s: WindowPoST(%d): ProvingDeadline: %+v", tm.ActorAddr, sectorNumber, di)
+	return nil
+}
+
+func (tm *TestUnmanagedMiner) submitProveCommit(
+	proofType abi.RegisteredSealProof,
+	sectors []sectorInfo,
+	requireActivationSuccess bool,
+	modifyNIActivationsBeforeSubmit func([]miner14.SectorNIActivationInfo) []miner14.SectorNIActivationInfo,
+) []exitcode.ExitCode {
+
+	req := require.New(tm.t)
+
+	sectorProofs := make([][]byte, len(sectors))
+	for i, sector := range sectors {
+		sectorProofs[i] = sector.sectorProof
+	}
+
+	var msgReturn *api.MsgLookup
+	var provingDeadline uint64 = 7 // for niporep
+
+	// Submit the ProveCommit to the network
+	if proofType.IsNonInteractive() {
+		if tm.IsImmutableDeadline(provingDeadline) {
+			// avoid immutable deadlines
+			provingDeadline = 5
+		}
+
+		actorIdNum, err := address.IDFromAddress(tm.ActorAddr)
+		req.NoError(err)
+		actorId := abi.ActorID(actorIdNum)
+
+		infos := make([]proof.AggregateSealVerifyInfo, len(sectors))
+		activations := make([]miner14.SectorNIActivationInfo, len(sectors))
+		for i, sector := range sectors {
+			req.Nil(sector.pieces, "pieces should be nil for Non-interactive PoRep")
+
+			infos[i] = proof.AggregateSealVerifyInfo{
+				Number:                sector.sectorNumber,
+				Randomness:            sector.sealTickets,
+				InteractiveRandomness: niPorepInteractiveRandomness,
+				SealedCID:             sector.sealedCid,
+				UnsealedCID:           sector.unsealedCid,
+			}
+
+			activations[i] = miner14.SectorNIActivationInfo{
+				SealingNumber: sector.sectorNumber,
+				SealerID:      actorId,
+				SealedCID:     sector.sealedCid,
+				SectorNumber:  sector.sectorNumber,
+				SealRandEpoch: sector.sealRandomnessEpoch,
+				Expiration:    2880 * 300,
+			}
+		}
+
+		var sectorProof []byte
+		if tm.mockProofs {
+			sectorProof = []byte{0xde, 0xad, 0xbe, 0xef}
+		} else {
+			asvpai := proof.AggregateSealVerifyProofAndInfos{
+				Miner:          actorId,
+				SealProof:      proofType,
+				AggregateProof: abi.RegisteredAggregationProof_SnarkPackV2,
+				Infos:          infos,
+			}
+			tm.log("Aggregating circuit proofs: %+v", asvpai)
+			sectorProof, err = ffi.AggregateSealProofs(asvpai, sectorProofs)
+			req.NoError(err)
+
+			asvpai.Proof = sectorProof
+
+			verified, err := ffi.VerifyAggregateSeals(asvpai)
+			req.NoError(err)
+			req.True(verified, "failed to verify aggregated circuit proof")
+		}
 
-	// Fetch the sector partition for the given sector number
-	sp, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNumber, head.Key())
-	if err != nil {
-		return 0, 0, fmt.Errorf("failed to get sector partition: %w", err)
-	}
+		if modifyNIActivationsBeforeSubmit != nil {
+			activations = modifyNIActivationsBeforeSubmit(activations)
+		}
 
-	tm.t.Logf("Miner %s: WindowPoST(%d): SectorPartition: %+v", tm.ActorAddr, sectorNumber, sp)
+		tm.log("Submitting ProveCommitSectorsNI ...")
+		msgReturn, err = tm.SubmitMessage(&miner14.ProveCommitSectorsNIParams{
+			Sectors:                  activations,
+			AggregateProof:           sectorProof,
+			SealProofType:            proofType,
+			AggregateProofType:       abi.RegisteredAggregationProof_SnarkPackV2,
+			ProvingDeadline:          provingDeadline,
+			RequireActivationSuccess: requireActivationSuccess,
+		}, 1, builtin.MethodsMiner.ProveCommitSectorsNI)
+		req.NoError(err)
+		req.True(msgReturn.Receipt.ExitCode.IsSuccess())
+	} else {
+		// else standard porep
+		activations := make([]miner14.SectorActivationManifest, len(sectors))
+		for i, sector := range sectors {
+			activations[i] = miner14.SectorActivationManifest{SectorNumber: sector.sectorNumber}
+			if len(sector.pieces) > 0 {
+				activations[i].Pieces = make([]miner14.PieceActivationManifest, len(sector.pieces))
+				for j, piece := range sector.pieces {
+					activations[i].Pieces[j] = miner14.PieceActivationManifest{
+						CID:  piece.PieceCID,
+						Size: piece.Size,
+					}
+				}
+			}
+		}
 
-	// Calculate the start of the period, adjusting if the current deadline has passed
-	periodStart := di.PeriodStart
-	// calculate current deadline index because it won't be reliable from state until the first
-	// challenge window cron tick after first sector onboarded
-	currentDeadlineIdx := uint64(math.Abs(float64((di.CurrentEpoch - di.PeriodStart) / di.WPoStChallengeWindow)))
-	if di.PeriodStart < di.CurrentEpoch && sp.Deadline <= currentDeadlineIdx {
-		// If the deadline has passed in the current proving period, calculate for the next period
-		// Note that di.Open may be > di.CurrentEpoch if the miner has just been enrolled in cron so
-		// their deadlines haven't started rolling yet
-		periodStart += di.WPoStProvingPeriod
+		tm.log("Submitting ProveCommitSectors3 with activations: %+v", activations)
+		var err error
+		msgReturn, err = tm.SubmitMessage(&miner14.ProveCommitSectors3Params{
+			SectorActivations:        activations,
+			SectorProofs:             sectorProofs,
+			RequireActivationSuccess: requireActivationSuccess,
+		}, 0, builtin.MethodsMiner.ProveCommitSectors3)
+		req.NoError(err)
+		req.True(msgReturn.Receipt.ExitCode.IsSuccess())
 	}
 
-	// Calculate the exact epoch when proving should occur
-	provingEpoch := periodStart + di.WPoStChallengeWindow*abi.ChainEpoch(sp.Deadline)
-
-	tm.t.Logf("Miner %s: WindowPoST(%d): next ProvingEpoch: %d", tm.ActorAddr, sectorNumber, provingEpoch)
+	var returnValue miner14.BatchReturn
+	req.NoError(returnValue.UnmarshalCBOR(bytes.NewReader(msgReturn.Receipt.Return)))
+	exitCodes := exitCodesFromBatchReturn(returnValue)
+
+	for i, sector := range sectors {
+		si, err := tm.FullNode.StateSectorGetInfo(tm.ctx, tm.ActorAddr, sector.sectorNumber, msgReturn.TipSet)
tm.log("SectorOnChainInfo for sector %d w/ exit code %s: %+v", sector.sectorNumber, exitCodes[i], si) + if exitCodes[i].IsSuccess() { + req.NoError(err) + req.Equal(si.SectorNumber, sector.sectorNumber) + req.Equal(si.Activation, msgReturn.Height-1) + } else { + req.Nil(si, "sector should not be on chain") + } + if proofType.IsNonInteractive() { + // NI-PoRep lets us determine the deadline, so we can check that it's set correctly + sp, err := tm.FullNode.StateSectorPartition(tm.ctx, tm.ActorAddr, sector.sectorNumber, msgReturn.TipSet) + if exitCodes[i].IsSuccess() { + req.NoError(err) + req.Equal(provingDeadline, sp.Deadline) + } else { + req.ErrorContains(err, fmt.Sprintf("sector %d not due at any deadline", sector.sectorNumber)) + } + } + } - return di.CurrentEpoch, provingEpoch, nil + return exitCodes } -func (tm *TestUnmanagedMiner) wdPostLoop( - pctx context.Context, - sectorNumber abi.SectorNumber, - sealedCid cid.Cid, - sealedPath, - cacheDir string, -) (chan WindowPostResp, context.CancelFunc) { - - ctx, cancelFn := context.WithCancel(pctx) - respCh := make(chan WindowPostResp, 1) +func (tm *TestUnmanagedMiner) wdPostLoop() { + if tm.runningWdPostLoop { + return + } + tm.runningWdPostLoop = true - head, err := tm.FullNode.ChainHead(ctx) + head, err := tm.FullNode.ChainHead(tm.ctx) require.NoError(tm.t, err) // wait one challenge window for cron to do its thing with deadlines, just to be sure so we get // an accurate dline.Info whenever we ask for it - _ = tm.FullNode.WaitTillChain(ctx, HeightAtLeast(head.Height()+miner14.WPoStChallengeWindow+5)) + _ = tm.FullNode.WaitTillChain(tm.ctx, HeightAtLeast(head.Height()+miner14.WPoStChallengeWindow+5)) go func() { - var firstPost bool + var postCount int - writeRespF := func(respErr error) { - var send WindowPostResp - if respErr == nil { - if firstPost { - return // already reported on our first post, no error to report, don't send anything - } - send.Posted = true - firstPost = true + recordPostOrError := func(post windowPost) { + head, err := tm.FullNode.ChainHead(tm.ctx) + if err != nil { + tm.log("WindowPoSt submission failed to get chain head: %s", err) } else { - if ctx.Err() == nil { - tm.t.Logf("Sector %d: WindowPoSt submission failed: %s", sectorNumber, respErr) - } - send.Error = respErr + post.Epoch = head.Height() + } + if post.Error != nil && tm.ctx.Err() == nil { + tm.log("WindowPoSt submission failed for sectors %v at epoch %d: %s", post.Posted, post.Epoch, post.Error) + } else if tm.ctx.Err() == nil { + postCount++ + tm.log("WindowPoSt loop completed post %d at epoch %d for sectors %v", postCount, post.Epoch, post.Posted) } - select { - case respCh <- send: - case <-ctx.Done(): - default: + if tm.ctx.Err() == nil { + tm.postsLk.Lock() + tm.posts = append(tm.posts, post) + tm.postsLk.Unlock() } } - var postCount int - for ctx.Err() == nil { - currentEpoch, nextPost, err := tm.calculateNextPostEpoch(ctx, sectorNumber) - tm.t.Logf("Activating sector %d, next post %d, current epoch %d", sectorNumber, nextPost, currentEpoch) + for tm.ctx.Err() == nil { + postSectors, err := tm.sectorsToPost() if err != nil { - writeRespF(err) + recordPostOrError(windowPost{Error: err}) return } - - nextPost += 5 // add some padding so we're properly into the window - - if nextPost > currentEpoch { - if _, err := tm.FullNode.WaitTillChainOrError(ctx, HeightAtLeast(nextPost)); err != nil { - writeRespF(err) + tm.log("WindowPoST sectors to post in this challenge window: %v", postSectors) + if len(postSectors) > 0 { + // something to post now + if 
+					recordPostOrError(windowPost{Error: err})
 					return
 				}
+				recordPostOrError(windowPost{Posted: postSectors})
 			}
 
-			err = tm.submitWindowPost(ctx, sectorNumber, sealedCid, sealedPath, cacheDir)
-			writeRespF(err) // send an error, or first post, or nothing if no error and this isn't the first post
-			if err != nil {
+			// skip to next challenge window
+			if err := tm.waitForNextPostDeadline(); err != nil {
+				recordPostOrError(windowPost{Error: err})
 				return
 			}
-			postCount++
-			tm.t.Logf("Sector %d: WindowPoSt #%d submitted", sectorNumber, postCount)
 		}
 	}()
+}
+
+// waitForNextPostDeadline waits until we are within the next challenge window for this miner
+func (tm *TestUnmanagedMiner) waitForNextPostDeadline() error {
+	ctx, cancel := context.WithCancel(tm.ctx)
+	defer cancel()
+
+	di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, types.EmptyTSK)
+	if err != nil {
+		return fmt.Errorf("waitForNextPostDeadline: failed to get proving deadline: %w", err)
+	}
+	currentDeadlineIdx := CurrentDeadlineIndex(di)
+	nextDeadlineEpoch := di.PeriodStart + di.WPoStChallengeWindow*abi.ChainEpoch(currentDeadlineIdx+1)
+
+	tm.log("Window PoST waiting until next challenge window, currentDeadlineIdx: %d, nextDeadlineEpoch: %d", currentDeadlineIdx, nextDeadlineEpoch)
+
+	heads, err := tm.FullNode.ChainNotify(ctx)
+	if err != nil {
+		return err
+	}
+
+	for chg := range heads {
+		for _, c := range chg {
+			if c.Type != "apply" {
+				continue
+			}
+			if ts := c.Val; ts.Height() >= nextDeadlineEpoch+5 { // add some buffer
+				return nil
+			}
+		}
+	}
+
+	return fmt.Errorf("waitForNextPostDeadline: failed to wait for nextDeadlineEpoch %d", nextDeadlineEpoch)
+}
+
+// sectorsToPost returns the sectors that are due to be posted in the current challenge window
+func (tm *TestUnmanagedMiner) sectorsToPost() ([]abi.SectorNumber, error) {
+	di, err := tm.FullNode.StateMinerProvingDeadline(tm.ctx, tm.ActorAddr, types.EmptyTSK)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get proving deadline: %w", err)
+	}
+
+	currentDeadlineIdx := CurrentDeadlineIndex(di)
+
+	var allSectors []abi.SectorNumber
+	var sectorsToPost []abi.SectorNumber
+
+	tm.committedSectorsLk.Lock()
+	for _, sector := range tm.committedSectors {
+		allSectors = append(allSectors, sector.sectorNumber)
+	}
+	tm.committedSectorsLk.Unlock()
+
+	for _, sectorNumber := range allSectors {
+		sp, err := tm.FullNode.StateSectorPartition(tm.ctx, tm.ActorAddr, sectorNumber, types.EmptyTSK)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get sector partition: %w", err)
+		}
+		if sp.Deadline == currentDeadlineIdx {
+			sectorsToPost = append(sectorsToPost, sectorNumber)
+		}
+	}
 
-	return respCh, cancelFn
+	return sectorsToPost, nil
 }
 
-func (tm *TestUnmanagedMiner) SubmitPostDispute(ctx context.Context, sectorNumber abi.SectorNumber) error {
-	tm.t.Logf("Miner %s: Starting dispute submission for sector %d", tm.ActorAddr, sectorNumber)
+func (tm *TestUnmanagedMiner) submitPostDispute(sectorNumber abi.SectorNumber) error {
+	tm.log("Starting dispute submission for sector %d", sectorNumber)
 
-	head, err := tm.FullNode.ChainHead(ctx)
+	head, err := tm.FullNode.ChainHead(tm.ctx)
 	if err != nil {
 		return fmt.Errorf("MinerB(%s): failed to get chain head: %w", tm.ActorAddr, err)
 	}
 
-	sp, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNumber, head.Key())
+	sp, err := tm.FullNode.StateSectorPartition(tm.ctx, tm.ActorAddr, sectorNumber, head.Key())
 	if err != nil {
 		return fmt.Errorf("MinerB(%s): failed to get sector partition for sector %d: %w", tm.ActorAddr, sectorNumber, err)
 	}
-	di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, head.Key())
+	di, err := tm.FullNode.StateMinerProvingDeadline(tm.ctx, tm.ActorAddr, head.Key())
 	if err != nil {
 		return fmt.Errorf("MinerB(%s): failed to get proving deadline for sector %d: %w", tm.ActorAddr, sectorNumber, err)
 	}
 
 	disputeEpoch := di.Close + 5
-	tm.t.Logf("Miner %s: Sector %d - Waiting %d epochs until epoch %d to submit dispute", tm.ActorAddr, sectorNumber, disputeEpoch-head.Height(), disputeEpoch)
+	tm.log("Sector %d - Waiting %d epochs until epoch %d to submit dispute", sectorNumber, disputeEpoch-head.Height(), disputeEpoch)
 
-	tm.FullNode.WaitTillChain(ctx, HeightAtLeast(disputeEpoch))
+	tm.FullNode.WaitTillChain(tm.ctx, HeightAtLeast(disputeEpoch))
 
-	tm.t.Logf("Miner %s: Sector %d - Disputing WindowedPoSt to confirm validity at epoch %d", tm.ActorAddr, sectorNumber, disputeEpoch)
+	tm.log("Sector %d - Disputing WindowedPoSt to confirm validity at epoch %d", sectorNumber, disputeEpoch)
 
-	_, err = tm.SubmitMessage(ctx, &miner14.DisputeWindowedPoStParams{
+	_, err = tm.SubmitMessage(&miner14.DisputeWindowedPoStParams{
 		Deadline:  sp.Deadline,
 		PoStIndex: 0,
 	}, 1, builtin.MethodsMiner.DisputeWindowedPoSt)
 	return err
 }
 
-func (tm *TestUnmanagedMiner) submitWindowPost(ctx context.Context, sectorNumber abi.SectorNumber, sealedCid cid.Cid, sealedPath, cacheDir string) error {
-	tm.t.Logf("Miner(%s): WindowPoST(%d): Running WindowPoSt ...\n", tm.ActorAddr, sectorNumber)
+func (tm *TestUnmanagedMiner) submitWindowPost(sectorNumbers []abi.SectorNumber) error {
+	if len(sectorNumbers) == 0 {
+		return fmt.Errorf("no sectors to submit window post for")
+	}
+
+	// We are limited to PoSting PoStedPartitionsMax (3) partitions at a time, so we need to group the
+	// sectors by partition and submit the PoSts in batches of PoStedPartitionsMax.
-	head, err := tm.FullNode.ChainHead(ctx)
+	partitionMap := make(map[uint64][]sectorInfo)
+
+	head, err := tm.FullNode.ChainHead(tm.ctx)
 	if err != nil {
 		return fmt.Errorf("Miner(%s): failed to get chain head: %w", tm.ActorAddr, err)
 	}
 
-	sp, err := tm.FullNode.StateSectorPartition(ctx, tm.ActorAddr, sectorNumber, head.Key())
+	di, err := tm.FullNode.StateMinerProvingDeadline(tm.ctx, tm.ActorAddr, head.Key())
 	if err != nil {
-		return fmt.Errorf("Miner(%s): failed to get sector partition for sector %d: %w", tm.ActorAddr, sectorNumber, err)
+		return fmt.Errorf("Miner(%s): failed to get proving deadline: %w", tm.ActorAddr, err)
 	}
+	chainRandomnessEpoch := di.Challenge
 
-	di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, head.Key())
-	if err != nil {
-		return fmt.Errorf("Miner(%s): failed to get proving deadline for sector %d: %w", tm.ActorAddr, sectorNumber, err)
-	}
-	tm.t.Logf("Miner(%s): WindowPoST(%d): SectorPartition: %+v, ProvingDeadline: %+v\n", tm.ActorAddr, sectorNumber, sp, di)
-	if di.Index != sp.Deadline {
-		return fmt.Errorf("Miner(%s): sector %d is not in the deadline %d, but %d", tm.ActorAddr, sectorNumber, sp.Deadline, di.Index)
-	}
+	for _, sectorNumber := range sectorNumbers {
+		sector, err := tm.getCommittedSector(sectorNumber)
+		if err != nil {
+			return fmt.Errorf("Miner(%s): failed to get committed sector %d: %w", tm.ActorAddr, sectorNumber, err)
+		}
 
-	var proofBytes []byte
-	if tm.mockProofs {
-		proofBytes = []byte{0xde, 0xad, 0xbe, 0xef}
-	} else {
-		proofBytes, err = tm.generateWindowPost(ctx, sectorNumber, sealedCid, sealedPath, cacheDir)
+		sp, err := tm.FullNode.StateSectorPartition(tm.ctx, tm.ActorAddr, sectorNumber, head.Key())
 		if err != nil {
-			return fmt.Errorf("Miner(%s): failed to generate window post for sector %d: %w", tm.ActorAddr, sectorNumber, err)
+			return fmt.Errorf("Miner(%s): failed to get sector partition for sector %d: %w", tm.ActorAddr, sectorNumber, err)
 		}
-	}
 
-	tm.t.Logf("Miner(%s): WindowedPoSt(%d) Submitting ...\n", tm.ActorAddr, sectorNumber)
+		if di.Index != sp.Deadline {
+			return fmt.Errorf("Miner(%s): sector %d is not in the expected deadline %d, but %d", tm.ActorAddr, sectorNumber, sp.Deadline, di.Index)
+		}
 
-	chainRandomnessEpoch := di.Challenge
-	chainRandomness, err := tm.FullNode.StateGetRandomnessFromTickets(ctx, crypto.DomainSeparationTag_PoStChainCommit, chainRandomnessEpoch,
+		if _, ok := partitionMap[sp.Partition]; !ok {
+			partitionMap[sp.Partition] = make([]sectorInfo, 0)
+		}
+		partitionMap[sp.Partition] = append(partitionMap[sp.Partition], sector)
+	}
+
+	chainRandomness, err := tm.FullNode.StateGetRandomnessFromTickets(tm.ctx, crypto.DomainSeparationTag_PoStChainCommit, chainRandomnessEpoch,
 		nil, head.Key())
 	if err != nil {
-		return fmt.Errorf("Miner(%s): failed to get chain randomness for sector %d: %w", tm.ActorAddr, sectorNumber, err)
+		return fmt.Errorf("Miner(%s): failed to get chain randomness for deadline %d: %w", tm.ActorAddr, di.Index, err)
 	}
 
-	minerInfo, err := tm.FullNode.StateMinerInfo(ctx, tm.ActorAddr, head.Key())
+	minerInfo, err := tm.FullNode.StateMinerInfo(tm.ctx, tm.ActorAddr, head.Key())
 	if err != nil {
-		return fmt.Errorf("Miner(%s): failed to get miner info for sector %d: %w", tm.ActorAddr, sectorNumber, err)
+		return fmt.Errorf("Miner(%s): failed to get miner info: %w", tm.ActorAddr, err)
 	}
 
-	r, err := tm.SubmitMessage(ctx, &miner14.SubmitWindowedPoStParams{
-		ChainCommitEpoch: chainRandomnessEpoch,
-		ChainCommitRand:  chainRandomness,
-		Deadline:         sp.Deadline,
-		Partitions:       []miner14.PoStPartition{{Index: sp.Partition}},
[]miner14.PoStPartition{{Index: sp.Partition}}, - Proofs: []proof.PoStProof{{PoStProof: minerInfo.WindowPoStProofType, ProofBytes: proofBytes}}, - }, 0, builtin.MethodsMiner.SubmitWindowedPoSt) - if err != nil { - return fmt.Errorf("Miner(%s): failed to submit window post for sector %d: %w", tm.ActorAddr, sectorNumber, err) + postMessages := make([]cid.Cid, 0) + + submit := func(partitions []uint64, sectors []sectorInfo) error { + var proofBytes []byte + if tm.mockProofs { + proofBytes = []byte{0xde, 0xad, 0xbe, 0xef} + } else { + proofBytes, err = tm.generateWindowPost(sectors) + if err != nil { + return fmt.Errorf("Miner(%s): failed to generate window post for deadline %d, partitions %v: %w", tm.ActorAddr, di.Index, partitions, err) + } + } + + tm.log("WindowPoST submitting %d sectors for deadline %d, partitions %v", len(sectors), di.Index, partitions) + + pp := make([]miner14.PoStPartition, len(partitions)) + for i, p := range partitions { + pp[i] = miner14.PoStPartition{Index: p} + } + + // Push all our post messages to the mpool and wait for them later. The blockminer might have + // ceased mining while it waits to see our posts in the mpool and if we wait for the first + // message to land but it's waiting for a later sector then we'll deadlock. + mCid, err := tm.mpoolPushMessage(&miner14.SubmitWindowedPoStParams{ + ChainCommitEpoch: chainRandomnessEpoch, + ChainCommitRand: chainRandomness, + Deadline: di.Index, + Partitions: pp, + Proofs: []proof.PoStProof{{PoStProof: minerInfo.WindowPoStProofType, ProofBytes: proofBytes}}, // can only have 1 + }, 0, builtin.MethodsMiner.SubmitWindowedPoSt) + if err != nil { + return fmt.Errorf("Miner(%s): failed to submit PoSt for deadline %d, partitions %v: %w", tm.ActorAddr, di.Index, partitions, err) + } + + postMessages = append(postMessages, mCid) + + return nil } - if !r.Receipt.ExitCode.IsSuccess() { - return fmt.Errorf("Miner(%s): submitting PoSt for sector %d failed: %s", tm.ActorAddr, sectorNumber, r.Receipt.ExitCode) + toSubmitPartitions := make([]uint64, 0) + toSubmitSectors := make([]sectorInfo, 0) + for partition, sectors := range partitionMap { + if len(sectors) == 0 { + continue + } + toSubmitPartitions = append(toSubmitPartitions, partition) + toSubmitSectors = append(toSubmitSectors, sectors...) 
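+ // Flush a full batch as soon as we accumulate PoStedPartitionsMax partitions; any remainder is submitted after the loop.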
+ if len(toSubmitPartitions) == miner14.PoStedPartitionsMax { + if err := submit(toSubmitPartitions, toSubmitSectors); err != nil { + return err + } + toSubmitPartitions = make([]uint64, 0) + toSubmitSectors = make([]sectorInfo, 0) + } + } + if len(toSubmitPartitions) > 0 { + if err := submit(toSubmitPartitions, toSubmitSectors); err != nil { + return err + } + } + + tm.log("%d WindowPoST messages submitted for %d sectors, waiting for them to land on chain ...", len(postMessages), len(sectorNumbers)) + for _, mCid := range postMessages { + r, err := tm.waitMessage(mCid) + if err != nil { + return fmt.Errorf("Miner(%s): failed to wait for PoSt message %s: %w", tm.ActorAddr, mCid, err) + } + if !r.Receipt.ExitCode.IsSuccess() { + return fmt.Errorf("Miner(%s): PoSt submission failed for deadline %d: %s", tm.ActorAddr, di.Index, r.Receipt.ExitCode) + } } - tm.t.Logf("Miner(%s): WindowedPoSt(%d) Submitted ...\n", tm.ActorAddr, sectorNumber) + tm.log("WindowPoST(%v) submitted for deadline %d", sectorNumbers, di.Index) return nil } -func (tm *TestUnmanagedMiner) generateWindowPost( - ctx context.Context, - sectorNumber abi.SectorNumber, - sealedCid cid.Cid, - sealedPath string, - cacheDir string, -) ([]byte, error) { - head, err := tm.FullNode.ChainHead(ctx) +func (tm *TestUnmanagedMiner) generateWindowPost(sectorInfos []sectorInfo) ([]byte, error) { + head, err := tm.FullNode.ChainHead(tm.ctx) if err != nil { return nil, fmt.Errorf("failed to get chain head: %w", err) } - minerInfo, err := tm.FullNode.StateMinerInfo(ctx, tm.ActorAddr, head.Key()) + minerInfo, err := tm.FullNode.StateMinerInfo(tm.ctx, tm.ActorAddr, head.Key()) if err != nil { return nil, fmt.Errorf("failed to get miner info: %w", err) } - di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, types.EmptyTSK) + di, err := tm.FullNode.StateMinerProvingDeadline(tm.ctx, tm.ActorAddr, types.EmptyTSK) if err != nil { return nil, fmt.Errorf("failed to get proving deadline: %w", err) } @@ -799,22 +1123,31 @@ func (tm *TestUnmanagedMiner) generateWindowPost( return nil, fmt.Errorf("failed to marshal miner address: %w", err) } - rand, err := tm.FullNode.StateGetRandomnessFromBeacon(ctx, crypto.DomainSeparationTag_WindowedPoStChallengeSeed, di.Challenge, minerAddrBytes.Bytes(), head.Key()) + rand, err := tm.FullNode.StateGetRandomnessFromBeacon(tm.ctx, crypto.DomainSeparationTag_WindowedPoStChallengeSeed, di.Challenge, minerAddrBytes.Bytes(), head.Key()) if err != nil { return nil, fmt.Errorf("failed to get randomness: %w", err) } postRand := abi.PoStRandomness(rand) postRand[31] &= 0x3f // make fr32 compatible - privateSectorInfo := ffi.PrivateSectorInfo{ - SectorInfo: proof.SectorInfo{ - SealProof: tm.proofType[sectorNumber], - SectorNumber: sectorNumber, - SealedCID: sealedCid, - }, - CacheDirPath: cacheDir, - PoStProofType: minerInfo.WindowPoStProofType, - SealedSectorPath: sealedPath, + privateSectorInfo := make([]ffi.PrivateSectorInfo, len(sectorInfos)) + proofSectorInfo := make([]proof.SectorInfo, len(sectorInfos)) + for i, sector := range sectorInfos { + privateSectorInfo[i] = ffi.PrivateSectorInfo{ + SectorInfo: proof.SectorInfo{ + SealProof: sector.proofType, + SectorNumber: sector.sectorNumber, + SealedCID: sector.sealedCid, + }, + CacheDirPath: sector.cacheDirPath, + PoStProofType: minerInfo.WindowPoStProofType, + SealedSectorPath: sector.sealedSectorPath, + } + proofSectorInfo[i] = proof.SectorInfo{ + SealProof: sector.proofType, + SectorNumber: sector.sectorNumber, + SealedCID: sector.sealedCid, + } } actorIdNum, 
err := address.IDFromAddress(tm.ActorAddr) @@ -823,7 +1156,7 @@ func (tm *TestUnmanagedMiner) generateWindowPost( } actorId := abi.ActorID(actorIdNum) - windowProofs, faultySectors, err := ffi.GenerateWindowPoSt(actorId, ffi.NewSortedPrivateSectorInfo(privateSectorInfo), postRand) + windowProofs, faultySectors, err := ffi.GenerateWindowPoSt(actorId, ffi.NewSortedPrivateSectorInfo(privateSectorInfo...), postRand) if err != nil { return nil, fmt.Errorf("failed to generate window post: %w", err) } @@ -841,7 +1174,7 @@ func (tm *TestUnmanagedMiner) generateWindowPost( info := proof.WindowPoStVerifyInfo{ Randomness: postRand, Proofs: []proof.PoStProof{{PoStProof: minerInfo.WindowPoStProofType, ProofBytes: proofBytes}}, - ChallengedSectors: []proof.SectorInfo{{SealProof: tm.proofType[sectorNumber], SectorNumber: sectorNumber, SealedCID: sealedCid}}, + ChallengedSectors: proofSectorInfo, Prover: actorId, } @@ -855,208 +1188,239 @@ func (tm *TestUnmanagedMiner) generateWindowPost( return proofBytes, nil } -func (tm *TestUnmanagedMiner) waitPreCommitSealRandomness(ctx context.Context, sectorNumber abi.SectorNumber, proofType abi.RegisteredSealProof) abi.ChainEpoch { + +func (tm *TestUnmanagedMiner) waitPreCommitSealRandomness(proofType abi.RegisteredSealProof) (abi.ChainEpoch, error) { // We want to draw seal randomness from a tipset that has already achieved finality as PreCommits are expensive to re-generate. // Check if we already have an epoch that is already final and wait for such an epoch if we don't have one. - head, err := tm.FullNode.ChainHead(ctx) - require.NoError(tm.t, err) + head, err := tm.FullNode.ChainHead(tm.ctx) + if err != nil { + return 0, err + } + + sealLookback := policy.SealRandomnessLookback if proofType.IsNonInteractive() { - return head.Height() - 1 // no need to wait + // For NI-PoRep there isn't a strict wait, but we should wait enough epochs to be sure to avoid reorgs + sealLookback = 150 } var sealRandEpoch abi.ChainEpoch - if head.Height() > policy.SealRandomnessLookback { - sealRandEpoch = head.Height() - policy.SealRandomnessLookback + if head.Height() > sealLookback { + sealRandEpoch = head.Height() - sealLookback } else { - sealRandEpoch = policy.SealRandomnessLookback - tm.t.Logf("Miner %s waiting for at least epoch %d for seal randomness for sector %d (current epoch %d)...", tm.ActorAddr, sealRandEpoch+5, - sectorNumber, head.Height()) - tm.FullNode.WaitTillChain(ctx, HeightAtLeast(sealRandEpoch+5)) + sealRandEpoch = sealLookback + tm.log("Waiting for at least epoch %d for seal randomness (current epoch %d)...", sealRandEpoch+5, head.Height()) + head = tm.FullNode.WaitTillChain(tm.ctx, HeightAtLeast(sealRandEpoch+5)) } - tm.t.Logf("Miner %s using seal randomness from epoch %d for head %d for sector %d", tm.ActorAddr, sealRandEpoch, head.Height(), sectorNumber) + tm.log("Using seal randomness from epoch %d (now at head %d)", sealRandEpoch, head.Height()) - return sealRandEpoch + return sealRandEpoch, nil } -func (tm *TestUnmanagedMiner) generatePreCommit( - ctx context.Context, - sectorNumber abi.SectorNumber, - sealRandEpoch abi.ChainEpoch, - proofType abi.RegisteredSealProof, - pieceInfo []abi.PieceInfo, -) { - +// generatePreCommit is goroutine safe: it returns errors rather than using assertions, so it never calls FailNow or other panic-inducing methods. 
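+// It returns a copy of the given sectorInfo with sealTickets, sealedCid and unsealedCid populated (canned CIDs, and no seal tickets, when mockProofs is set).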
+func (tm *TestUnmanagedMiner) generatePreCommit(sector sectorInfo, sealRandEpoch abi.ChainEpoch) (sectorInfo, error) { if tm.mockProofs { - tm.sealedCids[sectorNumber] = cid.MustParse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkiekz") - if len(pieceInfo) > 0 { - tm.unsealedCids[sectorNumber] = cid.MustParse("baga6ea4seaqjtovkwk4myyzj56eztkh5pzsk5upksan6f5outesy62bsvl4dsha") + sector.sealedCid = cid.MustParse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkiekz") + if len(sector.pieces) > 0 { + sector.unsealedCid = fixedPieceCid } - return + return sector, nil } - req := require.New(tm.t) - tm.t.Logf("Miner %s: Generating proof type %d PreCommit for sector %d...", tm.ActorAddr, proofType, sectorNumber) + tm.log("Generating proof type %d PreCommit for sector %d...", sector.proofType, sector.sectorNumber) - head, err := tm.FullNode.ChainHead(ctx) - req.NoError(err, "Miner %s: Failed to get chain head for sector %d", tm.ActorAddr, sectorNumber) + head, err := tm.FullNode.ChainHead(tm.ctx) + if err != nil { + return sectorInfo{}, fmt.Errorf("failed to get chain head for sector %d: %w", sector.sectorNumber, err) + } minerAddrBytes := new(bytes.Buffer) - req.NoError(tm.ActorAddr.MarshalCBOR(minerAddrBytes), "Miner %s: Failed to marshal address for sector %d", tm.ActorAddr, sectorNumber) + if err := tm.ActorAddr.MarshalCBOR(minerAddrBytes); err != nil { + return sectorInfo{}, fmt.Errorf("failed to marshal address for sector %d: %w", sector.sectorNumber, err) + } - rand, err := tm.FullNode.StateGetRandomnessFromTickets(ctx, crypto.DomainSeparationTag_SealRandomness, sealRandEpoch, minerAddrBytes.Bytes(), head.Key()) - req.NoError(err, "Miner %s: Failed to get randomness for sector %d", tm.ActorAddr, sectorNumber) + rand, err := tm.FullNode.StateGetRandomnessFromTickets(tm.ctx, crypto.DomainSeparationTag_SealRandomness, sealRandEpoch, minerAddrBytes.Bytes(), head.Key()) + if err != nil { + return sectorInfo{}, fmt.Errorf("failed to get randomness for sector %d: %w", sector.sectorNumber, err) + } sealTickets := abi.SealRandomness(rand) - tm.t.Logf("Miner %s: Running proof type %d SealPreCommitPhase1 for sector %d...", tm.ActorAddr, proofType, sectorNumber) + tm.log("Running proof type %d SealPreCommitPhase1 for sector %d...", sector.proofType, sector.sectorNumber) actorIdNum, err := address.IDFromAddress(tm.ActorAddr) - req.NoError(err, "Miner %s: Failed to get actor ID for sector %d", tm.ActorAddr, sectorNumber) + if err != nil { + return sectorInfo{}, fmt.Errorf("failed to get actor ID for sector %d: %w", sector.sectorNumber, err) + } actorId := abi.ActorID(actorIdNum) pc1, err := ffi.SealPreCommitPhase1( - proofType, - tm.cacheDirPaths[sectorNumber], - tm.unsealedSectorPaths[sectorNumber], - tm.sealedSectorPaths[sectorNumber], - sectorNumber, + sector.proofType, + sector.cacheDirPath, + sector.unsealedSectorPath, + sector.sealedSectorPath, + sector.sectorNumber, actorId, sealTickets, - pieceInfo, + sector.pieces, ) - req.NoError(err, "Miner %s: SealPreCommitPhase1 failed for sector %d", tm.ActorAddr, sectorNumber) - req.NotNil(pc1, "Miner %s: SealPreCommitPhase1 returned nil for sector %d", tm.ActorAddr, sectorNumber) + if err != nil { + return sectorInfo{}, fmt.Errorf("failed to run SealPreCommitPhase1 for sector %d: %w", sector.sectorNumber, err) + } + if pc1 == nil { + return sectorInfo{}, fmt.Errorf("SealPreCommitPhase1 returned nil for sector %d", sector.sectorNumber) + } - tm.t.Logf("Miner %s: Running proof type %d SealPreCommitPhase2 for sector %d...", 
tm.ActorAddr, proofType, sectorNumber) + tm.log("Running proof type %d SealPreCommitPhase2 for sector %d...", sector.proofType, sector.sectorNumber) sealedCid, unsealedCid, err := ffi.SealPreCommitPhase2( pc1, - tm.cacheDirPaths[sectorNumber], - tm.sealedSectorPaths[sectorNumber], + sector.cacheDirPath, + sector.sealedSectorPath, ) - req.NoError(err, "Miner %s: SealPreCommitPhase2 failed for sector %d", tm.ActorAddr, sectorNumber) + if err != nil { + return sectorInfo{}, fmt.Errorf("failed to run SealPreCommitPhase2 for sector %d: %w", sector.sectorNumber, err) + } - tm.t.Logf("Miner %s: Unsealed CID for sector %d: %s", tm.ActorAddr, sectorNumber, unsealedCid) - tm.t.Logf("Miner %s: Sealed CID for sector %d: %s", tm.ActorAddr, sectorNumber, sealedCid) + tm.log("Unsealed CID for sector %d: %s", sector.sectorNumber, unsealedCid) + tm.log("Sealed CID for sector %d: %s", sector.sectorNumber, sealedCid) - tm.sealTickets[sectorNumber] = sealTickets - tm.sealedCids[sectorNumber] = sealedCid - tm.unsealedCids[sectorNumber] = unsealedCid + sector.sealTickets = sealTickets + sector.sealedCid = sealedCid + sector.unsealedCid = unsealedCid + + return sector, nil } -func (tm *TestUnmanagedMiner) proveCommitWaitSeed(ctx context.Context, sectorNumber abi.SectorNumber, proofType abi.RegisteredSealProof) (abi.ChainEpoch, abi.InteractiveSealRandomness) { +func (tm *TestUnmanagedMiner) proveCommitInteractiveRandomness( + sectorNumber abi.SectorNumber, + proofType abi.RegisteredSealProof, +) abi.InteractiveSealRandomness { + req := require.New(tm.t) - head, err := tm.FullNode.ChainHead(ctx) + + head, err := tm.FullNode.ChainHead(tm.ctx) req.NoError(err) - var seedRandomnessHeight abi.ChainEpoch + var interactiveRandomnessHeight abi.ChainEpoch if proofType.IsNonInteractive() { - seedRandomnessHeight = head.Height() - 1 // no need to wait, it just can't be current epoch - } else { - tm.t.Logf("Miner %s: Fetching pre-commit info for sector %d...", tm.ActorAddr, sectorNumber) - preCommitInfo, err := tm.FullNode.StateSectorPreCommitInfo(ctx, tm.ActorAddr, sectorNumber, head.Key()) - req.NoError(err) - seedRandomnessHeight = preCommitInfo.PreCommitEpoch + policy.GetPreCommitChallengeDelay() + // For NI-PoRep this isn't used, so we don't need to wait + return niPorepInteractiveRandomness + } - tm.t.Logf("Miner %s: Waiting %d epochs for seed randomness at epoch %d (current epoch %d) for sector %d...", tm.ActorAddr, seedRandomnessHeight-head.Height(), seedRandomnessHeight, head.Height(), sectorNumber) - tm.FullNode.WaitTillChain(ctx, HeightAtLeast(seedRandomnessHeight+5)) + tm.log("Fetching PreCommit info for sector %d...", sectorNumber) + preCommitInfo, err := tm.FullNode.StateSectorPreCommitInfo(tm.ctx, tm.ActorAddr, sectorNumber, head.Key()) + req.NoError(err) + interactiveRandomnessHeight = preCommitInfo.PreCommitEpoch + policy.GetPreCommitChallengeDelay() + tm.log("Waiting %d (+5) epochs for interactive randomness at epoch %d (current epoch %d) for sector %d...", interactiveRandomnessHeight-head.Height(), interactiveRandomnessHeight, head.Height(), sectorNumber) - head, err = tm.FullNode.ChainHead(ctx) - req.NoError(err) - } + head = tm.FullNode.WaitTillChain(tm.ctx, HeightAtLeast(interactiveRandomnessHeight+5)) minerAddrBytes := new(bytes.Buffer) req.NoError(tm.ActorAddr.MarshalCBOR(minerAddrBytes)) - tm.t.Logf("Miner %s: Fetching seed randomness for sector %d...", tm.ActorAddr, sectorNumber) - rand, err := tm.FullNode.StateGetRandomnessFromBeacon(ctx, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, 
seedRandomnessHeight, minerAddrBytes.Bytes(), head.Key()) + tm.log("Fetching interactive randomness for sector %d...", sectorNumber) + rand, err := tm.FullNode.StateGetRandomnessFromBeacon(tm.ctx, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, interactiveRandomnessHeight, minerAddrBytes.Bytes(), head.Key()) req.NoError(err) - seedRandomness := abi.InteractiveSealRandomness(rand) + interactiveRandomness := abi.InteractiveSealRandomness(rand) - tm.t.Logf("Miner %s: Obtained seed randomness for sector %d: %x", tm.ActorAddr, sectorNumber, seedRandomness) - return seedRandomnessHeight, seedRandomness + return interactiveRandomness } -func (tm *TestUnmanagedMiner) generateProveCommit( - _ context.Context, - sectorNumber abi.SectorNumber, - proofType abi.RegisteredSealProof, - seedRandomness abi.InteractiveSealRandomness, - pieces []abi.PieceInfo, -) []byte { +func (tm *TestUnmanagedMiner) generateSectorProof(sector sectorInfo) ([]byte, error) { + interactiveRandomness := tm.proveCommitInteractiveRandomness(sector.sectorNumber, sector.proofType) - tm.t.Logf("Miner %s: Generating proof type %d Sector Proof for sector %d...", tm.ActorAddr, proofType, sectorNumber) - req := require.New(tm.t) + if tm.mockProofs { + return []byte{0xde, 0xad, 0xbe, 0xef}, nil // mock prove commit + } + + tm.log("Generating proof type %d Sector Proof for sector %d...", sector.proofType, sector.sectorNumber) actorIdNum, err := address.IDFromAddress(tm.ActorAddr) - req.NoError(err) + if err != nil { + return nil, fmt.Errorf("failed to get actor ID for sector %d: %w", sector.sectorNumber, err) + } actorId := abi.ActorID(actorIdNum) - tm.t.Logf("Miner %s: Running proof type %d SealCommitPhase1 for sector %d...", tm.ActorAddr, proofType, sectorNumber) + tm.log("Running proof type %d SealCommitPhase1 for sector %d...", sector.proofType, sector.sectorNumber) scp1, err := ffi.SealCommitPhase1( - proofType, - tm.sealedCids[sectorNumber], - tm.unsealedCids[sectorNumber], - tm.cacheDirPaths[sectorNumber], - tm.sealedSectorPaths[sectorNumber], - sectorNumber, + sector.proofType, + sector.sealedCid, + sector.unsealedCid, + sector.cacheDirPath, + sector.sealedSectorPath, + sector.sectorNumber, actorId, - tm.sealTickets[sectorNumber], - seedRandomness, - pieces, + sector.sealTickets, + interactiveRandomness, + sector.pieces, ) - req.NoError(err) + if err != nil { + return nil, fmt.Errorf("failed to run SealCommitPhase1 for sector %d: %w", sector.sectorNumber, err) + } - tm.t.Logf("Miner %s: Running proof type %d SealCommitPhase2 for sector %d...", tm.ActorAddr, proofType, sectorNumber) + tm.log("Running proof type %d SealCommitPhase2 for sector %d...", sector.proofType, sector.sectorNumber) var sectorProof []byte - if proofType.IsNonInteractive() { - circuitProofs, err := ffi.SealCommitPhase2CircuitProofs(scp1, sectorNumber) - req.NoError(err) - asvpai := proof.AggregateSealVerifyProofAndInfos{ - Miner: actorId, - SealProof: proofType, - AggregateProof: abi.RegisteredAggregationProof_SnarkPackV2, - Infos: []proof.AggregateSealVerifyInfo{{ - Number: sectorNumber, - Randomness: tm.sealTickets[sectorNumber], - InteractiveRandomness: make([]byte, 32), - SealedCID: tm.sealedCids[sectorNumber], - UnsealedCID: tm.unsealedCids[sectorNumber], - }}, + if sector.proofType.IsNonInteractive() { + sectorProof, err = ffi.SealCommitPhase2CircuitProofs(scp1, sector.sectorNumber) + if err != nil { + return nil, fmt.Errorf("failed to run SealCommitPhase2CircuitProofs for sector %d: %w", sector.sectorNumber, err) } - tm.t.Logf("Miner %s: 
Aggregating circuit proofs for sector %d: %+v", tm.ActorAddr, sectorNumber, asvpai) - sectorProof, err = ffi.AggregateSealProofs(asvpai, [][]byte{circuitProofs}) - req.NoError(err) } else { - sectorProof, err = ffi.SealCommitPhase2(scp1, sectorNumber, actorId) - req.NoError(err) + sectorProof, err = ffi.SealCommitPhase2(scp1, sector.sectorNumber, actorId) + if err != nil { + return nil, fmt.Errorf("failed to run SealCommitPhase2 for sector %d: %w", sector.sectorNumber, err) + } } - tm.t.Logf("Miner %s: Got proof type %d sector proof of length %d for sector %d", tm.ActorAddr, proofType, len(sectorProof), sectorNumber) + tm.log("Got proof type %d sector proof of length %d for sector %d", sector.proofType, len(sectorProof), sector.sectorNumber) - return sectorProof + return sectorProof, nil } func (tm *TestUnmanagedMiner) SubmitMessage( - ctx context.Context, params cbg.CBORMarshaler, value uint64, method abi.MethodNum, ) (*api.MsgLookup, error) { + mCid, err := tm.mpoolPushMessage(params, value, method) + if err != nil { + return nil, err + } + + return tm.waitMessage(mCid) +} + +func (tm *TestUnmanagedMiner) waitMessage(mCid cid.Cid) (*api.MsgLookup, error) { + msg, err := tm.FullNode.StateWaitMsg(tm.ctx, mCid, 2, api.LookbackNoLimit, true) + if err != nil { + return nil, err + } + + tm.log("Message with CID: %s has been confirmed on-chain", mCid) + + return msg, nil +} + +func (tm *TestUnmanagedMiner) mpoolPushMessage( + params cbg.CBORMarshaler, + value uint64, + method abi.MethodNum, +) (cid.Cid, error) { + enc, aerr := actors.SerializeParams(params) if aerr != nil { - return nil, aerr + return cid.Undef, aerr } - tm.t.Logf("Submitting message for miner %s with method number %d", tm.ActorAddr, method) + tm.log("Submitting message for miner with method number %d", method) - m, err := tm.FullNode.MpoolPushMessage(ctx, &types.Message{ + m, err := tm.FullNode.MpoolPushMessage(tm.ctx, &types.Message{ To: tm.ActorAddr, From: tm.OwnerKey.Address, Value: types.FromFil(value), @@ -1064,90 +1428,183 @@ func (tm *TestUnmanagedMiner) SubmitMessage( Params: enc, }, nil) if err != nil { - return nil, err - } - - tm.t.Logf("Pushed message with CID: %s for miner %s", m.Cid(), tm.ActorAddr) - - msg, err := tm.FullNode.StateWaitMsg(ctx, m.Cid(), 2, api.LookbackNoLimit, true) - if err != nil { - return nil, err + return cid.Undef, err } - tm.t.Logf("Message with CID: %s has been confirmed on-chain for miner %s", m.Cid(), tm.ActorAddr) + tm.log("Pushed message with CID: %s", m.Cid()) - return msg, nil + return m.Cid(), nil } -func requireTempFile(t *testing.T, fileContentsReader io.Reader, size uint64) *os.File { +func mkTempFile(t *testing.T, fileContentsReader io.Reader, size uint64) (*os.File, error) { // Create a temporary file tempFile, err := os.CreateTemp(t.TempDir(), "") - require.NoError(t, err) + if err != nil { + return nil, err + } // Copy contents from the reader to the temporary file bytesCopied, err := io.Copy(tempFile, fileContentsReader) - require.NoError(t, err) + if err != nil { + return nil, err + } // Ensure the expected size matches the copied size - require.EqualValues(t, size, bytesCopied) + if int64(size) != bytesCopied { + return nil, fmt.Errorf("expected file size %d, got %d", size, bytesCopied) + } // Synchronize the file's content to disk - require.NoError(t, tempFile.Sync()) + if err := tempFile.Sync(); err != nil { + return nil, err + } // Reset the file pointer to the beginning of the file - _, err = tempFile.Seek(0, io.SeekStart) - require.NoError(t, err) + if _, err = 
tempFile.Seek(0, io.SeekStart); err != nil { + return nil, err + } - return tempFile + return tempFile, nil } -func (tm *TestUnmanagedMiner) WaitTillActivatedAndAssertPower( - ctx context.Context, - respCh chan WindowPostResp, - sector abi.SectorNumber, -) { +func (tm *TestUnmanagedMiner) WaitTillActivatedAndAssertPower(sectors []abi.SectorNumber, raw uint64, qa uint64) { + req := require.New(tm.t) - // wait till sector is activated - select { - case resp := <-respCh: - require.NoError(tm.t, resp.Error) - require.True(tm.t, resp.Posted) - case <-ctx.Done(): - tm.t.Fatal("timed out waiting for sector activation") - } + // wait till sectors are activated + for _, sectorNumber := range sectors { + tm.WaitTillPost(sectorNumber) - // Fetch on-chain sector properties - head, err := tm.FullNode.ChainHead(ctx) - require.NoError(tm.t, err) + // Fetch on-chain sector properties + head, err := tm.FullNode.ChainHead(tm.ctx) + req.NoError(err) - soi, err := tm.FullNode.StateSectorGetInfo(ctx, tm.ActorAddr, sector, head.Key()) - require.NoError(tm.t, err) - tm.t.Logf("Miner %s SectorOnChainInfo %d: %+v", tm.ActorAddr.String(), sector, soi) + soi, err := tm.FullNode.StateSectorGetInfo(tm.ctx, tm.ActorAddr, sectorNumber, head.Key()) + req.NoError(err) + tm.log("SectorOnChainInfo %d: %+v", sectorNumber, soi) - _ = tm.FullNode.WaitTillChain(ctx, HeightAtLeast(head.Height()+5)) + _ = tm.FullNode.WaitTillChain(tm.ctx, HeightAtLeast(head.Height()+5)) + + if !tm.mockProofs { + // WindowPost Dispute should fail + tm.AssertDisputeFails(sectorNumber) + } // else it would pass, which we don't want + } tm.t.Log("Checking power after PoSt ...") // Miner B should now have power - tm.AssertPower(ctx, uint64(tm.options.sectorSize), uint64(tm.options.sectorSize)) + tm.AssertPower(raw, qa) +} - if !tm.mockProofs { - // WindowPost Dispute should fail - tm.AssertDisputeFails(ctx, sector) - } // else it would pass, which we don't want +func (tm *TestUnmanagedMiner) AssertNoWindowPostError() { + tm.postsLk.Lock() + defer tm.postsLk.Unlock() + for _, post := range tm.posts { + require.NoError(tm.t, post.Error, "expected no error in window post but found one at epoch %d", post.Epoch) + } } -func (tm *TestUnmanagedMiner) AssertDisputeFails(ctx context.Context, sector abi.SectorNumber) { - err := tm.SubmitPostDispute(ctx, sector) - require.Error(tm.t, err) - require.Contains(tm.t, err.Error(), "failed to dispute valid post") - require.Contains(tm.t, err.Error(), "(RetCode=16)") +func (tm *TestUnmanagedMiner) WaitTillPost(sectorNumber abi.SectorNumber) { + for i := 0; tm.ctx.Err() == nil; i++ { + if i%10 == 0 { + tm.log("Waiting for sector %d to be posted", sectorNumber) + } + tm.postsLk.Lock() + for _, post := range tm.posts { + require.NoError(tm.t, post.Error, "expected no error in window post but found one at epoch %d", post.Epoch) + if post.Error == nil { + for _, sn := range post.Posted { + if sn == sectorNumber { + tm.postsLk.Unlock() + return + } + } + } + } + tm.postsLk.Unlock() + select { + case <-tm.ctx.Done(): + return + case <-time.After(time.Millisecond * 100): + } + } } -func (tm *TestUnmanagedMiner) IsImmutableDeadline(ctx context.Context, deadlineIndex uint64) bool { - di, err := tm.FullNode.StateMinerProvingDeadline(ctx, tm.ActorAddr, types.EmptyTSK) - require.NoError(tm.t, err) - // don't rely on di.Index because if we haven't enrolled in cron it won't be ticking - currentDeadlineIdx := uint64(math.Abs(float64((di.CurrentEpoch - di.PeriodStart) / di.WPoStChallengeWindow))) +func (tm *TestUnmanagedMiner) 
AssertNoPower() { + req := require.New(tm.t) + + p := tm.CurrentPower() + tm.log("RBP: %v, QaP: %v", p.MinerPower.RawBytePower.String(), p.MinerPower.QualityAdjPower.String()) + req.True(p.MinerPower.RawBytePower.IsZero()) +} + +func (tm *TestUnmanagedMiner) CurrentPower() *api.MinerPower { + req := require.New(tm.t) + + head, err := tm.FullNode.ChainHead(tm.ctx) + req.NoError(err) + + p, err := tm.FullNode.StateMinerPower(tm.ctx, tm.ActorAddr, head.Key()) + req.NoError(err) + + return p +} + +func (tm *TestUnmanagedMiner) AssertPower(raw uint64, qa uint64) { + req := require.New(tm.t) + + p := tm.CurrentPower() + tm.log("RBP: %v, QaP: %v", p.MinerPower.RawBytePower.String(), p.MinerPower.QualityAdjPower.String()) + req.Equal(raw, p.MinerPower.RawBytePower.Uint64()) + req.Equal(qa, p.MinerPower.QualityAdjPower.Uint64()) +} + +func (tm *TestUnmanagedMiner) AssertDisputeFails(sector abi.SectorNumber) { + req := require.New(tm.t) + + tm.log("Disputing WindowedPoSt for sector %d ...", sector) + + err := tm.submitPostDispute(sector) + req.Error(err) + req.Contains(err.Error(), "failed to dispute valid post") + req.Contains(err.Error(), "(RetCode=16)") +} + +func (tm *TestUnmanagedMiner) IsImmutableDeadline(deadlineIndex uint64) bool { + req := require.New(tm.t) + di, err := tm.FullNode.StateMinerProvingDeadline(tm.ctx, tm.ActorAddr, types.EmptyTSK) + req.NoError(err) + currentDeadlineIdx := CurrentDeadlineIndex(di) return currentDeadlineIdx == deadlineIndex || currentDeadlineIdx == deadlineIndex-1 } + +// CurrentDeadlineIndex manually calculates the current deadline index. This may be useful in +// situations where the miner hasn't been enrolled in cron and the deadline index isn't ticking, +// so dline.Info.Index may not be accurate. +func CurrentDeadlineIndex(di *dline.Info) uint64 { + didx := int64((di.CurrentEpoch - di.PeriodStart) / di.WPoStChallengeWindow) + if didx < 0 { // before the first deadline + return uint64(int64(di.WPoStPeriodDeadlines) + didx) + } + return uint64(didx) } + +// TODO: remove when https://github.com/filecoin-project/go-state-types/pull/286 is available +func exitCodesFromBatchReturn(b miner14.BatchReturn) []exitcode.ExitCode { + size := int(b.SuccessCount) + len(b.FailCodes) + codes := make([]exitcode.ExitCode, size) + i := 0 + for _, fc := range b.FailCodes { + if fc.Idx > uint64(i) { + for ; i < int(fc.Idx); i++ { + codes[i] = exitcode.Ok + } + } + codes[i] = fc.Code + i++ + } + for ; i < len(codes); i++ { + codes[i] = exitcode.Ok + } + return codes +} diff --git a/itests/manual_onboarding_test.go b/itests/manual_onboarding_test.go index 4e976b030b3..c03ddd9b1d7 100644 --- a/itests/manual_onboarding_test.go +++ b/itests/manual_onboarding_test.go @@ -61,9 +61,11 @@ func TestManualSectorOnboarding(t *testing.T) { // performing all related tasks manually. Managed by the TestKit, MinerB has the capability to utilize actual proofs // for the processes of sector onboarding and activation. nodeOpts := []kit.NodeOpt{kit.SectorSize(defaultSectorSize), kit.OwnerAddr(client.DefaultKey)} - minerB, ens := ens.UnmanagedMiner(&client, nodeOpts...) + minerB, ens := ens.UnmanagedMiner(ctx, &client, nodeOpts...) + defer minerB.Stop() // MinerC is similar to MinerB, but onboards pieces instead of a pure CC sector - minerC, ens := ens.UnmanagedMiner(&client, nodeOpts...) + minerC, ens := ens.UnmanagedMiner(ctx, &client, nodeOpts...) 
+ defer minerC.Stop() ens.Start() @@ -79,40 +81,32 @@ func TestManualSectorOnboarding(t *testing.T) { t.Logf("MinerA RBP: %v, QaP: %v", p.MinerPower.QualityAdjPower.String(), p.MinerPower.RawBytePower.String()) // Miner B should have no power as it has yet to onboard and activate any sectors - minerB.AssertNoPower(ctx) + minerB.AssertNoPower() // Miner C should have no power as it has yet to onboard and activate any sectors - minerC.AssertNoPower(ctx) + minerC.AssertNoPower() // ---- Miner B onboards a CC sector - var bSectorNum abi.SectorNumber - var bRespCh chan kit.WindowPostResp - var bWdPostCancelF context.CancelFunc - - bSectorNum, bRespCh, bWdPostCancelF = minerB.OnboardCCSector(ctx, sealProofType) + bSectors := minerB.OnboardSectors(sealProofType, false, 1) + req.Len(bSectors, 1) // Miner B should still not have power as power can only be gained after sector is activated i.e. the first WindowPost is submitted for it - minerB.AssertNoPower(ctx) + minerB.AssertNoPower() // Ensure that the block miner checks for and waits for posts during the appropriate proving window from our new miner with a sector blockMiner.WatchMinerForPost(minerB.ActorAddr) // --- Miner C onboards sector with data/pieces - var cSectorNum abi.SectorNumber - var cRespCh chan kit.WindowPostResp - - cSectorNum, cRespCh, _ = minerC.OnboardSectorWithPieces(ctx, kit.TestSpt) + cSectors := minerC.OnboardSectors(sealProofType, true, 1) // Miner C should still not have power as power can only be gained after sector is activated i.e. the first WindowPost is submitted for it - minerC.AssertNoPower(ctx) + minerC.AssertNoPower() // Ensure that the block miner checks for and waits for posts during the appropriate proving window from our new miner with a sector blockMiner.WatchMinerForPost(minerC.ActorAddr) // Wait till both miners' sectors have had their first post and are activated and check that this is reflected in miner power - minerB.WaitTillActivatedAndAssertPower(ctx, bRespCh, bSectorNum) - minerC.WaitTillActivatedAndAssertPower(ctx, cRespCh, cSectorNum) + minerB.WaitTillActivatedAndAssertPower(bSectors, uint64(defaultSectorSize), uint64(defaultSectorSize)) + minerC.WaitTillActivatedAndAssertPower(cSectors, uint64(defaultSectorSize), uint64(defaultSectorSize)) // Miner B has activated the CC sector -> upgrade it with snapdeals - _ = minerB.SnapDeal(ctx, kit.TestSpt, bSectorNum) - // cancel the WdPost for the CC sector as the corresponding CommR is no longer valid - bWdPostCancelF() + _ = minerB.SnapDeal(bSectors[0]) }) } } diff --git a/itests/niporep_manual_test.go b/itests/niporep_manual_test.go index 76ea1f9696b..a2fa949082a 100644 --- a/itests/niporep_manual_test.go +++ b/itests/niporep_manual_test.go @@ -3,7 +3,6 @@ package itests import ( "context" "fmt" - "math" "testing" "time" @@ -20,6 +19,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/builtin" miner14 "github.com/filecoin-project/go-state-types/builtin/v14/miner" + "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/build" @@ -29,52 +29,217 @@ import ( "github.com/filecoin-project/lotus/lib/must" ) +// All real-proofs test cases in here are skipped by default, gated with the LOTUS_RUN_VERY_EXPENSIVE_TESTS env var. +// Set this env var to "1" to run these tests. +// Some tests are also explicitly skipped with a `skip` bool; comment out the `skip: true` line here to run them. 
+// They are both very expensive and mainly useful for testing very specific scenarios. You may need a -timeout +// of more than 10m to run these. + +func TestManualNISectorOnboarding(t *testing.T) { req := require.New(t) + const blocktime = 2 * time.Millisecond const defaultSectorSize = abi.SectorSize(2 << 10) // 2KiB sealProofType, err := miner.SealProofTypeFromSectorSize(defaultSectorSize, network.Version23, miner.SealProofVariant_NonInteractive) req.NoError(err) - for _, withMockProofs := range []bool{true, false} { - testName := "WithRealProofs" - if withMockProofs { - testName = "WithMockProofs" + mkgood := func(c int) []exitcode.ExitCode { + out := make([]exitcode.ExitCode, c) + for i := range out { + out[i] = exitcode.Ok } - t.Run(testName, func(t *testing.T) { - if !withMockProofs { + return out + } + + type testCaseMiner struct { + // sectorsToOnboard tells us (a) how many sectors to prepare and (b) which of them should pass the onboarding process + // with an Ok exit code + sectorsToOnboard []exitcode.ExitCode + // allOrNothing tells us whether all sectors should pass onboarding or none + allOrNothing bool + expectPower uint64 + // snapDeal tells us whether to snap a deal into one of the sectors after activation + snapDeal bool + } + + testCases := []struct { + name string + skip bool + mockProofs bool + miners []testCaseMiner + }{ + { + name: "mock proofs, miners with 1, 3 (1 bad) and 6 sectors", + mockProofs: true, + miners: []testCaseMiner{ + { + sectorsToOnboard: mkgood(1), + allOrNothing: true, + expectPower: uint64(defaultSectorSize), + snapDeal: true, + }, + { + sectorsToOnboard: []exitcode.ExitCode{exitcode.Ok, exitcode.ErrIllegalArgument, exitcode.Ok}, + allOrNothing: false, + expectPower: uint64(defaultSectorSize * 2), + snapDeal: true, + }, + { + // should trigger a non-zero aggregate fee (>5 for niporep) + sectorsToOnboard: mkgood(6), + allOrNothing: true, + expectPower: uint64(defaultSectorSize * 6), + snapDeal: true, + }, + }, + }, + { + name: "mock proofs, 1 miner with 65 sectors", + mockProofs: true, + miners: []testCaseMiner{ + { + sectorsToOnboard: mkgood(65), + allOrNothing: true, + expectPower: uint64(defaultSectorSize * 65), + snapDeal: true, + }, + }, + }, + { + name: "real proofs, 1 miner with 1 sector", + mockProofs: false, + skip: true, // comment this out to run this test manually + miners: []testCaseMiner{ + { + sectorsToOnboard: mkgood(1), + allOrNothing: true, + expectPower: uint64(defaultSectorSize), + snapDeal: true, + }, + }, + }, + { + name: "real proofs, 1 miner with 2 sectors", + mockProofs: false, + skip: true, // comment this out to run this test manually + miners: []testCaseMiner{ + { + sectorsToOnboard: mkgood(2), + allOrNothing: true, + expectPower: uint64(defaultSectorSize * 2), + snapDeal: true, + }, + }, + }, + { + name: "real proofs, 1 miner with 3 sectors", + mockProofs: false, + skip: true, // comment this out to run this test manually + miners: []testCaseMiner{ + { + sectorsToOnboard: mkgood(3), + allOrNothing: true, + expectPower: uint64(defaultSectorSize * 3), + snapDeal: true, + }, + }, + }, + { + name: "real proofs, 1 miner with 4 sectors", + mockProofs: false, + skip: true, // comment this out to run this test manually + miners: []testCaseMiner{ + { + sectorsToOnboard: mkgood(4), + allOrNothing: true, + expectPower: uint64(defaultSectorSize * 4), + snapDeal: true, + }, + }, + }, + { + // useful for testing aggregate fee + // "aggregate seal verify failed: invalid aggregate" + name: "real proofs, 1 
miner with 7 sectors", + mockProofs: false, + skip: true, // comment this out to run this test manually + miners: []testCaseMiner{ + { + sectorsToOnboard: mkgood(7), + allOrNothing: true, + expectPower: uint64(defaultSectorSize * 7), + snapDeal: true, + }, + }, + }, + { + // useful for testing aggregate fee and successful activation with the aggregate proof even + // though one of them is bad + name: "real proofs, 1 miner with 7 sectors, 1 bad", + mockProofs: false, + miners: []testCaseMiner{ + { + sectorsToOnboard: []exitcode.ExitCode{exitcode.Ok, exitcode.ErrIllegalArgument, exitcode.Ok, exitcode.Ok, exitcode.Ok, exitcode.Ok, exitcode.Ok}, + allOrNothing: false, + expectPower: uint64(defaultSectorSize * 6), + snapDeal: true, + }, + }, + }, + { + name: "real proofs, 1 miner with 65 sectors", + mockProofs: false, + skip: true, // comment this out to run this test manually + miners: []testCaseMiner{ + { + sectorsToOnboard: mkgood(65), + allOrNothing: true, + expectPower: uint64(defaultSectorSize * 65), + snapDeal: true, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if tc.skip { + t.Skip("skipping test") + } + if !tc.mockProofs { kit.VeryExpensive(t) } + req := require.New(t) + kit.QuietMiningLogs() ctx, cancel := context.WithCancel(context.Background()) defer cancel() var ( - // need to pick a balance value so that the test is not racy on CI by running through it's WindowPostDeadlines too fast - blocktime = 2 * time.Millisecond - client kit.TestFullNode - minerA kit.TestMiner // A is a standard genesis miner + client kit.TestFullNode + genesisMiner kit.TestMiner ) - // Setup and begin mining with a single miner (A) - // Miner A will only be a genesis Miner with power allocated in the genesis block and will not onboard any sectors from here on - ens := kit.NewEnsemble(t, kit.MockProofs(withMockProofs)). + // Setup and begin mining with a single genesis block miner; this miner won't be used + // in the test beyond mining the chain and maintaining power + ens := kit.NewEnsemble(t, kit.MockProofs(tc.mockProofs)). FullNode(&client, kit.SectorSize(defaultSectorSize)). // preseal more than the default number of sectors to ensure that the genesis miner has power // because our unmanaged miners won't produce blocks so we may get null rounds - Miner(&minerA, &client, kit.PresealSectors(5), kit.SectorSize(defaultSectorSize), kit.WithAllSubsystems()). + Miner(&genesisMiner, &client, kit.PresealSectors(5), kit.SectorSize(defaultSectorSize), kit.WithAllSubsystems()). Start(). InterconnectAll() blockMiners := ens.BeginMiningMustPost(blocktime) req.Len(blockMiners, 1) blockMiner := blockMiners[0] - // Instantiate MinerB to manually handle sector onboarding and power acquisition through sector activation. - // Unlike other miners managed by the Lotus Miner storage infrastructure, MinerB operates independently, - // performing all related tasks manually. Managed by the TestKit, MinerB has the capability to utilize actual proofs - // for the processes of sector onboarding and activation. + // Instantiate our test miners to manually handle sector onboarding and power acquisition through sector activation. nodeOpts := []kit.NodeOpt{kit.SectorSize(defaultSectorSize), kit.OwnerAddr(client.DefaultKey)} - minerB, ens := ens.UnmanagedMiner(&client, nodeOpts...) + miners := make([]*kit.TestUnmanagedMiner, len(tc.miners)) + for i := range tc.miners { + miners[i], _ = ens.UnmanagedMiner(ctx, &client, nodeOpts...) 
+ defer miners[i].Stop() + } ens.Start() @@ -82,111 +247,188 @@ func TestManualNISectorOnboarding(t *testing.T) { t.Log("Checking initial power ...") - // Miner A should have power as it has already onboarded sectors in the genesis block + // The genesis miner should have power as it has already onboarded sectors in the genesis block head, err := client.ChainHead(ctx) req.NoError(err) - p, err := client.StateMinerPower(ctx, minerA.ActorAddr, head.Key()) + p, err := client.StateMinerPower(ctx, genesisMiner.ActorAddr, head.Key()) req.NoError(err) - t.Logf("MinerA RBP: %v, QaP: %v", p.MinerPower.QualityAdjPower.String(), p.MinerPower.RawBytePower.String()) - - // Miner B should have no power as it has yet to onboard and activate any sectors - minerB.AssertNoPower(ctx) - - // Verify that ProveCommitSectorsNI rejects messages with invalid parameters - verifyProveCommitSectorsNIErrorConditions(ctx, t, minerB, sealProofType) - - // ---- Miner B onboards a CC sector - var bSectorNum abi.SectorNumber - var bRespCh chan kit.WindowPostResp - var bWdPostCancelF context.CancelFunc - - // Onboard a CC sector with Miner B using NI-PoRep - bSectorNum, bRespCh, bWdPostCancelF = minerB.OnboardCCSector(ctx, sealProofType) - // Miner B should still not have power as power can only be gained after sector is activated i.e. the first WindowPost is submitted for it - minerB.AssertNoPower(ctx) - - // Check that the sector-activated event was emitted - { - expectedEntries := []types.EventEntry{ - {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: must.One(ipld.Encode(basicnode.NewString("sector-activated"), dagcbor.Encode))}, - {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "sector", Value: must.One(ipld.Encode(basicnode.NewInt(int64(bSectorNum)), dagcbor.Encode))}, - {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "unsealed-cid", Value: must.One(ipld.Encode(datamodel.Null, dagcbor.Encode))}, + t.Logf("genesisMiner RBP: %v, QaP: %v", p.MinerPower.RawBytePower.String(), p.MinerPower.QualityAdjPower.String()) + + // Our test miners should have no power as they have yet to onboard and activate any sectors + for _, miner := range miners { + miner.AssertNoPower() + } + + sectors := make([][]abi.SectorNumber, len(tc.miners)) + + for i, tcMiner := range tc.miners { + miner := miners[i] + var expectSuccesses int + for _, ec := range tcMiner.sectorsToOnboard { + if ec.IsSuccess() { + expectSuccesses++ + } } - from := head.Height() - recentEvents, err := client.FullNode.GetActorEventsRaw(ctx, &types.ActorEventFilter{FromHeight: &from}) + + head, err = client.ChainHead(ctx) req.NoError(err) - req.Len(recentEvents, 1) - req.Equal(expectedEntries, recentEvents[0].Entries) - } - // Ensure that the block miner checks for and waits for posts during the appropriate proving window from our new miner with a sector - blockMiner.WatchMinerForPost(minerB.ActorAddr) + // Onboard CC sectors to this test miner using NI-PoRep + sectors[i] = miner.OnboardSectors( + sealProofType, + false, + len(tcMiner.sectorsToOnboard), + kit.WithExpectedExitCodes(tcMiner.sectorsToOnboard), + kit.WithRequireActivationSuccess(tcMiner.allOrNothing), + kit.WithModifyNIActivationsBeforeSubmit(func(activations []miner14.SectorNIActivationInfo) []miner14.SectorNIActivationInfo { + head, err := client.ChainHead(ctx) + req.NoError(err) + + for j, ec := range tcMiner.sectorsToOnboard { + if !ec.IsSuccess() { + // Set the expiration in the past to ensure the sector fails onboarding + activations[j].Expiration = head.Height() - 100 + } + } + 
return activations + }), + ) + + req.Len(sectors[i], expectSuccesses) + + // The miner should still not have power as power can only be gained after sector is activated i.e. the first WindowPost is submitted for it + miner.AssertNoPower() + + // Check that the sector-activated event was emitted + { + expectedEntries := make([][]types.EventEntry, 0) + for _, sectorNumber := range sectors[i] { + expectedEntries = append(expectedEntries, []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: must.One(ipld.Encode(basicnode.NewString("sector-activated"), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "sector", Value: must.One(ipld.Encode(basicnode.NewInt(int64(sectorNumber)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "unsealed-cid", Value: must.One(ipld.Encode(datamodel.Null, dagcbor.Encode))}, + }) + } + from := head.Height() + recentEvents, err := client.FullNode.GetActorEventsRaw(ctx, &types.ActorEventFilter{FromHeight: &from}) + req.NoError(err) + req.Len(recentEvents, len(sectors[i])) + for i, event := range recentEvents { + req.Equal(expectedEntries[i], event.Entries) + } + } - // Wait till both miners' sectors have had their first post and are activated and check that this is reflected in miner power - minerB.WaitTillActivatedAndAssertPower(ctx, bRespCh, bSectorNum) + // Ensure that the block miner checks for and waits for posts during the appropriate proving window from our new miner with a sector + blockMiner.WatchMinerForPost(miner.ActorAddr) + } - head, err = client.ChainHead(ctx) - req.NoError(err) + // Wait till each miner's sectors have had their first post and are activated and check that this is reflected in miner power + for i, miner := range miners { + miner.WaitTillActivatedAndAssertPower(sectors[i], tc.miners[i].expectPower, tc.miners[i].expectPower) + } - // Miner B has activated the CC sector -> upgrade it with snapdeals - snapPieces := minerB.SnapDeal(ctx, kit.TestSpt, bSectorNum) - // cancel the WdPost for the CC sector as the corresponding CommR is no longer valid - bWdPostCancelF() - - // Check "sector-updated" event happned after snap - { - expectedEntries := []types.EventEntry{ - {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: must.One(ipld.Encode(basicnode.NewString("sector-updated"), dagcbor.Encode))}, - {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "sector", Value: must.One(ipld.Encode(basicnode.NewInt(int64(bSectorNum)), dagcbor.Encode))}, - {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "unsealed-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: snapPieces[0].PieceCID}), dagcbor.Encode))}, - {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "piece-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: snapPieces[0].PieceCID}), dagcbor.Encode))}, - {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "piece-size", Value: must.One(ipld.Encode(basicnode.NewInt(int64(snapPieces[0].Size)), dagcbor.Encode))}, + for i, tcMiner := range tc.miners { + if !tcMiner.snapDeal { + continue } - from := head.Height() - recentEvents, err := client.FullNode.GetActorEventsRaw(ctx, &types.ActorEventFilter{FromHeight: &from}) + + miner := miners[i] + head, err = client.ChainHead(ctx) req.NoError(err) - req.Len(recentEvents, 1) - req.Equal(expectedEntries, recentEvents[0].Entries) + + // Snap a deal into the first of the successfully onboarded CC sectors for this miner + snapPieces := miner.SnapDeal(sectors[i][0]) + + // Check 
"sector-updated" event happned after snap + { + expectedEntries := []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: must.One(ipld.Encode(basicnode.NewString("sector-updated"), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "sector", Value: must.One(ipld.Encode(basicnode.NewInt(int64(sectors[i][0])), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "unsealed-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: snapPieces[0].PieceCID}), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "piece-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: snapPieces[0].PieceCID}), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "piece-size", Value: must.One(ipld.Encode(basicnode.NewInt(int64(snapPieces[0].Size)), dagcbor.Encode))}, + } + from := head.Height() + recentEvents, err := client.FullNode.GetActorEventsRaw(ctx, &types.ActorEventFilter{FromHeight: &from}) + req.NoError(err) + req.Len(recentEvents, 1) + req.Equal(expectedEntries, recentEvents[0].Entries) + } } }) } } -func verifyProveCommitSectorsNIErrorConditions(ctx context.Context, t *testing.T, miner *kit.TestUnmanagedMiner, sealProofType abi.RegisteredSealProof) { +func TestNISectorFailureCases(t *testing.T) { req := require.New(t) - head, err := miner.FullNode.ChainHead(ctx) + const blocktime = 2 * time.Millisecond + const defaultSectorSize = abi.SectorSize(2 << 10) // 2KiB + sealProofType, err := miner.SealProofTypeFromSectorSize(defaultSectorSize, network.Version23, miner.SealProofVariant_NonInteractive) req.NoError(err) + kit.QuietMiningLogs() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var ( + client kit.TestFullNode + genesisMiner kit.TestMiner + ) + ens := kit.NewEnsemble(t, kit.MockProofs(true)). + FullNode(&client, kit.SectorSize(defaultSectorSize)). + Miner(&genesisMiner, &client, kit.PresealSectors(5), kit.SectorSize(defaultSectorSize), kit.WithAllSubsystems()). + Start(). 
+ InterconnectAll() + _ = ens.BeginMining(blocktime) + + miner, _ := ens.UnmanagedMiner(ctx, &client, kit.SectorSize(defaultSectorSize), kit.OwnerAddr(client.DefaultKey)) + defer miner.Stop() + + ens.Start() + + build.Clock.Sleep(time.Second) + + // We have to onboard a sector first to get the miner enrolled in cron; although we don't need to wait for it to prove + _ = miner.OnboardSectors(sealProofType, false, 1) + + // Utility functions and variables for our failure cases + actorIdNum, err := address.IDFromAddress(miner.ActorAddr) req.NoError(err) actorId := abi.ActorID(actorIdNum) - var provingDeadline uint64 = 7 - if miner.IsImmutableDeadline(ctx, provingDeadline) { - // avoid immutable deadlines - provingDeadline = 5 + // make sure we are working with a deadline that has no chance of being immutable (current or next) + di, err := miner.FullNode.StateMinerProvingDeadline(ctx, miner.ActorAddr, types.EmptyTSK) + req.NoError(err) + currentDeadlineIdx := kit.CurrentDeadlineIndex(di) + provingDeadline := currentDeadlineIdx - 2 + if currentDeadlineIdx <= 2 { + provingDeadline = miner14.WPoStPeriodDeadlines - 1 } + head, err := miner.FullNode.ChainHead(ctx) + req.NoError(err) + + sectorNumber := abi.SectorNumber(5000) + submitAndFail := func(params *miner14.ProveCommitSectorsNIParams, errMsg string, errCode int) { t.Helper() - r, err := miner.SubmitMessage(ctx, params, 1, builtin.MethodsMiner.ProveCommitSectorsNI) - req.Error(err) + r, err := miner.SubmitMessage(params, 1, builtin.MethodsMiner.ProveCommitSectorsNI) + req.Error(err, "expected error: [%s]", errMsg) req.Contains(err.Error(), errMsg) if errCode > 0 { req.Contains(err.Error(), fmt.Sprintf("(RetCode=%d)", errCode)) } req.Nil(r) } - - sn := abi.SectorNumber(5000) mkSai := func() miner14.SectorNIActivationInfo { - sn++ + sectorNumber++ // unique per sector return miner14.SectorNIActivationInfo{ - SealingNumber: sn, + SealingNumber: sectorNumber, SealerID: actorId, SealedCID: cid.MustParse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkiekz"), - SectorNumber: sn, + SectorNumber: sectorNumber, SealRandEpoch: head.Height() - 10, Expiration: 2880 * 300, } @@ -202,59 +444,78 @@ func verifyProveCommitSectorsNIErrorConditions(ctx context.Context, t *testing.T } } - // Test message rejection on no sectors - params := mkParams() - params.Sectors = []miner14.SectorNIActivationInfo{} - submitAndFail(¶ms, "too few sectors", 16) + // Failure cases - // Test message rejection on too many sectors - sectorInfos := make([]miner14.SectorNIActivationInfo, 66) - for i := range sectorInfos { - sectorInfos[i] = mkSai() - } - params = mkParams() - params.Sectors = sectorInfos - submitAndFail(¶ms, "too many sectors", 16) - - // Test bad aggregation proof type - params = mkParams() - params.AggregateProofType = abi.RegisteredAggregationProof_SnarkPackV1 - submitAndFail(¶ms, "aggregate proof type", 16) - - // Test bad SealerID - params = mkParams() - params.Sectors[1].SealerID = 1234 - submitAndFail(¶ms, "invalid NI commit 1 while requiring activation success", 16) - - // Test bad SealingNumber - params = mkParams() - params.Sectors[1].SealingNumber = 1234 - submitAndFail(¶ms, "invalid NI commit 1 while requiring activation success", 16) - - // Test bad SealedCID - params = mkParams() - params.Sectors[1].SealedCID = cid.MustParse("baga6ea4seaqjtovkwk4myyzj56eztkh5pzsk5upksan6f5outesy62bsvl4dsha") - submitAndFail(¶ms, "invalid NI commit 1 while requiring activation success", 16) - - // Test bad SealRandEpoch - head, err = 
miner.FullNode.ChainHead(ctx) - req.NoError(err) - params = mkParams() - params.Sectors[1].SealRandEpoch = head.Height() + builtin.EpochsInDay - submitAndFail(&params, fmt.Sprintf("seal challenge epoch %d must be before now", params.Sectors[1].SealRandEpoch), 16) - params.Sectors[1].SealRandEpoch = head.Height() - 190*builtin.EpochsInDay - submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16) - - // Immutable/bad deadlines - di, err := miner.FullNode.StateMinerProvingDeadline(ctx, miner.ActorAddr, head.Key()) - req.NoError(err) - currentDeadlineIdx := uint64(math.Abs(float64((di.CurrentEpoch - di.PeriodStart) / di.WPoStChallengeWindow))) - req.Less(currentDeadlineIdx, di.WPoStPeriodDeadlines) - params = mkParams() - params.ProvingDeadline = currentDeadlineIdx - submitAndFail(&params, fmt.Sprintf("proving deadline %d must not be the current or next deadline", currentDeadlineIdx), 18) - params.ProvingDeadline = currentDeadlineIdx + 1 - submitAndFail(&params, fmt.Sprintf("proving deadline %d must not be the current or next deadline", currentDeadlineIdx+1), 18) - params.ProvingDeadline = di.WPoStPeriodDeadlines // too big - submitAndFail(&params, fmt.Sprintf("proving deadline index %d invalid", di.WPoStPeriodDeadlines), 16) + t.Run("message rejection on no sectors", func(t *testing.T) { + params := mkParams() + params.Sectors = []miner14.SectorNIActivationInfo{} + submitAndFail(&params, "too few sectors", 16) + }) + + t.Run("message rejection on too many sectors", func(t *testing.T) { + sectorInfos := make([]miner14.SectorNIActivationInfo, 66) + for i := range sectorInfos { + sectorInfos[i] = mkSai() + } + params := mkParams() + params.Sectors = sectorInfos + submitAndFail(&params, "too many sectors", 16) + }) + + t.Run("bad aggregation proof type", func(t *testing.T) { + params := mkParams() + params.AggregateProofType = abi.RegisteredAggregationProof_SnarkPackV1 + submitAndFail(&params, "aggregate proof type", 16) + }) + + t.Run("bad SealerID", func(t *testing.T) { + params := mkParams() + params.Sectors[1].SealerID = 1234 + submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16) + }) + + t.Run("bad SealingNumber", func(t *testing.T) { + params := mkParams() + params.Sectors[1].SealingNumber = 1234 + submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16) + }) + + t.Run("bad SealedCID", func(t *testing.T) { + params := mkParams() + params.Sectors[1].SealedCID = cid.MustParse("baga6ea4seaqjtovkwk4myyzj56eztkh5pzsk5upksan6f5outesy62bsvl4dsha") + submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16) + }) + + t.Run("bad SealRandEpoch", func(t *testing.T) { + head, err = miner.FullNode.ChainHead(ctx) + req.NoError(err) + params := mkParams() + params.Sectors[1].SealRandEpoch = head.Height() + builtin.EpochsInDay + submitAndFail(&params, fmt.Sprintf("seal challenge epoch %d must be before now", params.Sectors[1].SealRandEpoch), 16) + params.Sectors[1].SealRandEpoch = head.Height() - miner14.MaxProveCommitNiLookback - builtin.EpochsInDay + submitAndFail(&params, "invalid NI commit 1 while requiring activation success", 16) + }) + + t.Run("immutable deadlines", func(t *testing.T) { + di, err = miner.FullNode.StateMinerProvingDeadline(ctx, miner.ActorAddr, head.Key()) + req.NoError(err) + currentDeadlineIdx = kit.CurrentDeadlineIndex(di) + + t.Logf("Validating submission failure for current and next deadline. 
Current Deadline Info: %+v, calculated current deadline: %d.", di, currentDeadlineIdx) + + params := mkParams() + params.ProvingDeadline = currentDeadlineIdx + submitAndFail(&params, fmt.Sprintf("proving deadline %d must not be the current or next deadline", currentDeadlineIdx), 18) + params.ProvingDeadline = currentDeadlineIdx + 1 + if params.ProvingDeadline == di.WPoStPeriodDeadlines { + params.ProvingDeadline = 0 + } + msgdline := currentDeadlineIdx + 1 + if msgdline == di.WPoStPeriodDeadlines { + msgdline = 0 + } + submitAndFail(&params, fmt.Sprintf("proving deadline %d must not be the current or next deadline", msgdline), 18) + params.ProvingDeadline = di.WPoStPeriodDeadlines // too big + submitAndFail(&params, fmt.Sprintf("proving deadline index %d invalid", di.WPoStPeriodDeadlines), 16) + }) }
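For reference, a minimal standalone sketch of the wrap-around arithmetic in kit.CurrentDeadlineIndex above; the dline.Info values here are made up for illustration (mainnet uses 48 deadlines of 60 epochs each):

package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/dline"
)

func main() {
	// A proving period that hasn't started yet: CurrentEpoch < PeriodStart.
	di := &dline.Info{
		CurrentEpoch:         940,
		PeriodStart:          1000,
		WPoStPeriodDeadlines: 48,
		WPoStChallengeWindow: 60,
	}
	// (940-1000)/60 = -1, one deadline before the first, so wrap around to
	// the last deadline of the previous proving period: 48 + (-1) = 47.
	didx := int64((di.CurrentEpoch - di.PeriodStart) / di.WPoStChallengeWindow)
	if didx < 0 {
		didx += int64(di.WPoStPeriodDeadlines)
	}
	fmt.Println(didx) // 47
}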