diff --git a/components/dashboard/component.go b/components/dashboard/component.go index af6e5296f..7f79604e8 100644 --- a/components/dashboard/component.go +++ b/components/dashboard/component.go @@ -150,8 +150,8 @@ func currentNodeStatus() *nodestatus { LastPauseGC: m.PauseNs[(m.NumGC+255)%256], } // get TangleTime - cl := deps.Protocol.MainEngineInstance().Clock - syncStatus := deps.Protocol.MainEngineInstance().SyncManager.SyncStatus() + cl := deps.Protocol.Engines.Main.Get().Clock + syncStatus := deps.Protocol.Engines.Main.Get().SyncManager.SyncStatus() status.TangleTime = tangleTime{ Synced: syncStatus.NodeSynced, diff --git a/components/dashboard/explorer_routes.go b/components/dashboard/explorer_routes.go index b76e3525e..6a1267705 100644 --- a/components/dashboard/explorer_routes.go +++ b/components/dashboard/explorer_routes.go @@ -74,14 +74,14 @@ func setupExplorerRoutes(routeGroup *echo.Group) { } func findBlock(blockID iotago.BlockID) (explorerBlk *ExplorerBlock, err error) { - block, exists := deps.Protocol.MainEngineInstance().Block(blockID) + block, exists := deps.Protocol.Engines.Main.Get().Block(blockID) if !exists { return nil, ierrors.Errorf("block not found: %s", blockID.ToHex()) } - cachedBlock, _ := deps.Protocol.MainEngineInstance().BlockCache.Block(blockID) + cachedBlock, _ := deps.Protocol.Engines.Main.Get().BlockCache.Block(blockID) - blockMetadata, err := deps.Protocol.MainEngineInstance().Retainer.BlockMetadata(blockID) + blockMetadata, err := deps.Protocol.Engines.Main.Get().Retainer.BlockMetadata(blockID) if err != nil { return nil, ierrors.Wrapf(err, "block metadata %s", blockID.ToHex()) } @@ -196,12 +196,12 @@ func getTransaction(c echo.Context) error { outputID := iotago.OutputID{} copy(outputID[:], txID[:]) - output, err := deps.Protocol.MainEngineInstance().Ledger.Output(outputID) + output, err := deps.Protocol.Engines.Main.Get().Ledger.Output(outputID) if err != nil { return err } - block, exists := deps.Protocol.MainEngineInstance().Block(output.BlockID()) + block, exists := deps.Protocol.Engines.Main.Get().Block(output.BlockID()) if !exists { return ierrors.Errorf("block not found: %s", output.BlockID().ToHex()) } @@ -223,12 +223,12 @@ func getTransactionMetadata(c echo.Context) error { // Get the first output of that transaction (using index 0) outputID := iotago.OutputID{} copy(outputID[:], txID[:]) - txMetadata, exists := deps.Protocol.MainEngineInstance().Ledger.MemPool().TransactionMetadata(txID) + txMetadata, exists := deps.Protocol.Engines.Main.Get().Ledger.MemPool().TransactionMetadata(txID) if !exists { return ierrors.Errorf("tx metadata not found: %s", txID.ToHex()) } - conflicts, _ := deps.Protocol.MainEngineInstance().Ledger.SpendDAG().ConflictingSpenders(txID) + conflicts, _ := deps.Protocol.Engines.Main.Get().Ledger.SpendDAG().ConflictingSpenders(txID) return httpserver.JSONResponse(c, http.StatusOK, NewTransactionMetadata(txMetadata, conflicts)) } @@ -239,7 +239,7 @@ func getOutput(c echo.Context) error { return err } - output, err := deps.Protocol.MainEngineInstance().Ledger.Output(outputID) + output, err := deps.Protocol.Engines.Main.Get().Ledger.Output(outputID) if err != nil { return err } @@ -253,7 +253,7 @@ func getSlotDetailsByID(c echo.Context) error { return err } - commitment, err := deps.Protocol.MainEngineInstance().Storage.Commitments().Load(commitmentID.Slot()) + commitment, err := deps.Protocol.Engines.Main.Get().Storage.Commitments().Load(commitmentID.Slot()) if err != nil { return err } @@ -262,7 +262,7 @@ func 
getSlotDetailsByID(c echo.Context) error { return ierrors.Errorf("commitment in the store for slot %d does not match the given commitmentID (%s != %s)", commitmentID.Slot(), commitment.ID(), commitmentID) } - diffs, err := deps.Protocol.MainEngineInstance().Ledger.SlotDiffs(commitmentID.Slot()) + diffs, err := deps.Protocol.Engines.Main.Get().Ledger.SlotDiffs(commitmentID.Slot()) if err != nil { return err } diff --git a/components/dashboard/tip.go b/components/dashboard/tip.go index 0b75d9663..78e124553 100644 --- a/components/dashboard/tip.go +++ b/components/dashboard/tip.go @@ -17,7 +17,7 @@ func setupTipsRoutes(routeGroup *echo.Group) { } func tips() *TipsResponse { - allTips := append(deps.Protocol.MainEngineInstance().TipManager.StrongTips(), deps.Protocol.MainEngineInstance().TipManager.WeakTips()...) + allTips := append(deps.Protocol.Engines.Main.Get().TipManager.StrongTips(), deps.Protocol.Engines.Main.Get().TipManager.WeakTips()...) t := make([]string, len(allTips)) for i, tip := range allTips { diff --git a/components/dashboard/visualizer.go b/components/dashboard/visualizer.go index 3788bbf76..ad565cd41 100644 --- a/components/dashboard/visualizer.go +++ b/components/dashboard/visualizer.go @@ -55,7 +55,7 @@ func sendVertex(blk *blocks.Block, confirmed bool) { IsTx: isTx, IsTxAccepted: func() bool { if isTx { - txMetadata, exists := deps.Protocol.MainEngineInstance().Ledger.MemPool().TransactionMetadata(lo.PanicOnErr(signedTransaction.Transaction.ID())) + txMetadata, exists := deps.Protocol.Engines.Main.Get().Ledger.MemPool().TransactionMetadata(lo.PanicOnErr(signedTransaction.Transaction.ID())) if exists { return txMetadata.IsAccepted() } diff --git a/components/dashboard/ws.go b/components/dashboard/ws.go index 251da18e6..e66e4d2d1 100644 --- a/components/dashboard/ws.go +++ b/components/dashboard/ws.go @@ -50,7 +50,7 @@ func runWebSocketStreams(component *app.Component) { broadcastWsBlock(&wsblk{MsgTypeNodeStatus, currentNodeStatus()}) broadcastWsBlock(&wsblk{MsgTypeNeighborMetric, neighborMetrics()}) broadcastWsBlock(&wsblk{MsgTypeTipsMetric, &tipsInfo{ - TotalTips: len(deps.Protocol.MainEngineInstance().TipManager.StrongTips()) + len(deps.Protocol.MainEngineInstance().TipManager.WeakTips()), + TotalTips: len(deps.Protocol.Engines.Main.Get().TipManager.StrongTips()) + len(deps.Protocol.Engines.Main.Get().TipManager.WeakTips()), }}) case *componentsmetric: broadcastWsBlock(&wsblk{MsgTypeComponentCounterMetric, x}) diff --git a/components/dashboard_metrics/component.go b/components/dashboard_metrics/component.go index db0c8538a..c8a4cd83a 100644 --- a/components/dashboard_metrics/component.go +++ b/components/dashboard_metrics/component.go @@ -106,7 +106,7 @@ func run() error { } func configureComponentCountersEvents() { - deps.Protocol.Events.Network.BlockReceived.Hook(func(_ *model.Block, _ peer.ID) { + deps.Protocol.Network.OnBlockReceived(func(_ *model.Block, _ peer.ID) { incComponentCounter(Received) }) diff --git a/components/dashboard_metrics/node.go b/components/dashboard_metrics/node.go index 2e347ab0d..d4d59776a 100644 --- a/components/dashboard_metrics/node.go +++ b/components/dashboard_metrics/node.go @@ -27,9 +27,9 @@ func nodeInfoExtended() *NodeInfoExtended { func databaseSizesMetrics() (*DatabaseSizesMetric, error) { return &DatabaseSizesMetric{ - Prunable: deps.Protocol.MainEngineInstance().Storage.PrunableDatabaseSize(), - Permanent: deps.Protocol.MainEngineInstance().Storage.PermanentDatabaseSize(), - Total: 
deps.Protocol.MainEngineInstance().Storage.Size(), + Prunable: deps.Protocol.Engines.Main.Get().Storage.PrunableDatabaseSize(), + Permanent: deps.Protocol.Engines.Main.Get().Storage.PermanentDatabaseSize(), + Total: deps.Protocol.Engines.Main.Get().Storage.Size(), Time: time.Now().Unix(), }, nil } diff --git a/components/debugapi/blocks.go b/components/debugapi/blocks.go index 948aa76e0..b12b577e2 100644 --- a/components/debugapi/blocks.go +++ b/components/debugapi/blocks.go @@ -10,7 +10,7 @@ import ( ) func getSlotBlockIDs(index iotago.SlotIndex) (*BlockChangesResponse, error) { - blocksForSlot, err := deps.Protocol.MainEngineInstance().Storage.Blocks(index) + blocksForSlot, err := deps.Protocol.Engines.Main.Get().Storage.Blocks(index) if err != nil { return nil, ierrors.Wrapf(err, "failed to get block storage bucket for slot %d", index) } diff --git a/components/debugapi/commitment.go b/components/debugapi/commitment.go index 4b7dc5fdd..4319459c4 100644 --- a/components/debugapi/commitment.go +++ b/components/debugapi/commitment.go @@ -9,11 +9,11 @@ import ( "github.com/iotaledger/hive.go/ds/walker" "github.com/iotaledger/hive.go/ierrors" - "github.com/iotaledger/iota-core/pkg/protocol/chainmanager" + "github.com/iotaledger/iota-core/pkg/protocol" ) func chainManagerAllChainsDot() (string, error) { - rootCommitment := deps.Protocol.ChainManager.RootCommitment() + rootCommitment := deps.Protocol.Chains.Main.Get().ForkingPoint.Get() g := graphviz.New() defer g.Close() @@ -32,7 +32,7 @@ func chainManagerAllChainsDot() (string, error) { } func chainManagerAllChainsRendered() ([]byte, error) { - rootCommitment := deps.Protocol.ChainManager.RootCommitment() + rootCommitment := deps.Protocol.Chains.Main.Get().ForkingPoint.Get() g := graphviz.New() defer g.Close() @@ -50,7 +50,7 @@ func chainManagerAllChainsRendered() ([]byte, error) { return buf.Bytes(), nil } -func prepareCommitmentGraph(g *graphviz.Graphviz, rootCommitment *chainmanager.ChainCommitment) (*cgraph.Graph, error) { +func prepareCommitmentGraph(g *graphviz.Graphviz, rootCommitment *protocol.Commitment) (*cgraph.Graph, error) { graph, err := g.Graph() if err != nil { return nil, err @@ -62,36 +62,39 @@ func prepareCommitmentGraph(g *graphviz.Graphviz, rootCommitment *chainmanager.C } root.SetColor("green") - for commitmentWalker := walker.New[*chainmanager.ChainCommitment](false).Push(rootCommitment); commitmentWalker.HasNext(); { + for commitmentWalker := walker.New[*protocol.Commitment](false).Push(rootCommitment); commitmentWalker.HasNext(); { parentCommitment := commitmentWalker.Next() parent, parentErr := createNode(graph, parentCommitment) if parentErr != nil { return nil, parentErr } - for _, childCommitment := range parentCommitment.Children() { + if err = parentCommitment.Children.ForEach(func(childCommitment *protocol.Commitment) error { child, childErr := createNode(graph, childCommitment) if childErr != nil { - return nil, childErr + return childErr } - if childCommitment.Chain().ForkingPoint.ID() == deps.Protocol.MainEngineInstance().ChainID() { + if childCommitment.Chain.Get() == deps.Protocol.Chains.Main.Get() { child.SetColor("green") } if _, edgeErr := graph.CreateEdge(fmt.Sprintf("%s -> %s", parentCommitment.ID().String()[:8], childCommitment.ID().String()[:8]), parent, child); edgeErr != nil { - return nil, ierrors.Wrapf(edgeErr, "could not create edge %s -> %s", parentCommitment.ID().String()[:8], childCommitment.ID().String()[:8]) + return ierrors.Wrapf(edgeErr, "could not create edge %s -> %s", 
parentCommitment.ID().String()[:8], childCommitment.ID().String()[:8]) } commitmentWalker.Push(childCommitment) + return nil + }); err != nil { + return nil, err } } return graph, nil } -func createNode(graph *cgraph.Graph, commitment *chainmanager.ChainCommitment) (*cgraph.Node, error) { +func createNode(graph *cgraph.Graph, commitment *protocol.Commitment) (*cgraph.Node, error) { node, err := graph.Node(fmt.Sprintf("%d: %s", commitment.ID().Slot(), commitment.ID().String()[:8])) if err != nil { return nil, ierrors.Wrapf(err, "could not create node %s", commitment.ID().String()[:8]) diff --git a/components/debugapi/component.go b/components/debugapi/component.go index 5d4457ff5..b9b22ff58 100644 --- a/components/debugapi/component.go +++ b/components/debugapi/component.go @@ -154,7 +154,7 @@ func configure() error { return err } - if block, exists := deps.Protocol.MainEngineInstance().BlockCache.Block(blockID); exists && block.ProtocolBlock() != nil { + if block, exists := deps.Protocol.Engines.Main.Get().BlockCache.Block(blockID); exists && block.ProtocolBlock() != nil { response := BlockMetadataResponseFromBlock(block) return httpserver.JSONResponse(c, http.StatusOK, response) diff --git a/components/debugapi/node.go b/components/debugapi/node.go index e625cfdc6..020822a82 100644 --- a/components/debugapi/node.go +++ b/components/debugapi/node.go @@ -9,8 +9,8 @@ import ( //nolint:unparam // we have no error case right now func validatorsSummary() (*ValidatorsSummaryResponse, error) { - seatManager := deps.Protocol.MainEngineInstance().SybilProtection.SeatManager() - latestSlotIndex := deps.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Slot() + seatManager := deps.Protocol.Engines.Main.Get().SybilProtection.SeatManager() + latestSlotIndex := deps.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Slot() latestCommittee, exists := seatManager.CommitteeInSlot(latestSlotIndex) if !exists { return nil, ierrors.Errorf("committee for slot %d was not selected", latestSlotIndex) diff --git a/components/debugapi/transactions.go b/components/debugapi/transactions.go index 8a325e98c..55a8314d9 100644 --- a/components/debugapi/transactions.go +++ b/components/debugapi/transactions.go @@ -17,7 +17,7 @@ func init() { func storeTransactionsPerSlot(scd *notarization.SlotCommittedDetails) error { slot := scd.Commitment.Slot() - stateDiff, err := deps.Protocol.MainEngineInstance().Ledger.MemPool().StateDiff(slot) + stateDiff, err := deps.Protocol.Engines.Main.Get().Ledger.MemPool().StateDiff(slot) if err != nil { return ierrors.Wrapf(err, "failed to retrieve state diff for slot %d", slot) } diff --git a/components/inx/server_accounts.go b/components/inx/server_accounts.go index 6e8585be6..47795c49c 100644 --- a/components/inx/server_accounts.go +++ b/components/inx/server_accounts.go @@ -15,7 +15,7 @@ func (s *Server) ReadIsValidatorAccount(_ context.Context, accountInfoRequest *i return nil, ierrors.Wrap(err, "error when parsing account id") } - account, exists, err := deps.Protocol.MainEngineInstance().Ledger.Account(accountID, slot) + account, exists, err := deps.Protocol.Engines.Main.Get().Ledger.Account(accountID, slot) if err != nil { return nil, ierrors.Wrapf(err, "error when retrieving account data for %s", accountID) } @@ -29,7 +29,7 @@ func (s *Server) ReadIsCommitteeMember(_ context.Context, accountInfoRequest *in if err != nil { return nil, ierrors.Wrap(err, "error when parsing account id") } - committee, exists := 
deps.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(slot) + committee, exists := deps.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(slot) if !exists { return nil, ierrors.Errorf("committee does not exist for slot %d", slot) } @@ -44,7 +44,7 @@ func (s *Server) ReadIsCandidate(_ context.Context, accountInfoRequest *inx.Acco return nil, ierrors.Wrap(err, "error when parsing account id") } - isCandidateActive, err := deps.Protocol.MainEngineInstance().SybilProtection.IsCandidateActive(accountID, deps.Protocol.APIForSlot(slot).TimeProvider().EpochFromSlot(slot)) + isCandidateActive, err := deps.Protocol.Engines.Main.Get().SybilProtection.IsCandidateActive(accountID, deps.Protocol.APIForSlot(slot).TimeProvider().EpochFromSlot(slot)) if err != nil { return nil, ierrors.Wrap(err, "error when checking if candidate is active") } diff --git a/components/inx/server_blocks.go b/components/inx/server_blocks.go index 0bd9fefd1..1838d7ee6 100644 --- a/components/inx/server_blocks.go +++ b/components/inx/server_blocks.go @@ -19,14 +19,14 @@ import ( ) func (s *Server) ReadActiveRootBlocks(_ context.Context, _ *inx.NoParams) (*inx.RootBlocksResponse, error) { - activeRootBlocks := deps.Protocol.MainEngineInstance().EvictionState.ActiveRootBlocks() + activeRootBlocks := deps.Protocol.Engines.Main.Get().EvictionState.ActiveRootBlocks() return inx.WrapRootBlocks(activeRootBlocks), nil } func (s *Server) ReadBlock(_ context.Context, blockID *inx.BlockId) (*inx.RawBlock, error) { blkID := blockID.Unwrap() - block, exists := deps.Protocol.MainEngineInstance().Block(blkID) // block +1 + block, exists := deps.Protocol.Engines.Main.Get().Block(blkID) // block +1 if !exists { return nil, status.Errorf(codes.NotFound, "block %s not found", blkID.ToHex()) } @@ -146,7 +146,7 @@ func (s *Server) ListenToConfirmedBlocks(_ *inx.NoParams, srv inx.INX_ListenToCo } func (s *Server) ReadAcceptedBlocks(slot *inx.SlotIndex, srv inx.INX_ReadAcceptedBlocksServer) error { - blocksStore, err := deps.Protocol.MainEngineInstance().Storage.Blocks(slot.Unwrap()) + blocksStore, err := deps.Protocol.Engines.Main.Get().Storage.Blocks(slot.Unwrap()) if err != nil { return status.Errorf(codes.InvalidArgument, "failed to get blocks: %s", err.Error()) } @@ -203,7 +203,7 @@ func (s *Server) attachBlock(ctx context.Context, block *iotago.Block) (*inx.Blo } func getINXBlockMetadata(blockID iotago.BlockID) (*inx.BlockMetadata, error) { - retainerBlockMetadata, err := deps.Protocol.MainEngineInstance().Retainer.BlockMetadata(blockID) + retainerBlockMetadata, err := deps.Protocol.Engines.Main.Get().Retainer.BlockMetadata(blockID) if err != nil { return nil, ierrors.Errorf("failed to get BlockMetadata: %v", err) } diff --git a/components/inx/server_commitments.go b/components/inx/server_commitments.go index a071a5116..1e8fc3961 100644 --- a/components/inx/server_commitments.go +++ b/components/inx/server_commitments.go @@ -29,7 +29,7 @@ func inxCommitment(commitment *model.Commitment) *inx.Commitment { func (s *Server) ListenToCommitments(req *inx.SlotRangeRequest, srv inx.INX_ListenToCommitmentsServer) error { createCommitmentPayloadForSlotAndSend := func(slot iotago.SlotIndex) error { - commitment, err := deps.Protocol.MainEngineInstance().Storage.Commitments().Load(slot) + commitment, err := deps.Protocol.Engines.Main.Get().Storage.Commitments().Load(slot) if err != nil { if ierrors.Is(err, kvstore.ErrKeyNotFound) { return status.Errorf(codes.NotFound, "commitment slot %d not found", slot) 
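Note on the pattern applied throughout this patch: every former `deps.Protocol.MainEngineInstance()` call site now reads the reactive `Engines.Main` variable with `Get()`, and chain-level lookups move from the removed `ChainManager` onto `deps.Protocol.Chains`. The sketch below is illustrative only and not part of the patch; it condenses the call-site changes visible in the hunks and assumes `Engines`, `Chains` and `HeaviestVerifiedCandidate` are exported reactive fields on `*protocol.Protocol`, as the diff suggests.

```go
// Illustrative sketch only, not part of the patch. It assumes Engines, Chains and
// HeaviestVerifiedCandidate are exported reactive fields on *protocol.Protocol,
// which is what the hunks in this diff suggest.
package example

import "github.com/iotaledger/iota-core/pkg/protocol"

func engineCallSites(p *protocol.Protocol) {
	// was: p.MainEngineInstance()
	mainEngine := p.Engines.Main.Get()
	_ = mainEngine.SyncManager.LatestCommitment().Slot()

	// was: p.ChainManager.RootCommitment()
	rootCommitment := p.Chains.Main.Get().ForkingPoint.Get()
	_ = rootCommitment

	// was: the Events.ChainManager.ForkDetected hook; forks are now observed by
	// watching the heaviest verified candidate chain (see metrics_commitments.go).
	p.Chains.HeaviestVerifiedCandidate.OnUpdate(func(prev *protocol.Chain, _ *protocol.Chain) {
		if prev != nil {
			// a previously tracked candidate chain was replaced
		}
	})
}
```

The apparent motivation is that the main engine is no longer a fixed instance but a value that can change at runtime, so call sites re-read it via `Get()` at the moment of use instead of holding on to it.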
@@ -64,7 +64,7 @@ func (s *Server) ListenToCommitments(req *inx.SlotRangeRequest, srv inx.INX_List return 0, nil } - latestCommitment := deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment() + latestCommitment := deps.Protocol.Engines.Main.Get().SyncManager.LatestCommitment() if startSlot > latestCommitment.Slot() { // no need to send previous commitments @@ -72,7 +72,7 @@ func (s *Server) ListenToCommitments(req *inx.SlotRangeRequest, srv inx.INX_List } // Stream all available commitments first - prunedEpoch, hasPruned := deps.Protocol.MainEngineInstance().SyncManager.LastPrunedEpoch() + prunedEpoch, hasPruned := deps.Protocol.Engines.Main.Get().SyncManager.LastPrunedEpoch() if hasPruned && startSlot <= deps.Protocol.CommittedAPI().TimeProvider().EpochEnd(prunedEpoch) { return 0, status.Errorf(codes.InvalidArgument, "given startSlot %d is older than the current pruningSlot %d", startSlot, deps.Protocol.CommittedAPI().TimeProvider().EpochEnd(prunedEpoch)) } @@ -157,7 +157,7 @@ func (s *Server) ListenToCommitments(req *inx.SlotRangeRequest, srv inx.INX_List } func (s *Server) ForceCommitUntil(_ context.Context, slot *inx.SlotIndex) (*inx.NoParams, error) { - err := deps.Protocol.MainEngineInstance().Notarization.ForceCommitUntil(slot.Unwrap()) + err := deps.Protocol.Engines.Main.Get().Notarization.ForceCommitUntil(slot.Unwrap()) if err != nil { return nil, ierrors.Wrapf(err, "error while performing force commit until %d", slot.Index) } @@ -171,7 +171,7 @@ func (s *Server) ReadCommitment(_ context.Context, req *inx.CommitmentRequest) ( commitmentSlot = req.GetCommitmentId().Unwrap().Slot() } - commitment, err := deps.Protocol.MainEngineInstance().Storage.Commitments().Load(commitmentSlot) + commitment, err := deps.Protocol.Engines.Main.Get().Storage.Commitments().Load(commitmentSlot) if err != nil { if ierrors.Is(err, kvstore.ErrKeyNotFound) { return nil, status.Errorf(codes.NotFound, "commitment slot %d not found", req.GetCommitmentSlot()) diff --git a/components/inx/server_issuance.go b/components/inx/server_issuance.go index 762fc09c7..50d43f2ae 100644 --- a/components/inx/server_issuance.go +++ b/components/inx/server_issuance.go @@ -12,7 +12,7 @@ import ( ) func (s *Server) RequestTips(_ context.Context, req *inx.TipsRequest) (*inx.TipsResponse, error) { - references := deps.Protocol.MainEngineInstance().TipSelection.SelectTips(int(req.GetCount())) + references := deps.Protocol.Engines.Main.Get().TipSelection.SelectTips(int(req.GetCount())) return &inx.TipsResponse{ StrongTips: inx.NewBlockIds(references[iotago.StrongParentType]), @@ -30,7 +30,7 @@ func (s *Server) ValidatePayload(_ context.Context, payload *inx.RawPayload) (*i switch typedPayload := blockPayload.(type) { case *iotago.SignedTransaction: - memPool := deps.Protocol.MainEngineInstance().Ledger.MemPool() + memPool := deps.Protocol.Engines.Main.Get().Ledger.MemPool() inputReferences, inputsErr := memPool.VM().Inputs(typedPayload.Transaction) if inputsErr != nil { diff --git a/components/inx/server_node.go b/components/inx/server_node.go index 959e412ac..3359e7585 100644 --- a/components/inx/server_node.go +++ b/components/inx/server_node.go @@ -17,7 +17,7 @@ func inxNodeStatus(status *syncmanager.SyncStatus) *inx.NodeStatus { // to send finalized commitment. 
if !status.HasPruned || status.LatestFinalizedSlot > deps.Protocol.CommittedAPI().TimeProvider().EpochEnd(status.LastPrunedEpoch) { var err error - finalizedCommitment, err = deps.Protocol.MainEngineInstance().Storage.Commitments().Load(status.LatestFinalizedSlot) + finalizedCommitment, err = deps.Protocol.Engines.Main.Get().Storage.Commitments().Load(status.LatestFinalizedSlot) if err != nil { return nil } @@ -35,7 +35,7 @@ func inxNodeStatus(status *syncmanager.SyncStatus) *inx.NodeStatus { } func (s *Server) ReadNodeStatus(context.Context, *inx.NoParams) (*inx.NodeStatus, error) { - return inxNodeStatus(deps.Protocol.MainEngineInstance().SyncManager.SyncStatus()), nil + return inxNodeStatus(deps.Protocol.Engines.Main.Get().SyncManager.SyncStatus()), nil } func (s *Server) ListenToNodeStatus(req *inx.NodeStatusRequest, srv inx.INX_ListenToNodeStatusServer) error { @@ -96,7 +96,7 @@ func (s *Server) ListenToNodeStatus(req *inx.NodeStatusRequest, srv inx.INX_List func (s *Server) ReadNodeConfiguration(context.Context, *inx.NoParams) (*inx.NodeConfiguration, error) { protoParams := make([]*inx.RawProtocolParameters, 0) - provider := deps.Protocol.MainEngineInstance().Storage.Settings().APIProvider() + provider := deps.Protocol.Engines.Main.Get().Storage.Settings().APIProvider() for _, version := range provider.ProtocolEpochVersions() { protocolParams := provider.ProtocolParameters(version.Version) if protocolParams == nil { diff --git a/components/inx/server_transactions.go b/components/inx/server_transactions.go index 6705bbf71..5507fd276 100644 --- a/components/inx/server_transactions.go +++ b/components/inx/server_transactions.go @@ -20,7 +20,7 @@ func getINXTransactionMetadata(transactionID iotago.TransactionID) (*inx.Transac // Get the first output of that transaction (using index 0) outputID := iotago.OutputIDFromTransactionIDAndIndex(transactionID, 0) - output, spent, err := deps.Protocol.MainEngineInstance().Ledger.OutputOrSpent(outputID) + output, spent, err := deps.Protocol.Engines.Main.Get().Ledger.OutputOrSpent(outputID) if err != nil { return iotago.EmptyBlockID, status.Errorf(codes.Internal, "failed to get output %s: %s", outputID.ToHex(), err) } @@ -37,7 +37,7 @@ func getINXTransactionMetadata(transactionID iotago.TransactionID) (*inx.Transac return nil, err } - blockMetadata, err := deps.Protocol.MainEngineInstance().Retainer.BlockMetadata(blockID) + blockMetadata, err := deps.Protocol.Engines.Main.Get().Retainer.BlockMetadata(blockID) if err != nil { return nil, status.Errorf(codes.Internal, "failed to get block metadata %s: %s", blockID.ToHex(), err) } diff --git a/components/inx/server_utxo.go b/components/inx/server_utxo.go index 6cf753868..fd815e01a 100644 --- a/components/inx/server_utxo.go +++ b/components/inx/server_utxo.go @@ -18,7 +18,7 @@ import ( ) func NewLedgerOutput(o *utxoledger.Output, slotIncluded ...iotago.SlotIndex) (*inx.LedgerOutput, error) { - latestCommitment := deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment() + latestCommitment := deps.Protocol.Engines.Main.Get().SyncManager.LatestCommitment() includedSlot := o.SlotBooked() if len(slotIncluded) > 0 { @@ -41,7 +41,7 @@ func NewLedgerOutput(o *utxoledger.Output, slotIncluded ...iotago.SlotIndex) (*i includedSlot <= latestCommitment.Slot() && includedSlot >= deps.Protocol.CommittedAPI().ProtocolParameters().GenesisSlot() { - includedCommitment, err := deps.Protocol.MainEngineInstance().Storage.Commitments().Load(includedSlot) + includedCommitment, err := 
deps.Protocol.Engines.Main.Get().Storage.Commitments().Load(includedSlot) if err != nil { return nil, ierrors.Wrapf(err, "failed to load commitment with slot: %d", includedSlot) } @@ -63,13 +63,13 @@ func NewLedgerSpent(s *utxoledger.Spent) (*inx.LedgerSpent, error) { SlotSpent: uint32(s.SlotSpent()), } - latestCommitment := deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment() + latestCommitment := deps.Protocol.Engines.Main.Get().SyncManager.LatestCommitment() spentSlot := s.SlotSpent() if spentSlot > 0 && spentSlot <= latestCommitment.Slot() && spentSlot >= deps.Protocol.CommittedAPI().ProtocolParameters().GenesisSlot() { - spentCommitment, err := deps.Protocol.MainEngineInstance().Storage.Commitments().Load(spentSlot) + spentCommitment, err := deps.Protocol.Engines.Main.Get().Storage.Commitments().Load(spentSlot) if err != nil { return nil, ierrors.Wrapf(err, "failed to load commitment with slot: %d", spentSlot) } @@ -132,7 +132,7 @@ func NewLedgerUpdateBatchOperationConsumed(spent *utxoledger.Spent) (*inx.Ledger } func (s *Server) ReadOutput(_ context.Context, id *inx.OutputId) (*inx.OutputResponse, error) { - engine := deps.Protocol.MainEngineInstance() + engine := deps.Protocol.Engines.Main.Get() latestCommitment := engine.Storage.Settings().LatestCommitment() @@ -171,7 +171,7 @@ func (s *Server) ReadOutput(_ context.Context, id *inx.OutputId) (*inx.OutputRes } func (s *Server) ReadUnspentOutputs(_ *inx.NoParams, srv inx.INX_ReadUnspentOutputsServer) error { - engine := deps.Protocol.MainEngineInstance() + engine := deps.Protocol.Engines.Main.Get() latestCommitment := engine.Storage.Settings().LatestCommitment() var innerErr error @@ -205,7 +205,7 @@ func (s *Server) ReadUnspentOutputs(_ *inx.NoParams, srv inx.INX_ReadUnspentOutp func (s *Server) ListenToLedgerUpdates(req *inx.SlotRangeRequest, srv inx.INX_ListenToLedgerUpdatesServer) error { createLedgerUpdatePayloadAndSend := func(slot iotago.SlotIndex, outputs utxoledger.Outputs, spents utxoledger.Spents) error { - commitment, err := deps.Protocol.MainEngineInstance().Storage.Commitments().Load(slot) + commitment, err := deps.Protocol.Engines.Main.Get().Storage.Commitments().Load(slot) if err != nil { return status.Errorf(codes.NotFound, "commitment for slot %d not found", slot) } @@ -249,7 +249,7 @@ func (s *Server) ListenToLedgerUpdates(req *inx.SlotRangeRequest, srv inx.INX_Li sendStateDiffsRange := func(startSlot iotago.SlotIndex, endSlot iotago.SlotIndex) error { for currentSlot := startSlot; currentSlot <= endSlot; currentSlot++ { - stateDiff, err := deps.Protocol.MainEngineInstance().Ledger.SlotDiffs(currentSlot) + stateDiff, err := deps.Protocol.Engines.Main.Get().Ledger.SlotDiffs(currentSlot) if err != nil { return status.Errorf(codes.NotFound, "ledger update for slot %d not found", currentSlot) } @@ -271,7 +271,7 @@ func (s *Server) ListenToLedgerUpdates(req *inx.SlotRangeRequest, srv inx.INX_Li return 0, nil } - latestCommitment := deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment() + latestCommitment := deps.Protocol.Engines.Main.Get().SyncManager.LatestCommitment() if startSlot > latestCommitment.Slot() { // no need to send previous state diffs @@ -279,7 +279,7 @@ func (s *Server) ListenToLedgerUpdates(req *inx.SlotRangeRequest, srv inx.INX_Li } // Stream all available milestone diffs first - prunedEpoch, hasPruned := deps.Protocol.MainEngineInstance().SyncManager.LastPrunedEpoch() + prunedEpoch, hasPruned := deps.Protocol.Engines.Main.Get().SyncManager.LastPrunedEpoch() if hasPruned && 
startSlot <= deps.Protocol.CommittedAPI().TimeProvider().EpochEnd(prunedEpoch) { return 0, status.Errorf(codes.InvalidArgument, "given startSlot %d is older than the current pruningSlot %d", startSlot, deps.Protocol.CommittedAPI().TimeProvider().EpochEnd(prunedEpoch)) } diff --git a/components/metrics/metrics_accounts.go b/components/metrics/metrics_accounts.go index ec7f7e3e5..dc2bfca3d 100644 --- a/components/metrics/metrics_accounts.go +++ b/components/metrics/metrics_accounts.go @@ -23,7 +23,7 @@ var AccountMetrics = collector.NewCollection(accountNamespace, collector.WithPruningDelay(10*time.Minute), collector.WithInitFunc(func() { deps.Protocol.Events.Engine.BlockGadget.BlockAccepted.Hook(func(block *blocks.Block) { - accountData, exists, _ := deps.Protocol.MainEngineInstance().Ledger.Account(block.ProtocolBlock().Header.IssuerID, deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment().Slot()) + accountData, exists, _ := deps.Protocol.Engines.Main.Get().Ledger.Account(block.ProtocolBlock().Header.IssuerID, deps.Protocol.Engines.Main.Get().SyncManager.LatestCommitment().Slot()) if exists { deps.Collector.Update(accountNamespace, credits, float64(accountData.Credits.Value), accountData.ID.String()) } @@ -34,7 +34,7 @@ var AccountMetrics = collector.NewCollection(accountNamespace, collector.WithType(collector.Gauge), collector.WithHelp("Seats seen as active by the node."), collector.WithCollectFunc(func() (metricValue float64, labelValues []string) { - return float64(deps.Protocol.MainEngineInstance().SybilProtection.SeatManager().OnlineCommittee().Size()), nil + return float64(deps.Protocol.Engines.Main.Get().SybilProtection.SeatManager().OnlineCommittee().Size()), nil }), )), ) diff --git a/components/metrics/metrics_commitments.go b/components/metrics/metrics_commitments.go index 273dc6a6e..f45cfdeb1 100644 --- a/components/metrics/metrics_commitments.go +++ b/components/metrics/metrics_commitments.go @@ -6,7 +6,7 @@ import ( "github.com/iotaledger/hive.go/runtime/event" "github.com/iotaledger/iota-core/components/metrics/collector" - "github.com/iotaledger/iota-core/pkg/protocol/chainmanager" + "github.com/iotaledger/iota-core/pkg/protocol" "github.com/iotaledger/iota-core/pkg/protocol/engine/notarization" iotago "github.com/iotaledger/iota.go/v4" ) @@ -47,9 +47,11 @@ var CommitmentsMetrics = collector.NewCollection(commitmentsNamespace, collector.WithType(collector.Counter), collector.WithHelp("Number of forks seen by the node."), collector.WithInitFunc(func() { - deps.Protocol.Events.ChainManager.ForkDetected.Hook(func(_ *chainmanager.Fork) { - deps.Collector.Increment(commitmentsNamespace, forksCount) - }, event.WithWorkerPool(Component.WorkerPool)) + deps.Protocol.Chains.HeaviestVerifiedCandidate.OnUpdate(func(prevHeaviestVerifiedCandidate *protocol.Chain, _ *protocol.Chain) { + if prevHeaviestVerifiedCandidate != nil { + Component.WorkerPool.Submit(func() { deps.Collector.Increment(commitmentsNamespace, forksCount) }) + } + }) }), )), collector.WithMetric(collector.NewMetric(acceptedBlocks, diff --git a/components/metrics/metrics_conflicts.go b/components/metrics/metrics_conflicts.go index fb2743870..cee52f829 100644 --- a/components/metrics/metrics_conflicts.go +++ b/components/metrics/metrics_conflicts.go @@ -22,9 +22,9 @@ var ConflictMetrics = collector.NewCollection(conflictNamespace, collector.WithHelp("Time since transaction issuance to the conflict acceptance"), collector.WithInitFunc(func() { 
deps.Protocol.Events.Engine.SpendDAG.SpenderAccepted.Hook(func(spendID iotago.TransactionID) { - if txMetadata, exists := deps.Protocol.MainEngineInstance().Ledger.MemPool().TransactionMetadata(spendID); exists { + if txMetadata, exists := deps.Protocol.Engines.Main.Get().Ledger.MemPool().TransactionMetadata(spendID); exists { firstAttachmentID := txMetadata.EarliestIncludedAttachment() - if block, blockExists := deps.Protocol.MainEngineInstance().BlockFromCache(firstAttachmentID); blockExists { + if block, blockExists := deps.Protocol.Engines.Main.Get().BlockFromCache(firstAttachmentID); blockExists { timeSinceIssuance := time.Since(block.IssuingTime()).Milliseconds() timeIssuanceSeconds := float64(timeSinceIssuance) / 1000 deps.Collector.Update(conflictNamespace, resolutionTime, timeIssuanceSeconds) diff --git a/components/metrics/metrics_db.go b/components/metrics/metrics_db.go index c5147900e..2d2987868 100644 --- a/components/metrics/metrics_db.go +++ b/components/metrics/metrics_db.go @@ -17,14 +17,14 @@ var DBMetrics = collector.NewCollection(dbNamespace, collector.WithType(collector.Gauge), collector.WithHelp("DB size in bytes for permanent storage."), collector.WithCollectFunc(func() (metricValue float64, labelValues []string) { - return float64(deps.Protocol.MainEngineInstance().Storage.PermanentDatabaseSize()), nil + return float64(deps.Protocol.Engines.Main.Get().Storage.PermanentDatabaseSize()), nil }), )), collector.WithMetric(collector.NewMetric(sizeBytesPrunable, collector.WithType(collector.Gauge), collector.WithHelp("DB size in bytes for prunable storage."), collector.WithCollectFunc(func() (metricValue float64, labelValues []string) { - return float64(deps.Protocol.MainEngineInstance().Storage.PrunableDatabaseSize()), nil + return float64(deps.Protocol.Engines.Main.Get().Storage.PrunableDatabaseSize()), nil }), )), ) diff --git a/components/metrics/metrics_info.go b/components/metrics/metrics_info.go index 7072124f3..7bae443d5 100644 --- a/components/metrics/metrics_info.go +++ b/components/metrics/metrics_info.go @@ -36,7 +36,7 @@ var InfoMetrics = collector.NewCollection(infoNamespace, collector.WithType(collector.Gauge), collector.WithHelp("Node sync status based on ATT."), collector.WithCollectFunc(func() (metricValue float64, labelValues []string) { - if deps.Protocol.MainEngineInstance().SyncManager.IsNodeSynced() { + if deps.Protocol.Engines.Main.Get().SyncManager.IsNodeSynced() { return 1, nil } diff --git a/components/metrics/metrics_scheduler.go b/components/metrics/metrics_scheduler.go index f23b17221..c823b7379 100644 --- a/components/metrics/metrics_scheduler.go +++ b/components/metrics/metrics_scheduler.go @@ -38,23 +38,20 @@ var SchedulerMetrics = collector.NewCollection(schedulerNamespace, collector.WithHelp("Current size of each node's queue (in work units)."), collector.WithInitFunc(func() { deps.Protocol.Events.Engine.Scheduler.BlockEnqueued.Hook(func(block *blocks.Block) { - deps.Collector.Update(schedulerNamespace, queueSizePerNodeWork, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueWork(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) + deps.Collector.Update(schedulerNamespace, queueSizePerNodeWork, float64(deps.Protocol.Engines.Main.Get().Scheduler.IssuerQueueWork(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) }, event.WithWorkerPool(Component.WorkerPool)) deps.Protocol.Events.Engine.Scheduler.BlockSkipped.Hook(func(block *blocks.Block) { - 
deps.Collector.Update(schedulerNamespace, queueSizePerNodeWork, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueWork(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) - + deps.Collector.Update(schedulerNamespace, queueSizePerNodeWork, float64(deps.Protocol.Engines.Main.Get().Scheduler.IssuerQueueWork(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) }, event.WithWorkerPool(Component.WorkerPool)) deps.Protocol.Events.Engine.Scheduler.BlockDropped.Hook(func(block *blocks.Block, _ error) { - deps.Collector.Update(schedulerNamespace, queueSizePerNodeWork, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueWork(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) - + deps.Collector.Update(schedulerNamespace, queueSizePerNodeWork, float64(deps.Protocol.Engines.Main.Get().Scheduler.IssuerQueueWork(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) }, event.WithWorkerPool(Component.WorkerPool)) deps.Protocol.Events.Engine.Scheduler.BlockScheduled.Hook(func(block *blocks.Block) { - deps.Collector.Update(schedulerNamespace, queueSizePerNodeWork, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueWork(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) - + deps.Collector.Update(schedulerNamespace, queueSizePerNodeWork, float64(deps.Protocol.Engines.Main.Get().Scheduler.IssuerQueueWork(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) }, event.WithWorkerPool(Component.WorkerPool)) }), )), @@ -66,25 +63,25 @@ var SchedulerMetrics = collector.NewCollection(schedulerNamespace, collector.WithInitFunc(func() { deps.Protocol.Events.Engine.Scheduler.BlockEnqueued.Hook(func(block *blocks.Block) { if _, isBasic := block.BasicBlock(); isBasic { - deps.Collector.Update(schedulerNamespace, queueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) + deps.Collector.Update(schedulerNamespace, queueSizePerNodeCount, float64(deps.Protocol.Engines.Main.Get().Scheduler.IssuerQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) } }, event.WithWorkerPool(Component.WorkerPool)) deps.Protocol.Events.Engine.Scheduler.BlockSkipped.Hook(func(block *blocks.Block) { if _, isBasic := block.BasicBlock(); isBasic { - deps.Collector.Update(schedulerNamespace, queueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) + deps.Collector.Update(schedulerNamespace, queueSizePerNodeCount, float64(deps.Protocol.Engines.Main.Get().Scheduler.IssuerQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) } }, event.WithWorkerPool(Component.WorkerPool)) deps.Protocol.Events.Engine.Scheduler.BlockDropped.Hook(func(block *blocks.Block, _ error) { if _, isBasic := block.BasicBlock(); isBasic { - deps.Collector.Update(schedulerNamespace, queueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) + deps.Collector.Update(schedulerNamespace, queueSizePerNodeCount, 
float64(deps.Protocol.Engines.Main.Get().Scheduler.IssuerQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) } }, event.WithWorkerPool(Component.WorkerPool)) deps.Protocol.Events.Engine.Scheduler.BlockScheduled.Hook(func(block *blocks.Block) { if _, isBasic := block.BasicBlock(); isBasic { - deps.Collector.Update(schedulerNamespace, queueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.IssuerQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) + deps.Collector.Update(schedulerNamespace, queueSizePerNodeCount, float64(deps.Protocol.Engines.Main.Get().Scheduler.IssuerQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) } }, event.WithWorkerPool(Component.WorkerPool)) }), @@ -97,25 +94,25 @@ var SchedulerMetrics = collector.NewCollection(schedulerNamespace, collector.WithInitFunc(func() { deps.Protocol.Events.Engine.Scheduler.BlockEnqueued.Hook(func(block *blocks.Block) { if _, isValidation := block.ValidationBlock(); isValidation { - deps.Collector.Update(schedulerNamespace, validatorQueueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.ValidatorQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) + deps.Collector.Update(schedulerNamespace, validatorQueueSizePerNodeCount, float64(deps.Protocol.Engines.Main.Get().Scheduler.ValidatorQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) } }, event.WithWorkerPool(Component.WorkerPool)) deps.Protocol.Events.Engine.Scheduler.BlockSkipped.Hook(func(block *blocks.Block) { if _, isValidation := block.ValidationBlock(); isValidation { - deps.Collector.Update(schedulerNamespace, validatorQueueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.ValidatorQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) + deps.Collector.Update(schedulerNamespace, validatorQueueSizePerNodeCount, float64(deps.Protocol.Engines.Main.Get().Scheduler.ValidatorQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) } }, event.WithWorkerPool(Component.WorkerPool)) deps.Protocol.Events.Engine.Scheduler.BlockDropped.Hook(func(block *blocks.Block, _ error) { if _, isValidation := block.ValidationBlock(); isValidation { - deps.Collector.Update(schedulerNamespace, validatorQueueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.ValidatorQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) + deps.Collector.Update(schedulerNamespace, validatorQueueSizePerNodeCount, float64(deps.Protocol.Engines.Main.Get().Scheduler.ValidatorQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) } }, event.WithWorkerPool(Component.WorkerPool)) deps.Protocol.Events.Engine.Scheduler.BlockScheduled.Hook(func(block *blocks.Block) { if _, isValidation := block.ValidationBlock(); isValidation { - deps.Collector.Update(schedulerNamespace, validatorQueueSizePerNodeCount, float64(deps.Protocol.MainEngineInstance().Scheduler.ValidatorQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) + deps.Collector.Update(schedulerNamespace, validatorQueueSizePerNodeCount, 
float64(deps.Protocol.Engines.Main.Get().Scheduler.ValidatorQueueBlockCount(block.ProtocolBlock().Header.IssuerID)), block.ProtocolBlock().Header.IssuerID.String()) } }, event.WithWorkerPool(Component.WorkerPool)) }), @@ -127,9 +124,9 @@ var SchedulerMetrics = collector.NewCollection(schedulerNamespace, collector.WithHelp("Current amount of mana of each issuer in the queue."), collector.WithInitFunc(func() { deps.Protocol.Events.Engine.Scheduler.BlockEnqueued.Hook(func(block *blocks.Block) { - mana, err := deps.Protocol.MainEngineInstance().Ledger.ManaManager().GetManaOnAccount(block.ProtocolBlock().Header.IssuerID, block.SlotCommitmentID().Slot()) + mana, err := deps.Protocol.Engines.Main.Get().Ledger.ManaManager().GetManaOnAccount(block.ProtocolBlock().Header.IssuerID, block.SlotCommitmentID().Slot()) if err != nil { - deps.Protocol.MainEngineInstance().ErrorHandler("metrics")(ierrors.Wrapf(err, "failed to retrieve mana on account %s for slot %d", block.ProtocolBlock().Header.IssuerID, block.SlotCommitmentID().Slot())) + deps.Protocol.Engines.Main.Get().ErrorHandler("metrics")(ierrors.Wrapf(err, "failed to retrieve mana on account %s for slot %d", block.ProtocolBlock().Header.IssuerID, block.SlotCommitmentID().Slot())) return } @@ -168,42 +165,42 @@ var SchedulerMetrics = collector.NewCollection(schedulerNamespace, collector.WithType(collector.Gauge), collector.WithHelp("Maximum number of basic blocks that can be stored in the buffer."), collector.WithCollectFunc(func() (float64, []string) { - return float64(deps.Protocol.MainEngineInstance().CommittedAPI().ProtocolParameters().CongestionControlParameters().MaxBufferSize), []string{} + return float64(deps.Protocol.Engines.Main.Get().CommittedAPI().ProtocolParameters().CongestionControlParameters().MaxBufferSize), []string{} }), )), collector.WithMetric(collector.NewMetric(basicBufferReadyBlockCount, collector.WithType(collector.Gauge), collector.WithHelp("Number of ready blocks in the scheduler buffer."), collector.WithCollectFunc(func() (float64, []string) { - return float64(deps.Protocol.MainEngineInstance().Scheduler.ReadyBlocksCount()), []string{} + return float64(deps.Protocol.Engines.Main.Get().Scheduler.ReadyBlocksCount()), []string{} }), )), collector.WithMetric(collector.NewMetric(basicBufferTotalSize, collector.WithType(collector.Gauge), collector.WithHelp("Current number of basic blocks in the scheduler buffer."), collector.WithCollectFunc(func() (float64, []string) { - return float64(deps.Protocol.MainEngineInstance().Scheduler.BasicBufferSize()), []string{} + return float64(deps.Protocol.Engines.Main.Get().Scheduler.BasicBufferSize()), []string{} }), )), collector.WithMetric(collector.NewMetric(rate, collector.WithType(collector.Gauge), collector.WithHelp("Current scheduling rate of basic blocks."), collector.WithCollectFunc(func() (float64, []string) { - return float64(deps.Protocol.MainEngineInstance().CommittedAPI().ProtocolParameters().CongestionControlParameters().SchedulerRate), []string{} + return float64(deps.Protocol.Engines.Main.Get().CommittedAPI().ProtocolParameters().CongestionControlParameters().SchedulerRate), []string{} }), )), collector.WithMetric(collector.NewMetric(validatorBufferTotalSize, collector.WithType(collector.Gauge), collector.WithHelp("Current number of validation blocks in the scheduling buffer."), collector.WithCollectFunc(func() (float64, []string) { - return float64(deps.Protocol.MainEngineInstance().Scheduler.ValidatorBufferSize()), []string{} + return 
float64(deps.Protocol.Engines.Main.Get().Scheduler.ValidatorBufferSize()), []string{} }), )), collector.WithMetric(collector.NewMetric(validatorQueueMaxSize, collector.WithType(collector.Gauge), collector.WithHelp("Maximum number of validation blocks that can be stored in each validator queue."), collector.WithCollectFunc(func() (float64, []string) { - return float64(deps.Protocol.MainEngineInstance().CommittedAPI().ProtocolParameters().CongestionControlParameters().MaxValidationBufferSize), []string{} + return float64(deps.Protocol.Engines.Main.Get().CommittedAPI().ProtocolParameters().CongestionControlParameters().MaxValidationBufferSize), []string{} }), )), ) diff --git a/components/metrics/metrics_slots.go b/components/metrics/metrics_slots.go index fe1891410..764d784f9 100644 --- a/components/metrics/metrics_slots.go +++ b/components/metrics/metrics_slots.go @@ -69,10 +69,10 @@ var SlotMetrics = collector.NewCollection(slotNamespace, collector.WithPruningDelay(10*time.Minute), collector.WithHelp("Number of accepted attachments by the node per slot."), collector.WithInitFunc(func() { - deps.Protocol.MainEngineInstance().Ledger.OnTransactionAttached(func(transactionMetadata mempool.TransactionMetadata) { + deps.Protocol.Engines.Main.Get().Ledger.OnTransactionAttached(func(transactionMetadata mempool.TransactionMetadata) { transactionMetadata.OnAccepted(func() { for _, attachmentBlockID := range transactionMetadata.ValidAttachments() { - if block, exists := deps.Protocol.MainEngineInstance().BlockCache.Block(attachmentBlockID); exists && block.IsAccepted() { + if block, exists := deps.Protocol.Engines.Main.Get().BlockCache.Block(attachmentBlockID); exists && block.IsAccepted() { deps.Collector.Increment(slotNamespace, acceptedAttachments, strconv.Itoa(int(attachmentBlockID.Slot()))) } } @@ -94,7 +94,7 @@ var SlotMetrics = collector.NewCollection(slotNamespace, }, event.WithWorkerPool(Component.WorkerPool)) deps.Protocol.Events.Engine.SpendDAG.SpenderCreated.Hook(func(spendID iotago.TransactionID) { - if txMetadata, exists := deps.Protocol.MainEngineInstance().Ledger.TransactionMetadata(spendID); exists { + if txMetadata, exists := deps.Protocol.Engines.Main.Get().Ledger.TransactionMetadata(spendID); exists { for _, attachment := range txMetadata.ValidAttachments() { deps.Collector.Increment(slotNamespace, createdConflicts, strconv.Itoa(int(attachment.Slot()))) } @@ -116,9 +116,9 @@ var SlotMetrics = collector.NewCollection(slotNamespace, }, event.WithWorkerPool(Component.WorkerPool)) deps.Protocol.Events.Engine.SpendDAG.SpenderAccepted.Hook(func(spendID iotago.TransactionID) { - if txMetadata, exists := deps.Protocol.MainEngineInstance().Ledger.TransactionMetadata(spendID); exists { + if txMetadata, exists := deps.Protocol.Engines.Main.Get().Ledger.TransactionMetadata(spendID); exists { for _, attachmentBlockID := range txMetadata.ValidAttachments() { - if attachment, exists := deps.Protocol.MainEngineInstance().BlockCache.Block(attachmentBlockID); exists && attachment.IsAccepted() { + if attachment, exists := deps.Protocol.Engines.Main.Get().BlockCache.Block(attachmentBlockID); exists && attachment.IsAccepted() { deps.Collector.Increment(slotNamespace, acceptedConflicts, strconv.Itoa(int(attachment.ID().Slot()))) } } @@ -140,9 +140,9 @@ var SlotMetrics = collector.NewCollection(slotNamespace, }, event.WithWorkerPool(Component.WorkerPool)) deps.Protocol.Events.Engine.SpendDAG.SpenderRejected.Hook(func(spendID iotago.TransactionID) { - if txMetadata, exists := 
deps.Protocol.MainEngineInstance().Ledger.TransactionMetadata(spendID); exists { + if txMetadata, exists := deps.Protocol.Engines.Main.Get().Ledger.TransactionMetadata(spendID); exists { for _, attachmentBlockID := range txMetadata.ValidAttachments() { - if attachment, exists := deps.Protocol.MainEngineInstance().BlockCache.Block(attachmentBlockID); exists && attachment.IsAccepted() { + if attachment, exists := deps.Protocol.Engines.Main.Get().BlockCache.Block(attachmentBlockID); exists && attachment.IsAccepted() { deps.Collector.Increment(slotNamespace, rejectedConflicts, strconv.Itoa(int(attachment.ID().Slot()))) } } diff --git a/components/metrics/metrics_tangle.go b/components/metrics/metrics_tangle.go index 44a20ba72..82cca7b85 100644 --- a/components/metrics/metrics_tangle.go +++ b/components/metrics/metrics_tangle.go @@ -21,7 +21,7 @@ var TangleMetrics = collector.NewCollection(tangleNamespace, collector.WithType(collector.Gauge), collector.WithHelp("Number of strong tips in the tangle"), collector.WithCollectFunc(func() (metricValue float64, labelValues []string) { - count := len(deps.Protocol.MainEngineInstance().TipManager.StrongTips()) + count := len(deps.Protocol.Engines.Main.Get().TipManager.StrongTips()) return float64(count), nil }), @@ -30,7 +30,7 @@ var TangleMetrics = collector.NewCollection(tangleNamespace, collector.WithType(collector.Gauge), collector.WithHelp("Number of weak tips in the tangle"), collector.WithCollectFunc(func() (metricValue float64, labelValues []string) { - count := len(deps.Protocol.MainEngineInstance().TipManager.WeakTips()) + count := len(deps.Protocol.Engines.Main.Get().TipManager.WeakTips()) return float64(count), nil }), diff --git a/components/metricstracker/component.go b/components/metricstracker/component.go index a5d0ac03b..9cde574c9 100644 --- a/components/metricstracker/component.go +++ b/components/metricstracker/component.go @@ -46,7 +46,7 @@ func provide(c *dig.Container) error { } if err := c.Provide(func(deps metricsTrackerDeps) *MetricsTracker { - m := New(deps.Protocol.MainEngineInstance().SyncManager.IsBootstrapped) + m := New(deps.Protocol.Engines.Main.Get().SyncManager.IsBootstrapped) return m }); err != nil { diff --git a/components/protocol/component.go b/components/protocol/component.go index 64e407e63..037b67025 100644 --- a/components/protocol/component.go +++ b/components/protocol/component.go @@ -13,6 +13,7 @@ import ( "github.com/iotaledger/hive.go/app" "github.com/iotaledger/hive.go/ierrors" hivedb "github.com/iotaledger/hive.go/kvstore/database" + "github.com/iotaledger/hive.go/log" "github.com/iotaledger/hive.go/runtime/workerpool" "github.com/iotaledger/iota-core/pkg/core/account" "github.com/iotaledger/iota-core/pkg/daemon" @@ -139,6 +140,7 @@ func provide(c *dig.Container) error { } return protocol.New( + log.NewLogger("node"), workerpool.NewGroup("Protocol"), deps.P2PManager, protocol.WithBaseDirectory(ParamsDatabase.Path), @@ -176,16 +178,8 @@ func provide(c *dig.Container) error { } func configure() error { - deps.Protocol.Events.Error.Hook(func(err error) { - Component.LogErrorf("ProtocolError, error: %s", err) - }) - - deps.Protocol.Events.Network.Error.Hook(func(err error, id peer.ID) { - Component.LogErrorf("NetworkError, error: %s, peerID: %s", err.Error(), id) - }) - - deps.Protocol.Events.Network.BlockReceived.Hook(func(block *model.Block, source peer.ID) { - Component.LogDebugf("BlockReceived, blockID: %s, peerID: %s", block.ID(), source) + deps.Protocol.Network.OnBlockReceived(func(block *model.Block, 
source peer.ID) { + Component.LogDebugf("BlockReceived: %s", block.ID()) }) deps.Protocol.Events.Engine.BlockProcessed.Hook(func(blockID iotago.BlockID) { @@ -276,16 +270,12 @@ func configure() error { Component.LogDebugf("BlockSkipped, blockID: %s", block.ID()) }) - deps.Protocol.Events.ChainManager.RequestCommitment.Hook(func(id iotago.CommitmentID) { - Component.LogDebugf("RequestCommitment, commitmentID: %s", id) - }) - - deps.Protocol.Events.Network.SlotCommitmentRequestReceived.Hook(func(commitmentID iotago.CommitmentID, id peer.ID) { - Component.LogDebugf("SlotCommitmentRequestReceived, commitmentID: %s", commitmentID) + deps.Protocol.Network.OnCommitmentRequestReceived(func(commitmentID iotago.CommitmentID, id peer.ID) { + Component.LogDebugf("SlotCommitmentRequestReceived: %s", commitmentID) }) - deps.Protocol.Events.Network.SlotCommitmentReceived.Hook(func(commitment *model.Commitment, id peer.ID) { - Component.LogDebugf("SlotCommitmentReceived, commitmentID: %s", commitment.ID()) + deps.Protocol.Network.OnCommitmentReceived(func(commitment *model.Commitment, id peer.ID) { + Component.LogDebugf("SlotCommitmentReceived: %s", commitment.ID()) }) deps.Protocol.Events.Engine.SybilProtection.CommitteeSelected.Hook(func(committee *account.Accounts, epoch iotago.EpochIndex) { diff --git a/components/restapi/core/accounts.go b/components/restapi/core/accounts.go index 32800c774..d6d379fd5 100644 --- a/components/restapi/core/accounts.go +++ b/components/restapi/core/accounts.go @@ -22,7 +22,7 @@ func congestionByAccountAddress(c echo.Context) (*api.CongestionResponse, error) return nil, err } - commitment := deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment() + commitment := deps.Protocol.Engines.Main.Get().SyncManager.LatestCommitment() if commitmentID != iotago.EmptyCommitmentID { // a commitment ID was provided, so we use the commitment for that ID commitment, err = getCommitmentByID(commitmentID, commitment) @@ -43,7 +43,7 @@ func congestionByAccountAddress(c echo.Context) (*api.CongestionResponse, error) } accountID := accountAddress.AccountID() - acc, exists, err := deps.Protocol.MainEngineInstance().Ledger.Account(accountID, commitment.Slot()) + acc, exists, err := deps.Protocol.Engines.Main.Get().Ledger.Account(accountID, commitment.Slot()) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to get account %s from the Ledger: %s", accountID.ToHex(), err) } @@ -53,7 +53,7 @@ func congestionByAccountAddress(c echo.Context) (*api.CongestionResponse, error) return &api.CongestionResponse{ Slot: commitment.Slot(), - Ready: deps.Protocol.MainEngineInstance().Scheduler.IsBlockIssuerReady(accountID), + Ready: deps.Protocol.Engines.Main.Get().Scheduler.IsBlockIssuerReady(accountID), ReferenceManaCost: commitment.ReferenceManaCost(), BlockIssuanceCredits: acc.Credits.Value, }, nil @@ -71,7 +71,7 @@ func validators(c echo.Context) (*api.ValidatorsResponse, error) { pageSize = restapi.ParamsRestAPI.MaxPageSize } } - latestCommittedSlot := deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment().Slot() + latestCommittedSlot := deps.Protocol.Engines.Main.Get().SyncManager.LatestCommitment().Slot() // no cursor provided will be the first request requestedSlot := latestCommittedSlot var cursorIndex uint32 @@ -90,13 +90,13 @@ func validators(c echo.Context) (*api.ValidatorsResponse, error) { nextEpoch := deps.Protocol.APIForSlot(latestCommittedSlot).TimeProvider().EpochFromSlot(latestCommittedSlot) + 1 slotRange := uint32(requestedSlot) / 
restapi.ParamsRestAPI.RequestsMemoryCacheGranularity - registeredValidators, exists := deps.Protocol.MainEngineInstance().Retainer.RegisteredValidatorsCache(slotRange) + registeredValidators, exists := deps.Protocol.Engines.Main.Get().Retainer.RegisteredValidatorsCache(slotRange) if !exists { - registeredValidators, err = deps.Protocol.MainEngineInstance().SybilProtection.OrderedRegisteredCandidateValidatorsList(nextEpoch) + registeredValidators, err = deps.Protocol.Engines.Main.Get().SybilProtection.OrderedRegisteredCandidateValidatorsList(nextEpoch) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to get ordered registered validators list for epoch %d : %s", nextEpoch, err) } - deps.Protocol.MainEngineInstance().Retainer.RetainRegisteredValidatorsCache(slotRange, registeredValidators) + deps.Protocol.Engines.Main.Get().Retainer.RetainRegisteredValidatorsCache(slotRange, registeredValidators) } page := registeredValidators[cursorIndex:lo.Min(cursorIndex+pageSize, uint32(len(registeredValidators)))] @@ -126,10 +126,10 @@ func validatorByAccountAddress(c echo.Context) (*api.ValidatorResponse, error) { return nil, ierrors.Wrapf(httpserver.ErrInvalidParameter, "address %s is not an account address", c.Param(api.ParameterBech32Address)) } - latestCommittedSlot := deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment().Slot() + latestCommittedSlot := deps.Protocol.Engines.Main.Get().SyncManager.LatestCommitment().Slot() accountID := accountAddress.AccountID() - accountData, exists, err := deps.Protocol.MainEngineInstance().Ledger.Account(accountID, latestCommittedSlot) + accountData, exists, err := deps.Protocol.Engines.Main.Get().Ledger.Account(accountID, latestCommittedSlot) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to get account %s from the Ledger: %s", accountID.ToHex(), err) } @@ -139,7 +139,7 @@ func validatorByAccountAddress(c echo.Context) (*api.ValidatorResponse, error) { nextEpoch := deps.Protocol.APIForSlot(latestCommittedSlot).TimeProvider().EpochFromSlot(latestCommittedSlot) + 1 - active, err := deps.Protocol.MainEngineInstance().SybilProtection.IsCandidateActive(accountID, nextEpoch) + active, err := deps.Protocol.Engines.Main.Get().SybilProtection.IsCandidateActive(accountID, nextEpoch) if err != nil { return nil, ierrors.Wrapf(err, "failed to check if account %s is an active candidate", accountID.ToHex()) } @@ -176,10 +176,10 @@ func rewardsByOutputID(c echo.Context) (*api.ManaRewardsResponse, error) { } else { // The slot index may be unset for requests that do not want to issue a transaction, such as displaying estimated rewards, // in which case we use latest committed slot. 
- slotIndex = deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment().Slot() + slotIndex = deps.Protocol.Engines.Main.Get().SyncManager.LatestCommitment().Slot() } - utxoOutput, err := deps.Protocol.MainEngineInstance().Ledger.Output(outputID) + utxoOutput, err := deps.Protocol.Engines.Main.Get().Ledger.Output(outputID) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to get output %s from ledger: %s", outputID.ToHex(), err) } @@ -199,7 +199,7 @@ func rewardsByOutputID(c echo.Context) (*api.ManaRewardsResponse, error) { stakingFeature := feature.(*iotago.StakingFeature) // check if the account is a validator - reward, actualStart, actualEnd, err = deps.Protocol.MainEngineInstance().SybilProtection.ValidatorReward( + reward, actualStart, actualEnd, err = deps.Protocol.Engines.Main.Get().SybilProtection.ValidatorReward( accountOutput.AccountID, stakingFeature.StakedAmount, stakingFeature.StartEpoch, @@ -220,7 +220,7 @@ func rewardsByOutputID(c echo.Context) (*api.ManaRewardsResponse, error) { delegationEnd = apiForSlot.TimeProvider().EpochFromSlot(futureBoundedSlotIndex) - iotago.EpochIndex(1) } - reward, actualStart, actualEnd, err = deps.Protocol.MainEngineInstance().SybilProtection.DelegatorReward( + reward, actualStart, actualEnd, err = deps.Protocol.Engines.Main.Get().SybilProtection.DelegatorReward( delegationOutput.ValidatorAddress.AccountID(), delegationOutput.DelegatedAmount, delegationOutput.StartEpoch, @@ -252,7 +252,7 @@ func selectedCommittee(c echo.Context) (*api.CommitteeResponse, error) { slot = timeProvider.EpochEnd(epoch) } - seatedAccounts, exists := deps.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(slot) + seatedAccounts, exists := deps.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(slot) if !exists { return &api.CommitteeResponse{ Epoch: epoch, diff --git a/components/restapi/core/blocks.go b/components/restapi/core/blocks.go index 7343f4826..bab9de006 100644 --- a/components/restapi/core/blocks.go +++ b/components/restapi/core/blocks.go @@ -16,7 +16,7 @@ func blockByID(c echo.Context) (*iotago.Block, error) { return nil, ierrors.Wrapf(err, "failed to parse block ID %s", c.Param(api.ParameterBlockID)) } - block, exists := deps.Protocol.MainEngineInstance().Block(blockID) + block, exists := deps.Protocol.Engines.Main.Get().Block(blockID) if !exists { return nil, ierrors.Wrapf(echo.ErrNotFound, "block not found: %s", blockID.ToHex()) } @@ -25,7 +25,7 @@ func blockByID(c echo.Context) (*iotago.Block, error) { } func blockMetadataByBlockID(blockID iotago.BlockID) (*api.BlockMetadataResponse, error) { - blockMetadata, err := deps.Protocol.MainEngineInstance().Retainer.BlockMetadata(blockID) + blockMetadata, err := deps.Protocol.Engines.Main.Get().Retainer.BlockMetadata(blockID) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to get block metadata %s: %s", blockID.ToHex(), err) } @@ -34,7 +34,7 @@ func blockMetadataByBlockID(blockID iotago.BlockID) (*api.BlockMetadataResponse, } func transactionMetadataByBlockID(blockID iotago.BlockID) (*api.TransactionMetadataResponse, error) { - blockMetadata, err := deps.Protocol.MainEngineInstance().Retainer.BlockMetadata(blockID) + blockMetadata, err := deps.Protocol.Engines.Main.Get().Retainer.BlockMetadata(blockID) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to get block metadata %s: %s", blockID.ToHex(), err) } @@ -62,7 +62,7 @@ func blockWithMetadataByID(c echo.Context) 
(*api.BlockWithMetadataResponse, erro return nil, ierrors.Wrapf(err, "failed to parse block ID %s", c.Param(api.ParameterBlockID)) } - block, exists := deps.Protocol.MainEngineInstance().Block(blockID) + block, exists := deps.Protocol.Engines.Main.Get().Block(blockID) if !exists { return nil, ierrors.Wrapf(echo.ErrNotFound, "no transaction found for block ID %s", blockID.ToHex()) } @@ -79,7 +79,7 @@ func blockWithMetadataByID(c echo.Context) (*api.BlockWithMetadataResponse, erro } func blockIssuance() (*api.IssuanceBlockHeaderResponse, error) { - references := deps.Protocol.MainEngineInstance().TipSelection.SelectTips(iotago.BasicBlockMaxParents) + references := deps.Protocol.Engines.Main.Get().TipSelection.SelectTips(iotago.BasicBlockMaxParents) if len(references[iotago.StrongParentType]) == 0 { return nil, ierrors.Wrap(echo.ErrServiceUnavailable, "no strong parents available") } @@ -88,8 +88,8 @@ func blockIssuance() (*api.IssuanceBlockHeaderResponse, error) { StrongParents: references[iotago.StrongParentType], WeakParents: references[iotago.WeakParentType], ShallowLikeParents: references[iotago.ShallowLikeParentType], - LatestFinalizedSlot: deps.Protocol.MainEngineInstance().SyncManager.LatestFinalizedSlot(), - LatestCommitment: deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment().Commitment(), + LatestFinalizedSlot: deps.Protocol.Engines.Main.Get().SyncManager.LatestFinalizedSlot(), + LatestCommitment: deps.Protocol.Engines.Main.Get().SyncManager.LatestCommitment().Commitment(), } return resp, nil diff --git a/components/restapi/core/commitment.go b/components/restapi/core/commitment.go index 38f68cb84..7b9a1cd8d 100644 --- a/components/restapi/core/commitment.go +++ b/components/restapi/core/commitment.go @@ -14,14 +14,14 @@ func getCommitmentBySlot(slot iotago.SlotIndex, latestCommitment ...*model.Commi if len(latestCommitment) > 0 { latest = latestCommitment[0] } else { - latest = deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment() + latest = deps.Protocol.Engines.Main.Get().SyncManager.LatestCommitment() } if slot > latest.Slot() { return nil, ierrors.Wrapf(echo.ErrBadRequest, "commitment is from a future slot (%d > %d)", slot, latest.Slot()) } - commitment, err := deps.Protocol.MainEngineInstance().Storage.Commitments().Load(slot) + commitment, err := deps.Protocol.Engines.Main.Get().Storage.Commitments().Load(slot) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to load commitment, slot: %d, error: %w", slot, err) } @@ -34,14 +34,14 @@ func getCommitmentByID(commitmentID iotago.CommitmentID, latestCommitment ...*mo if len(latestCommitment) > 0 { latest = latestCommitment[0] } else { - latest = deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment() + latest = deps.Protocol.Engines.Main.Get().SyncManager.LatestCommitment() } if commitmentID.Slot() > latest.Slot() { return nil, ierrors.Wrapf(echo.ErrBadRequest, "commitment ID (%s) is from a future slot (%d > %d)", commitmentID, commitmentID.Slot(), latest.Slot()) } - commitment, err := deps.Protocol.MainEngineInstance().Storage.Commitments().Load(commitmentID.Slot()) + commitment, err := deps.Protocol.Engines.Main.Get().Storage.Commitments().Load(commitmentID.Slot()) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to load commitment, commitmentID: %s, slot: %d, error: %w", commitmentID, commitmentID.Slot(), err) } @@ -54,7 +54,7 @@ func getCommitmentByID(commitmentID iotago.CommitmentID, latestCommitment ...*mo } func 
getUTXOChanges(commitmentID iotago.CommitmentID) (*api.UTXOChangesResponse, error) { - diffs, err := deps.Protocol.MainEngineInstance().Ledger.SlotDiffs(commitmentID.Slot()) + diffs, err := deps.Protocol.Engines.Main.Get().Ledger.SlotDiffs(commitmentID.Slot()) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to get slot diffs, commitmentID: %s, slot: %d, error: %w", commitmentID, commitmentID.Slot(), err) } diff --git a/components/restapi/core/component.go b/components/restapi/core/component.go index 94f95568e..603b0d375 100644 --- a/components/restapi/core/component.go +++ b/components/restapi/core/component.go @@ -285,7 +285,7 @@ func AddFeature(feature string) { func checkNodeSynced() echo.MiddlewareFunc { return func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { - if !deps.Protocol.MainEngineInstance().SyncManager.IsNodeSynced() { + if !deps.Protocol.Engines.Main.Get().SyncManager.IsNodeSynced() { return ierrors.Wrap(echo.ErrServiceUnavailable, "node is not synced") } diff --git a/components/restapi/core/node.go b/components/restapi/core/node.go index 5ddc1fcfa..4872eeaa4 100644 --- a/components/restapi/core/node.go +++ b/components/restapi/core/node.go @@ -4,7 +4,7 @@ import "github.com/iotaledger/iota.go/v4/api" func protocolParameters() []*api.InfoResProtocolParameters { protoParams := make([]*api.InfoResProtocolParameters, 0) - provider := deps.Protocol.MainEngineInstance().Storage.Settings().APIProvider() + provider := deps.Protocol.Engines.Main.Get().Storage.Settings().APIProvider() for _, version := range provider.ProtocolEpochVersions() { protocolParams := provider.ProtocolParameters(version.Version) if protocolParams == nil { @@ -21,8 +21,8 @@ func protocolParameters() []*api.InfoResProtocolParameters { } func info() *api.InfoResponse { - clSnapshot := deps.Protocol.MainEngineInstance().Clock.Snapshot() - syncStatus := deps.Protocol.MainEngineInstance().SyncManager.SyncStatus() + clSnapshot := deps.Protocol.Engines.Main.Get().Clock.Snapshot() + syncStatus := deps.Protocol.Engines.Main.Get().SyncManager.SyncStatus() metrics := deps.MetricsTracker.NodeMetrics() return &api.InfoResponse{ diff --git a/components/restapi/core/transaction.go b/components/restapi/core/transaction.go index 15e517ed4..cee75f756 100644 --- a/components/restapi/core/transaction.go +++ b/components/restapi/core/transaction.go @@ -23,7 +23,7 @@ func blockIDFromTransactionID(transactionID iotago.TransactionID) (iotago.BlockI // Get the first output of that transaction (using index 0) outputID := iotago.OutputIDFromTransactionIDAndIndex(transactionID, 0) - output, spent, err := deps.Protocol.MainEngineInstance().Ledger.OutputOrSpent(outputID) + output, spent, err := deps.Protocol.Engines.Main.Get().Ledger.OutputOrSpent(outputID) if err != nil { return iotago.EmptyBlockID, ierrors.Wrapf(echo.ErrInternalServerError, "failed to get output %s: %s", outputID.ToHex(), err) } @@ -41,7 +41,7 @@ func blockByTransactionID(c echo.Context) (*model.Block, error) { return nil, ierrors.Wrapf(echo.ErrBadRequest, "failed to get block ID by transaction ID: %s", err) } - block, exists := deps.Protocol.MainEngineInstance().Block(blockID) + block, exists := deps.Protocol.Engines.Main.Get().Block(blockID) if !exists { return nil, ierrors.Wrapf(echo.ErrNotFound, "block not found: %s", blockID.ToHex()) } diff --git a/components/restapi/core/utxo.go b/components/restapi/core/utxo.go index cfc880f2f..7d04faf8c 100644 --- a/components/restapi/core/utxo.go +++ 
b/components/restapi/core/utxo.go @@ -16,7 +16,7 @@ func outputByID(c echo.Context) (*api.OutputResponse, error) { return nil, ierrors.Wrapf(err, "failed to parse output ID %s", c.Param(api.ParameterOutputID)) } - output, err := deps.Protocol.MainEngineInstance().Ledger.Output(outputID) + output, err := deps.Protocol.Engines.Main.Get().Ledger.Output(outputID) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to get output %s from the Ledger: %s", outputID.ToHex(), err) } @@ -33,7 +33,7 @@ func outputMetadataByID(c echo.Context) (*api.OutputMetadata, error) { return nil, ierrors.Wrapf(err, "failed to parse output ID %s", c.Param(api.ParameterOutputID)) } - output, spent, err := deps.Protocol.MainEngineInstance().Ledger.OutputOrSpent(outputID) + output, spent, err := deps.Protocol.Engines.Main.Get().Ledger.OutputOrSpent(outputID) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to get output %s from the Ledger: %s", outputID.ToHex(), err) } @@ -51,7 +51,7 @@ func outputWithMetadataByID(c echo.Context) (*api.OutputWithMetadataResponse, er return nil, ierrors.Wrapf(err, "failed to parse output ID %s", c.Param(api.ParameterOutputID)) } - output, spent, err := deps.Protocol.MainEngineInstance().Ledger.OutputOrSpent(outputID) + output, spent, err := deps.Protocol.Engines.Main.Get().Ledger.OutputOrSpent(outputID) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to get output %s from the Ledger: %s", outputID.ToHex(), err) } @@ -82,14 +82,14 @@ func outputWithMetadataByID(c echo.Context) (*api.OutputWithMetadataResponse, er } func newOutputMetadataResponse(output *utxoledger.Output) (*api.OutputMetadata, error) { - latestCommitment := deps.Protocol.MainEngineInstance().SyncManager.LatestCommitment() + latestCommitment := deps.Protocol.Engines.Main.Get().SyncManager.LatestCommitment() includedSlot := output.SlotBooked() includedCommitmentID := iotago.EmptyCommitmentID if includedSlot <= latestCommitment.Slot() && - includedSlot >= deps.Protocol.MainEngineInstance().CommittedAPI().ProtocolParameters().GenesisSlot() { - includedCommitment, err := deps.Protocol.MainEngineInstance().Storage.Commitments().Load(includedSlot) + includedSlot >= deps.Protocol.Engines.Main.Get().CommittedAPI().ProtocolParameters().GenesisSlot() { + includedCommitment, err := deps.Protocol.Engines.Main.Get().Storage.Commitments().Load(includedSlot) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to load commitment with index %d: %s", includedSlot, err) } @@ -118,8 +118,8 @@ func newSpentMetadataResponse(spent *utxoledger.Spent) (*api.OutputMetadata, err spentCommitmentID := iotago.EmptyCommitmentID if spentSlot <= newOutputMetadataResponse.LatestCommitmentID.Slot() && - spentSlot >= deps.Protocol.MainEngineInstance().CommittedAPI().ProtocolParameters().GenesisSlot() { - spentCommitment, err := deps.Protocol.MainEngineInstance().Storage.Commitments().Load(spentSlot) + spentSlot >= deps.Protocol.Engines.Main.Get().CommittedAPI().ProtocolParameters().GenesisSlot() { + spentCommitment, err := deps.Protocol.Engines.Main.Get().Storage.Commitments().Load(spentSlot) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "failed to load commitment with index %d: %s", spentSlot, err) } diff --git a/components/restapi/management/pruning.go b/components/restapi/management/pruning.go index 8f9af7399..88bf1f8a5 100644 --- a/components/restapi/management/pruning.go +++ 
b/components/restapi/management/pruning.go @@ -10,7 +10,7 @@ import ( ) func pruneDatabase(c echo.Context) (*api.PruneDatabaseResponse, error) { - if deps.Protocol.MainEngineInstance().Storage.IsPruning() { + if deps.Protocol.Engines.Main.Get().Storage.IsPruning() { return nil, ierrors.Wrapf(echo.ErrServiceUnavailable, "node is already pruning") } @@ -30,14 +30,14 @@ func pruneDatabase(c echo.Context) (*api.PruneDatabaseResponse, error) { var err error if request.Epoch != 0 { - err = deps.Protocol.MainEngineInstance().Storage.PruneByEpochIndex(request.Epoch) + err = deps.Protocol.Engines.Main.Get().Storage.PruneByEpochIndex(request.Epoch) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "pruning database failed: %s", err) } } if request.Depth != 0 { - _, _, err := deps.Protocol.MainEngineInstance().Storage.PruneByDepth(request.Depth) + _, _, err := deps.Protocol.Engines.Main.Get().Storage.PruneByDepth(request.Depth) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "pruning database failed: %s", err) } @@ -49,13 +49,13 @@ func pruneDatabase(c echo.Context) (*api.PruneDatabaseResponse, error) { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "pruning database failed: %s", err) } - err = deps.Protocol.MainEngineInstance().Storage.PruneBySize(pruningTargetDatabaseSizeBytes) + err = deps.Protocol.Engines.Main.Get().Storage.PruneBySize(pruningTargetDatabaseSizeBytes) if err != nil { return nil, ierrors.Wrapf(echo.ErrInternalServerError, "pruning database failed: %s", err) } } - targetEpoch, hasPruned := deps.Protocol.MainEngineInstance().Storage.LastPrunedEpoch() + targetEpoch, hasPruned := deps.Protocol.Engines.Main.Get().Storage.LastPrunedEpoch() if hasPruned { targetEpoch++ } diff --git a/components/restapi/routes.go b/components/restapi/routes.go index 0ea71b46c..370d34928 100644 --- a/components/restapi/routes.go +++ b/components/restapi/routes.go @@ -16,7 +16,7 @@ type RoutesResponse struct { func setupRoutes() { deps.Echo.GET(api.RouteHealth, func(c echo.Context) error { - if deps.Protocol.MainEngineInstance().SyncManager.IsNodeSynced() { + if deps.Protocol.Engines.Main.Get().SyncManager.IsNodeSynced() { return c.NoContent(http.StatusOK) } diff --git a/deploy/ansible/roles/iota-core-node/templates/docker-compose-iota-core.yml.j2 b/deploy/ansible/roles/iota-core-node/templates/docker-compose-iota-core.yml.j2 index f4e112d10..f8f85afaa 100644 --- a/deploy/ansible/roles/iota-core-node/templates/docker-compose-iota-core.yml.j2 +++ b/deploy/ansible/roles/iota-core-node/templates/docker-compose-iota-core.yml.j2 @@ -143,6 +143,7 @@ services: environment: - "VALIDATOR_PRV_KEY={{validatorPrvKey}}" command: > + --logger.level=debug --inx.address=iota-core:9029 {% if 'node-01' in inventory_hostname %} --validator.ignoreBootstrapped=true diff --git a/go.mod b/go.mod index 89c8f04ec..b8c3c377b 100644 --- a/go.mod +++ b/go.mod @@ -10,19 +10,20 @@ require ( github.com/google/uuid v1.4.0 github.com/gorilla/websocket v1.5.1 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/iotaledger/hive.go/ads v0.0.0-20231127134220-90b88e35bdb2 - github.com/iotaledger/hive.go/app v0.0.0-20231127134220-90b88e35bdb2 - github.com/iotaledger/hive.go/constraints v0.0.0-20231128121006-331a9e522dfe - github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231127134220-90b88e35bdb2 - github.com/iotaledger/hive.go/crypto v0.0.0-20231128121006-331a9e522dfe - github.com/iotaledger/hive.go/ds v0.0.0-20231127134220-90b88e35bdb2 - github.com/iotaledger/hive.go/ierrors 
v0.0.0-20231128121006-331a9e522dfe - github.com/iotaledger/hive.go/kvstore v0.0.0-20231127134220-90b88e35bdb2 - github.com/iotaledger/hive.go/lo v0.0.0-20231128121006-331a9e522dfe - github.com/iotaledger/hive.go/logger v0.0.0-20231127134220-90b88e35bdb2 - github.com/iotaledger/hive.go/runtime v0.0.0-20231128121006-331a9e522dfe - github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231127134220-90b88e35bdb2 - github.com/iotaledger/hive.go/stringify v0.0.0-20231128121006-331a9e522dfe + github.com/iotaledger/hive.go/ads v0.0.0-20231130155327-398db92f09a3 + github.com/iotaledger/hive.go/app v0.0.0-20231130155327-398db92f09a3 + github.com/iotaledger/hive.go/constraints v0.0.0-20231130155327-398db92f09a3 + github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231130155327-398db92f09a3 + github.com/iotaledger/hive.go/crypto v0.0.0-20231130155327-398db92f09a3 + github.com/iotaledger/hive.go/ds v0.0.0-20231130155327-398db92f09a3 + github.com/iotaledger/hive.go/ierrors v0.0.0-20231130155327-398db92f09a3 + github.com/iotaledger/hive.go/kvstore v0.0.0-20231130155327-398db92f09a3 + github.com/iotaledger/hive.go/lo v0.0.0-20231130155327-398db92f09a3 + github.com/iotaledger/hive.go/log v0.0.0-20231130155327-398db92f09a3 + github.com/iotaledger/hive.go/logger v0.0.0-20231130155327-398db92f09a3 + github.com/iotaledger/hive.go/runtime v0.0.0-20231130155327-398db92f09a3 + github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231130155327-398db92f09a3 + github.com/iotaledger/hive.go/stringify v0.0.0-20231130155327-398db92f09a3 github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231201123347-1c44b3f24221 github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231201114846-3bb5c3fd5665 github.com/iotaledger/iota.go/v4 v4.0.0-20231204142547-416c9a87403d @@ -88,7 +89,6 @@ require ( github.com/huin/goupnp v1.3.0 // indirect github.com/iancoleman/orderedmap v0.3.0 // indirect github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 // indirect - github.com/iotaledger/hive.go/log v0.0.0-20231127134220-90b88e35bdb2 // indirect github.com/ipfs/boxo v0.13.1 // indirect github.com/ipfs/go-cid v0.4.1 // indirect github.com/ipfs/go-datastore v0.6.0 // indirect diff --git a/go.sum b/go.sum index 2a1ecbf91..eb62b9bb8 100644 --- a/go.sum +++ b/go.sum @@ -275,34 +275,34 @@ github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJ github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 h1:dTrD7X2PTNgli6EbS4tV9qu3QAm/kBU3XaYZV2xdzys= github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7/go.mod h1:ZRdPu684P0fQ1z8sXz4dj9H5LWHhz4a9oCtvjunkSrw= -github.com/iotaledger/hive.go/ads v0.0.0-20231127134220-90b88e35bdb2 h1:0FynHsnJTZgxQuXk3/maXNgzyvbwQ+TnuiwY48kYSr4= -github.com/iotaledger/hive.go/ads v0.0.0-20231127134220-90b88e35bdb2/go.mod h1:gbUvr01B5ha530GnNm8K2OsHXOd2BtzBYOMxyTX3iDg= -github.com/iotaledger/hive.go/app v0.0.0-20231127134220-90b88e35bdb2 h1:WI6MQCxeANDyO7fOTovefuIusma+wT8VUJ3BisQLZEA= -github.com/iotaledger/hive.go/app v0.0.0-20231127134220-90b88e35bdb2/go.mod h1:+riYmeLApkLlj4+EpuJpEJAsj/KGfD7cqLGy7oTsPOM= -github.com/iotaledger/hive.go/constraints v0.0.0-20231128121006-331a9e522dfe h1:vgJj9kXM1IkLjbjWOV565Vil+RlzJwVhxG/KebMmrKE= -github.com/iotaledger/hive.go/constraints v0.0.0-20231128121006-331a9e522dfe/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s= -github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231127134220-90b88e35bdb2 
h1:ZL4cGO4zy7IwCIfHQgpvu3yMbNnFFRvSvTqaZM5Uj5U= -github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231127134220-90b88e35bdb2/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko= -github.com/iotaledger/hive.go/crypto v0.0.0-20231128121006-331a9e522dfe h1:/+Qw7fe5oSE5Jxm8RPzcfBKHbwjJ/zwHu3UWdSOvKW8= -github.com/iotaledger/hive.go/crypto v0.0.0-20231128121006-331a9e522dfe/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI= -github.com/iotaledger/hive.go/ds v0.0.0-20231127134220-90b88e35bdb2 h1:8YQlcFMexyYvjh3V/YSYzldeYjaDZd+1mHt8SUh8Uqs= -github.com/iotaledger/hive.go/ds v0.0.0-20231127134220-90b88e35bdb2/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc= -github.com/iotaledger/hive.go/ierrors v0.0.0-20231128121006-331a9e522dfe h1:kVkjbdBANpA8tyu9RM4/GeyVoyRfGcb4LT96PqHTIWc= -github.com/iotaledger/hive.go/ierrors v0.0.0-20231128121006-331a9e522dfe/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8= -github.com/iotaledger/hive.go/kvstore v0.0.0-20231127134220-90b88e35bdb2 h1:b+AHpClIb7YAjpXgRHCsr+DRdBMuN4Q6k/wpFmT1wok= -github.com/iotaledger/hive.go/kvstore v0.0.0-20231127134220-90b88e35bdb2/go.mod h1:ytfKoHr/nF8u0y0G4mamfG0yjFtJiJVk0kgjnPOtsSY= -github.com/iotaledger/hive.go/lo v0.0.0-20231128121006-331a9e522dfe h1:dQBQ+ZOVwC6KJxTABHMMHjJto70gNU5Cn4dXeJp5xmM= -github.com/iotaledger/hive.go/lo v0.0.0-20231128121006-331a9e522dfe/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg= -github.com/iotaledger/hive.go/log v0.0.0-20231127134220-90b88e35bdb2 h1:3B6UFIJ+IJEiGmUbLt5+Zokv0i8RUs70IuSR9sB60DA= -github.com/iotaledger/hive.go/log v0.0.0-20231127134220-90b88e35bdb2/go.mod h1:vzO4/wRkEJDEZb/9fD10oKU9k1bj4qLir2Uhl5U1FkM= -github.com/iotaledger/hive.go/logger v0.0.0-20231127134220-90b88e35bdb2 h1:1r4fY+R9p2q5CzEkiXMuFr/UCM8RX3yPUllXkjm5/Fk= -github.com/iotaledger/hive.go/logger v0.0.0-20231127134220-90b88e35bdb2/go.mod h1:w1psHM2MuKsen1WdsPKrpqElYH7ZOQ+YdQIgJZg4HTo= -github.com/iotaledger/hive.go/runtime v0.0.0-20231128121006-331a9e522dfe h1:jewR0RJ7oTGWjzhTROdIwhMeBH4//frUHizKs/6Em+s= -github.com/iotaledger/hive.go/runtime v0.0.0-20231128121006-331a9e522dfe/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q= -github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231127134220-90b88e35bdb2 h1:vTx/tPH+//CQcDjdC8DZv3s6x9KCqAfTqn3VTjYUUlw= -github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231127134220-90b88e35bdb2/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8= -github.com/iotaledger/hive.go/stringify v0.0.0-20231128121006-331a9e522dfe h1:RcFUqhnJ+86+sA0XMrZ0q+086ULrdWQkWrjUt2OnJK4= -github.com/iotaledger/hive.go/stringify v0.0.0-20231128121006-331a9e522dfe/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs= +github.com/iotaledger/hive.go/ads v0.0.0-20231130155327-398db92f09a3 h1:VLm18jYzB0wFceCXIfsbKzTwl3TDvCgRp4wJ/xMgXWM= +github.com/iotaledger/hive.go/ads v0.0.0-20231130155327-398db92f09a3/go.mod h1:gbUvr01B5ha530GnNm8K2OsHXOd2BtzBYOMxyTX3iDg= +github.com/iotaledger/hive.go/app v0.0.0-20231130155327-398db92f09a3 h1:O7okRQP8g8a9gvUDG/lzZ47MgaSGqbOqr98rvkT+NFE= +github.com/iotaledger/hive.go/app v0.0.0-20231130155327-398db92f09a3/go.mod h1:+riYmeLApkLlj4+EpuJpEJAsj/KGfD7cqLGy7oTsPOM= +github.com/iotaledger/hive.go/constraints v0.0.0-20231130155327-398db92f09a3 h1:qJe/BR6FpAM2tsoJBTbLSeQERjnxidNBZuJasvYhR7s= +github.com/iotaledger/hive.go/constraints v0.0.0-20231130155327-398db92f09a3/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s= +github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231130155327-398db92f09a3 
h1:MsvSJAMWne3QUuU6Vo0jeVEt91foSZiuEM4xEyV79X4= +github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231130155327-398db92f09a3/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko= +github.com/iotaledger/hive.go/crypto v0.0.0-20231130155327-398db92f09a3 h1:MNkwHDOeIUb62KbbsSCKwiWYHdzJM+6sNZ45AVhPmiA= +github.com/iotaledger/hive.go/crypto v0.0.0-20231130155327-398db92f09a3/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI= +github.com/iotaledger/hive.go/ds v0.0.0-20231130155327-398db92f09a3 h1:gWSJjaIIRvYG778ITT4V9MVqxEuGy1ohE10Awx6HiRg= +github.com/iotaledger/hive.go/ds v0.0.0-20231130155327-398db92f09a3/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc= +github.com/iotaledger/hive.go/ierrors v0.0.0-20231130155327-398db92f09a3 h1:4YjvERr9WQVwPjKirblj2grnwTzKBNLrT5KaHMZRFBQ= +github.com/iotaledger/hive.go/ierrors v0.0.0-20231130155327-398db92f09a3/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8= +github.com/iotaledger/hive.go/kvstore v0.0.0-20231130155327-398db92f09a3 h1:k9EO005mzPj+2atTByhUdco3rDtLx3mY7ZW2B/dLKOA= +github.com/iotaledger/hive.go/kvstore v0.0.0-20231130155327-398db92f09a3/go.mod h1:ytfKoHr/nF8u0y0G4mamfG0yjFtJiJVk0kgjnPOtsSY= +github.com/iotaledger/hive.go/lo v0.0.0-20231130155327-398db92f09a3 h1:6rnSXCbIyYUVol3ihMoMFvxSNJsTdXgf1A1kqfa7FC8= +github.com/iotaledger/hive.go/lo v0.0.0-20231130155327-398db92f09a3/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg= +github.com/iotaledger/hive.go/log v0.0.0-20231130155327-398db92f09a3 h1:+GEZHE+oCj4PkH2S3BS9BlyDz1kUlw3eWKtzhCKS3ds= +github.com/iotaledger/hive.go/log v0.0.0-20231130155327-398db92f09a3/go.mod h1:vzO4/wRkEJDEZb/9fD10oKU9k1bj4qLir2Uhl5U1FkM= +github.com/iotaledger/hive.go/logger v0.0.0-20231130155327-398db92f09a3 h1:FBRbpEkEtfmymy4XUda72feIP81pRcr8zjXMmHRF7Lc= +github.com/iotaledger/hive.go/logger v0.0.0-20231130155327-398db92f09a3/go.mod h1:w1psHM2MuKsen1WdsPKrpqElYH7ZOQ+YdQIgJZg4HTo= +github.com/iotaledger/hive.go/runtime v0.0.0-20231130155327-398db92f09a3 h1:ibIJnyoBAbNDlGpRXZpSIcLLb5vbTorCxQPr+dLeGO8= +github.com/iotaledger/hive.go/runtime v0.0.0-20231130155327-398db92f09a3/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q= +github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231130155327-398db92f09a3 h1:fF2gn/vkBZjLTbvf/vALLJ0wrPVUQH7Fg4cPd40RecY= +github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231130155327-398db92f09a3/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8= +github.com/iotaledger/hive.go/stringify v0.0.0-20231130155327-398db92f09a3 h1:n0D5PTekFS1Vuktamz9e21AQ7kNcspr0Kv3Ob9u48Q0= +github.com/iotaledger/hive.go/stringify v0.0.0-20231130155327-398db92f09a3/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs= github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231201123347-1c44b3f24221 h1:+ozrau44uPy2kYv2fuj2Wks8+VkXR62WB9zONOJgzdE= github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231201123347-1c44b3f24221/go.mod h1:6cLX3gnhP0WL+Q+mf3/rIqfACe5fWKVR8luPXWh2xiY= github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231201114846-3bb5c3fd5665 h1:XdhojOpZ0t0pJFyNO0zlBogSAUrhEI67eCpTC9H6sGM= diff --git a/pkg/network/protocols/core/protocol.go b/pkg/network/protocols/core/protocol.go index 6b6d21228..6be570113 100644 --- a/pkg/network/protocols/core/protocol.go +++ b/pkg/network/protocols/core/protocol.go @@ -5,6 +5,7 @@ import ( "google.golang.org/protobuf/proto" "github.com/iotaledger/hive.go/ds/bytesfilter" + "github.com/iotaledger/hive.go/ds/reactive" "github.com/iotaledger/hive.go/ds/shrinkingmap" "github.com/iotaledger/hive.go/ds/types" "github.com/iotaledger/hive.go/ierrors" 
@@ -32,6 +33,8 @@ type Protocol struct { requestedBlockHashes *shrinkingmap.ShrinkingMap[iotago.Identifier, types.Empty] requestedBlockHashesMutex syncutils.Mutex + + shutdown reactive.Event } func NewProtocol(network network.Endpoint, workerPool *workerpool.WorkerPool, apiProvider iotago.APIProvider, opts ...options.Option[Protocol]) (protocol *Protocol) { @@ -43,6 +46,7 @@ func NewProtocol(network network.Endpoint, workerPool *workerpool.WorkerPool, ap apiProvider: apiProvider, duplicateBlockBytesFilter: bytesfilter.New(iotago.IdentifierFromData, 10000), requestedBlockHashes: shrinkingmap.New[iotago.Identifier, types.Empty](shrinkingmap.WithShrinkingThresholdCount(1000)), + shutdown: reactive.NewEvent(), }, opts, func(p *Protocol) { network.RegisterProtocol(newPacket, p.handlePacket) }) @@ -70,7 +74,7 @@ func (p *Protocol) SendSlotCommitment(cm *model.Commitment, to ...peer.ID) { }}}, to...) } -func (p *Protocol) SendAttestations(cm *model.Commitment, attestations []*iotago.Attestation, merkleProof *merklehasher.Proof[iotago.Identifier], to ...peer.ID) { +func (p *Protocol) SendAttestations(cm *model.Commitment, attestations []*iotago.Attestation, merkleProof *merklehasher.Proof[iotago.Identifier], to ...peer.ID) error { byteBuffer := stream.NewByteBuffer() if err := stream.WriteCollection(byteBuffer, serializer.SeriLengthPrefixTypeAsUint32, func() (elementsCount int, err error) { @@ -82,7 +86,7 @@ func (p *Protocol) SendAttestations(cm *model.Commitment, attestations []*iotago return len(attestations), nil }); err != nil { - panic(err) + return err } p.network.Send(&nwmodels.Packet{Body: &nwmodels.Packet_Attestations{Attestations: &nwmodels.Attestations{ @@ -90,6 +94,8 @@ func (p *Protocol) SendAttestations(cm *model.Commitment, attestations []*iotago Attestations: lo.PanicOnErr(byteBuffer.Bytes()), MerkleProof: lo.PanicOnErr(merkleProof.Bytes()), }}}, to...) + + return nil } func (p *Protocol) RequestSlotCommitment(id iotago.CommitmentID, to ...peer.ID) { @@ -104,11 +110,53 @@ func (p *Protocol) RequestAttestations(id iotago.CommitmentID, to ...peer.ID) { }}}, to...) 
 }
 
+func (p *Protocol) OnBlockReceived(callback func(block *model.Block, src peer.ID)) (unsubscribe func()) {
+	return p.Events.BlockReceived.Hook(callback).Unhook
+}
+
+func (p *Protocol) OnBlockRequestReceived(callback func(blockID iotago.BlockID, src peer.ID)) (unsubscribe func()) {
+	return p.Events.BlockRequestReceived.Hook(callback).Unhook
+}
+
+func (p *Protocol) OnCommitmentReceived(callback func(commitment *model.Commitment, src peer.ID)) (unsubscribe func()) {
+	return p.Events.SlotCommitmentReceived.Hook(callback).Unhook
+}
+
+func (p *Protocol) OnCommitmentRequestReceived(callback func(commitmentID iotago.CommitmentID, src peer.ID)) (unsubscribe func()) {
+	return p.Events.SlotCommitmentRequestReceived.Hook(callback).Unhook
+}
+
+func (p *Protocol) OnAttestationsReceived(callback func(*model.Commitment, []*iotago.Attestation, *merklehasher.Proof[iotago.Identifier], peer.ID)) (unsubscribe func()) {
+	return p.Events.AttestationsReceived.Hook(callback).Unhook
+}
+
+func (p *Protocol) OnAttestationsRequestReceived(callback func(commitmentID iotago.CommitmentID, src peer.ID)) (unsubscribe func()) {
+	return p.Events.AttestationsRequestReceived.Hook(callback).Unhook
+}
+
+func (p *Protocol) OnWarpSyncResponseReceived(callback func(commitmentID iotago.CommitmentID, blockIDs map[iotago.CommitmentID]iotago.BlockIDs, proof *merklehasher.Proof[iotago.Identifier], transactionIDs iotago.TransactionIDs, mutationProof *merklehasher.Proof[iotago.Identifier], src peer.ID)) (unsubscribe func()) {
+	return p.Events.WarpSyncResponseReceived.Hook(callback).Unhook
+}
+
+func (p *Protocol) OnWarpSyncRequestReceived(callback func(commitmentID iotago.CommitmentID, src peer.ID)) (unsubscribe func()) {
+	return p.Events.WarpSyncRequestReceived.Hook(callback).Unhook
+}
+
+func (p *Protocol) OnError(callback func(err error, src peer.ID)) (unsubscribe func()) {
+	return p.Events.Error.Hook(callback).Unhook
+}
+
 func (p *Protocol) Shutdown() {
 	p.network.Shutdown()
 
 	p.workerPool.Shutdown()
 	p.workerPool.ShutdownComplete.Wait()
+
+	p.shutdown.Trigger()
+}
+
+func (p *Protocol) OnShutdown(callback func()) (unsubscribe func()) {
+	return p.shutdown.OnTrigger(callback)
 }
 
 func (p *Protocol) handlePacket(nbr peer.ID, packet proto.Message) (err error) {
diff --git a/pkg/protocol/block_dispatcher.go b/pkg/protocol/block_dispatcher.go
deleted file mode 100644
index 6988bd39d..000000000
--- a/pkg/protocol/block_dispatcher.go
+++ /dev/null
@@ -1,481 +0,0 @@
-package protocol
-
-import (
-	"sync/atomic"
-	"time"
-
-	"github.com/libp2p/go-libp2p/core/peer"
-
-	"github.com/iotaledger/hive.go/ads"
-	"github.com/iotaledger/hive.go/core/eventticker"
-	"github.com/iotaledger/hive.go/ds"
-	"github.com/iotaledger/hive.go/ds/reactive"
-	"github.com/iotaledger/hive.go/ds/types"
-	"github.com/iotaledger/hive.go/ierrors"
-	"github.com/iotaledger/hive.go/kvstore/mapdb"
-	"github.com/iotaledger/hive.go/runtime/options"
-	"github.com/iotaledger/hive.go/runtime/workerpool"
-	"github.com/iotaledger/iota-core/pkg/core/buffer"
-	"github.com/iotaledger/iota-core/pkg/model"
-	"github.com/iotaledger/iota-core/pkg/protocol/chainmanager"
-	"github.com/iotaledger/iota-core/pkg/protocol/engine"
-	iotago "github.com/iotaledger/iota.go/v4"
-	"github.com/iotaledger/iota.go/v4/merklehasher"
-)
-
-// BlockDispatcher is a component that is responsible for dispatching blocks to the correct engine instance or
-// triggering a warp sync.
-type BlockDispatcher struct {
-	// protocol is the protocol instance that is using this BlockDispatcher instance.
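The exported On* wrappers added to the core network Protocol above give consumers a stable subscription surface instead of reaching into p.Events directly (the component hooks near the top of this section already register callbacks through deps.Protocol.Network.OnBlockReceived and OnCommitmentReceived), and SendAttestations now reports serialization failures as an error instead of panicking. A minimal sketch of how a consumer might wire this up; p, cm, attestations and merkleProof are assumed to exist in the caller's scope, and the Component.LogDebugf calls are only placeholders:

// Batch the returned unsubscribe functions so they can be detached in one call.
unsubscribe := lo.Batch(
	p.OnBlockReceived(func(block *model.Block, src peer.ID) {
		Component.LogDebugf("BlockReceived: %s from %s", block.ID(), src)
	}),
	p.OnCommitmentReceived(func(commitment *model.Commitment, _ peer.ID) {
		Component.LogDebugf("SlotCommitmentReceived: %s", commitment.ID())
	}),
	p.OnError(func(err error, src peer.ID) {
		Component.LogDebugf("network error from %s: %s", src, err)
	}),
)

// Detach all callbacks once the protocol has shut down.
p.OnShutdown(unsubscribe)

// SendAttestations no longer panics if the attestations cannot be serialized, so callers handle the error.
if err := p.SendAttestations(cm, attestations, merkleProof); err != nil {
	Component.LogDebugf("failed to send attestations for %s: %s", cm.ID(), err)
}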
- protocol *Protocol - - // dispatchWorkers is the worker pool that is used to dispatch blocks to the correct engine instance. - dispatchWorkers *workerpool.WorkerPool - - // warpSyncWorkers is the worker pool that is used to process the WarpSync requests and responses. - warpSyncWorkers *workerpool.WorkerPool - - // unsolidCommitmentBlocks is a buffer that stores blocks that have an unsolid slot commitment. - unsolidCommitmentBlocks *buffer.UnsolidCommitmentBuffer[*types.Tuple[*model.Block, peer.ID]] - - // pendingWarpSyncRequests is the set of pending requests that are waiting to be processed. - pendingWarpSyncRequests *eventticker.EventTicker[iotago.SlotIndex, iotago.CommitmentID] - - // processedWarpSyncRequests is the set of processed requests. - processedWarpSyncRequests ds.Set[iotago.CommitmentID] - - // shutdownEvent is a reactive event that is triggered when the BlockDispatcher instance is stopped. - shutdownEvent reactive.Event -} - -// NewBlockDispatcher creates a new BlockDispatcher instance. -func NewBlockDispatcher(protocol *Protocol, opts ...options.Option[BlockDispatcher]) *BlockDispatcher { - return options.Apply(&BlockDispatcher{ - protocol: protocol, - dispatchWorkers: protocol.Workers.CreatePool("BlockDispatcher.Dispatch", workerpool.WithCancelPendingTasksOnShutdown(true)), - warpSyncWorkers: protocol.Workers.CreatePool("BlockDispatcher.WarpSync", workerpool.WithWorkerCount(1), workerpool.WithCancelPendingTasksOnShutdown(true)), - unsolidCommitmentBlocks: buffer.NewUnsolidCommitmentBuffer[*types.Tuple[*model.Block, peer.ID]](20, 100), - pendingWarpSyncRequests: eventticker.New[iotago.SlotIndex, iotago.CommitmentID](eventticker.RetryInterval[iotago.SlotIndex, iotago.CommitmentID](WarpSyncRetryInterval)), - processedWarpSyncRequests: ds.NewSet[iotago.CommitmentID](), - shutdownEvent: reactive.NewEvent(), - }, opts, func(b *BlockDispatcher) { - protocol.HookConstructed(b.initEngineMonitoring) - protocol.HookInitialized(b.initNetworkConnection) - protocol.HookShutdown(b.shutdown) - }) -} - -// Dispatch dispatches the given block to the correct engine instance. -func (b *BlockDispatcher) Dispatch(block *model.Block, src peer.ID) error { - slotCommitment := b.protocol.ChainManager.LoadCommitmentOrRequestMissing(block.ProtocolBlock().Header.SlotCommitmentID) - if !slotCommitment.SolidEvent().WasTriggered() { - if !b.unsolidCommitmentBlocks.Add(slotCommitment.ID(), types.NewTuple(block, src)) { - return ierrors.Errorf("failed to add block %s to unsolid commitment buffer", block.ID()) - } - - return ierrors.Errorf("failed to dispatch block %s: slot commitment %s is not solid", block.ID(), slotCommitment.ID()) - } - - matchingEngineFound := false - for _, e := range []*engine.Engine{b.protocol.MainEngineInstance(), b.protocol.CandidateEngineInstance()} { - if e != nil { - // The engine is locked while it's being reset, so that no new blocks enter the dataflow, here we make sure that this process is exclusive - e.RLock() - - if !e.WasShutdown() && e.ChainID() == slotCommitment.Chain().ForkingPoint.ID() || e.BlockRequester.HasTicker(block.ID()) { - if b.inSyncWindow(e, block) { - e.ProcessBlockFromPeer(block, src) - } else { - // Stick too new blocks into the unsolid commitment buffer so that they can be dispatched once the - // engine instance is in sync (mostly needed for tests). 
- if !b.unsolidCommitmentBlocks.Add(slotCommitment.ID(), types.NewTuple(block, src)) { - e.RUnlock() - - return ierrors.Errorf("failed to add block %s to unsolid commitment buffer", block.ID()) - } - } - - matchingEngineFound = true - } - - e.RUnlock() - } - } - - if !matchingEngineFound { - return ierrors.Errorf("failed to dispatch block %s: no matching engine found", block.ID()) - } - - return nil -} - -// initEngineMonitoring initializes the automatic monitoring of the engine instances. -func (b *BlockDispatcher) initEngineMonitoring() { - b.monitorLatestEngineCommitment(b.protocol.MainEngineInstance()) - - b.protocol.EngineManager.OnEngineCreated(b.monitorLatestEngineCommitment) - - b.protocol.Events.ChainManager.CommitmentPublished.Hook(func(chainCommitment *chainmanager.ChainCommitment) { - // as soon as a commitment is solid, it's chain is known and it can be dispatched - chainCommitment.SolidEvent().OnTrigger(func() { - b.runTask(func() { - b.injectUnsolidCommitmentBlocks(chainCommitment.Commitment().ID()) - }, b.dispatchWorkers) - - b.runTask(func() { - b.warpSyncIfNecessary(b.targetEngine(chainCommitment), chainCommitment) - }, b.warpSyncWorkers) - }) - }) - - b.protocol.Events.Engine.Notarization.LatestCommitmentUpdated.Hook(func(commitment *model.Commitment) { - b.runTask(func() { - b.injectUnsolidCommitmentBlocks(commitment.ID()) - }, b.dispatchWorkers) - }) - - b.protocol.Events.Engine.SlotGadget.SlotFinalized.Hook(b.evict) -} - -// initNetworkConnection initializes the network connection of the BlockDispatcher instance. -func (b *BlockDispatcher) initNetworkConnection() { - b.protocol.Events.Engine.BlockRequester.Tick.Hook(func(blockID iotago.BlockID) { - b.runTask(func() { - b.protocol.networkProtocol.RequestBlock(blockID) - }, b.dispatchWorkers) - }) - - b.pendingWarpSyncRequests.Events.Tick.Hook(func(id iotago.CommitmentID) { - b.runTask(func() { - b.protocol.networkProtocol.SendWarpSyncRequest(id) - }, b.dispatchWorkers) - }) - - b.protocol.Events.Network.BlockReceived.Hook(func(block *model.Block, src peer.ID) { - b.runTask(func() { - b.protocol.HandleError(b.Dispatch(block, src)) - }, b.dispatchWorkers) - }) - - b.protocol.Events.Network.WarpSyncRequestReceived.Hook(func(commitmentID iotago.CommitmentID, src peer.ID) { - b.runTask(func() { - b.protocol.HandleError(b.processWarpSyncRequest(commitmentID, src)) - }, b.warpSyncWorkers) - }) - - b.protocol.Events.Network.WarpSyncResponseReceived.Hook(func(commitmentID iotago.CommitmentID, blockIDsBySlotCommitmentID map[iotago.CommitmentID]iotago.BlockIDs, tangleMerkleProof *merklehasher.Proof[iotago.Identifier], transactionIDs iotago.TransactionIDs, mutationMerkleProof *merklehasher.Proof[iotago.Identifier], src peer.ID) { - b.runTask(func() { - b.protocol.HandleError(b.processWarpSyncResponse(commitmentID, blockIDsBySlotCommitmentID, tangleMerkleProof, transactionIDs, mutationMerkleProof, src)) - }, b.warpSyncWorkers) - }) -} - -// processWarpSyncRequest processes a WarpSync request. 
-func (b *BlockDispatcher) processWarpSyncRequest(commitmentID iotago.CommitmentID, src peer.ID) error { - // TODO: check if the peer is allowed to request the warp sync - - committedSlot, err := b.protocol.MainEngineInstance().CommittedSlot(commitmentID) - if err != nil { - return ierrors.Wrapf(err, "failed to get slot %d (not committed yet)", commitmentID.Slot()) - } - - commitment, err := committedSlot.Commitment() - if err != nil { - return ierrors.Wrapf(err, "failed to get commitment from slot %d", commitmentID.Slot()) - } else if commitment.ID() != commitmentID { - return ierrors.Wrapf(err, "commitment ID mismatch: %s != %s", commitment.ID(), commitmentID) - } - - blocksIDsByCommitmentID, err := committedSlot.BlocksIDsBySlotCommitmentID() - if err != nil { - return ierrors.Wrapf(err, "failed to get block IDs from slot %d", commitmentID.Slot()) - } - - transactionIDs, err := committedSlot.TransactionIDs() - if err != nil { - return ierrors.Wrapf(err, "failed to get transaction IDs from slot %d", commitmentID.Slot()) - } - - roots, err := committedSlot.Roots() - if err != nil { - return ierrors.Wrapf(err, "failed to get roots from slot %d", commitmentID.Slot()) - } - - b.protocol.networkProtocol.SendWarpSyncResponse(commitmentID, blocksIDsByCommitmentID, roots.TangleProof(), transactionIDs, roots.MutationProof(), src) - - return nil -} - -// processWarpSyncResponse processes a WarpSync response. -func (b *BlockDispatcher) processWarpSyncResponse(commitmentID iotago.CommitmentID, blockIDsBySlotCommitmentID map[iotago.CommitmentID]iotago.BlockIDs, tangleMerkleProof *merklehasher.Proof[iotago.Identifier], transactionIDs iotago.TransactionIDs, mutationMerkleProof *merklehasher.Proof[iotago.Identifier], _ peer.ID) error { - if b.processedWarpSyncRequests.Has(commitmentID) { - return nil - } - - // First we make sure that the commitment, the provided blockIDs with tangle proof and the provided transactionIDs with mutation proof are valid. - chainCommitment, exists := b.protocol.ChainManager.Commitment(commitmentID) - if !exists { - return ierrors.Errorf("failed to get chain commitment for %s", commitmentID) - } - - targetEngine := b.targetEngine(chainCommitment) - if targetEngine == nil { - return ierrors.Errorf("failed to get target engine for %s", commitmentID) - } - - // Make sure that already evicted commitments are not processed. This might happen if there's a lot of slots to process - // and old responses are still in the task queue. - if loadedCommitment, err := targetEngine.Storage.Commitments().Load(commitmentID.Slot()); err == nil && loadedCommitment.ID() == commitmentID { - return nil - } - - // Flatten all blockIDs into a single slice. - var blockIDs iotago.BlockIDs - for _, ids := range blockIDsBySlotCommitmentID { - blockIDs = append(blockIDs, ids...) 
- } - - acceptedBlocks := ads.NewSet[iotago.Identifier, iotago.BlockID]( - mapdb.NewMapDB(), - iotago.Identifier.Bytes, - iotago.IdentifierFromBytes, - iotago.BlockID.Bytes, - iotago.BlockIDFromBytes, - ) - - for _, blockID := range blockIDs { - _ = acceptedBlocks.Add(blockID) // a mapdb can newer return an error - } - - if !iotago.VerifyProof(tangleMerkleProof, acceptedBlocks.Root(), chainCommitment.Commitment().RootsID()) { - return ierrors.Errorf("failed to verify tangle merkle proof for %s", commitmentID) - } - - acceptedTransactionIDs := ads.NewSet[iotago.Identifier, iotago.TransactionID]( - mapdb.NewMapDB(), - iotago.Identifier.Bytes, - iotago.IdentifierFromBytes, - iotago.TransactionID.Bytes, - iotago.TransactionIDFromBytes, - ) - - for _, transactionID := range transactionIDs { - _ = acceptedTransactionIDs.Add(transactionID) // a mapdb can never return an error - } - - if !iotago.VerifyProof(mutationMerkleProof, acceptedTransactionIDs.Root(), chainCommitment.Commitment().RootsID()) { - return ierrors.Errorf("failed to verify mutation merkle proof for %s", commitmentID) - } - - b.pendingWarpSyncRequests.StopTicker(commitmentID) - - b.processedWarpSyncRequests.Add(commitmentID) - - // make sure the engine is clean and requires a warp-sync before we start processing the blocks - if targetEngine.Workers.WaitChildren(); targetEngine.Storage.Settings().LatestCommitment().ID().Slot() > commitmentID.Slot() { - return nil - } - targetEngine.Reset() - - // Once all blocks are booked and their weight propagated we - // 1. Mark all transactions as accepted - // 2. Mark all blocks as accepted - // 3. Force commitment of the slot - totalBlocks := uint32(len(blockIDs)) - var bookedBlocks atomic.Uint32 - var notarizedBlocks atomic.Uint32 - - forceCommitmentFunc := func() { - // 3. Force commitment of the slot - producedCommitment, err := targetEngine.Notarization.ForceCommit(commitmentID.Slot()) - if err != nil { - b.protocol.HandleError(err) - return - } - - // 4. Verify that the produced commitment is the same as the initially requested one - if producedCommitment.ID() != commitmentID { - b.protocol.HandleError(ierrors.Errorf("producedCommitment ID mismatch: %s != %s", producedCommitment.ID(), commitmentID)) - return - } - - // 5. We add all blocks as root blocks. We can only do it after the commitment of the slot because otherwise - // confirmation of the blocks can't be properly propagated (as it skips propagation to root blocks). - for slotCommitmentID, blockIDsForCommitment := range blockIDsBySlotCommitmentID { - for _, blockID := range blockIDsForCommitment { - // We need to make sure that we add all blocks as root blocks because we don't know which blocks are root blocks without - // blocks from future slots. We're committing the current slot which then leads to the eviction of the blocks from the - // block cache and thus if not root blocks no block in the next slot can become solid. - targetEngine.EvictionState.AddRootBlock(blockID, slotCommitmentID) - } - } - } - - blockBookedFunc := func(_ bool, _ bool) { - if bookedBlocks.Add(1) != totalBlocks { - return - } - - // 1. Mark all transactions as accepted - for _, transactionID := range transactionIDs { - targetEngine.Ledger.SpendDAG().SetAccepted(transactionID) - } - - // 2. Mark all blocks as accepted - for _, blockID := range blockIDs { - block, exists := targetEngine.BlockCache.Block(blockID) - if !exists { // this should never happen as we just booked these blocks in this slot. 
- continue - } - - targetEngine.BlockGadget.SetAccepted(block) - - block.Notarized().OnUpdate(func(_ bool, _ bool) { - // Wait for all blocks to be notarized before forcing the commitment of the slot. - if notarizedBlocks.Add(1) != totalBlocks { - return - } - - forceCommitmentFunc() - }) - } - } - - if len(blockIDs) == 0 { - forceCommitmentFunc() - - return nil - } - - for _, blockIDsForCommitment := range blockIDsBySlotCommitmentID { - for _, blockID := range blockIDsForCommitment { - block, _ := targetEngine.BlockDAG.GetOrRequestBlock(blockID) - if block == nil { // this should never happen as we're requesting the blocks for this slot so it can't be evicted. - b.protocol.HandleError(ierrors.Errorf("failed to get block %s", blockID)) - continue - } - - block.WeightPropagated().OnUpdate(blockBookedFunc) - } - } - - return nil -} - -// inSyncWindow returns whether the given block is within the sync window of the given engine instance. -// -// We limit the amount of slots ahead of the latest commitment that we forward to the engine instance to prevent memory -// exhaustion while syncing. -func (b *BlockDispatcher) inSyncWindow(engine *engine.Engine, block *model.Block) bool { - if engine.BlockRequester.HasTicker(block.ID()) { - return true - } - - slotCommitmentID := block.ProtocolBlock().Header.SlotCommitmentID - latestCommitmentSlot := engine.Storage.Settings().LatestCommitment().Slot() - maxCommittableAge := engine.APIForSlot(slotCommitmentID.Slot()).ProtocolParameters().MaxCommittableAge() - - return block.ID().Slot() <= latestCommitmentSlot+maxCommittableAge -} - -// warpSyncIfNecessary triggers a warp sync if necessary. -func (b *BlockDispatcher) warpSyncIfNecessary(e *engine.Engine, chainCommitment *chainmanager.ChainCommitment) { - if e == nil || e.WasShutdown() { - return - } - - chain := chainCommitment.Chain() - latestCommitmentSlot := e.Storage.Settings().LatestCommitment().Slot() - - // We don't want to warpsync if the latest commitment of the engine is very close to the latest commitment of the - // chain as the node might just be about to commit it itself. This is important for tests, as we always need to issue - // 2 slots ahead of the latest commitment of the chain to make sure that the other nodes can warp sync. - if latestCommitmentSlot+1 >= chain.LatestCommitment().Commitment().Slot() { - return - } - - for slotToWarpSync := latestCommitmentSlot + 1; slotToWarpSync <= latestCommitmentSlot+1; slotToWarpSync++ { - commitmentToSync := chain.Commitment(slotToWarpSync) - if commitmentToSync == nil { - break - } - - if !b.processedWarpSyncRequests.Has(commitmentToSync.ID()) { - b.pendingWarpSyncRequests.StartTicker(commitmentToSync.ID()) - } - } -} - -// injectUnsolidCommitmentBlocks injects the unsolid blocks for the given commitment ID into the correct engine -// instance. -func (b *BlockDispatcher) injectUnsolidCommitmentBlocks(id iotago.CommitmentID) { - for _, tuple := range b.unsolidCommitmentBlocks.GetValues(id) { - b.protocol.HandleError(b.Dispatch(tuple.A, tuple.B)) - } -} - -// targetEngine returns the engine instance that should be used for the given commitment. 
-func (b *BlockDispatcher) targetEngine(commitment *chainmanager.ChainCommitment) *engine.Engine { - if chain := commitment.Chain(); chain != nil { - chainID := chain.ForkingPoint.Commitment().ID() - - if engine := b.protocol.MainEngineInstance(); engine.ChainID() == chainID { - return engine - } - - if engine := b.protocol.CandidateEngineInstance(); engine != nil && engine.ChainID() == chainID { - return engine - } - } - - return nil -} - -// monitorLatestEngineCommitment monitors the latest commitment of the given engine instance and triggers a warp sync if -// necessary. -func (b *BlockDispatcher) monitorLatestEngineCommitment(engineInstance *engine.Engine) { - unsubscribe := engineInstance.Events.Notarization.LatestCommitmentUpdated.Hook(func(commitment *model.Commitment) { - if latestEngineCommitment, exists := b.protocol.ChainManager.Commitment(commitment.ID()); exists { - b.processedWarpSyncRequests.Delete(commitment.ID()) - - b.warpSyncIfNecessary(engineInstance, latestEngineCommitment) - } - }).Unhook - - engineInstance.HookStopped(unsubscribe) -} - -// evict evicts all elements from the unsolid commitment blocks buffer and the pending warp sync requests that are older -// than the given index. -func (b *BlockDispatcher) evict(slot iotago.SlotIndex) { - b.pendingWarpSyncRequests.EvictUntil(slot) - b.unsolidCommitmentBlocks.EvictUntil(slot) -} - -// shutdown shuts down the BlockDispatcher instance. -func (b *BlockDispatcher) shutdown() { - b.shutdownEvent.Compute(func(isShutdown bool) bool { - if !isShutdown { - b.pendingWarpSyncRequests.Shutdown() - - b.dispatchWorkers.Shutdown().ShutdownComplete.Wait() - b.warpSyncWorkers.Shutdown().ShutdownComplete.Wait() - } - - return true - }) -} - -// runTask runs the given task on the given worker pool if the BlockDispatcher instance is not shutdown. -func (b *BlockDispatcher) runTask(task func(), pool *workerpool.WorkerPool) { - b.shutdownEvent.Compute(func(isShutdown bool) bool { - if !isShutdown { - pool.Submit(task) - } - - return isShutdown - }) -} - -// WarpSyncRetryInterval is the interval in which a warp sync request is retried. -const WarpSyncRetryInterval = 1 * time.Minute diff --git a/pkg/protocol/candidate_engine.go b/pkg/protocol/candidate_engine.go deleted file mode 100644 index a8606f34b..000000000 --- a/pkg/protocol/candidate_engine.go +++ /dev/null @@ -1,8 +0,0 @@ -package protocol - -import "github.com/iotaledger/iota-core/pkg/protocol/engine" - -type candidateEngine struct { - engine *engine.Engine - cleanupFunc func() -} diff --git a/pkg/protocol/chain.go b/pkg/protocol/chain.go new file mode 100644 index 000000000..3ec93212b --- /dev/null +++ b/pkg/protocol/chain.go @@ -0,0 +1,373 @@ +package protocol + +import ( + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/iotaledger/hive.go/ds/reactive" + "github.com/iotaledger/hive.go/ds/shrinkingmap" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/log" + "github.com/iotaledger/iota-core/pkg/model" + "github.com/iotaledger/iota-core/pkg/protocol/engine" + iotago "github.com/iotaledger/iota.go/v4" +) + +// Chain represents a chain of commitments. +type Chain struct { + // ForkingPoint contains the first commitment of this chain. + ForkingPoint reactive.Variable[*Commitment] + + // ParentChain contains the chain that this chain forked from. + ParentChain reactive.Variable[*Chain] + + // ChildChains contains the set of all chains that forked from this chain. 
+ ChildChains reactive.Set[*Chain] + + // LatestCommitment contains the latest commitment of this chain. + LatestCommitment reactive.Variable[*Commitment] + + // LatestAttestedCommitment contains the latest commitment of this chain for which attestations were received. + LatestAttestedCommitment reactive.Variable[*Commitment] + + // LatestProducedCommitment contains the latest commitment of this chain that we produced ourselves by booking the + // corresponding blocks in the Engine. + LatestProducedCommitment reactive.Variable[*Commitment] + + // ClaimedWeight contains the claimed weight of this chain which is derived from the cumulative weight of the + // LatestCommitment. + ClaimedWeight reactive.Variable[uint64] + + // AttestedWeight contains the attested weight of this chain which is derived from the cumulative weight of all + // attestations up to the LatestAttestedCommitment. + AttestedWeight reactive.Variable[uint64] + + // VerifiedWeight contains the verified weight of this chain which is derived from the cumulative weight of the + // latest verified commitment. + VerifiedWeight reactive.Variable[uint64] + + // WarpSyncMode contains a flag that indicates whether this chain is in warp sync mode. + WarpSyncMode reactive.Variable[bool] + + // LatestSyncedSlot contains the latest commitment of this chain for which all blocks were booked. + LatestSyncedSlot reactive.Variable[iotago.SlotIndex] + + // OutOfSyncThreshold contains the slot at which the chain will consider itself to be out of sync and switch to warp + // sync mode. It is derived from the latest network slot minus two times the max committable age. + OutOfSyncThreshold reactive.Variable[iotago.SlotIndex] + + // RequestAttestations contains a flag that indicates whether this chain should verify the claimed weight by + // requesting attestations. + RequestAttestations reactive.Variable[bool] + + // StartEngine contains a flag that indicates whether this chain should verify the state by processing blocks in an + // engine. + StartEngine reactive.Variable[bool] + + // Engine contains the engine instance that is used to process blocks for this chain. + Engine reactive.Variable[*engine.Engine] + + // IsEvicted contains a flag that indicates whether this chain was evicted. + IsEvicted reactive.Event + + // chains contains a reference to the Chains instance that this chain belongs to. + chains *Chains + + // commitments contains the commitments that make up this chain. + commitments *shrinkingmap.ShrinkingMap[iotago.SlotIndex, *Commitment] + + // Logger embeds a logger that can be used to log messages emitted by this chain. + log.Logger +} + +// newChain creates a new chain instance. 
+func newChain(chains *Chains) *Chain { + c := &Chain{ + ForkingPoint: reactive.NewVariable[*Commitment](), + ParentChain: reactive.NewVariable[*Chain](), + ChildChains: reactive.NewSet[*Chain](), + LatestCommitment: reactive.NewVariable[*Commitment](), + LatestAttestedCommitment: reactive.NewVariable[*Commitment](), + LatestProducedCommitment: reactive.NewVariable[*Commitment](), + ClaimedWeight: reactive.NewVariable[uint64](), + AttestedWeight: reactive.NewVariable[uint64](), + VerifiedWeight: reactive.NewVariable[uint64](), + WarpSyncMode: reactive.NewVariable[bool]().Init(true), + LatestSyncedSlot: reactive.NewVariable[iotago.SlotIndex](), + OutOfSyncThreshold: reactive.NewVariable[iotago.SlotIndex](), + RequestAttestations: reactive.NewVariable[bool](), + StartEngine: reactive.NewVariable[bool](), + Engine: reactive.NewVariable[*engine.Engine](), + IsEvicted: reactive.NewEvent(), + + chains: chains, + commitments: shrinkingmap.New[iotago.SlotIndex, *Commitment](), + } + + shutdown := lo.Batch( + c.initLogger(), + c.initDerivedProperties(), + ) + + c.IsEvicted.OnTrigger(shutdown) + + return c +} + +// WithInitializedEngine is a reactive selector that executes the given callback once an Engine for this chain was +// initialized. +func (c *Chain) WithInitializedEngine(callback func(engineInstance *engine.Engine) (shutdown func())) (shutdown func()) { + return c.Engine.WithNonEmptyValue(func(engineInstance *engine.Engine) (shutdown func()) { + return engineInstance.Initialized.WithNonEmptyValue(func(_ bool) (shutdown func()) { + return callback(engineInstance) + }) + }) +} + +// LastCommonSlot returns the slot of the last commitment that is common to this chain and its parent chain. +func (c *Chain) LastCommonSlot() iotago.SlotIndex { + if forkingPoint := c.ForkingPoint.Get(); forkingPoint != nil { + if isRoot := forkingPoint.IsRoot.Get(); isRoot { + return forkingPoint.Slot() + } + + return forkingPoint.Slot() - 1 + } + + panic("chain has no forking point") +} + +// DispatchBlock dispatches the given block to the chain and its children (it is allowed to call this method on a nil +// receiver, in which case it will be a no-op with a return value of false). +func (c *Chain) DispatchBlock(block *model.Block, src peer.ID) (dispatched bool) { + if c == nil { + return false + } + + dispatched = c.dispatchBlockToSpawnedEngine(block, src) + + for _, childChain := range c.ChildChains.ToSlice() { + dispatched = childChain.DispatchBlock(block, src) || dispatched + } + + return dispatched +} + +// Commitment returns the Commitment for the given slot from the perspective of this chain. +func (c *Chain) Commitment(slot iotago.SlotIndex) (commitment *Commitment, exists bool) { + for currentChain := c; currentChain != nil; { + switch forkingPoint := currentChain.ForkingPoint.Get(); { + case forkingPoint.Slot() == slot: + return forkingPoint, true + case slot > forkingPoint.Slot(): + return currentChain.commitments.Get(slot) + default: + currentChain = c.ParentChain.Get() + } + } + + return nil, false +} + +// LatestEngine returns the latest engine instance that was spawned by the chain itself or one of its ancestors. +func (c *Chain) LatestEngine() *engine.Engine { + currentChain, currentEngine := c, c.Engine.Get() + for ; currentEngine == nil; currentEngine = currentChain.Engine.Get() { + if currentChain = c.ParentChain.Get(); currentChain == nil { + return nil + } + } + + return currentEngine +} + +// initLogger initializes the Logger of this chain. 
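Because the Chain type above exposes its state exclusively through reactive variables, downstream components observe it by registering callbacks instead of polling. A minimal sketch, assuming chain is an existing *Chain, that OnUpdate follows the (previousValue, newValue) callback shape used elsewhere in this diff, and that the LogDebugf calls stand in for real consumer logic:

// React to every commitment that is appended to this chain.
chain.LatestCommitment.OnUpdate(func(_ *Commitment, latestCommitment *Commitment) {
	if latestCommitment != nil {
		chain.LogDebugf("latest commitment changed to %s", latestCommitment.LogName())
	}
})

// Observe transitions in and out of warp sync mode.
chain.WarpSyncMode.OnUpdate(func(_ bool, warpSyncMode bool) {
	chain.LogDebugf("warp sync mode enabled: %t", warpSyncMode)
})

// Run engine-bound logic only while the chain has a fully initialized engine; the returned
// teardown function detaches everything again if the engine is swapped out or the chain is evicted.
teardown := chain.WithInitializedEngine(func(engineInstance *engine.Engine) (shutdown func()) {
	chain.LogDebugf("engine %s is ready", engineInstance.LogName())

	return func() { /* release engine-bound resources here */ }
})
defer teardown()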
+func (c *Chain) initLogger() (shutdown func()) { + c.Logger, shutdown = c.chains.NewEntityLogger("") + + return lo.Batch( + c.WarpSyncMode.LogUpdates(c, log.LevelTrace, "WarpSyncMode"), + c.LatestSyncedSlot.LogUpdates(c, log.LevelTrace, "LatestSyncedSlot"), + c.OutOfSyncThreshold.LogUpdates(c, log.LevelTrace, "OutOfSyncThreshold"), + c.ForkingPoint.LogUpdates(c, log.LevelTrace, "ForkingPoint", (*Commitment).LogName), + c.ClaimedWeight.LogUpdates(c, log.LevelTrace, "ClaimedWeight"), + c.AttestedWeight.LogUpdates(c, log.LevelTrace, "AttestedWeight"), + c.VerifiedWeight.LogUpdates(c, log.LevelTrace, "VerifiedWeight"), + c.LatestCommitment.LogUpdates(c, log.LevelTrace, "LatestCommitment", (*Commitment).LogName), + c.LatestAttestedCommitment.LogUpdates(c, log.LevelTrace, "LatestAttestedCommitment", (*Commitment).LogName), + c.LatestProducedCommitment.LogUpdates(c, log.LevelDebug, "LatestProducedCommitment", (*Commitment).LogName), + c.RequestAttestations.LogUpdates(c, log.LevelTrace, "RequestAttestations"), + c.StartEngine.LogUpdates(c, log.LevelDebug, "StartEngine"), + c.Engine.LogUpdates(c, log.LevelTrace, "Engine", (*engine.Engine).LogName), + c.IsEvicted.LogUpdates(c, log.LevelTrace, "IsEvicted"), + + shutdown, + ) +} + +// initDerivedProperties initializes the behavior of this chain by setting up the relations between its properties. +func (c *Chain) initDerivedProperties() (shutdown func()) { + return lo.Batch( + c.deriveClaimedWeight(), + c.deriveVerifiedWeight(), + c.deriveLatestAttestedWeight(), + c.deriveWarpSyncMode(), + + c.ForkingPoint.WithValue(c.deriveParentChain), + c.ParentChain.WithNonEmptyValue(lo.Bind(c, (*Chain).deriveChildChains)), + c.Engine.WithNonEmptyValue(c.deriveOutOfSyncThreshold), + ) +} + +// deriveWarpSyncMode defines how a chain determines whether it is in warp sync mode or not. +func (c *Chain) deriveWarpSyncMode() func() { + return c.WarpSyncMode.DeriveValueFrom(reactive.NewDerivedVariable3(func(warpSyncMode bool, latestSyncedSlot iotago.SlotIndex, latestSeenSlot iotago.SlotIndex, outOfSyncThreshold iotago.SlotIndex) bool { + // if warp sync mode is enabled, keep it enabled until we have synced all slots + if warpSyncMode { + return latestSyncedSlot < latestSeenSlot + } + + // if warp sync mode is disabled, enable it only if we fall below the out of sync threshold + return latestSyncedSlot < outOfSyncThreshold + }, c.LatestSyncedSlot, c.chains.LatestSeenSlot, c.OutOfSyncThreshold, c.WarpSyncMode.Get())) +} + +// deriveClaimedWeight defines how a chain determines its claimed weight (by setting the cumulative weight of the +// latest commitment). +func (c *Chain) deriveClaimedWeight() (shutdown func()) { + return c.ClaimedWeight.DeriveValueFrom(reactive.NewDerivedVariable(func(_ uint64, latestCommitment *Commitment) uint64 { + if latestCommitment == nil { + return 0 + } + + return latestCommitment.CumulativeWeight() + }, c.LatestCommitment)) +} + +// deriveLatestAttestedWeight defines how a chain determines its attested weight (by inheriting the cumulative attested +// weight of the latest attested commitment). It uses inheritance instead of simply setting the value as the cumulative +// attested weight can change over time depending on the attestations that are received. 
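+// (The AttestedWeight therefore tracks the latest attested commitment's CumulativeAttestedWeight reactively instead of copying a one-off snapshot.)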
+func (c *Chain) deriveLatestAttestedWeight() func() { + return c.LatestAttestedCommitment.WithNonEmptyValue(func(latestAttestedCommitment *Commitment) (shutdown func()) { + return c.AttestedWeight.InheritFrom(latestAttestedCommitment.CumulativeAttestedWeight) + }) +} + +// deriveVerifiedWeight defines how a chain determines its verified weight (by setting the cumulative weight of the +// latest produced commitment). +func (c *Chain) deriveVerifiedWeight() func() { + return c.VerifiedWeight.DeriveValueFrom(reactive.NewDerivedVariable(func(_ uint64, latestProducedCommitment *Commitment) uint64 { + if latestProducedCommitment == nil { + return 0 + } + + return latestProducedCommitment.CumulativeWeight() + }, c.LatestProducedCommitment)) +} + +// deriveChildChains defines how a chain determines its ChildChains (by adding each child to the set). +func (c *Chain) deriveChildChains(child *Chain) func() { + c.ChildChains.Add(child) + + return func() { + c.ChildChains.Delete(child) + } +} + +// deriveParentChain defines how a chain determines its parent chain from its forking point (it inherits the Chain from +// the parent commitment of the forking point or nil if either of them is still unknown). +func (c *Chain) deriveParentChain(forkingPoint *Commitment) (shutdown func()) { + if forkingPoint != nil { + return forkingPoint.Parent.WithValue(func(parentCommitment *Commitment) (shutdown func()) { + if parentCommitment != nil { + return c.ParentChain.InheritFrom(parentCommitment.Chain) + } + + c.ParentChain.Set(nil) + + return nil + }) + } + + c.ParentChain.Set(nil) + + return nil +} + +// deriveOutOfSyncThreshold defines how a chain determines its "out of sync" threshold (the latest seen slot minus 2 +// times the max committable age or 0 if this would cause an overflow to the negative numbers). +func (c *Chain) deriveOutOfSyncThreshold(engineInstance *engine.Engine) func() { + return c.OutOfSyncThreshold.DeriveValueFrom(reactive.NewDerivedVariable(func(_ iotago.SlotIndex, latestSeenSlot iotago.SlotIndex) iotago.SlotIndex { + if outOfSyncOffset := 2 * engineInstance.LatestAPI().ProtocolParameters().MaxCommittableAge(); outOfSyncOffset < latestSeenSlot { + return latestSeenSlot - outOfSyncOffset + } + + return 0 + }, c.chains.LatestSeenSlot)) +} + +// addCommitment adds the given commitment to this chain. +func (c *Chain) addCommitment(newCommitment *Commitment) (shutdown func()) { + c.commitments.Set(newCommitment.Slot(), newCommitment) + + c.LatestCommitment.Set(newCommitment) + + return lo.Batch( + newCommitment.IsAttested.OnTrigger(func() { c.LatestAttestedCommitment.Set(newCommitment) }), + newCommitment.IsVerified.OnTrigger(func() { c.LatestProducedCommitment.Set(newCommitment) }), + newCommitment.IsSynced.OnTrigger(func() { c.LatestSyncedSlot.Set(newCommitment.Slot()) }), + ) +} + +// dispatchBlockToSpawnedEngine dispatches the given block to the spawned engine of this chain (if it exists). 
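+// Blocks at or below the engine's latest committed slot are ignored, and in warp sync mode only blocks that were explicitly requested for a known target commitment are dispatched.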
+func (c *Chain) dispatchBlockToSpawnedEngine(block *model.Block, src peer.ID) (dispatched bool) { + // abort if we do not have a spawned engine + engineInstance := c.Engine.Get() + if engineInstance == nil { + return false + } + + // abort if the target slot is below the latest commitment + issuingTime := block.ProtocolBlock().Header.IssuingTime + targetSlot := engineInstance.APIForTime(issuingTime).TimeProvider().SlotFromTime(issuingTime) + if targetSlot <= engineInstance.LatestCommitment.Get().Slot() { + return false + } + + // perform additional checks if we are in warp sync mode (only let blocks pass that we requested) + if c.WarpSyncMode.Get() { + // abort if the target commitment does not exist + targetCommitment, targetCommitmentExists := c.Commitment(targetSlot) + if !targetCommitmentExists { + return false + } + + // abort if the block is not part of the blocks to warp sync + blocksToWarpSync := targetCommitment.BlocksToWarpSync.Get() + if blocksToWarpSync == nil || !blocksToWarpSync.Has(block.ID()) { + return false + } + } + + // dispatch the block to the spawned engine if all previous checks passed + engineInstance.ProcessBlockFromPeer(block, src) + + return true +} + +// claimedWeight is a getter for the ClaimedWeight variable of this chain, which is internally used to be able to +// "address" the variable across multiple chains in a generic way. +func (c *Chain) claimedWeight() reactive.Variable[uint64] { + return c.ClaimedWeight +} + +// verifiedWeight is a getter for the VerifiedWeight variable of this chain, which is internally used to be able to +// "address" the variable across multiple chains in a generic way. +func (c *Chain) verifiedWeight() reactive.Variable[uint64] { + return c.VerifiedWeight +} + +// attestedWeight is a getter for the AttestedWeight variable of this chain, which is internally used to be able to +// "address" the variable across multiple chains in a generic way. 
+func (c *Chain) attestedWeight() reactive.Variable[uint64] { + return c.AttestedWeight +} diff --git a/pkg/protocol/chainmanager/chain.go b/pkg/protocol/chainmanager/chain.go deleted file mode 100644 index 7233b86ca..000000000 --- a/pkg/protocol/chainmanager/chain.go +++ /dev/null @@ -1,82 +0,0 @@ -package chainmanager - -import ( - "github.com/iotaledger/hive.go/ds/reactive" - "github.com/iotaledger/hive.go/ds/shrinkingmap" - "github.com/iotaledger/hive.go/lo" - "github.com/iotaledger/hive.go/runtime/syncutils" - "github.com/iotaledger/hive.go/stringify" - iotago "github.com/iotaledger/iota.go/v4" -) - -type Chain struct { - ForkingPoint *ChainCommitment - - latestCommitmentIndex iotago.SlotIndex - commitmentsByIndex *shrinkingmap.ShrinkingMap[iotago.SlotIndex, *ChainCommitment] - - syncutils.RWMutex -} - -func NewChain(forkingPoint *ChainCommitment) (fork *Chain) { - forkingPointIndex := forkingPoint.Commitment().Slot() - - c := &Chain{ - ForkingPoint: forkingPoint, - latestCommitmentIndex: forkingPointIndex, - commitmentsByIndex: shrinkingmap.New[iotago.SlotIndex, *ChainCommitment](), - } - - c.commitmentsByIndex.Set(forkingPointIndex, forkingPoint) - - return c -} - -func (c *Chain) SolidEvent() reactive.Event { - return c.ForkingPoint.SolidEvent() -} - -func (c *Chain) Commitment(index iotago.SlotIndex) (commitment *ChainCommitment) { - c.RLock() - defer c.RUnlock() - - return lo.Return1(c.commitmentsByIndex.Get(index)) -} - -func (c *Chain) Size() int { - c.RLock() - defer c.RUnlock() - - return c.commitmentsByIndex.Size() -} - -func (c *Chain) LatestCommitment() *ChainCommitment { - c.RLock() - defer c.RUnlock() - - return lo.Return1(c.commitmentsByIndex.Get(c.latestCommitmentIndex)) -} - -func (c *Chain) addCommitment(commitment *ChainCommitment) { - c.Lock() - defer c.Unlock() - - commitmentIndex := commitment.Commitment().Slot() - if commitmentIndex > c.latestCommitmentIndex { - c.latestCommitmentIndex = commitmentIndex - } - - c.commitmentsByIndex.Set(commitmentIndex, commitment) -} - -func (c *Chain) String() string { - c.RLock() - defer c.RUnlock() - - builder := stringify.NewStructBuilder("Chain", - stringify.NewStructField("ForkingPoint", c.ForkingPoint.id), - stringify.NewStructField("LatestCommitmentIndex", c.latestCommitmentIndex), - ) - - return builder.String() -} diff --git a/pkg/protocol/chainmanager/chaincommitment.go b/pkg/protocol/chainmanager/chaincommitment.go deleted file mode 100644 index 5f5003961..000000000 --- a/pkg/protocol/chainmanager/chaincommitment.go +++ /dev/null @@ -1,159 +0,0 @@ -package chainmanager - -import ( - "fmt" - - "github.com/iotaledger/hive.go/ds/reactive" - "github.com/iotaledger/hive.go/ds/shrinkingmap" - "github.com/iotaledger/hive.go/ierrors" - "github.com/iotaledger/hive.go/lo" - "github.com/iotaledger/hive.go/runtime/syncutils" - "github.com/iotaledger/hive.go/stringify" - "github.com/iotaledger/iota-core/pkg/model" - iotago "github.com/iotaledger/iota.go/v4" -) - -type ChainCommitment struct { - id iotago.CommitmentID - commitment *model.Commitment - - solid reactive.Event - mainChildID iotago.CommitmentID - children *shrinkingmap.ShrinkingMap[iotago.CommitmentID, *ChainCommitment] - chain *Chain - - mutex syncutils.RWMutex -} - -func NewChainCommitment(id iotago.CommitmentID) *ChainCommitment { - return &ChainCommitment{ - id: id, - solid: reactive.NewEvent(), - children: shrinkingmap.New[iotago.CommitmentID, *ChainCommitment](), - } -} - -func (c *ChainCommitment) ID() iotago.CommitmentID { - c.mutex.RLock() - defer 
c.mutex.RUnlock() - - return c.id -} - -func (c *ChainCommitment) Commitment() *model.Commitment { - c.mutex.RLock() - defer c.mutex.RUnlock() - - return c.commitment -} - -func (c *ChainCommitment) Children() []*ChainCommitment { - c.mutex.RLock() - defer c.mutex.RUnlock() - - return c.children.Values() -} - -func (c *ChainCommitment) Chain() (chain *Chain) { - c.mutex.RLock() - defer c.mutex.RUnlock() - - return c.chain -} - -func (c *ChainCommitment) SolidEvent() reactive.Event { - return c.solid -} - -func (c *ChainCommitment) PublishCommitment(commitment *model.Commitment) (published bool) { - c.mutex.Lock() - defer c.mutex.Unlock() - - if published = c.commitment == nil; published { - c.commitment = commitment - } - - return -} - -func (c *ChainCommitment) registerChild(child *ChainCommitment) (isSolid bool, chain *Chain, wasForked bool) { - c.mutex.Lock() - defer c.mutex.Unlock() - - if c.children.Size() == 0 { - c.mainChildID = child.ID() - } - - if c.children.Set(child.ID(), child); c.children.Size() > 1 { - return c.solid.Get(), NewChain(child), true - } - - return c.solid.Get(), c.chain, false -} - -func (c *ChainCommitment) deleteChild(child *ChainCommitment) { - c.mutex.Lock() - defer c.mutex.Unlock() - - c.children.Delete(child.ID()) -} - -func (c *ChainCommitment) mainChild() *ChainCommitment { - c.mutex.RLock() - defer c.mutex.RUnlock() - - return lo.Return1(c.children.Get(c.mainChildID)) -} - -func (c *ChainCommitment) setMainChild(commitment *ChainCommitment) error { - c.mutex.Lock() - defer c.mutex.Unlock() - - if !c.children.Has(commitment.ID()) { - return ierrors.Errorf("trying to set a main child %s before registering it as a child", commitment.ID()) - } - c.mainChildID = commitment.ID() - - return nil -} - -func (c *ChainCommitment) publishChain(chain *Chain) (wasPublished bool) { - c.mutex.Lock() - defer c.mutex.Unlock() - - if wasPublished = c.chain == nil; wasPublished { - c.chain = chain - } - - return -} - -func (c *ChainCommitment) replaceChain(chain *Chain) { - c.mutex.Lock() - defer c.mutex.Unlock() - - c.chain = chain -} - -func (c *ChainCommitment) String() string { - // Generate chainString before locking c.mutex to avoid potential deadlock due to locking ChainCommitment and - // Chain mutexes in different order across different goroutines. 
- chainString := c.Chain().String() - - c.mutex.RLock() - defer c.mutex.RUnlock() - - builder := stringify.NewStructBuilder("ChainCommitment", - stringify.NewStructField("ID", c.id), - stringify.NewStructField("Commitment", c.commitment.String()), - stringify.NewStructField("Solid", c.solid), - stringify.NewStructField("Chain", chainString), - stringify.NewStructField("MainChildID", c.mainChildID), - ) - - for index, child := range c.children.AsMap() { - builder.AddField(stringify.NewStructField(fmt.Sprintf("children%d", index), child.ID())) - } - - return builder.String() -} diff --git a/pkg/protocol/chainmanager/events.go b/pkg/protocol/chainmanager/events.go deleted file mode 100644 index 7b146504a..000000000 --- a/pkg/protocol/chainmanager/events.go +++ /dev/null @@ -1,25 +0,0 @@ -package chainmanager - -import ( - "github.com/iotaledger/hive.go/runtime/event" - iotago "github.com/iotaledger/iota.go/v4" -) - -type Events struct { - CommitmentPublished *event.Event1[*ChainCommitment] - CommitmentBelowRoot *event.Event1[iotago.CommitmentID] - ForkDetected *event.Event1[*Fork] - - RequestCommitment *event.Event1[iotago.CommitmentID] - - event.Group[Events, *Events] -} - -var NewEvents = event.CreateGroupConstructor(func() *Events { - return &Events{ - CommitmentPublished: event.New1[*ChainCommitment](), - CommitmentBelowRoot: event.New1[iotago.CommitmentID](), - ForkDetected: event.New1[*Fork](), - RequestCommitment: event.New1[iotago.CommitmentID](), - } -}) diff --git a/pkg/protocol/chainmanager/manager.go b/pkg/protocol/chainmanager/manager.go deleted file mode 100644 index e70b7b9dc..000000000 --- a/pkg/protocol/chainmanager/manager.go +++ /dev/null @@ -1,524 +0,0 @@ -package chainmanager - -import ( - "fmt" - - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/iotaledger/hive.go/core/eventticker" - "github.com/iotaledger/hive.go/core/memstorage" - "github.com/iotaledger/hive.go/ds/walker" - "github.com/iotaledger/hive.go/ierrors" - "github.com/iotaledger/hive.go/runtime/options" - "github.com/iotaledger/hive.go/runtime/syncutils" - "github.com/iotaledger/iota-core/pkg/model" - iotago "github.com/iotaledger/iota.go/v4" -) - -var ( - ErrCommitmentUnknown = ierrors.New("unknown commitment") - ErrCommitmentNotSolid = ierrors.New("commitment not solid") -) - -type Manager struct { - Events *Events - apiProvider iotago.APIProvider - handleError func(error) - - commitmentRequester *eventticker.EventTicker[iotago.SlotIndex, iotago.CommitmentID] - - commitmentsByID *memstorage.IndexedStorage[iotago.SlotIndex, iotago.CommitmentID, *ChainCommitment] - rootCommitment *ChainCommitment - - forksByForkingPoint *memstorage.IndexedStorage[iotago.SlotIndex, iotago.CommitmentID, *Fork] - evictionMutex syncutils.RWMutex - - optsCommitmentRequester []options.Option[eventticker.EventTicker[iotago.SlotIndex, iotago.CommitmentID]] - - commitmentEntityMutex *syncutils.DAGMutex[iotago.CommitmentID] - lastEvictedSlot *model.EvictionIndex[iotago.SlotIndex] -} - -func NewManager(apiProvider iotago.APIProvider, handleError func(error), opts ...options.Option[Manager]) (manager *Manager) { - return options.Apply(&Manager{ - Events: NewEvents(), - apiProvider: apiProvider, - handleError: handleError, - - commitmentsByID: memstorage.NewIndexedStorage[iotago.SlotIndex, iotago.CommitmentID, *ChainCommitment](), - commitmentEntityMutex: syncutils.NewDAGMutex[iotago.CommitmentID](), - forksByForkingPoint: memstorage.NewIndexedStorage[iotago.SlotIndex, iotago.CommitmentID, *Fork](), - lastEvictedSlot: 
model.NewEvictionIndex[iotago.SlotIndex](), - }, opts, func(m *Manager) { - m.commitmentRequester = eventticker.New(m.optsCommitmentRequester...) - m.Events.CommitmentBelowRoot.Hook(m.commitmentRequester.StopTicker) - - m.Events.RequestCommitment.LinkTo(m.commitmentRequester.Events.Tick) - }) -} - -func (m *Manager) Initialize(c *model.Commitment) { - m.evictionMutex.Lock() - defer m.evictionMutex.Unlock() - - m.rootCommitment, _ = m.getOrCreateCommitment(c.ID()) - m.rootCommitment.PublishCommitment(c) - m.rootCommitment.SolidEvent().Trigger() - m.rootCommitment.publishChain(NewChain(m.rootCommitment)) -} - -func (m *Manager) Shutdown() { - m.commitmentRequester.Shutdown() -} - -func (m *Manager) ProcessCommitmentFromSource(commitment *model.Commitment, source peer.ID) (isSolid bool, chain *Chain) { - m.evictionMutex.RLock() - defer m.evictionMutex.RUnlock() - - _, isSolid, chainCommitment, err := m.processCommitment(commitment) - if err != nil { - m.handleError(err) - return false, nil - } - if chainCommitment == nil { - return false, nil - } - - m.detectForks(chainCommitment, source) - - return isSolid, chainCommitment.Chain() -} - -func (m *Manager) ProcessCandidateCommitment(commitment *model.Commitment) (isSolid bool, chain *Chain) { - m.evictionMutex.RLock() - defer m.evictionMutex.RUnlock() - - _, isSolid, chainCommitment, err := m.processCommitment(commitment) - if err != nil { - m.handleError(err) - return false, nil - } - if chainCommitment == nil { - return false, nil - } - - return isSolid, chainCommitment.Chain() -} - -func (m *Manager) ProcessCommitment(commitment *model.Commitment) (isSolid bool, chain *Chain) { - m.evictionMutex.RLock() - defer m.evictionMutex.RUnlock() - - wasForked, isSolid, chainCommitment, err := m.processCommitment(commitment) - if err != nil { - m.handleError(err) - return false, nil - } - - if chainCommitment == nil { - return false, nil - } - - if wasForked { - if err := m.switchMainChainToCommitment(chainCommitment); err != nil { - panic(err) - } - } - - return isSolid, chainCommitment.Chain() -} - -func (m *Manager) EvictUntil(index iotago.SlotIndex) { - m.evictionMutex.Lock() - defer m.evictionMutex.Unlock() - - for currentIndex := m.lastEvictedSlot.NextIndex(); currentIndex <= index; currentIndex++ { - m.evict(currentIndex) - m.lastEvictedSlot.MarkEvicted(currentIndex) - } - - m.commitmentRequester.EvictUntil(index) -} - -// RootCommitment returns the root commitment of the manager. -func (m *Manager) RootCommitment() (rootCommitment *ChainCommitment) { - m.evictionMutex.RLock() - defer m.evictionMutex.RUnlock() - - return m.rootCommitment -} - -// SetRootCommitment sets the root commitment of the manager. 
-func (m *Manager) SetRootCommitment(commitment *model.Commitment) { - m.evictionMutex.Lock() - defer m.evictionMutex.Unlock() - - storage := m.commitmentsByID.Get(commitment.Slot()) - if storage == nil { - panic(fmt.Sprintf("we should always have commitment storage for confirmed index %s", commitment)) - } - - newRootCommitment, exists := storage.Get(commitment.ID()) - if !exists { - panic(fmt.Sprint("we should always have the latest commitment ID we confirmed with", commitment)) - } - - if commitment.Slot() <= m.rootCommitment.Commitment().Slot() && commitment.ID() != m.rootCommitment.Commitment().ID() { - panic(fmt.Sprintf("we should never set the root commitment to a commitment that is below the current root commitment %s - root: %s", commitment, m.rootCommitment.Commitment())) - } - - m.rootCommitment = newRootCommitment -} - -func (m *Manager) Chain(ec iotago.CommitmentID) (chain *Chain) { - m.evictionMutex.RLock() - defer m.evictionMutex.RUnlock() - - if commitment, exists := m.Commitment(ec); exists { - return commitment.Chain() - } - - return nil -} - -func (m *Manager) Commitment(id iotago.CommitmentID) (commitment *ChainCommitment, exists bool) { - storage := m.commitmentsByID.Get(id.Slot()) - if storage == nil { - return nil, false - } - - return storage.Get(id) -} - -func (m *Manager) LoadCommitmentOrRequestMissing(id iotago.CommitmentID) *ChainCommitment { - m.evictionMutex.RLock() - defer m.evictionMutex.RUnlock() - - chainCommitment, created := m.getOrCreateCommitment(id) - if created { - m.commitmentRequester.StartTicker(id) - } - - return chainCommitment -} - -func (m *Manager) Commitments(id iotago.CommitmentID, amount int) (commitments []*ChainCommitment, err error) { - m.evictionMutex.RLock() - defer m.evictionMutex.RUnlock() - - commitments = make([]*ChainCommitment, amount) - - for i := 0; i < amount; i++ { - currentCommitment, _ := m.Commitment(id) - if currentCommitment == nil { - return nil, ierrors.Wrap(ErrCommitmentUnknown, "not all commitments in the given range are known") - } - - commitments[i] = currentCommitment - - id = currentCommitment.Commitment().PreviousCommitmentID() - } - - return -} - -// ForkByForkingPoint returns the fork generated by a peer for the given forking point. -func (m *Manager) ForkByForkingPoint(forkingPoint iotago.CommitmentID) (fork *Fork, exists bool) { - m.evictionMutex.RLock() - defer m.evictionMutex.RUnlock() - - return m.forkByForkingPoint(forkingPoint) -} - -func (m *Manager) forkByForkingPoint(forkingPoint iotago.CommitmentID) (fork *Fork, exists bool) { - if indexStore := m.forksByForkingPoint.Get(forkingPoint.Slot()); indexStore != nil { - return indexStore.Get(forkingPoint) - } - - return nil, false -} - -func (m *Manager) SwitchMainChain(head iotago.CommitmentID) error { - m.evictionMutex.RLock() - defer m.evictionMutex.RUnlock() - - commitment, _ := m.Commitment(head) - if commitment == nil { - return ierrors.Wrapf(ErrCommitmentUnknown, "unknown commitment %s", head) - } - - return m.switchMainChainToCommitment(commitment) -} - -func (m *Manager) processCommitment(commitment *model.Commitment) (wasForked bool, isSolid bool, chainCommitment *ChainCommitment, err error) { - // Verify the commitment's version corresponds to the protocol version for the slot. - apiForSlot := m.apiProvider.APIForSlot(commitment.Slot()) - if apiForSlot.Version() != commitment.Commitment().ProtocolVersion { - return false, false, nil, ierrors.Errorf("") - } - - // Lock access to the parent commitment. 
We need to lock this first as we are trying to update children later within this function. - // Failure to do so, leads to a deadlock, where a child is locked and tries to lock its parent, which is locked by the parent which tries to lock the child. - m.commitmentEntityMutex.Lock(commitment.PreviousCommitmentID()) - defer m.commitmentEntityMutex.Unlock(commitment.PreviousCommitmentID()) - - // Lock access to the chainCommitment so no children are added while we are propagating solidity - m.commitmentEntityMutex.Lock(commitment.ID()) - defer m.commitmentEntityMutex.Unlock(commitment.ID()) - - if isBelowRootCommitment, isRootCommitment := m.evaluateAgainstRootCommitment(commitment.Commitment()); isBelowRootCommitment || isRootCommitment { - if isRootCommitment { - chainCommitment = m.rootCommitment - } else { - m.Events.CommitmentBelowRoot.Trigger(commitment.ID()) - } - - return false, isRootCommitment, chainCommitment, nil - } - - isNew, isSolid, wasForked, chainCommitment := m.registerCommitment(commitment) - if !isNew || chainCommitment.Chain() == nil { - return wasForked, isSolid, chainCommitment, nil - } - - if mainChild := chainCommitment.mainChild(); mainChild != nil { - for childWalker := walker.New[*ChainCommitment]().Push(chainCommitment.mainChild()); childWalker.HasNext(); { - childWalker.PushAll(m.propagateChainToMainChild(childWalker.Next(), chainCommitment.Chain())...) - } - } - - if isSolid { - if children := chainCommitment.Children(); len(children) != 0 { - for childWalker := walker.New[*ChainCommitment]().PushAll(children...); childWalker.HasNext(); { - childWalker.PushAll(m.propagateSolidity(childWalker.Next())...) - } - } - } - - return wasForked, isSolid, chainCommitment, nil -} - -func (m *Manager) evict(index iotago.SlotIndex) { - m.forksByForkingPoint.Evict(index) - m.commitmentsByID.Evict(index) -} - -func (m *Manager) getOrCreateCommitment(id iotago.CommitmentID) (commitment *ChainCommitment, created bool) { - return m.commitmentsByID.Get(id.Slot(), true).GetOrCreate(id, func() *ChainCommitment { - return NewChainCommitment(id) - }) -} - -func (m *Manager) evaluateAgainstRootCommitment(commitment *iotago.Commitment) (isBelow bool, isRootCommitment bool) { - isBelow = commitment.Slot <= m.rootCommitment.Commitment().Slot() - isRootCommitment = commitment.Equals(m.rootCommitment.Commitment().Commitment()) - - return -} - -func (m *Manager) detectForks(commitment *ChainCommitment, source peer.ID) { - forkingPoint, err := m.forkingPointAgainstMainChain(commitment) - if err != nil { - return - } - - if forkingPoint == nil { - return - } - - // Note: we rely on the fact that the block filter will not let (not yet committable) commitments through. - - forkedChainLatestCommitment := forkingPoint.Chain().LatestCommitment().Commitment() - mainChainLatestCommitment := m.rootCommitment.Chain().LatestCommitment().Commitment() - - // Check whether the chain is claiming to be heavier than the current main chain. 
- if forkedChainLatestCommitment.CumulativeWeight() <= mainChainLatestCommitment.CumulativeWeight() { - return - } - - var doNotTrigger bool - fork := m.forksByForkingPoint.Get(forkingPoint.ID().Slot(), true).Compute(forkingPoint.ID(), func(currentValue *Fork, exists bool) *Fork { - if exists { - if forkedChainLatestCommitment.Slot() <= currentValue.ForkLatestCommitment.Slot() { - // Do not trigger another event for the same forking point if the latest fork commitment did not change - doNotTrigger = true - return currentValue - } - - return &Fork{ - Source: currentValue.Source, - MainChain: currentValue.MainChain, - ForkedChain: currentValue.ForkedChain, - ForkingPoint: currentValue.ForkingPoint, - ForkLatestCommitment: forkedChainLatestCommitment, - } - } - - return &Fork{ - Source: source, - MainChain: m.rootCommitment.Chain(), - ForkedChain: forkingPoint.Chain(), - ForkingPoint: forkingPoint.Commitment(), - ForkLatestCommitment: forkedChainLatestCommitment, - } - }) - - if !doNotTrigger { - m.Events.ForkDetected.Trigger(fork) - } -} - -func (m *Manager) forkingPointAgainstMainChain(commitment *ChainCommitment) (*ChainCommitment, error) { - if !commitment.SolidEvent().WasTriggered() || commitment.Chain() == nil { - return nil, ierrors.Wrapf(ErrCommitmentNotSolid, "commitment %s is not solid", commitment) - } - - var forkingCommitment *ChainCommitment - // Walk all possible forks until we reach our main chain by jumping over each forking point - for chain := commitment.Chain(); chain != m.rootCommitment.Chain(); chain = commitment.Chain() { - forkingCommitment = chain.ForkingPoint - - if commitment, _ = m.Commitment(forkingCommitment.Commitment().PreviousCommitmentID()); commitment == nil { - return nil, ierrors.Wrapf(ErrCommitmentUnknown, "unknown parent of solid commitment %s", forkingCommitment.Commitment().ID()) - } - } - - return forkingCommitment, nil -} - -func (m *Manager) registerCommitment(commitment *model.Commitment) (isNew bool, isSolid bool, wasForked bool, chainCommitment *ChainCommitment) { - parentCommitment, commitmentCreated := m.getOrCreateCommitment(commitment.PreviousCommitmentID()) - if commitmentCreated { - m.commitmentRequester.StartTicker(parentCommitment.ID()) - } - - chainCommitment, created := m.getOrCreateCommitment(commitment.ID()) - - if !chainCommitment.PublishCommitment(commitment) { - return false, chainCommitment.SolidEvent().WasTriggered(), false, chainCommitment - } - - m.Events.CommitmentPublished.Trigger(chainCommitment) - - if !created { - m.commitmentRequester.StopTicker(chainCommitment.ID()) - } - - isSolid, _, wasForked = m.registerChild(parentCommitment, chainCommitment) - - return true, isSolid, wasForked, chainCommitment -} - -func (m *Manager) switchMainChainToCommitment(commitment *ChainCommitment) error { - forkingPoint, err := m.forkingPointAgainstMainChain(commitment) - if err != nil { - return err - } - - // commitment is already part of the main chain - if forkingPoint == nil { - return nil - } - - parentCommitment, _ := m.Commitment(forkingPoint.Commitment().PreviousCommitmentID()) - if parentCommitment == nil { - return ierrors.Wrapf(ErrCommitmentUnknown, "unknown parent of solid commitment %s", forkingPoint.ID()) - } - - // Separate the main chain by remove it from the parent - oldMainCommitment := parentCommitment.mainChild() - - // For each forking point coming out of the main chain we need to reorg the children - for fp := commitment.Chain().ForkingPoint; ; { - fpParent, _ := m.Commitment(fp.Commitment().PreviousCommitmentID()) - 
- mainChild := fpParent.mainChild() - newChildChain := NewChain(mainChild) - - if err := fpParent.setMainChild(fp); err != nil { - return err - } - - for childWalker := walker.New[*ChainCommitment]().Push(mainChild); childWalker.HasNext(); { - childWalker.PushAll(m.propagateReplaceChainToMainChild(childWalker.Next(), newChildChain)...) - } - - for childWalker := walker.New[*ChainCommitment]().Push(fp); childWalker.HasNext(); { - childWalker.PushAll(m.propagateReplaceChainToMainChild(childWalker.Next(), parentCommitment.Chain())...) - } - - if fp == forkingPoint { - break - } - - fp = fpParent.Chain().ForkingPoint - } - - // Separate the old main chain by removing it from the parent - parentCommitment.deleteChild(oldMainCommitment) - - return nil -} - -func (m *Manager) registerChild(parent *ChainCommitment, child *ChainCommitment) (isSolid bool, chain *Chain, wasForked bool) { - if isSolid, chain, wasForked = parent.registerChild(child); chain != nil { - chain.addCommitment(child) - child.publishChain(chain) - - if isSolid { - child.SolidEvent().Trigger() - } - } - - return -} - -func (m *Manager) propagateChainToMainChild(child *ChainCommitment, chain *Chain) (childrenToUpdate []*ChainCommitment) { - m.commitmentEntityMutex.Lock(child.ID()) - defer m.commitmentEntityMutex.Unlock(child.ID()) - - if !child.publishChain(chain) { - return - } - - chain.addCommitment(child) - - mainChild := child.mainChild() - if mainChild == nil { - return - } - - return []*ChainCommitment{mainChild} -} - -func (m *Manager) propagateReplaceChainToMainChild(child *ChainCommitment, chain *Chain) (childrenToUpdate []*ChainCommitment) { - m.commitmentEntityMutex.Lock(child.ID()) - defer m.commitmentEntityMutex.Unlock(child.ID()) - - child.replaceChain(chain) - chain.addCommitment(child) - - mainChild := child.mainChild() - if mainChild == nil { - return - } - - return []*ChainCommitment{mainChild} -} - -func (m *Manager) propagateSolidity(child *ChainCommitment) (childrenToUpdate []*ChainCommitment) { - m.commitmentEntityMutex.Lock(child.ID()) - defer m.commitmentEntityMutex.Unlock(child.ID()) - - if child.SolidEvent().Trigger() { - childrenToUpdate = child.Children() - } - - return -} - -func WithCommitmentRequesterOptions(opts ...options.Option[eventticker.EventTicker[iotago.SlotIndex, iotago.CommitmentID]]) options.Option[Manager] { - return func(m *Manager) { - m.optsCommitmentRequester = append(m.optsCommitmentRequester, opts...) 
- } -} diff --git a/pkg/protocol/chainmanager/manager_test.go b/pkg/protocol/chainmanager/manager_test.go deleted file mode 100644 index 21d24c997..000000000 --- a/pkg/protocol/chainmanager/manager_test.go +++ /dev/null @@ -1,536 +0,0 @@ -package chainmanager - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/iotaledger/hive.go/ds/types" - "github.com/iotaledger/hive.go/lo" - "github.com/iotaledger/iota-core/pkg/model" - iotago "github.com/iotaledger/iota.go/v4" - "github.com/iotaledger/iota.go/v4/tpkg" -) - -func TestManager(t *testing.T) { - tf := NewTestFramework(t, tpkg.TestAPI) - tf.CreateCommitment("1", "Genesis", 10) - tf.CreateCommitment("2", "1", 20) - tf.CreateCommitment("3", "2", 30) - tf.CreateCommitment("4", "3", 40) - tf.CreateCommitment("5", "4", 50) - tf.CreateCommitment("4*", "3", 45) - tf.CreateCommitment("5*", "4*", 55) - tf.CreateCommitment("6*", "5*", 65) - tf.CreateCommitment("7*", "6*", 75) - tf.CreateCommitment("8*", "7*", 85) - tf.CreateCommitment("1-", "Genesis", 9) - tf.CreateCommitment("2-", "1-", 19) - - allForksDetected := make(chan struct{}, 1) - - detectedForksAtCommitments := []iotago.CommitmentID{ - tf.SlotCommitment("5*"), - tf.SlotCommitment("6*"), - tf.SlotCommitment("7*"), - tf.SlotCommitment("8*"), - } - - tf.Instance.Events.ForkDetected.Hook(func(fork *Fork) { - // The ForkDetected event should only be triggered once and only if the fork is deep enough - require.Equal(t, fork.ForkLatestCommitment.ID(), detectedForksAtCommitments[0]) - require.Equal(t, fork.ForkingPoint.ID(), tf.SlotCommitment("4*")) - - detectedForksAtCommitments = detectedForksAtCommitments[1:] - - if len(detectedForksAtCommitments) == 0 { - allForksDetected <- struct{}{} - close(allForksDetected) // closing channel here so that we are sure no second event with the same data is triggered - } - }) - - expectedChainMappings := map[string]string{ - "Genesis": "Genesis", - } - - { - isSolid, chain := tf.ProcessCommitment("1") - require.True(t, isSolid) - tf.AssertChainIsAlias(chain, "Genesis") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "1": "Genesis", - })) - } - - { - isSolid, chain := tf.ProcessCommitment("1") - require.True(t, isSolid) - tf.AssertChainIsAlias(chain, "Genesis") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{})) - } - - { - isSolid, chain := tf.ProcessCommitmentFromOtherSource("1-") - require.True(t, isSolid) - tf.AssertChainIsAlias(chain, "1-") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "1-": "1-", - })) - } - - { - isSolid, chain := tf.ProcessCommitmentFromOtherSource("4") - require.False(t, isSolid) - tf.AssertChainIsAlias(chain, "") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "4": "", - })) - } - - // Generate a fork with higher CW than our main one - { - isSolid, chain := tf.ProcessCommitmentFromOtherSource("4*") - require.False(t, isSolid) - tf.AssertChainIsAlias(chain, "4*") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "4*": "4*", - })) - } - - { - isSolid, chain := tf.ProcessCommitmentFromOtherSource("3") - require.False(t, isSolid) - tf.AssertChainIsAlias(chain, "") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "3": "", - })) - } - - // Solidify our main chain - { - isSolid, chain := tf.ProcessCommitment("2") - require.True(t, isSolid) - tf.AssertChainIsAlias(chain, "Genesis") - 
tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "2": "Genesis", - "3": "Genesis", - "4": "Genesis", - })) - } - - // Generate a fork with less CW than our main one - { - isSolid, chain := tf.ProcessCommitmentFromOtherSource("2-") - require.True(t, isSolid) - tf.AssertChainIsAlias(chain, "1-") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "2-": "1-", - })) - } - - { - isSolid, chain := tf.ProcessCommitmentFromOtherSource("5*") - require.True(t, isSolid) - tf.AssertChainIsAlias(chain, "4*") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "5*": "4*", - })) - } - - { - isSolid, chain := tf.ProcessCommitmentFromOtherSource("6*") - require.True(t, isSolid) - tf.AssertChainIsAlias(chain, "4*") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "6*": "4*", - })) - } - - { - isSolid, chain := tf.ProcessCommitmentFromOtherSource("7*") - require.True(t, isSolid) - tf.AssertChainIsAlias(chain, "4*") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "7*": "4*", - })) - } - - { - isSolid, chain := tf.ProcessCommitmentFromOtherSource("8*") - require.True(t, isSolid) - tf.AssertChainIsAlias(chain, "4*") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "8*": "4*", - })) - } - - // Continue on the main chain - { - isSolid, chain := tf.ProcessCommitment("5") - require.True(t, isSolid) - tf.AssertChainIsAlias(chain, "Genesis") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "5": "Genesis", - })) - } - - { - commitments, err := tf.Instance.Commitments(tf.SlotCommitment("8*"), 9) - require.NoError(t, err) - tf.AssertEqualChainCommitments(commitments, - "8*", - "7*", - "6*", - "5*", - "4*", - "3", - "2", - "1", - "Genesis", - ) - } - - { - commitments, err := tf.Instance.Commitments(tf.SlotCommitment("8*"), 10) - require.Error(t, err) - require.EqualValues(t, []*ChainCommitment(nil), commitments) - } - - { - require.Nil(t, tf.Instance.Chain(iotago.CommitmentIDRepresentingData(1, []byte{255, 255}))) - } - - require.Eventually(t, func() bool { - select { - case <-allForksDetected: - return true - default: - return false - } - }, 1*time.Second, 10*time.Millisecond) -} - -func TestManagerForkDetectedAgain(t *testing.T) { - tf := NewTestFramework(t, tpkg.TestAPI) - tf.CreateCommitment("1", "Genesis", 10) - tf.CreateCommitment("2", "1", 20) - tf.CreateCommitment("3", "2", 30) - tf.CreateCommitment("4", "3", 40) - tf.CreateCommitment("5", "4", 80) - tf.CreateCommitment("4*", "3", 45) - tf.CreateCommitment("1-", "Genesis", 9) - tf.CreateCommitment("2-", "1-", 19) - tf.CreateCommitment("5*", "4*", 55) - tf.CreateCommitment("6*", "5*", 65) - tf.CreateCommitment("7*", "6*", 75) - tf.CreateCommitment("8*", "7*", 85) - tf.CreateCommitment("9*", "8*", 95) - - forkRedetected := make(chan struct{}, 1) - expectedForks := map[iotago.CommitmentID]types.Empty{ - tf.SlotCommitment("4*"): types.Void, - tf.SlotCommitment("5*"): types.Void, - tf.SlotCommitment("8*"): types.Void, - tf.SlotCommitment("9*"): types.Void, - } - tf.Instance.Events.ForkDetected.Hook(func(fork *Fork) { - if _, has := expectedForks[fork.ForkLatestCommitment.ID()]; !has { - t.Fatalf("unexpected fork at: %s", fork.ForkLatestCommitment.ID()) - } - t.Logf("fork detected at %s", fork.ForkingPoint.ID()) - delete(expectedForks, fork.ForkLatestCommitment.ID()) - - require.Equal(t, fork.ForkingPoint.ID(), tf.SlotCommitment("4*")) - if len(expectedForks) == 0 { - 
forkRedetected <- struct{}{} - } - }) - - { - tf.ProcessCommitment("1") - tf.ProcessCommitmentFromOtherSource("1-") - tf.ProcessCommitmentFromOtherSource("4") - tf.ProcessCommitmentFromOtherSource("4*") - tf.ProcessCommitmentFromOtherSource("3") - tf.ProcessCommitment("2") - tf.ProcessCommitmentFromOtherSource("5*") - tf.ProcessCommitmentFromOtherSource("5") - tf.ProcessCommitmentFromOtherSource("6*") // This does not re-trigger the fork due to lower CW than 5 - tf.ProcessCommitmentFromOtherSource("7*") // This does not re-trigger the fork due to lower CW than 5 - tf.ProcessCommitmentFromOtherSource("8*") - } - - { - commitments, err := tf.Instance.Commitments(tf.SlotCommitment("8*"), 9) - require.NoError(t, err) - tf.AssertEqualChainCommitments(commitments, - "8*", - "7*", - "6*", - "5*", - "4*", - "3", - "2", - "1", - "Genesis", - ) - } - - { - require.Nil(t, tf.Instance.Chain(iotago.CommitmentIDRepresentingData(1, []byte{255, 255}))) - } - - // We now evict at 7 so that we forget about the fork we had before - { - tf.Instance.EvictUntil(8) - } - - // Processing the next commitment should trigger the event again - { - isSolid, chain := tf.ProcessCommitmentFromOtherSource("9*") - require.False(t, isSolid, "commitment should not be solid, as we evicted until epoch 7") - require.Nil(t, chain, "commitment chain should be nil, as we evicted until epoch 7") - } -} - -func TestManagerForkDetectedReorgChains(t *testing.T) { - tf := NewTestFramework(t, tpkg.TestAPI) - tf.CreateCommitment("1", "Genesis", 10) - tf.CreateCommitment("2", "1", 20) - tf.CreateCommitment("3", "2", 30) - tf.CreateCommitment("4", "3", 40) - tf.CreateCommitment("5", "4", 80) - tf.CreateCommitment("4*", "3", 45) - tf.CreateCommitment("5*", "4*", 55) - tf.CreateCommitment("6*", "5*", 65) - tf.CreateCommitment("7*", "6*", 75) - tf.CreateCommitment("8*", "7*", 85) - - forkRedetected := make(chan struct{}, 1) - expectedForks := map[iotago.CommitmentID]types.Empty{ - tf.SlotCommitment("4*"): types.Void, - tf.SlotCommitment("5*"): types.Void, - tf.SlotCommitment("6*"): types.Void, - tf.SlotCommitment("7*"): types.Void, - tf.SlotCommitment("8*"): types.Void, - } - tf.Instance.Events.ForkDetected.Hook(func(fork *Fork) { - if _, has := expectedForks[fork.ForkLatestCommitment.ID()]; !has { - t.Fatalf("unexpected fork at: %s", fork.ForkLatestCommitment.ID()) - } - t.Logf("fork detected at %s", fork.ForkingPoint.ID()) - delete(expectedForks, fork.ForkLatestCommitment.ID()) - - require.Equal(t, fork.ForkingPoint.ID(), tf.SlotCommitment("4*")) - if len(expectedForks) == 0 { - forkRedetected <- struct{}{} - } - }) - - { - tf.ProcessCommitment("1") - tf.ProcessCommitmentFromOtherSource("2") - tf.ProcessCommitmentFromOtherSource("3") - tf.ProcessCommitmentFromOtherSource("4") - tf.ProcessCommitmentFromOtherSource("5") - - tf.ProcessCommitmentFromOtherSource("5*") - tf.ProcessCommitmentFromOtherSource("6*") - tf.ProcessCommitmentFromOtherSource("7*") - tf.ProcessCommitmentFromOtherSource("8*") - tf.ProcessCommitment("4*") - } - - oldCommitments, err := tf.Instance.Commitments(tf.SlotCommitment("5"), 4) - require.NoError(t, err) - - require.Equalf(t, tf.ChainCommitment("4").ID(), oldCommitments[0].Chain().ForkingPoint.ID(), "expected %s; got %s", "4", oldCommitments[0].Chain().ForkingPoint.ID().String()) - - newMainCommitments, err := tf.Instance.Commitments(tf.SlotCommitment("8*"), 9) - require.NoError(t, err) - - require.Equalf(t, tf.ChainCommitment("Genesis").ID(), newMainCommitments[0].Chain().ForkingPoint.ID(), "expected %s; got %s", 
"Genesis", newMainCommitments[0].Chain().ForkingPoint.ID().String()) - - tf.AssertEqualChainCommitments(newMainCommitments, - "8*", - "7*", - "6*", - "5*", - "4*", - "3", - "2", - "1", - "Genesis", - ) - -} - -func TestEvaluateAgainstRootCommitment(t *testing.T) { - rootCommitment := iotago.NewCommitment(tpkg.TestAPI.Version(), 1, iotago.CommitmentIDRepresentingData(1, []byte{9}), iotago.Identifier{}, 0, 0) - - modelRootCommitment, err := model.CommitmentFromCommitment(rootCommitment, tpkg.TestAPI) - require.NoError(t, err) - - m := &Manager{ - rootCommitment: NewChainCommitment(modelRootCommitment.ID()), - } - - m.rootCommitment.PublishCommitment(modelRootCommitment) - - isBelow, isRootCommitment := m.evaluateAgainstRootCommitment(iotago.NewCommitment(tpkg.TestAPI.Version(), 0, iotago.CommitmentIDRepresentingData(0, []byte{}), iotago.Identifier{}, 0, 0)) - require.True(t, isBelow, "commitment with index 0 should be below root commitment") - require.False(t, isRootCommitment, "commitment with index 0 should not be the root commitment") - - isBelow, isRootCommitment = m.evaluateAgainstRootCommitment(rootCommitment) - require.True(t, isBelow, "commitment with index 1 should be below root commitment") - require.True(t, isRootCommitment, "commitment with index 1 should be the root commitment") - - isBelow, isRootCommitment = m.evaluateAgainstRootCommitment(iotago.NewCommitment(tpkg.TestAPI.Version(), 1, iotago.CommitmentIDRepresentingData(1, []byte{1}), iotago.Identifier{}, 0, 0)) - require.True(t, isBelow, "commitment with index 1 should be below root commitment") - require.False(t, isRootCommitment, "commitment with index 1 should be the root commitment") - - isBelow, isRootCommitment = m.evaluateAgainstRootCommitment(iotago.NewCommitment(tpkg.TestAPI.Version(), 1, iotago.CommitmentIDRepresentingData(1, []byte{9}), iotago.Identifier{}, 0, 0)) - require.True(t, isBelow, "commitment with index 1 should be below root commitment") - require.True(t, isRootCommitment, "commitment with index 1 should be the root commitment") - - isBelow, isRootCommitment = m.evaluateAgainstRootCommitment(iotago.NewCommitment(tpkg.TestAPI.Version(), 2, iotago.CommitmentIDRepresentingData(2, []byte{}), iotago.Identifier{}, 0, 0)) - require.False(t, isBelow, "commitment with index 2 should not be below root commitment") - require.False(t, isRootCommitment, "commitment with index 2 should not be the root commitment") -} - -func TestProcessCommitment(t *testing.T) { - tf := NewTestFramework(t, tpkg.TestAPI) - tf.CreateCommitment("1", "Genesis", 10) - tf.CreateCommitment("2", "1", 20) - - expectedChainMappings := map[string]string{ - "Genesis": "Genesis", - } - - { - isSolid, chain := tf.ProcessCommitment("1") - require.True(t, isSolid) - tf.AssertChainIsAlias(chain, "Genesis") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "1": "Genesis", - })) - } - { - isSolid, chain := tf.ProcessCommitment("2") - require.True(t, isSolid) - tf.AssertChainIsAlias(chain, "Genesis") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "2": "Genesis", - })) - } - - fmt.Println("------- root commitment is now 2 -------") - tf.Instance.SetRootCommitment(tf.commitment("2")) - tf.Instance.EvictUntil(2 - 1) - - { - require.Equal(t, tf.commitment("2").ID(), tf.Instance.rootCommitment.ID()) - } - - // Should not be processed after 2 becomes rootCommitment - tf.CreateCommitment("1*", "Genesis", 15) - tf.CreateCommitment("2*", "1*", 25) - tf.CreateCommitment("3*", "2*", 35) - 
tf.CreateCommitment("4*", "3*", 45) - tf.CreateCommitment("2+", "1", 26) - { - { - isSolid, chain := tf.ProcessCommitment("1*") - require.False(t, isSolid) - require.Nil(t, chain) - tf.AssertForkDetectedCount(0) - tf.AssertCommitmentMissingCount(0) - tf.AssertMissingCommitmentReceivedCount(0) - tf.AssertCommitmentBelowRootCount(1) - } - { - isSolid, chain := tf.ProcessCommitment("2*") - require.False(t, isSolid) - require.Nil(t, chain) - tf.AssertForkDetectedCount(0) - tf.AssertCommitmentMissingCount(0) - tf.AssertMissingCommitmentReceivedCount(0) - tf.AssertCommitmentBelowRootCount(2) - } - { - isSolid, chain := tf.ProcessCommitment("3*") - require.False(t, isSolid) - require.Nil(t, chain) - tf.AssertForkDetectedCount(0) - tf.AssertCommitmentMissingCount(1) - tf.AssertMissingCommitmentReceivedCount(0) - tf.AssertCommitmentBelowRootCount(2) - } - { - isSolid, chain := tf.ProcessCommitment("2+") - require.False(t, isSolid) - require.Nil(t, chain) - tf.AssertForkDetectedCount(0) - tf.AssertCommitmentMissingCount(1) - tf.AssertMissingCommitmentReceivedCount(0) - tf.AssertCommitmentBelowRootCount(3) - } - } - - // Should be processed after 2 becomes rootCommitment - tf.CreateCommitment("3", "2", 30) - tf.CreateCommitment("4", "3", 40) - { - { - isSolid, chain := tf.ProcessCommitment("2") - require.True(t, isSolid) - tf.AssertChainIsAlias(chain, "Genesis") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "Genesis": "evicted", - "1": "evicted", - "2": "Genesis", - })) - tf.AssertForkDetectedCount(0) - tf.AssertCommitmentMissingCount(1) - tf.AssertMissingCommitmentReceivedCount(0) - tf.AssertCommitmentBelowRootCount(3) - } - { - isSolid, chain := tf.ProcessCommitment("4") - require.False(t, isSolid) - require.Nil(t, chain, "Genesis") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "4": "", - })) - tf.AssertForkDetectedCount(0) - tf.AssertCommitmentMissingCount(2) - tf.AssertMissingCommitmentReceivedCount(0) - tf.AssertCommitmentBelowRootCount(3) - } - { - isSolid, chain := tf.ProcessCommitment("3") - require.True(t, isSolid) - tf.AssertChainIsAlias(chain, "Genesis") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{ - "3": "Genesis", - "4": "Genesis", - })) - tf.AssertForkDetectedCount(0) - tf.AssertCommitmentMissingCount(2) - tf.AssertMissingCommitmentReceivedCount(1) - tf.AssertCommitmentBelowRootCount(3) - } - { - isSolid, chain := tf.ProcessCommitment("4") - require.True(t, isSolid) - tf.AssertChainIsAlias(chain, "Genesis") - tf.AssertChainState(lo.MergeMaps(expectedChainMappings, map[string]string{})) - tf.AssertForkDetectedCount(0) - tf.AssertCommitmentMissingCount(2) - tf.AssertMissingCommitmentReceivedCount(1) - tf.AssertCommitmentBelowRootCount(3) - } - } -} diff --git a/pkg/protocol/chainmanager/testframework.go b/pkg/protocol/chainmanager/testframework.go deleted file mode 100644 index 9d939b82b..000000000 --- a/pkg/protocol/chainmanager/testframework.go +++ /dev/null @@ -1,195 +0,0 @@ -package chainmanager - -import ( - "fmt" - "sync/atomic" - "testing" - - "github.com/stretchr/testify/require" - "golang.org/x/crypto/blake2b" - - "github.com/iotaledger/hive.go/runtime/options" - "github.com/iotaledger/hive.go/runtime/syncutils" - "github.com/iotaledger/iota-core/pkg/model" - iotago "github.com/iotaledger/iota.go/v4" -) - -type TestFramework struct { - Instance *Manager - - test *testing.T - api iotago.API - commitmentsByAlias map[string]*model.Commitment - - forkDetected int32 - commitmentMissing 
int32 - missingCommitmentReceived int32 - commitmentBelowRoot int32 - - syncutils.RWMutex -} - -func NewTestFramework(test *testing.T, a iotago.API, opts ...options.Option[TestFramework]) (testFramework *TestFramework) { - snapshotCommitment := model.NewEmptyCommitment(a) - - handleError := func(err error) { - fmt.Println(err) - } - - return options.Apply(&TestFramework{ - Instance: NewManager(iotago.SingleVersionProvider(a), handleError), - - test: test, - api: a, - commitmentsByAlias: map[string]*model.Commitment{ - "Genesis": snapshotCommitment, - }, - }, opts, func(t *TestFramework) { - t.Instance.Initialize(snapshotCommitment) - t.Instance.Events.ForkDetected.Hook(func(fork *Fork) { - t.test.Logf("ForkDetected: %s", fork) - atomic.AddInt32(&t.forkDetected, 1) - }) - t.Instance.commitmentRequester.Events.TickerStarted.Hook(func(id iotago.CommitmentID) { - t.test.Logf("CommitmentMissing: %s", id) - atomic.AddInt32(&t.commitmentMissing, 1) - }) - t.Instance.commitmentRequester.Events.TickerStopped.Hook(func(id iotago.CommitmentID) { - t.test.Logf("MissingCommitmentReceived: %s", id) - atomic.AddInt32(&t.missingCommitmentReceived, 1) - }) - t.Instance.Events.CommitmentBelowRoot.Hook(func(id iotago.CommitmentID) { - t.test.Logf("CommitmentBelowRoot: %s", id) - atomic.AddInt32(&t.commitmentBelowRoot, 1) - }) - }) -} - -func (t *TestFramework) CreateCommitment(alias string, prevAlias string, cumulativeWeight uint64) { - t.Lock() - defer t.Unlock() - - prevCommitmentID, previousIndex := t.previousCommitmentID(prevAlias) - randomECR := blake2b.Sum256([]byte(alias + prevAlias)) - - cm, err := model.CommitmentFromCommitment(iotago.NewCommitment(t.api.ProtocolParameters().Version(), previousIndex+1, prevCommitmentID, randomECR, cumulativeWeight, 0), t.api) - require.NoError(t.test, err) - t.commitmentsByAlias[alias] = cm - t.commitmentsByAlias[alias].ID().RegisterAlias(alias) -} - -func (t *TestFramework) ProcessCommitment(alias string) (isSolid bool, chain *Chain) { - return t.Instance.ProcessCommitment(t.commitment(alias)) -} - -func (t *TestFramework) ProcessCommitmentFromOtherSource(alias string) (isSolid bool, chain *Chain) { - return t.Instance.ProcessCommitmentFromSource(t.commitment(alias), "otherid") -} - -func (t *TestFramework) Chain(alias string) (chain *Chain) { - return t.Instance.Chain(t.SlotCommitment(alias)) -} - -func (t *TestFramework) commitment(alias string) *model.Commitment { - t.RLock() - defer t.RUnlock() - - commitment, exists := t.commitmentsByAlias[alias] - if !exists { - panic("the commitment does not exist") - } - - return commitment -} - -func (t *TestFramework) ChainCommitment(alias string) *ChainCommitment { - cm, exists := t.Instance.Commitment(t.SlotCommitment(alias)) - require.True(t.test, exists) - - return cm -} - -func (t *TestFramework) AssertForkDetectedCount(expected int) { - require.EqualValues(t.test, expected, t.forkDetected, "forkDetected count does not match") -} - -func (t *TestFramework) AssertCommitmentMissingCount(expected int) { - require.EqualValues(t.test, expected, t.commitmentMissing, "commitmentMissing count does not match") -} - -func (t *TestFramework) AssertMissingCommitmentReceivedCount(expected int) { - require.EqualValues(t.test, expected, t.missingCommitmentReceived, "missingCommitmentReceived count does not match") -} - -func (t *TestFramework) AssertCommitmentBelowRootCount(expected int) { - require.EqualValues(t.test, expected, t.commitmentBelowRoot, "commitmentBelowRoot count does not match") -} - -func (t *TestFramework) 
AssertEqualChainCommitments(commitments []*ChainCommitment, aliases ...string) { - var chainCommitments []*ChainCommitment - for _, alias := range aliases { - chainCommitments = append(chainCommitments, t.ChainCommitment(alias)) - } - - require.EqualValues(t.test, commitments, chainCommitments) -} - -func (t *TestFramework) SlotCommitment(alias string) iotago.CommitmentID { - return t.commitment(alias).ID() -} - -func (t *TestFramework) SlotIndex(alias string) iotago.SlotIndex { - return t.commitment(alias).Slot() -} - -func (t *TestFramework) SlotCommitmentRoot(alias string) iotago.Identifier { - return t.commitment(alias).RootsID() -} - -func (t *TestFramework) PrevSlotCommitment(alias string) iotago.CommitmentID { - return t.commitment(alias).PreviousCommitmentID() -} - -func (t *TestFramework) AssertChainIsAlias(chain *Chain, alias string) { - if alias == "" { - require.Nil(t.test, chain) - return - } - - require.Equal(t.test, t.commitment(alias).ID(), chain.ForkingPoint.ID()) -} - -func (t *TestFramework) AssertChainState(chains map[string]string) { - commitmentsByChainAlias := make(map[string][]string) - - for commitmentAlias, chainAlias := range chains { - if chainAlias == "" { - require.Nil(t.test, t.Chain(commitmentAlias)) - continue - } - if chainAlias == "evicted" { - _, exists := t.Instance.Commitment(t.SlotCommitment(commitmentAlias)) - require.False(t.test, exists, "commitment %s should be evicted", commitmentAlias) - continue - } - commitmentsByChainAlias[chainAlias] = append(commitmentsByChainAlias[chainAlias], commitmentAlias) - - chain := t.Chain(commitmentAlias) - - require.NotNil(t.test, chain, "chain for commitment %s is nil", commitmentAlias) - require.Equal(t.test, t.SlotCommitment(chainAlias), chain.ForkingPoint.ID()) - } -} - -func (t *TestFramework) previousCommitmentID(alias string) (previousCommitmentID iotago.CommitmentID, previousIndex iotago.SlotIndex) { - if alias == "" { - return - } - - previousCommitment, exists := t.commitmentsByAlias[alias] - if !exists { - panic("the previous commitment does not exist") - } - - return previousCommitment.ID(), previousCommitment.Slot() -} diff --git a/pkg/protocol/chainmanager/types.go b/pkg/protocol/chainmanager/types.go deleted file mode 100644 index cf7b93c5f..000000000 --- a/pkg/protocol/chainmanager/types.go +++ /dev/null @@ -1,29 +0,0 @@ -package chainmanager - -import ( - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/iotaledger/hive.go/stringify" - "github.com/iotaledger/iota-core/pkg/model" - iotago "github.com/iotaledger/iota.go/v4" -) - -type ChainID = iotago.CommitmentID - -type Fork struct { - Source peer.ID - MainChain *Chain - ForkedChain *Chain - ForkingPoint *model.Commitment - ForkLatestCommitment *model.Commitment -} - -func (f *Fork) String() string { - return stringify.Struct("Fork", - stringify.NewStructField("Source", f.Source), - stringify.NewStructField("MainChain", f.MainChain.String()), - stringify.NewStructField("ForkedChain", f.ForkedChain.String()), - stringify.NewStructField("ForkingPoint", f.ForkingPoint), - stringify.NewStructField("ForkLatestCommitment", f.ForkLatestCommitment), - ) -} diff --git a/pkg/protocol/chains.go b/pkg/protocol/chains.go new file mode 100644 index 000000000..9704fca55 --- /dev/null +++ b/pkg/protocol/chains.go @@ -0,0 +1,184 @@ +package protocol + +import ( + "cmp" + + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/iotaledger/hive.go/ds/reactive" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/log" + 
"github.com/iotaledger/iota-core/pkg/model" + "github.com/iotaledger/iota-core/pkg/protocol/engine" + iotago "github.com/iotaledger/iota.go/v4" +) + +// Chains is a subcomponent of the protocol that exposes the chains that are managed by the protocol and that implements +// the chain switching logic. +type Chains struct { + // Set contains all non-evicted chains that are managed by the protocol. + reactive.Set[*Chain] + + // Main contains the main chain. + Main reactive.Variable[*Chain] + + // HeaviestClaimedCandidate contains the candidate chain with the heaviest claimed weight according to its latest commitment. The weight has neither been checked via attestations nor verified by downloading all data. + HeaviestClaimedCandidate reactive.Variable[*Chain] + + // HeaviestAttestedCandidate contains the candidate chain with the heaviest weight as checked by attestations. The chain has not been instantiated into an engine yet. + HeaviestAttestedCandidate reactive.Variable[*Chain] + + // HeaviestVerifiedCandidate contains the candidate chain with the heaviest verified weight, meaning the chain has been instantiated into an engine and the commitments have been produced by the engine itself. + HeaviestVerifiedCandidate reactive.Variable[*Chain] + + // LatestSeenSlot contains the slot of the latest commitment of any received block. + LatestSeenSlot reactive.Variable[iotago.SlotIndex] + + // protocol contains a reference to the Protocol instance that this component belongs to. + protocol *Protocol + + // Logger contains a reference to the logger that is used by this component. + log.Logger +} + +// newChains creates a new chains instance for the given protocol. +func newChains(protocol *Protocol) *Chains { + c := &Chains{ + Set: reactive.NewSet[*Chain](), + Main: reactive.NewVariable[*Chain](), + HeaviestClaimedCandidate: reactive.NewVariable[*Chain](), + HeaviestAttestedCandidate: reactive.NewVariable[*Chain](), + HeaviestVerifiedCandidate: reactive.NewVariable[*Chain](), + LatestSeenSlot: reactive.NewVariable[iotago.SlotIndex](increasing[iotago.SlotIndex]), + protocol: protocol, + } + + shutdown := lo.Batch( + c.initLogger(protocol.NewChildLogger("Chains")), + c.initChainSwitching(protocol.Options.ChainSwitchingThreshold), + + protocol.Constructed.WithNonEmptyValue(func(_ bool) (shutdown func()) { + return c.deriveLatestSeenSlot(protocol) + }), + ) + + protocol.Shutdown.OnTrigger(shutdown) + + return c +} + +// WithInitializedEngines is a reactive selector that executes the given callback for each managed chain that +// initialized its engine. +func (c *Chains) WithInitializedEngines(callback func(chain *Chain, engine *engine.Engine) (shutdown func())) (shutdown func()) { + return c.WithElements(func(chain *Chain) (shutdown func()) { + return chain.WithInitializedEngine(func(engine *engine.Engine) (shutdown func()) { + return callback(chain, engine) + }) + }) +} + +// initLogger initializes the logger for this component. 
+func (c *Chains) initLogger(logger log.Logger, shutdownLogger func()) (shutdown func()) { + c.Logger = logger + + return lo.Batch( + c.Main.LogUpdates(c, log.LevelTrace, "Main", (*Chain).LogName), + c.HeaviestClaimedCandidate.LogUpdates(c, log.LevelTrace, "HeaviestClaimedCandidate", (*Chain).LogName), + c.HeaviestAttestedCandidate.LogUpdates(c, log.LevelTrace, "HeaviestAttestedCandidate", (*Chain).LogName), + c.HeaviestVerifiedCandidate.LogUpdates(c, log.LevelTrace, "HeaviestVerifiedCandidate", (*Chain).LogName), + + shutdownLogger, + ) +} + +// initChainSwitching initializes the chain switching logic. +func (c *Chains) initChainSwitching(chainSwitchingThreshold iotago.SlotIndex) (shutdown func()) { + mainChain := c.newChain() + mainChain.StartEngine.Set(true) + + c.Main.Set(mainChain) + + // only switch to the heavier chain if the latest commitment is enough slots away from the forking point. + forkingPointBelowChainSwitchingThreshold := func(chain *Chain) func(_ *Commitment, latestCommitment *Commitment) bool { + return func(_ *Commitment, latestCommitment *Commitment) bool { + forkingPoint := chain.ForkingPoint.Get() + + return forkingPoint != nil && latestCommitment != nil && (latestCommitment.ID().Slot()-forkingPoint.ID().Slot()) > chainSwitchingThreshold + } + } + + return lo.Batch( + c.HeaviestClaimedCandidate.WithNonEmptyValue(func(heaviestClaimedCandidate *Chain) (shutdown func()) { + return heaviestClaimedCandidate.RequestAttestations.ToggleValue(true) + }), + + c.HeaviestAttestedCandidate.WithNonEmptyValue(func(heaviestAttestedCandidate *Chain) (shutdown func()) { + return heaviestAttestedCandidate.LatestAttestedCommitment.OnUpdateOnce(func(_ *Commitment, _ *Commitment) { + heaviestAttestedCandidate.StartEngine.Set(true) + }, forkingPointBelowChainSwitchingThreshold(heaviestAttestedCandidate)) + }), + + c.HeaviestVerifiedCandidate.WithNonEmptyValue(func(heaviestVerifiedCandidate *Chain) (shutdown func()) { + return heaviestVerifiedCandidate.LatestProducedCommitment.OnUpdateOnce(func(_ *Commitment, latestProducedCommitment *Commitment) { + c.Main.Set(heaviestVerifiedCandidate) + }, forkingPointBelowChainSwitchingThreshold(heaviestVerifiedCandidate)) + }), + + c.WithElements(func(candidateChain *Chain) (shutdown func()) { + return lo.Batch( + c.initHeaviestCandidateTracking(c.HeaviestClaimedCandidate, (*Chain).claimedWeight, candidateChain), + c.initHeaviestCandidateTracking(c.HeaviestVerifiedCandidate, (*Chain).verifiedWeight, candidateChain), + c.initHeaviestCandidateTracking(c.HeaviestAttestedCandidate, (*Chain).attestedWeight, candidateChain), + ) + }), + ) +} + +// initHeaviestCandidateTracking initializes the tracking of the heaviest candidates according to the given parameters. +func (c *Chains) initHeaviestCandidateTracking(candidateVar reactive.Variable[*Chain], weightVar func(*Chain) reactive.Variable[uint64], newCandidate *Chain) (unsubscribe func()) { + return weightVar(newCandidate).OnUpdate(func(_ uint64, newWeight uint64) { + // abort if the candidate is not heavier than the main chain. + if mainChain := c.Main.Get(); newCandidate == mainChain || newWeight <= mainChain.VerifiedWeight.Get() { + return + } + + // atomically replace the existing candidate if the new one is heavier. 
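// (Compute applies this replacement as a single atomic read-modify-write on the reactive variable,
// so concurrent weight updates from different candidate chains cannot interleave between the
// comparison and the assignment; an already evicted current candidate is replaced regardless of its
// weight.)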
+ candidateVar.Compute(func(currentCandidate *Chain) *Chain { + if currentCandidate != nil && !currentCandidate.IsEvicted.WasTriggered() && newWeight <= weightVar(currentCandidate).Get() { + return currentCandidate + } + + return newCandidate + }) + }, true) +} + +// deriveLatestSeenSlot derives the latest seen slot from the protocol. +func (c *Chains) deriveLatestSeenSlot(protocol *Protocol) func() { + return protocol.Engines.Main.WithNonEmptyValue(func(mainEngine *engine.Engine) (shutdown func()) { + return lo.Batch( + mainEngine.Initialized.OnTrigger(func() { + c.LatestSeenSlot.Set(mainEngine.LatestCommitment.Get().Slot()) + }), + + protocol.Network.OnBlockReceived(func(block *model.Block, src peer.ID) { + c.LatestSeenSlot.Set(block.ProtocolBlock().Header.SlotCommitmentID.Slot()) + }), + ) + }) +} + +// newChain creates a new chain instance and adds it to the set of chains. +func (c *Chains) newChain() *Chain { + chain := newChain(c) + if c.Add(chain) { + chain.IsEvicted.OnTrigger(func() { c.Delete(chain) }) + } + + return chain +} + +// increasing is a generic function that returns the maximum of the two given values. +func increasing[T cmp.Ordered](currentValue T, newValue T) T { + return max(currentValue, newValue) +} diff --git a/pkg/protocol/commitment.go b/pkg/protocol/commitment.go new file mode 100644 index 000000000..a2df4e77e --- /dev/null +++ b/pkg/protocol/commitment.go @@ -0,0 +1,301 @@ +package protocol + +import ( + "fmt" + + "github.com/iotaledger/hive.go/ds" + "github.com/iotaledger/hive.go/ds/reactive" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/log" + "github.com/iotaledger/iota-core/pkg/model" + "github.com/iotaledger/iota-core/pkg/protocol/engine" + iotago "github.com/iotaledger/iota.go/v4" +) + +// Commitment represents a commitment to a specific ledger state at a specific point in time that is part of a chain of +// commitments produced by the nodes in the network. +type Commitment struct { + // Commitment contains the underlying model.Commitment that is represented by this Commitment. + *model.Commitment + + // Parent contains the Commitment that is referenced as a parent in this Commitment. + Parent reactive.Variable[*Commitment] + + // Children contains the Commitments that reference this Commitment as a parent. + Children reactive.Set[*Commitment] + + // MainChild contains the Commitment that is the main child of this Commitment (continues the chain). + MainChild reactive.Variable[*Commitment] + + // Chain contains the Chain that this Commitment is part of. + Chain reactive.Variable[*Chain] + + // RequestAttestations contains a flag indicating if the node should request attestations for this Commitment. + RequestAttestations reactive.Variable[bool] + + // WarpSyncBlocks contains a flag indicating if the node should request the blocks of this Commitment using warp + // sync. + WarpSyncBlocks reactive.Variable[bool] + + // BlocksToWarpSync contains the set of blocks that should be requested using warp sync. + BlocksToWarpSync reactive.Variable[ds.Set[iotago.BlockID]] + + // Weight contains the weight of this Commitment (the difference between the cumulative weight of this Commitment + // and its parent). + Weight reactive.Variable[uint64] + + // AttestedWeight contains the weight of the Commitment that was attested by other nodes. + AttestedWeight reactive.Variable[uint64] + + // CumulativeAttestedWeight contains the cumulative weight of all attested Commitments up to this point. 
+ CumulativeAttestedWeight reactive.Variable[uint64] + + // IsRoot contains a flag indicating if this Commitment is the root of the Chain. + IsRoot reactive.Event + + // IsAttested contains a flag indicating if we have received attestations for this Commitment. + IsAttested reactive.Event + + // IsSynced contains a flag that indicates if a Commitment was fully downloaded and processed. + IsSynced reactive.Event + + // IsCommittable contains a flag that indicates if a Commitment is ready to be committed by the warp sync process. + IsCommittable reactive.Event + + // IsVerified contains a flag indicating if this Commitment is verified (we produced this Commitment ourselves by + // booking all the contained blocks and transactions). + IsVerified reactive.Event + + // IsAboveLatestVerifiedCommitment contains a flag indicating if this Commitment is above the latest verified + // Commitment. + IsAboveLatestVerifiedCommitment reactive.Variable[bool] + + // ReplayDroppedBlocks contains a flag indicating if we should replay the blocks that were dropped while the + // Commitment was pending. + ReplayDroppedBlocks reactive.Variable[bool] + + // IsEvicted contains a flag indicating if this Commitment was evicted from the Protocol. + IsEvicted reactive.Event + + // commitments contains a reference to the Commitments instance that this Commitment belongs to. + commitments *Commitments + + // Logger embeds a logger that can be used to log messages emitted by this Commitment. + log.Logger +} + +// NewCommitment creates a new Commitment from the given model.Commitment. +func newCommitment(commitments *Commitments, model *model.Commitment) *Commitment { + c := &Commitment{ + Commitment: model, + Parent: reactive.NewVariable[*Commitment](), + Children: reactive.NewSet[*Commitment](), + MainChild: reactive.NewVariable[*Commitment](), + Chain: reactive.NewVariable[*Chain](), + RequestAttestations: reactive.NewVariable[bool](), + WarpSyncBlocks: reactive.NewVariable[bool](), + BlocksToWarpSync: reactive.NewVariable[ds.Set[iotago.BlockID]](), + Weight: reactive.NewVariable[uint64](), + AttestedWeight: reactive.NewVariable[uint64](func(currentValue uint64, newValue uint64) uint64 { return max(currentValue, newValue) }), + CumulativeAttestedWeight: reactive.NewVariable[uint64](), + IsRoot: reactive.NewEvent(), + IsAttested: reactive.NewEvent(), + IsSynced: reactive.NewEvent(), + IsCommittable: reactive.NewEvent(), + IsVerified: reactive.NewEvent(), + IsAboveLatestVerifiedCommitment: reactive.NewVariable[bool](), + ReplayDroppedBlocks: reactive.NewVariable[bool](), + IsEvicted: reactive.NewEvent(), + commitments: commitments, + } + + shutdown := lo.Batch( + c.initLogger(), + c.initDerivedProperties(), + ) + + c.IsEvicted.OnTrigger(shutdown) + + return c +} + +// TargetEngine returns the engine that is responsible for booking the blocks of this Commitment. +func (c *Commitment) TargetEngine() *engine.Engine { + if chain := c.Chain.Get(); chain != nil { + return chain.Engine.Get() + } + + return nil +} + +// initLogger initializes the Logger of this Commitment. 
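// Every Commitment gets its own entity logger named "Slot<slot>." (see the NewEntityLogger call
// below), so all logged updates of its reactive properties are prefixed with the slot they belong to.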
+func (c *Commitment) initLogger() (shutdown func()) { + c.Logger, shutdown = c.commitments.NewEntityLogger(fmt.Sprintf("Slot%d.", c.Slot())) + + return lo.Batch( + c.Parent.LogUpdates(c, log.LevelTrace, "Parent", (*Commitment).LogName), + c.MainChild.LogUpdates(c, log.LevelTrace, "MainChild", (*Commitment).LogName), + c.Chain.LogUpdates(c, log.LevelTrace, "Chain", (*Chain).LogName), + c.RequestAttestations.LogUpdates(c, log.LevelTrace, "RequestAttestations"), + c.WarpSyncBlocks.LogUpdates(c, log.LevelTrace, "WarpSyncBlocks"), + c.Weight.LogUpdates(c, log.LevelTrace, "Weight"), + c.AttestedWeight.LogUpdates(c, log.LevelTrace, "AttestedWeight"), + c.CumulativeAttestedWeight.LogUpdates(c, log.LevelTrace, "CumulativeAttestedWeight"), + c.IsRoot.LogUpdates(c, log.LevelTrace, "IsRoot"), + c.IsAttested.LogUpdates(c, log.LevelTrace, "IsAttested"), + c.IsSynced.LogUpdates(c, log.LevelTrace, "IsSynced"), + c.IsCommittable.LogUpdates(c, log.LevelTrace, "IsCommittable"), + c.IsVerified.LogUpdates(c, log.LevelTrace, "IsVerified"), + c.ReplayDroppedBlocks.LogUpdates(c, log.LevelTrace, "ReplayDroppedBlocks"), + c.IsEvicted.LogUpdates(c, log.LevelTrace, "IsEvicted"), + + shutdown, + ) +} + +// initDerivedProperties initializes the behavior of this Commitment by setting up the relations between its properties. +func (c *Commitment) initDerivedProperties() (shutdown func()) { + return lo.Batch( + // mark commitments that are marked as root as verified + c.IsVerified.InheritFrom(c.IsRoot), + + // mark commitments that are marked as verified as attested and synced + c.IsAttested.InheritFrom(c.IsVerified), + c.IsSynced.InheritFrom(c.IsVerified), + + c.Parent.WithNonEmptyValue(func(parent *Commitment) func() { + // the weight can be fixed as a one time operation (it only relies on static information) + if parent.CumulativeWeight() < c.CumulativeWeight() { + c.Weight.Set(c.CumulativeWeight() - parent.CumulativeWeight()) + } + + return lo.Batch( + parent.deriveChildren(c), + + c.deriveChain(parent), + c.deriveCumulativeAttestedWeight(parent), + c.deriveIsAboveLatestVerifiedCommitment(parent), + + c.Chain.WithNonEmptyValue(func(chain *Chain) func() { + return lo.Batch( + c.deriveRequestAttestations(chain, parent), + + // only start requesting blocks once the engine is ready + chain.WithInitializedEngine(func(_ *engine.Engine) (shutdown func()) { + return c.deriveWarpSyncBlocks(chain, parent) + }), + ) + }), + ) + }), + + c.Chain.WithNonEmptyValue(func(chain *Chain) func() { + return lo.Batch( + chain.addCommitment(c), + + c.deriveReplayDroppedBlocks(chain), + ) + }), + ) +} + +// deriveChildren derives the children of this Commitment by adding the given child to the Children set. +func (c *Commitment) deriveChildren(child *Commitment) (unregisterChild func()) { + c.MainChild.Compute(func(mainChild *Commitment) *Commitment { + if !c.Children.Add(child) || mainChild != nil { + return mainChild + } + + return child + }) + + return func() { + c.MainChild.Compute(func(mainChild *Commitment) *Commitment { + if !c.Children.Delete(child) || child != mainChild { + return mainChild + } + + return lo.Return1(c.Children.Any()) + }) + } +} + +// deriveChain derives the Chain of this Commitment which is either inherited from the parent if we are the main child +// or a newly created chain. 
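// Informally, the derivation below distinguishes three cases:
//   - the commitment is the root: keep whatever chain was set from the outside,
//   - it is not its parent's main child: lazily spawn a dedicated chain with this commitment as its forking point,
//   - it is its parent's main child: inherit the parent's chain and evict a previously spawned fork, if any.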
+func (c *Commitment) deriveChain(parent *Commitment) func() { + return c.Chain.DeriveValueFrom(reactive.NewDerivedVariable3(func(currentChain *Chain, isRoot bool, mainChild *Commitment, parentChain *Chain) *Chain { + // do not adjust the chain of the root commitment (it is set from the outside) + if isRoot { + return currentChain + } + + // if we are not the main child of our parent, we spawn a new chain + if c != mainChild { + if currentChain == nil { + currentChain = c.commitments.protocol.Chains.newChain() + currentChain.ForkingPoint.Set(c) + } + + return currentChain + } + + // if we are the main child of our parent, and our chain is not the parent chain (that we are supposed to + // inherit), then we evict our current chain (we will spawn a new one if we ever change back to not being the + // main child) + if currentChain != nil && currentChain != parentChain { + currentChain.IsEvicted.Trigger() + } + + return parentChain + }, c.IsRoot, parent.MainChild, parent.Chain, c.Chain.Get())) +} + +// deriveCumulativeAttestedWeight derives the CumulativeAttestedWeight of this Commitment which is the sum of the +// parent's CumulativeAttestedWeight and the AttestedWeight of this Commitment. +func (c *Commitment) deriveCumulativeAttestedWeight(parent *Commitment) func() { + return c.CumulativeAttestedWeight.DeriveValueFrom(reactive.NewDerivedVariable2(func(_ uint64, parentCumulativeAttestedWeight uint64, attestedWeight uint64) uint64 { + return parentCumulativeAttestedWeight + attestedWeight + }, parent.CumulativeAttestedWeight, c.AttestedWeight)) +} + +// deriveIsAboveLatestVerifiedCommitment derives the IsAboveLatestVerifiedCommitment flag of this Commitment which is +// true if the parent is already above the latest verified Commitment or if the parent is verified and we are not. +func (c *Commitment) deriveIsAboveLatestVerifiedCommitment(parent *Commitment) func() { + return c.IsAboveLatestVerifiedCommitment.DeriveValueFrom(reactive.NewDerivedVariable3(func(_ bool, parentAboveLatestVerifiedCommitment bool, parentIsVerified bool, isVerified bool) bool { + return parentAboveLatestVerifiedCommitment || (parentIsVerified && !isVerified) + }, parent.IsAboveLatestVerifiedCommitment, parent.IsVerified, c.IsVerified)) +} + +// deriveRequestAttestations derives the RequestAttestations flag of this Commitment which is true if our Chain is +// requesting attestations (while not having an engine), and we are directly above the latest attested Commitment. +func (c *Commitment) deriveRequestAttestations(chain *Chain, parent *Commitment) func() { + return c.RequestAttestations.DeriveValueFrom(reactive.NewDerivedVariable4(func(_ bool, startEngine bool, verifyAttestations bool, parentIsAttested bool, isAttested bool) bool { + return !startEngine && verifyAttestations && parentIsAttested && !isAttested + }, chain.StartEngine, chain.RequestAttestations, parent.IsAttested, c.IsAttested)) +} + +// deriveWarpSyncBlocks derives the WarpSyncBlocks flag of this Commitment which is true if our Chain is requesting +// warp sync, and we are directly above the latest commitment that is synced (has downloaded everything).
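// In other words, the flag marks the current warp sync frontier: a commitment is only requested via
// warp sync while its chain is in warp sync mode, its parent is already synced, and it is not synced
// itself yet.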
+func (c *Commitment) deriveWarpSyncBlocks(chain *Chain, parent *Commitment) func() { + return c.WarpSyncBlocks.DeriveValueFrom(reactive.NewDerivedVariable3(func(_ bool, warpSyncMode bool, parentIsSynced bool, isSynced bool) bool { + return warpSyncMode && parentIsSynced && !isSynced + }, chain.WarpSyncMode, parent.IsSynced, c.IsSynced)) +} + +// deriveReplayDroppedBlocks derives the ReplayDroppedBlocks flag of this Commitment which is true if our Chain has an +// engine, is no longer requesting warp sync, and we are above the latest verified Commitment. +func (c *Commitment) deriveReplayDroppedBlocks(chain *Chain) func() { + return c.ReplayDroppedBlocks.DeriveValueFrom(reactive.NewDerivedVariable3(func(_ bool, engineInstance *engine.Engine, warpSyncing bool, isAboveLatestVerifiedCommitment bool) bool { + return engineInstance != nil && !warpSyncing && isAboveLatestVerifiedCommitment + }, chain.Engine, chain.WarpSyncMode, c.IsAboveLatestVerifiedCommitment)) +} + +// forceChain forces the Chain of this Commitment to the given Chain by promoting it to the main child of its parent if +// the parent is on the target Chain. +func (c *Commitment) forceChain(targetChain *Chain) { + if currentChain := c.Chain.Get(); currentChain != targetChain { + if parent := c.Parent.Get(); parent != nil && parent.Chain.Get() == targetChain { + parent.MainChild.Set(c) + } + } +} diff --git a/pkg/protocol/commitment_verifier.go b/pkg/protocol/commitment_verifier.go index 371eaf8a0..4a830215b 100644 --- a/pkg/protocol/commitment_verifier.go +++ b/pkg/protocol/commitment_verifier.go @@ -5,6 +5,7 @@ import ( "github.com/iotaledger/hive.go/ds" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/kvstore/mapdb" + "github.com/iotaledger/hive.go/runtime/syncutils" "github.com/iotaledger/iota-core/pkg/model" "github.com/iotaledger/iota-core/pkg/protocol/engine" "github.com/iotaledger/iota-core/pkg/protocol/engine/accounts" @@ -16,18 +17,18 @@ type CommitmentVerifier struct { engine *engine.Engine lastCommonSlotBeforeFork iotago.SlotIndex - // cumulativeWeight is the cumulative weight of the verified commitments. It is updated after each verification. - cumulativeWeight uint64 - // epoch is the epoch of the currently verified commitment. Initially, it is set to the epoch of the last common commitment before the fork. epoch iotago.EpochIndex // validatorAccountsData is the accounts data of the validators for the current epoch as known at lastCommonSlotBeforeFork. // Initially, it is set to the accounts data of the validators for the epoch of the last common commitment before the fork. validatorAccountsData map[iotago.AccountID]*accounts.AccountData + + // mutex is used to synchronize access to validatorAccountsData and epoch. 
+ mutex syncutils.RWMutex } -func NewCommitmentVerifier(mainEngine *engine.Engine, lastCommonCommitmentBeforeFork *model.Commitment) (*CommitmentVerifier, error) { +func newCommitmentVerifier(mainEngine *engine.Engine, lastCommonCommitmentBeforeFork *model.Commitment) (*CommitmentVerifier, error) { apiForSlot := mainEngine.APIForSlot(lastCommonCommitmentBeforeFork.Slot()) epoch := apiForSlot.TimeProvider().EpochFromSlot(lastCommonCommitmentBeforeFork.Slot()) @@ -48,14 +49,13 @@ func NewCommitmentVerifier(mainEngine *engine.Engine, lastCommonCommitmentBefore return &CommitmentVerifier{ engine: mainEngine, - cumulativeWeight: lastCommonCommitmentBeforeFork.CumulativeWeight(), lastCommonSlotBeforeFork: lastCommonCommitmentBeforeFork.Slot(), epoch: epoch, validatorAccountsData: validatorAccountsDataAtForkingPoint, }, nil } -func (c *CommitmentVerifier) verifyCommitment(commitment *model.Commitment, attestations []*iotago.Attestation, merkleProof *merklehasher.Proof[iotago.Identifier]) (blockIDsFromAttestations iotago.BlockIDs, cumulativeWeight uint64, err error) { +func (c *CommitmentVerifier) verifyCommitment(commitment *Commitment, attestations []*iotago.Attestation, merkleProof *merklehasher.Proof[iotago.Identifier]) (blockIDsFromAttestations iotago.BlockIDs, cumulativeWeight uint64, err error) { // 1. Verify that the provided attestations are indeed the ones that were included in the commitment. tree := ads.NewMap[iotago.Identifier](mapdb.NewMapDB(), iotago.Identifier.Bytes, @@ -80,6 +80,7 @@ func (c *CommitmentVerifier) verifyCommitment(commitment *model.Commitment, atte // This is necessary because the committee might have rotated at the epoch boundary and different validators might be part of it. // In case anything goes wrong we keep using previously known accounts data (initially set to the accounts data // of the validators for the epoch of the last common commitment before the fork). + c.mutex.Lock() apiForSlot := c.engine.APIForSlot(commitment.Slot()) commitmentEpoch := apiForSlot.TimeProvider().EpochFromSlot(commitment.Slot()) if commitmentEpoch > c.epoch { @@ -96,6 +97,7 @@ func (c *CommitmentVerifier) verifyCommitment(commitment *model.Commitment, atte } } } + c.mutex.Unlock() // 3. Verify attestations. blockIDs, seatCount, err := c.verifyAttestations(attestations) @@ -103,7 +105,7 @@ func (c *CommitmentVerifier) verifyCommitment(commitment *model.Commitment, atte return nil, 0, ierrors.Wrapf(err, "error validating attestations for commitment %s", commitment.ID()) } - // 4. Verify that calculated cumulative weight from attestations is lower or equal to cumulative weight of commitment. + // 4. Verify that calculated weight from attestations is lower or equal to weight of commitment. // This is necessary due to public key changes of validators in the window of forking point and the current state of // the other chain (as validators could have added/removed public keys that we don't know about yet). // @@ -125,15 +127,17 @@ func (c *CommitmentVerifier) verifyCommitment(commitment *model.Commitment, atte // fabricate attestations and thus a theoretically heavier chain (solely when looking on the chain backed by attestations) // than it actually is. Nodes might consider to switch to this chain, even though it is invalid which will be discovered // before the candidate chain/engine is activated (it will never get heavier than the current chain). 
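// Note on the change below: the verifier no longer accumulates weight across all verified
// commitments; each commitment is now checked in isolation by comparing the attesting seats against
// the weight delta that the commitment itself claims (its Weight, i.e. the difference between its
// CumulativeWeight and its parent's). For example, if the parent claims a cumulative weight of 90
// and this commitment claims 100, at most 10 attesting seats are accepted for it.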
- c.cumulativeWeight += seatCount - if c.cumulativeWeight > commitment.CumulativeWeight() { - return nil, 0, ierrors.Errorf("invalid cumulative weight for commitment %s: expected %d, got %d", commitment.ID(), commitment.CumulativeWeight(), c.cumulativeWeight) + if seatCount > commitment.Weight.Get() { + return nil, 0, ierrors.Errorf("invalid cumulative weight for commitment %s: expected %d, got %d", commitment.ID(), commitment.CumulativeWeight(), seatCount) } - return blockIDs, c.cumulativeWeight, nil + return blockIDs, seatCount, nil } func (c *CommitmentVerifier) verifyAttestations(attestations []*iotago.Attestation) (iotago.BlockIDs, uint64, error) { + c.mutex.RLock() + defer c.mutex.RUnlock() + visitedIdentities := ds.NewSet[iotago.AccountID]() var blockIDs iotago.BlockIDs var seatCount uint64 diff --git a/pkg/protocol/commitments.go b/pkg/protocol/commitments.go new file mode 100644 index 000000000..857b1c00d --- /dev/null +++ b/pkg/protocol/commitments.go @@ -0,0 +1,244 @@ +package protocol + +import ( + "github.com/iotaledger/hive.go/ds/reactive" + "github.com/iotaledger/hive.go/ds/shrinkingmap" + "github.com/iotaledger/hive.go/ierrors" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/log" + "github.com/iotaledger/iota-core/pkg/core/promise" + "github.com/iotaledger/iota-core/pkg/model" + "github.com/iotaledger/iota-core/pkg/protocol/engine" + iotago "github.com/iotaledger/iota.go/v4" +) + +// Commitments is a subcomponent of the protocol that exposes the commitments that are managed by the protocol and that +// are either published from the network or created by an engine of the node. +type Commitments struct { + // Set contains all non-evicted commitments that are managed by the protocol. + reactive.Set[*Commitment] + + // Root contains the root commitment. + Root reactive.Variable[*Commitment] + + // protocol contains a reference to the Protocol instance that this component belongs to. + protocol *Protocol + + // cachedRequests contains Promise instances for all non-evicted commitments that were requested by the Protocol. + // It acts as a cache and a way to address commitments generically even if they are still unsolid. + cachedRequests *shrinkingmap.ShrinkingMap[iotago.CommitmentID, *promise.Promise[*Commitment]] + + // Logger contains a reference to the logger that is used by this component. + log.Logger +} + +// newCommitments creates a new commitments instance for the given protocol. +func newCommitments(protocol *Protocol) *Commitments { + c := &Commitments{ + Set: reactive.NewSet[*Commitment](), + Root: reactive.NewVariable[*Commitment](), + protocol: protocol, + cachedRequests: shrinkingmap.New[iotago.CommitmentID, *promise.Promise[*Commitment]](), + } + + shutdown := lo.Batch( + c.initLogger(), + c.initEngineCommitmentSynchronization(), + ) + + protocol.Shutdown.OnTrigger(shutdown) + + return c +} + +// Get returns the Commitment for the given commitmentID. If the Commitment is not available yet, it will return an +// ErrorCommitmentNotFound. It is possible to trigger a request for the Commitment by passing true as the second +// argument. 
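// A minimal call sketch (assuming the Commitments subcomponent is reachable from the Protocol
// instance; identifiers outside this diff are illustrative):
//
//	commitment, err := protocol.Commitments.Get(commitmentID, true) // true => request it from the network if unknown
//	if err != nil {
//		// not resolved yet (ErrorCommitmentNotFound) or the slot was already evicted
//	}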
+func (c *Commitments) Get(commitmentID iotago.CommitmentID, requestIfMissing ...bool) (commitment *Commitment, err error) { + cachedRequest, exists := c.cachedRequests.Get(commitmentID) + if !exists && lo.First(requestIfMissing) { + if cachedRequest = c.cachedRequest(commitmentID, true); cachedRequest.WasRejected() { + return nil, ierrors.Wrapf(cachedRequest.Err(), "failed to request commitment %s", commitmentID) + } + } + + if cachedRequest == nil || !cachedRequest.WasCompleted() { + return nil, ErrorCommitmentNotFound + } + + return cachedRequest.Result(), cachedRequest.Err() +} + +// initLogger initializes the logger for this component. +func (c *Commitments) initLogger() (shutdown func()) { + c.Logger, shutdown = c.protocol.NewChildLogger("Commitments") + + return lo.Batch( + c.Root.LogUpdates(c, log.LevelTrace, "Root", (*Commitment).LogName), + + shutdown, + ) +} + +// initEngineCommitmentSynchronization initializes the synchronization of commitments that are published by the engines. +func (c *Commitments) initEngineCommitmentSynchronization() func() { + return c.protocol.Constructed.WithNonEmptyValue(func(_ bool) (shutdown func()) { + return lo.Batch( + // advance the root commitment of the main chain + c.protocol.Chains.Main.WithNonEmptyValue(func(mainChain *Chain) (shutdown func()) { + return mainChain.WithInitializedEngine(func(mainEngine *engine.Engine) (shutdown func()) { + return c.publishRootCommitment(mainChain, mainEngine) + }) + }), + + // publish the commitments that are produced by the engines + c.protocol.Chains.WithInitializedEngines(func(chain *Chain, engine *engine.Engine) (shutdown func()) { + return c.publishEngineCommitments(chain, engine) + }), + ) + }) +} + +// publishRootCommitment publishes the root commitment of the main engine. +func (c *Commitments) publishRootCommitment(mainChain *Chain, mainEngine *engine.Engine) func() { + return mainEngine.RootCommitment.OnUpdate(func(_ *model.Commitment, newRootCommitmentModel *model.Commitment) { + newRootCommitment, published, err := c.publishCommitmentModel(newRootCommitmentModel) + if err != nil { + c.LogError("failed to publish new root commitment", "id", newRootCommitmentModel.ID(), "error", err) + + return + } + + newRootCommitment.IsRoot.Set(true) + if published { + newRootCommitment.Chain.Set(mainChain) + } + + // TODO: USE SET HERE (debug eviction issues) + mainChain.ForkingPoint.DefaultTo(newRootCommitment) + + c.Root.Set(newRootCommitment) + }) +} + +// publishEngineCommitments publishes the commitments of the given engine to its chain. 
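// Starting from the chain's last common slot, every commitment committed by the engine is loaded
// slot by slot, published as a Commitment instance, marked as verified with its attested weight set
// to its claimed weight (we always trust our own engine), and forced onto the engine's chain.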
+func (c *Commitments) publishEngineCommitments(chain *Chain, engine *engine.Engine) (shutdown func()) { + latestPublishedSlot := chain.LastCommonSlot() + + return engine.LatestCommitment.OnUpdate(func(_ *model.Commitment, latestCommitment *model.Commitment) { + loadModel := func(slot iotago.SlotIndex) (*model.Commitment, error) { + // prevent disk access if possible + if slot == latestCommitment.Slot() { + return latestCommitment, nil + } + + return engine.Storage.Commitments().Load(slot) + } + + for ; latestPublishedSlot < latestCommitment.Slot(); latestPublishedSlot++ { + // retrieve the model to publish + modelToPublish, err := loadModel(latestPublishedSlot + 1) + if err != nil { + c.LogError("failed to load commitment to publish from engine", "slot", latestPublishedSlot+1, "err", err) + + return + } + + // publish the model + publishedCommitment, _, err := c.publishCommitmentModel(modelToPublish) + if err != nil { + c.LogError("failed to publish commitment from engine", "engine", engine.LogName(), "commitment", modelToPublish, "err", err) + + return + } + + // mark it as produced by ourselves and force it to be on the right chain (in case our chain produced a + // different commitment than the one we erroneously expected it to be - we always trust our engine most). + publishedCommitment.AttestedWeight.Set(publishedCommitment.Weight.Get()) + publishedCommitment.IsVerified.Set(true) + publishedCommitment.forceChain(chain) + } + }) +} + +// publishCommitmentModel publishes the given commitment model as a Commitment instance. If the Commitment was already +// published, it will return the existing Commitment instance. Otherwise, it will create a new Commitment instance and +// resolve the Promise that was created for it. +func (c *Commitments) publishCommitmentModel(model *model.Commitment) (commitment *Commitment, published bool, err error) { + // retrieve promise and abort if it was already rejected + cachedRequest := c.cachedRequest(model.ID()) + if cachedRequest.WasRejected() { + return nil, false, ierrors.Wrapf(cachedRequest.Err(), "failed to request commitment %s", model.ID()) + } + + // otherwise try to resolve it ourselves and determine if we were the goroutine that resolved it + commitment = newCommitment(c, model) + cachedRequest.Resolve(commitment).OnSuccess(func(resolvedCommitment *Commitment) { + if published = resolvedCommitment == commitment; !published { + commitment = resolvedCommitment + } + }) + + return commitment, published, nil +} + +// cachedRequest returns a singleton Promise for the given commitmentID. If the Promise does not exist yet, it will be +// created and optionally requested from the network if missing. Once the promise is resolved, the Commitment is +// initialized and provided to the consumers.
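// Lifecycle of a cached request (as wired below): the promise is created at most once per
// commitment ID, a request ticker is optionally started for missing commitments, a successful
// resolution solidifies the parent and adds the commitment to the set, and eviction of the slot
// rejects the promise and removes it from the cache.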
+func (c *Commitments) cachedRequest(commitmentID iotago.CommitmentID, requestIfMissing ...bool) *promise.Promise[*Commitment] { + // handle evicted slots + slotEvicted := c.protocol.EvictionEvent(commitmentID.Index()) + if slotEvicted.WasTriggered() && c.protocol.LastEvictedSlot().Get() != 0 { + return promise.New[*Commitment]().Reject(ErrorSlotEvicted) + } + + // create a new promise or return the existing one + cachedRequest, promiseCreated := c.cachedRequests.GetOrCreate(commitmentID, lo.NoVariadic(promise.New[*Commitment])) + if !promiseCreated { + return cachedRequest + } + + // start ticker if requested + if lo.First(requestIfMissing) { + c.protocol.CommitmentsProtocol.StartTicker(cachedRequest, commitmentID) + } + + // handle successful resolutions + cachedRequest.OnSuccess(func(commitment *Commitment) { + c.initCommitment(commitment, slotEvicted) + }) + + // handle failed resolutions + cachedRequest.OnError(func(err error) { + c.LogDebug("request failed", "commitmentID", commitmentID, "error", err) + }) + + // tear down the promise once the slot is evicted + slotEvicted.OnTrigger(func() { + c.cachedRequests.Delete(commitmentID) + + cachedRequest.Reject(ErrorSlotEvicted) + }) + + return cachedRequest +} + +// initCommitment initializes the given commitment. +func (c *Commitments) initCommitment(commitment *Commitment, slotEvicted reactive.Event) { + commitment.LogDebug("created", "id", commitment.ID()) + + // solidify the parent of the commitment + c.cachedRequest(commitment.PreviousCommitmentID(), true).OnSuccess(func(parent *Commitment) { + commitment.Parent.Set(parent) + }) + + // add commitment to the set + c.Add(commitment) + + // tear down the commitment once the slot is evicted + slotEvicted.OnTrigger(func() { + c.Delete(commitment) + + commitment.IsEvicted.Trigger() + }) +} diff --git a/pkg/protocol/engine/blockdag/inmemoryblockdag/blockdag.go b/pkg/protocol/engine/blockdag/inmemoryblockdag/blockdag.go index 7eba033e6..6a9c3cdf4 100644 --- a/pkg/protocol/engine/blockdag/inmemoryblockdag/blockdag.go +++ b/pkg/protocol/engine/blockdag/inmemoryblockdag/blockdag.go @@ -43,7 +43,7 @@ func NewProvider(opts ...options.Option[BlockDAG]) module.Provider[*engine.Engin return module.Provide(func(e *engine.Engine) blockdag.BlockDAG { b := New(e.Workers.CreateGroup("BlockDAG"), int(e.Storage.Settings().APIProvider().CommittedAPI().ProtocolParameters().MaxCommittableAge())*2, e.EvictionState, e.BlockCache, e.ErrorHandler("blockdag"), opts...) 
- e.HookConstructed(func() { + e.Constructed.OnTrigger(func() { wp := b.workers.CreatePool("BlockDAG.Attach", workerpool.WithWorkerCount(2)) e.Events.PreSolidFilter.BlockPreAllowed.Hook(func(block *model.Block) { diff --git a/pkg/protocol/engine/blocks/block.go b/pkg/protocol/engine/blocks/block.go index 0dc76a6db..e1263a460 100644 --- a/pkg/protocol/engine/blocks/block.go +++ b/pkg/protocol/engine/blocks/block.go @@ -49,7 +49,7 @@ type Block struct { dropped bool // Notarization - notarized reactive.Variable[bool] + notarized reactive.Event mutex syncutils.RWMutex @@ -88,7 +88,7 @@ func NewBlock(data *model.Block) *Block { booked: reactive.NewVariable[bool](), accepted: reactive.NewVariable[bool](), weightPropagated: reactive.NewVariable[bool](), - notarized: reactive.NewVariable[bool](), + notarized: reactive.NewEvent(), workScore: data.WorkScore(), } } @@ -112,7 +112,7 @@ func NewRootBlock(blockID iotago.BlockID, commitmentID iotago.CommitmentID, issu preAccepted: true, accepted: reactive.NewVariable[bool](), weightPropagated: reactive.NewVariable[bool](), - notarized: reactive.NewVariable[bool](), + notarized: reactive.NewEvent(), scheduled: true, } @@ -140,7 +140,7 @@ func NewMissingBlock(blockID iotago.BlockID) *Block { booked: reactive.NewVariable[bool](), accepted: reactive.NewVariable[bool](), weightPropagated: reactive.NewVariable[bool](), - notarized: reactive.NewVariable[bool](), + notarized: reactive.NewEvent(), } } @@ -622,7 +622,7 @@ func (b *Block) SetWeightPropagated() (wasUpdated bool) { return !b.weightPropagated.Set(true) } -func (b *Block) Notarized() reactive.Variable[bool] { +func (b *Block) Notarized() reactive.Event { return b.notarized } @@ -631,7 +631,7 @@ func (b *Block) IsNotarized() (isBooked bool) { } func (b *Block) SetNotarized() (wasUpdated bool) { - return !b.notarized.Set(true) + return b.notarized.Trigger() } func (b *Block) String() string { diff --git a/pkg/protocol/engine/booker/inmemorybooker/booker.go b/pkg/protocol/engine/booker/inmemorybooker/booker.go index 6d420ea69..040775508 100644 --- a/pkg/protocol/engine/booker/inmemorybooker/booker.go +++ b/pkg/protocol/engine/booker/inmemorybooker/booker.go @@ -40,7 +40,7 @@ type Booker struct { func NewProvider(opts ...options.Option[Booker]) module.Provider[*engine.Engine, booker.Booker] { return module.Provide(func(e *engine.Engine) booker.Booker { b := New(e, e.BlockCache, e.ErrorHandler("booker"), opts...) 
- e.HookConstructed(func() { + e.Constructed.OnTrigger(func() { b.ledger = e.Ledger b.ledger.HookConstructed(func() { b.spendDAG = b.ledger.SpendDAG() diff --git a/pkg/protocol/engine/clock/blocktime/clock.go b/pkg/protocol/engine/clock/blocktime/clock.go index 3c2279d01..506387df0 100644 --- a/pkg/protocol/engine/clock/blocktime/clock.go +++ b/pkg/protocol/engine/clock/blocktime/clock.go @@ -40,7 +40,7 @@ func NewProvider(opts ...options.Option[Clock]) module.Provider[*engine.Engine, confirmedTime: NewRelativeTime(), workerPool: e.Workers.CreatePool("Clock", workerpool.WithWorkerCount(1), workerpool.WithCancelPendingTasksOnShutdown(true), workerpool.WithPanicOnSubmitAfterShutdown(true)), }, opts, func(c *Clock) { - e.HookConstructed(func() { + e.Constructed.OnTrigger(func() { latestCommitmentIndex := e.Storage.Settings().LatestCommitment().Slot() c.acceptedTime.Set(e.APIForSlot(latestCommitmentIndex).TimeProvider().SlotEndTime(latestCommitmentIndex)) @@ -72,7 +72,7 @@ func NewProvider(opts ...options.Option[Clock]) module.Provider[*engine.Engine, )) }) - e.HookStopped(c.TriggerStopped) + e.Stopped.OnTrigger(c.TriggerStopped) }, (*Clock).TriggerConstructed) }) } diff --git a/pkg/protocol/engine/committed_slot_api.go b/pkg/protocol/engine/committed_slot_api.go index ad70e4785..64ed9a2a5 100644 --- a/pkg/protocol/engine/committed_slot_api.go +++ b/pkg/protocol/engine/committed_slot_api.go @@ -51,6 +51,8 @@ func (c *CommittedSlotAPI) Roots() (committedRoots *iotago.Roots, err error) { roots, _, err := rootsStorage.Load(c.CommitmentID) if err != nil { return nil, ierrors.Wrapf(err, "failed to load roots for slot %d", c.CommitmentID) + } else if roots == nil { + return nil, ierrors.Errorf("roots for slot %d are not known, yet", c.CommitmentID) } return roots, nil diff --git a/pkg/protocol/engine/congestioncontrol/scheduler/drr/scheduler.go b/pkg/protocol/engine/congestioncontrol/scheduler/drr/scheduler.go index f4bba78bf..3bc5ac351 100644 --- a/pkg/protocol/engine/congestioncontrol/scheduler/drr/scheduler.go +++ b/pkg/protocol/engine/congestioncontrol/scheduler/drr/scheduler.go @@ -57,7 +57,7 @@ func NewProvider(opts ...options.Option[Scheduler]) module.Provider[*engine.Engi s.errorHandler = e.ErrorHandler("scheduler") s.basicBuffer = NewBufferQueue() - e.HookConstructed(func() { + e.Constructed.OnTrigger(func() { s.latestCommittedSlot = func() iotago.SlotIndex { return e.Storage.Settings().LatestCommitment().Slot() } @@ -121,7 +121,7 @@ func NewProvider(opts ...options.Option[Scheduler]) module.Provider[*engine.Engi s.removeIssuer(accountID, ierrors.New("account destroyed")) }) - e.HookInitialized(s.Start) + e.Initialized.OnTrigger(s.Start) }) return s diff --git a/pkg/protocol/engine/congestioncontrol/scheduler/passthrough/scheduler.go b/pkg/protocol/engine/congestioncontrol/scheduler/passthrough/scheduler.go index 2b8f9183e..f748777b1 100644 --- a/pkg/protocol/engine/congestioncontrol/scheduler/passthrough/scheduler.go +++ b/pkg/protocol/engine/congestioncontrol/scheduler/passthrough/scheduler.go @@ -17,7 +17,7 @@ type Scheduler struct { func NewProvider() module.Provider[*engine.Engine, scheduler.Scheduler] { return module.Provide(func(e *engine.Engine) scheduler.Scheduler { s := New() - e.HookConstructed(func() { + e.Constructed.OnTrigger(func() { e.Events.Scheduler.LinkTo(s.events) s.TriggerConstructed() e.Events.Booker.BlockBooked.Hook(func(block *blocks.Block) { diff --git a/pkg/protocol/engine/consensus/slotgadget/totalweightslotgadget/gadget.go 
b/pkg/protocol/engine/consensus/slotgadget/totalweightslotgadget/gadget.go index 70da1be59..2ed47a74d 100644 --- a/pkg/protocol/engine/consensus/slotgadget/totalweightslotgadget/gadget.go +++ b/pkg/protocol/engine/consensus/slotgadget/totalweightslotgadget/gadget.go @@ -47,7 +47,7 @@ func NewProvider(opts ...options.Option[Gadget]) module.Provider[*engine.Engine, e.Events.SlotGadget.LinkTo(g.events) - e.HookConstructed(func() { + e.Constructed.OnTrigger(func() { g.seatManager = e.SybilProtection.SeatManager() g.TriggerConstructed() @@ -60,7 +60,7 @@ func NewProvider(opts ...options.Option[Gadget]) module.Provider[*engine.Engine, } } - e.HookInitialized(func() { + e.Initialized.OnTrigger(func() { // Can't use setter here as it has a side effect. func() { g.mutex.Lock() diff --git a/pkg/protocol/engine/engine.go b/pkg/protocol/engine/engine.go index e1f6fdea2..880816d87 100644 --- a/pkg/protocol/engine/engine.go +++ b/pkg/protocol/engine/engine.go @@ -1,7 +1,6 @@ package engine import ( - "fmt" "io" "os" "path/filepath" @@ -10,8 +9,10 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/iotaledger/hive.go/core/eventticker" + "github.com/iotaledger/hive.go/ds/reactive" "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/log" "github.com/iotaledger/hive.go/runtime/event" "github.com/iotaledger/hive.go/runtime/module" "github.com/iotaledger/hive.go/runtime/options" @@ -67,6 +68,13 @@ type Engine struct { SyncManager syncmanager.SyncManager UpgradeOrchestrator upgrade.Orchestrator + // RootCommitment contains the earliest commitment that blocks we are solidifying will refer to, and is mainly + // used to determine the cut-off point for the actively managed commitments in the protocol. + RootCommitment reactive.Variable[*model.Commitment] + + // LatestCommitment contains the latest commitment that we have produced.
+ LatestCommitment reactive.Variable[*model.Commitment] + Workers *workerpool.Group errorHandler func(error) @@ -75,19 +83,17 @@ type Engine struct { chainID iotago.CommitmentID mutex syncutils.RWMutex - accessMutex syncutils.RWMutex - optsSnapshotPath string optsEntryPointsDepth int optsSnapshotDepth int optsBlockRequester []options.Option[eventticker.EventTicker[iotago.SlotIndex, iotago.BlockID]] - module.Module + *module.ReactiveModule } func New( + logger log.Logger, workers *workerpool.Group, - errorHandler func(error), storageInstance *storage.Storage, preSolidFilterProvider module.Provider[*Engine, presolidfilter.PreSolidFilter], postSolidFilterProvider module.Provider[*Engine, postsolidfilter.PostSolidFilter], @@ -108,25 +114,30 @@ func New( syncManagerProvider module.Provider[*Engine, syncmanager.SyncManager], opts ...options.Option[Engine], ) (engine *Engine) { - var needsToImportSnapshot bool + var importSnapshot bool var file *os.File var fileErr error return options.Apply( &Engine{ - Events: NewEvents(), - Storage: storageInstance, - EvictionState: eviction.NewState(storageInstance.LatestNonEmptySlot(), storageInstance.RootBlocks, storageInstance.GenesisRootBlockID), - Workers: workers, - errorHandler: errorHandler, + Events: NewEvents(), + Storage: storageInstance, + EvictionState: eviction.NewState(storageInstance.LatestNonEmptySlot(), storageInstance.RootBlocks, storageInstance.GenesisRootBlockID), + RootCommitment: reactive.NewVariable[*model.Commitment](), + LatestCommitment: reactive.NewVariable[*model.Commitment](), + Workers: workers, optsSnapshotPath: "snapshot.bin", optsSnapshotDepth: 5, }, opts, func(e *Engine) { - needsToImportSnapshot = !e.Storage.Settings().IsSnapshotImported() && e.optsSnapshotPath != "" + e.ReactiveModule = e.initReactiveModule(logger) + + e.errorHandler = func(err error) { + e.LogTrace("engine error", "err", err) + } // Import the settings from the snapshot file if needed. - if needsToImportSnapshot { + if importSnapshot = !e.Storage.Settings().IsSnapshotImported() && e.optsSnapshotPath != ""; importSnapshot { file, fileErr = os.Open(e.optsSnapshotPath) if fileErr != nil { panic(ierrors.Wrap(fileErr, "failed to open snapshot file")) @@ -138,7 +149,11 @@ func New( } }, func(e *Engine) { - // Setup all components + // setup reactive variables + e.initRootCommitment() + e.initLatestCommitment() + + // setup all components e.BlockCache = blocks.New(e.EvictionState, e.Storage.Settings().APIProvider()) e.BlockRequester = eventticker.New(e.optsBlockRequester...) e.SybilProtection = sybilProtectionProvider(e) @@ -164,18 +179,18 @@ func New( (*Engine).setupBlockRequester, (*Engine).setupPruning, (*Engine).acceptanceHandler, - (*Engine).TriggerConstructed, func(e *Engine) { + e.Constructed.Trigger() + // Make sure that we have the protocol parameters for the latest supported iota.go protocol version of the software. // If not the user needs to update the protocol parameters file. // This can only happen after a user updated the node version and the new protocol version is not yet active. if _, err := e.APIForVersion(iotago.LatestProtocolVersion()); err != nil { panic(ierrors.Wrap(err, "no protocol parameters for latest protocol version found")) } - }, - func(e *Engine) { + // Import the rest of the snapshot if needed. 
- if needsToImportSnapshot { + if importSnapshot { if err := e.ImportContents(file); err != nil { panic(ierrors.Wrap(err, "failed to import snapshot contents")) } @@ -211,11 +226,11 @@ func New( e.Reset() } + + e.Initialized.Trigger() + + e.LogDebug("initialized", "settings", e.Storage.Settings().String()) }, - func(e *Engine) { - fmt.Println("Engine Settings", e.Storage.Settings().String()) - }, - (*Engine).TriggerInitialized, ) } @@ -226,11 +241,7 @@ func (e *Engine) ProcessBlockFromPeer(block *model.Block, source peer.ID) { // Reset resets the component to a clean state as if it was created at the last commitment. func (e *Engine) Reset() { - e.accessMutex.Lock() - defer e.accessMutex.Unlock() - - // Waits for all pending tasks to be processed. - e.Workers.WaitChildren() + e.LogDebug("resetting", "target-slot", e.Storage.Settings().LatestCommitment().Slot()) // Reset should be performed in the same order as Shutdown. e.BlockRequester.Clear() @@ -252,7 +263,6 @@ func (e *Engine) Reset() { e.Retainer.Reset() e.EvictionState.Reset() e.BlockCache.Reset() - e.Storage.Reset() latestCommittedSlot := e.Storage.Settings().LatestCommitment().Slot() @@ -260,36 +270,6 @@ func (e *Engine) Reset() { e.Clock.Reset(latestCommittedTime) } -func (e *Engine) Shutdown() { - if !e.WasShutdown() { - e.TriggerShutdown() - - // Shutdown should be performed in the reverse dataflow order. - e.BlockRequester.Shutdown() - e.Scheduler.Shutdown() - e.TipSelection.Shutdown() - e.TipManager.Shutdown() - e.Attestations.Shutdown() - e.SyncManager.Shutdown() - e.Notarization.Shutdown() - e.Clock.Shutdown() - e.SlotGadget.Shutdown() - e.BlockGadget.Shutdown() - e.UpgradeOrchestrator.Shutdown() - e.SybilProtection.Shutdown() - e.Booker.Shutdown() - e.Ledger.Shutdown() - e.PostSolidFilter.Shutdown() - e.BlockDAG.Shutdown() - e.PreSolidFilter.Shutdown() - e.Retainer.Shutdown() - e.Workers.Shutdown() - e.Storage.Shutdown() - - e.TriggerStopped() - } -} - func (e *Engine) BlockFromCache(id iotago.BlockID) (*blocks.Block, bool) { return e.BlockCache.Block(id) } @@ -297,7 +277,11 @@ func (e *Engine) BlockFromCache(id iotago.BlockID) (*blocks.Block, bool) { func (e *Engine) Block(id iotago.BlockID) (*model.Block, bool) { cachedBlock, exists := e.BlockCache.Block(id) if exists && !cachedBlock.IsRootBlock() { - return cachedBlock.ModelBlock(), !cachedBlock.IsMissing() + if cachedBlock.IsMissing() { + return nil, false + } + + return cachedBlock.ModelBlock(), true } s, err := e.Storage.Blocks(id.Slot()) @@ -522,46 +506,105 @@ func (e *Engine) setupPruning() { }, event.WithWorkerPool(e.Workers.CreatePool("PruneEngine", workerpool.WithWorkerCount(1)))) } -// EarliestRootCommitment is used to make sure that the chainManager knows the earliest possible -// commitment that blocks we are solidifying will refer to. Failing to do so will prevent those blocks -// from being processed as their chain will be deemed unsolid. -// lastFinalizedSlot is needed to make sure that the root commitment is not younger than the last finalized slot. -// If setting the root commitment based on the last evicted slot this basically means we won't be able to solidify another -// chain beyond a window based on eviction, which in turn is based on acceptance. In case of a partition, this behavior is -// clearly not desired. 
-func (e *Engine) EarliestRootCommitment(lastFinalizedSlot iotago.SlotIndex) (earliestCommitment *model.Commitment) { - api := e.APIForSlot(lastFinalizedSlot) - - genesisSlot := api.ProtocolParameters().GenesisSlot() - maxCommittableAge := api.ProtocolParameters().MaxCommittableAge() - - var earliestRootCommitmentSlot iotago.SlotIndex - if lastFinalizedSlot <= genesisSlot+maxCommittableAge { - earliestRootCommitmentSlot = genesisSlot - } else { - earliestRootCommitmentSlot = lastFinalizedSlot - maxCommittableAge +func (e *Engine) ErrorHandler(componentName string) func(error) { + return func(err error) { + e.errorHandler(ierrors.Wrap(err, componentName)) } +} - rootCommitment, err := e.Storage.Commitments().Load(earliestRootCommitmentSlot) - if err != nil { - panic(fmt.Sprintf("could not load earliest commitment %d after engine initialization: %s", earliestRootCommitmentSlot, err)) +func (e *Engine) initRootCommitment() { + updateRootCommitment := func(lastFinalizedSlot iotago.SlotIndex) { + e.RootCommitment.Compute(func(rootCommitment *model.Commitment) *model.Commitment { + protocolParams := e.APIForSlot(lastFinalizedSlot).ProtocolParameters() + maxCommittableAge := protocolParams.MaxCommittableAge() + + targetSlot := protocolParams.GenesisSlot() + if lastFinalizedSlot > targetSlot+maxCommittableAge { + targetSlot = lastFinalizedSlot - maxCommittableAge + } + + if rootCommitment != nil && targetSlot == rootCommitment.Slot() { + return rootCommitment + } + + commitment, err := e.Storage.Commitments().Load(targetSlot) + if err != nil { + e.LogError("failed to load root commitment", "slot", targetSlot, "err", err) + } + + return commitment + }) } - return rootCommitment + e.Constructed.OnTrigger(func() { + unsubscribe := e.Events.SlotGadget.SlotFinalized.Hook(updateRootCommitment).Unhook + + e.Initialized.OnTrigger(func() { + updateRootCommitment(e.Storage.Settings().LatestFinalizedSlot()) + }) + + e.Shutdown.OnTrigger(unsubscribe) + }) } -func (e *Engine) ErrorHandler(componentName string) func(error) { - return func(err error) { - e.errorHandler(ierrors.Wrap(err, componentName)) +func (e *Engine) initLatestCommitment() { + updateLatestCommitment := func(latestCommitment *model.Commitment) { + e.LatestCommitment.Compute(func(currentLatestCommitment *model.Commitment) *model.Commitment { + return lo.Cond(currentLatestCommitment == nil || currentLatestCommitment.Slot() < latestCommitment.Slot(), latestCommitment, currentLatestCommitment) + }) } -} -func (e *Engine) RLock() { - e.accessMutex.RLock() + e.Constructed.OnTrigger(func() { + unsubscribe := e.Events.Notarization.LatestCommitmentUpdated.Hook(updateLatestCommitment).Unhook + + e.Initialized.OnTrigger(func() { + updateLatestCommitment(e.Storage.Settings().LatestCommitment()) + }) + + e.Shutdown.OnTrigger(unsubscribe) + }) } -func (e *Engine) RUnlock() { - e.accessMutex.RUnlock() +func (e *Engine) initReactiveModule(parentLogger log.Logger) (reactiveModule *module.ReactiveModule) { + logger, unsubscribeFromParentLogger := parentLogger.NewEntityLogger("Engine") + reactiveModule = module.NewReactiveModule(logger) + + e.RootCommitment.LogUpdates(reactiveModule, log.LevelTrace, "RootCommitment") + e.LatestCommitment.LogUpdates(reactiveModule, log.LevelTrace, "LatestCommitment") + + reactiveModule.Shutdown.OnTrigger(func() { + reactiveModule.LogDebug("shutting down") + + unsubscribeFromParentLogger() + + // Shutdown should be performed in the reverse dataflow order. 
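// (i.e. from the components closest to the network back towards the database: requester and
// scheduler first, consensus gadgets and the ledger afterwards, and finally the worker pools and
// the storage backend)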
+ e.BlockRequester.Shutdown() + e.Scheduler.Shutdown() + e.TipSelection.Shutdown() + e.TipManager.Shutdown() + e.Attestations.Shutdown() + e.SyncManager.Shutdown() + e.Notarization.Shutdown() + e.Clock.Shutdown() + e.SlotGadget.Shutdown() + e.BlockGadget.Shutdown() + e.UpgradeOrchestrator.Shutdown() + e.SybilProtection.Shutdown() + e.Booker.Shutdown() + e.Ledger.Shutdown() + e.PostSolidFilter.Shutdown() + e.BlockDAG.Shutdown() + e.PreSolidFilter.Shutdown() + e.Retainer.Shutdown() + e.Workers.Shutdown() + e.Storage.Shutdown() + + reactiveModule.LogDebug("stopped") + + e.Stopped.Trigger() + }) + + return reactiveModule } // endregion /////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/pkg/protocol/engine/filter/postsolidfilter/postsolidblockfilter/post_solid_block_filter.go b/pkg/protocol/engine/filter/postsolidfilter/postsolidblockfilter/post_solid_block_filter.go index d1176f4a8..1229b24cf 100644 --- a/pkg/protocol/engine/filter/postsolidfilter/postsolidblockfilter/post_solid_block_filter.go +++ b/pkg/protocol/engine/filter/postsolidfilter/postsolidblockfilter/post_solid_block_filter.go @@ -28,7 +28,7 @@ type PostSolidBlockFilter struct { func NewProvider(opts ...options.Option[PostSolidBlockFilter]) module.Provider[*engine.Engine, postsolidfilter.PostSolidFilter] { return module.Provide(func(e *engine.Engine) postsolidfilter.PostSolidFilter { c := New(opts...) - e.HookConstructed(func() { + e.Constructed.OnTrigger(func() { c.accountRetrieveFunc = e.Ledger.Account c.blockCacheRetrieveFunc = e.BlockCache.Block diff --git a/pkg/protocol/engine/filter/presolidfilter/presolidblockfilter/pre_solid_block_filter.go b/pkg/protocol/engine/filter/presolidfilter/presolidblockfilter/pre_solid_block_filter.go index 244a835fc..3b0847389 100644 --- a/pkg/protocol/engine/filter/presolidfilter/presolidblockfilter/pre_solid_block_filter.go +++ b/pkg/protocol/engine/filter/presolidfilter/presolidblockfilter/pre_solid_block_filter.go @@ -39,7 +39,7 @@ func NewProvider(opts ...options.Option[PreSolidBlockFilter]) module.Provider[*e f := New(e, opts...) 
f.TriggerConstructed() - e.HookConstructed(func() { + e.Constructed.OnTrigger(func() { e.Events.PreSolidFilter.LinkTo(f.events) e.SybilProtection.HookInitialized(func() { f.committeeFunc = e.SybilProtection.SeatManager().CommitteeInSlot diff --git a/pkg/protocol/engine/ledger/ledger/ledger.go b/pkg/protocol/engine/ledger/ledger/ledger.go index d4031a609..17b79e8ef 100644 --- a/pkg/protocol/engine/ledger/ledger/ledger.go +++ b/pkg/protocol/engine/ledger/ledger/ledger.go @@ -61,7 +61,7 @@ func NewProvider() module.Provider[*engine.Engine, ledger.Ledger] { e.ErrorHandler("ledger"), ) - e.HookConstructed(func() { + e.Constructed.OnTrigger(func() { e.Events.Ledger.LinkTo(l.events) l.spendDAG = spenddagv1.New[iotago.TransactionID, mempool.StateID, ledger.BlockVoteRank](l.sybilProtection.SeatManager().OnlineCommittee().Size) e.Events.SpendDAG.LinkTo(l.spendDAG.Events()) diff --git a/pkg/protocol/engine/notarization/slotnotarization/manager.go b/pkg/protocol/engine/notarization/slotnotarization/manager.go index b894794b3..a9a39e2c2 100644 --- a/pkg/protocol/engine/notarization/slotnotarization/manager.go +++ b/pkg/protocol/engine/notarization/slotnotarization/manager.go @@ -4,6 +4,7 @@ import ( "time" "github.com/iotaledger/hive.go/ierrors" + "github.com/iotaledger/hive.go/log" "github.com/iotaledger/hive.go/runtime/event" "github.com/iotaledger/hive.go/runtime/module" "github.com/iotaledger/hive.go/runtime/syncutils" @@ -44,16 +45,21 @@ type Manager struct { commitmentMutex syncutils.RWMutex + log.Logger + module.Module } func NewProvider() module.Provider[*engine.Engine, notarization.Notarization] { return module.Provide(func(e *engine.Engine) notarization.Notarization { - m := NewManager(e.Workers.CreateGroup("NotarizationManager"), e.ErrorHandler("notarization")) + logger, shutdownLogger := e.NewChildLogger("NotarizationManager") + + m := NewManager(logger, e.Workers.CreateGroup("NotarizationManager"), e.ErrorHandler("notarization")) + m.HookShutdown(shutdownLogger) m.apiProvider = e - e.HookConstructed(func() { + e.Constructed.OnTrigger(func() { m.storage = e.Storage m.acceptedTimeFunc = e.Clock.Accepted().Time @@ -81,12 +87,15 @@ func NewProvider() module.Provider[*engine.Engine, notarization.Notarization] { m.TriggerConstructed() }) + e.Shutdown.OnTrigger(m.Shutdown) + return m }) } -func NewManager(workers *workerpool.Group, errorHandler func(error)) *Manager { +func NewManager(logger log.Logger, workers *workerpool.Group, errorHandler func(error)) *Manager { return &Manager{ + Logger: logger, events: notarization.NewEvents(), workers: workers, errorHandler: errorHandler, @@ -94,12 +103,15 @@ func NewManager(workers *workerpool.Group, errorHandler func(error)) *Manager { } func (m *Manager) Shutdown() { - m.TriggerStopped() + m.TriggerShutdown() + // Alternative 2 if m.acceptedBlockProcessedDetach != nil { m.acceptedBlockProcessedDetach() } m.workers.Shutdown() + + m.TriggerStopped() } // tryCommitUntil tries to create slot commitments until the new provided acceptance time. 
@@ -249,6 +261,8 @@ func (m *Manager) createCommitment(slot iotago.SlotIndex) (*model.Commitment, er rmc, ) + m.LogTrace("Committing", "commitment", newCommitment, "roots ", roots) + newModelCommitment, err := model.CommitmentFromCommitment(newCommitment, apiForSlot, serix.WithValidation()) if err != nil { return nil, ierrors.Wrapf(err, "failed to create model commitment for commitment %s", newCommitment.MustID()) diff --git a/pkg/protocol/engine/tipmanager/v1/provider.go b/pkg/protocol/engine/tipmanager/v1/provider.go index f2b9ad4e4..eb859133d 100644 --- a/pkg/protocol/engine/tipmanager/v1/provider.go +++ b/pkg/protocol/engine/tipmanager/v1/provider.go @@ -14,7 +14,7 @@ func NewProvider() module.Provider[*engine.Engine, tipmanager.TipManager] { return module.Provide(func(e *engine.Engine) tipmanager.TipManager { t := New(e.BlockCache.Block) - e.HookConstructed(func() { + e.Constructed.OnTrigger(func() { tipWorker := e.Workers.CreatePool("AddTip", workerpool.WithWorkerCount(2)) e.Events.Scheduler.BlockScheduled.Hook(lo.Void(t.AddBlock), event.WithWorkerPool(tipWorker)) e.Events.Scheduler.BlockSkipped.Hook(lo.Void(t.AddBlock), event.WithWorkerPool(tipWorker)) @@ -25,7 +25,10 @@ func NewProvider() module.Provider[*engine.Engine, tipmanager.TipManager] { t.TriggerInitialized() }) - e.HookStopped(t.TriggerStopped) + e.Shutdown.OnTrigger(func() { + t.TriggerShutdown() + t.TriggerStopped() + }) return t }) diff --git a/pkg/protocol/engine/tipselection/v1/provider.go b/pkg/protocol/engine/tipselection/v1/provider.go index e600818c0..be28ebc50 100644 --- a/pkg/protocol/engine/tipselection/v1/provider.go +++ b/pkg/protocol/engine/tipselection/v1/provider.go @@ -18,14 +18,14 @@ func NewProvider(opts ...options.Option[TipSelection]) module.Provider[*engine.E return module.Provide(func(e *engine.Engine) tipselection.TipSelection { t := New(opts...) 
- e.HookConstructed(func() { + e.Constructed.OnTrigger(func() { // wait for submodules to be constructed (so all of their properties are available) module.OnAllConstructed(func() { t.Construct(e.TipManager, e.Ledger.SpendDAG(), e.Ledger.MemPool().TransactionMetadata, func() iotago.BlockIDs { return lo.Keys(e.EvictionState.ActiveRootBlocks()) }, DynamicLivenessThreshold(e.SybilProtection.SeatManager().OnlineCommittee().Size)) }, e.TipManager, e.Ledger, e.SybilProtection) }) - e.HookShutdown(t.Shutdown) + e.Shutdown.OnTrigger(t.Shutdown) return t }) diff --git a/pkg/protocol/enginemanager/enginemanager.go b/pkg/protocol/enginemanager/enginemanager.go deleted file mode 100644 index ce5688260..000000000 --- a/pkg/protocol/enginemanager/enginemanager.go +++ /dev/null @@ -1,349 +0,0 @@ -package enginemanager - -import ( - "os" - "path/filepath" - - "github.com/google/uuid" - - "github.com/iotaledger/hive.go/ierrors" - "github.com/iotaledger/hive.go/lo" - "github.com/iotaledger/hive.go/runtime/event" - "github.com/iotaledger/hive.go/runtime/ioutils" - "github.com/iotaledger/hive.go/runtime/module" - "github.com/iotaledger/hive.go/runtime/options" - "github.com/iotaledger/hive.go/runtime/workerpool" - "github.com/iotaledger/iota-core/pkg/protocol/engine" - "github.com/iotaledger/iota-core/pkg/protocol/engine/accounts/accountsledger" - "github.com/iotaledger/iota-core/pkg/protocol/engine/attestation" - "github.com/iotaledger/iota-core/pkg/protocol/engine/blockdag" - "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" - "github.com/iotaledger/iota-core/pkg/protocol/engine/booker" - "github.com/iotaledger/iota-core/pkg/protocol/engine/clock" - "github.com/iotaledger/iota-core/pkg/protocol/engine/congestioncontrol/scheduler" - "github.com/iotaledger/iota-core/pkg/protocol/engine/consensus/blockgadget" - "github.com/iotaledger/iota-core/pkg/protocol/engine/consensus/slotgadget" - "github.com/iotaledger/iota-core/pkg/protocol/engine/eviction" - "github.com/iotaledger/iota-core/pkg/protocol/engine/filter/postsolidfilter" - "github.com/iotaledger/iota-core/pkg/protocol/engine/filter/presolidfilter" - "github.com/iotaledger/iota-core/pkg/protocol/engine/ledger" - "github.com/iotaledger/iota-core/pkg/protocol/engine/notarization" - "github.com/iotaledger/iota-core/pkg/protocol/engine/syncmanager" - "github.com/iotaledger/iota-core/pkg/protocol/engine/tipmanager" - "github.com/iotaledger/iota-core/pkg/protocol/engine/tipselection" - "github.com/iotaledger/iota-core/pkg/protocol/engine/upgrade" - "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection" - "github.com/iotaledger/iota-core/pkg/retainer" - "github.com/iotaledger/iota-core/pkg/storage" - "github.com/iotaledger/iota-core/pkg/storage/utils" - iotago "github.com/iotaledger/iota.go/v4" -) - -const engineInfoFile = "info" - -type engineInfo struct { - Name string `json:"name"` -} - -type EngineManager struct { - directory *utils.Directory - dbVersion byte - workers *workerpool.Group - errorHandler func(error) - engineCreated *event.Event1[*engine.Engine] - activeInstance *engine.Engine - - storageOptions []options.Option[storage.Storage] - engineOptions []options.Option[engine.Engine] - preSolidFilterProvider module.Provider[*engine.Engine, presolidfilter.PreSolidFilter] - postSolidFilterProvider module.Provider[*engine.Engine, postsolidfilter.PostSolidFilter] - blockDAGProvider module.Provider[*engine.Engine, blockdag.BlockDAG] - bookerProvider module.Provider[*engine.Engine, booker.Booker] - clockProvider 
module.Provider[*engine.Engine, clock.Clock] - blockGadgetProvider module.Provider[*engine.Engine, blockgadget.Gadget] - slotGadgetProvider module.Provider[*engine.Engine, slotgadget.Gadget] - sybilProtectionProvider module.Provider[*engine.Engine, sybilprotection.SybilProtection] - notarizationProvider module.Provider[*engine.Engine, notarization.Notarization] - attestationProvider module.Provider[*engine.Engine, attestation.Attestations] - ledgerProvider module.Provider[*engine.Engine, ledger.Ledger] - schedulerProvider module.Provider[*engine.Engine, scheduler.Scheduler] - tipManagerProvider module.Provider[*engine.Engine, tipmanager.TipManager] - tipSelectionProvider module.Provider[*engine.Engine, tipselection.TipSelection] - retainerProvider module.Provider[*engine.Engine, retainer.Retainer] - upgradeOrchestratorProvider module.Provider[*engine.Engine, upgrade.Orchestrator] - syncManagerProvider module.Provider[*engine.Engine, syncmanager.SyncManager] -} - -func New( - workers *workerpool.Group, - errorHandler func(error), - dir string, - dbVersion byte, - storageOptions []options.Option[storage.Storage], - engineOptions []options.Option[engine.Engine], - preSolidFilterProvider module.Provider[*engine.Engine, presolidfilter.PreSolidFilter], - postSolidFilterProvider module.Provider[*engine.Engine, postsolidfilter.PostSolidFilter], - blockDAGProvider module.Provider[*engine.Engine, blockdag.BlockDAG], - bookerProvider module.Provider[*engine.Engine, booker.Booker], - clockProvider module.Provider[*engine.Engine, clock.Clock], - blockGadgetProvider module.Provider[*engine.Engine, blockgadget.Gadget], - slotGadgetProvider module.Provider[*engine.Engine, slotgadget.Gadget], - sybilProtectionProvider module.Provider[*engine.Engine, sybilprotection.SybilProtection], - notarizationProvider module.Provider[*engine.Engine, notarization.Notarization], - attestationProvider module.Provider[*engine.Engine, attestation.Attestations], - ledgerProvider module.Provider[*engine.Engine, ledger.Ledger], - schedulerProvider module.Provider[*engine.Engine, scheduler.Scheduler], - tipManagerProvider module.Provider[*engine.Engine, tipmanager.TipManager], - tipSelectionProvider module.Provider[*engine.Engine, tipselection.TipSelection], - retainerProvider module.Provider[*engine.Engine, retainer.Retainer], - upgradeOrchestratorProvider module.Provider[*engine.Engine, upgrade.Orchestrator], - syncManagerProvider module.Provider[*engine.Engine, syncmanager.SyncManager], -) *EngineManager { - return &EngineManager{ - directory: utils.NewDirectory(dir), - dbVersion: dbVersion, - workers: workers, - errorHandler: errorHandler, - engineCreated: event.New1[*engine.Engine](), - storageOptions: storageOptions, - engineOptions: engineOptions, - preSolidFilterProvider: preSolidFilterProvider, - postSolidFilterProvider: postSolidFilterProvider, - blockDAGProvider: blockDAGProvider, - bookerProvider: bookerProvider, - clockProvider: clockProvider, - blockGadgetProvider: blockGadgetProvider, - slotGadgetProvider: slotGadgetProvider, - sybilProtectionProvider: sybilProtectionProvider, - notarizationProvider: notarizationProvider, - attestationProvider: attestationProvider, - ledgerProvider: ledgerProvider, - schedulerProvider: schedulerProvider, - tipManagerProvider: tipManagerProvider, - tipSelectionProvider: tipSelectionProvider, - retainerProvider: retainerProvider, - upgradeOrchestratorProvider: upgradeOrchestratorProvider, - syncManagerProvider: syncManagerProvider, - } -} - -func (e *EngineManager) 
LoadActiveEngine(snapshotPath string) (*engine.Engine, error) { - info := &engineInfo{} - if err := ioutils.ReadJSONFromFile(e.infoFilePath(), info); err != nil { - if !ierrors.Is(err, os.ErrNotExist) { - return nil, ierrors.Errorf("unable to read engine info file: %w", err) - } - } - - if len(info.Name) > 0 { - if exists, isDirectory, err := ioutils.PathExists(e.directory.Path(info.Name)); err == nil && exists && isDirectory { - // Load previous engine as active - e.activeInstance = e.loadEngineInstanceFromSnapshot(info.Name, snapshotPath) - - // Clear the storage of the active instance to be consistent with the latest committed slot. - // Everything after the latest committed slot is pruned to ensure a consistent state (e.g. accepted blocks). - targetSlot := e.activeInstance.Storage.Settings().LatestCommitment().Slot() - if err := e.rollbackStorage(e.activeInstance.Storage, targetSlot); err != nil { - return nil, ierrors.Wrapf(err, "failed to rollback storage to slot %d", targetSlot) - } - - // Rollback attestations already on created engine instance, because this action modifies the in-memory storage. - if err := e.activeInstance.Attestations.Rollback(targetSlot); err != nil { - return nil, ierrors.Wrap(err, "error while rolling back attestations storage on candidate engine") - } - } - } - - if e.activeInstance == nil { - // Start with a new instance and set to active - instance := e.loadEngineInstanceFromSnapshot(lo.PanicOnErr(uuid.NewUUID()).String(), snapshotPath) - - if err := e.SetActiveInstance(instance); err != nil { - return nil, err - } - } - - // Cleanup non-active instances - if err := e.CleanupNonActive(); err != nil { - return nil, err - } - - return e.activeInstance, nil -} - -func (e *EngineManager) CleanupNonActive() error { - activeDir := filepath.Base(e.activeInstance.Storage.Directory()) - - dirs, err := e.directory.SubDirs() - if err != nil { - return ierrors.Wrapf(err, "unable to list subdirectories of %s", e.directory.Path()) - } - for _, dir := range dirs { - if dir == activeDir { - continue - } - if err := e.directory.RemoveSubdir(dir); err != nil { - return ierrors.Wrapf(err, "unable to remove subdirectory %s", dir) - } - } - - return nil -} - -func (e *EngineManager) infoFilePath() string { - return e.directory.Path(engineInfoFile) -} - -func (e *EngineManager) SetActiveInstance(instance *engine.Engine) error { - e.activeInstance = instance - - info := &engineInfo{ - Name: filepath.Base(instance.Storage.Directory()), - } - - return ioutils.WriteJSONToFile(e.infoFilePath(), info, 0o644) -} - -func (e *EngineManager) loadEngineInstanceFromSnapshot(engineAlias string, snapshotPath string) *engine.Engine { - errorHandler := func(err error) { - e.errorHandler(ierrors.Wrapf(err, "engine (%s)", engineAlias[0:8])) - } - - e.engineOptions = append(e.engineOptions, engine.WithSnapshotPath(snapshotPath)) - - return e.loadEngineInstanceWithStorage(engineAlias, storage.Create(e.directory.Path(engineAlias), e.dbVersion, errorHandler, e.storageOptions...)) -} - -func (e *EngineManager) loadEngineInstanceWithStorage(engineAlias string, storage *storage.Storage) *engine.Engine { - errorHandler := func(err error) { - e.errorHandler(ierrors.Wrapf(err, "engine (%s)", engineAlias[0:8])) - } - - newEngine := engine.New(e.workers.CreateGroup(engineAlias), - errorHandler, - storage, - e.preSolidFilterProvider, - e.postSolidFilterProvider, - e.blockDAGProvider, - e.bookerProvider, - e.clockProvider, - e.blockGadgetProvider, - e.slotGadgetProvider, - e.sybilProtectionProvider, - 
e.notarizationProvider, - e.attestationProvider, - e.ledgerProvider, - e.schedulerProvider, - e.tipManagerProvider, - e.tipSelectionProvider, - e.retainerProvider, - e.upgradeOrchestratorProvider, - e.syncManagerProvider, - e.engineOptions..., - ) - - e.engineCreated.Trigger(newEngine) - - return newEngine -} - -func (e *EngineManager) ForkEngineAtSlot(index iotago.SlotIndex) (*engine.Engine, error) { - engineAlias := newEngineAlias() - errorHandler := func(err error) { - e.errorHandler(ierrors.Wrapf(err, "engine (%s)", engineAlias[0:8])) - } - - // Copy raw data on disk. - newStorage, err := storage.Clone(e.activeInstance.Storage, e.directory.Path(engineAlias), e.dbVersion, errorHandler, e.storageOptions...) - if err != nil { - return nil, ierrors.Wrapf(err, "failed to copy storage from active engine instance (%s) to new engine instance (%s)", e.activeInstance.Storage.Directory(), e.directory.Path(engineAlias)) - } - - if err := e.rollbackStorage(newStorage, index); err != nil { - return nil, ierrors.Wrapf(err, "failed to rollback storage to slot %d", index) - } - - candidateEngine := e.loadEngineInstanceWithStorage(engineAlias, newStorage) - - // Rollback attestations already on created engine instance, because this action modifies the in-memory storage. - if err := candidateEngine.Attestations.Rollback(index); err != nil { - return nil, ierrors.Wrap(err, "error while rolling back attestations storage on candidate engine") - } - - return candidateEngine, nil -} - -func (e *EngineManager) rollbackStorage(newStorage *storage.Storage, slot iotago.SlotIndex) error { - // Remove commitments that after forking point. - latestCommitment := newStorage.Settings().LatestCommitment() - if err := newStorage.Commitments().Rollback(slot, latestCommitment.Slot()); err != nil { - return ierrors.Wrap(err, "failed to rollback commitments") - } - // Create temporary components and rollback their permanent state, which will be reflected on disk. 
- evictionState := eviction.NewState(newStorage.LatestNonEmptySlot(), newStorage.RootBlocks, newStorage.GenesisRootBlockID) - evictionState.Initialize(latestCommitment.Slot()) - - blockCache := blocks.New(evictionState, newStorage.Settings().APIProvider()) - accountsManager := accountsledger.New(newStorage.Settings().APIProvider(), blockCache.Block, newStorage.AccountDiffs, newStorage.Accounts()) - - accountsManager.SetLatestCommittedSlot(latestCommitment.Slot()) - if err := accountsManager.Rollback(slot); err != nil { - return ierrors.Wrap(err, "failed to rollback accounts manager") - } - - if err := evictionState.Rollback(newStorage.Settings().LatestFinalizedSlot(), slot); err != nil { - return ierrors.Wrap(err, "failed to rollback eviction state") - } - if err := newStorage.Ledger().Rollback(slot); err != nil { - return ierrors.Wrapf(err, "failed to rollback ledger to slot %d", slot) - } - - targetCommitment, err := newStorage.Commitments().Load(slot) - if err != nil { - return ierrors.Wrapf(err, "error while retrieving commitment for target slot %d", slot) - } - - if err := newStorage.Settings().Rollback(targetCommitment); err != nil { - return ierrors.Wrap(err, "failed to rollback settings") - } - - if err := newStorage.Rollback(slot); err != nil { - return ierrors.Wrap(err, "failed to rollback prunable data") - } - - return nil -} - -func (e *EngineManager) RollbackEngine(slot iotago.SlotIndex) (*engine.Engine, error) { - engineAlias := e.activeInstance.Name() - errorHandler := func(err error) { - e.errorHandler(ierrors.Wrapf(err, "engine (%s)", engineAlias[0:8])) - } - - dir := e.activeInstance.Storage.Directory() - e.activeInstance.Shutdown() - - newStorage := storage.Create(dir, e.dbVersion, errorHandler, e.storageOptions...) - - if err := e.rollbackStorage(newStorage, slot); err != nil { - return nil, ierrors.Wrapf(err, "failed to rollback storage to slot %d", slot) - } - - newEngine := e.loadEngineInstanceWithStorage(engineAlias, newStorage) - - // Rollback attestations already on created engine instance, because this action modifies the in-memory storage. 
- if err := newEngine.Attestations.Rollback(slot); err != nil { - return nil, ierrors.Wrap(err, "error while rolling back attestations storage on candidate engine") - } - - return newEngine, nil - -} - -func (e *EngineManager) OnEngineCreated(handler func(*engine.Engine)) (unsubscribe func()) { - return e.engineCreated.Hook(handler).Unhook -} - -func newEngineAlias() string { - return lo.PanicOnErr(uuid.NewUUID()).String() -} diff --git a/pkg/protocol/engines.go b/pkg/protocol/engines.go new file mode 100644 index 000000000..847713cd2 --- /dev/null +++ b/pkg/protocol/engines.go @@ -0,0 +1,278 @@ +package protocol + +import ( + "os" + "path/filepath" + + "github.com/google/uuid" + + "github.com/iotaledger/hive.go/ds/reactive" + "github.com/iotaledger/hive.go/ierrors" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/runtime/ioutils" + "github.com/iotaledger/hive.go/runtime/module" + "github.com/iotaledger/hive.go/runtime/options" + "github.com/iotaledger/hive.go/runtime/workerpool" + "github.com/iotaledger/iota-core/pkg/protocol/engine" + "github.com/iotaledger/iota-core/pkg/protocol/engine/accounts/accountsledger" + "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" + "github.com/iotaledger/iota-core/pkg/protocol/engine/eviction" + "github.com/iotaledger/iota-core/pkg/storage" + "github.com/iotaledger/iota-core/pkg/storage/utils" + iotago "github.com/iotaledger/iota.go/v4" +) + +// Engines is a subcomponent of the protocol that exposes the engines that are managed by the protocol. +type Engines struct { + // Main contains the main engine. + Main reactive.Variable[*engine.Engine] + + // protocol contains a reference to the Protocol instance that this component belongs to. + protocol *Protocol + + // worker contains the worker pool that is used to process changes to the engine instances asynchronously. + worker *workerpool.WorkerPool + + // directory contains the directory that is used to store the engine instances on disk. + directory *utils.Directory + + // ReactiveModule embeds a reactive module that provides default API for logging and lifecycle management. + *module.ReactiveModule +} + +// newEngines creates a new Engines instance. +func newEngines(protocol *Protocol) *Engines { + e := &Engines{ + Main: reactive.NewVariable[*engine.Engine](), + ReactiveModule: protocol.NewReactiveSubModule("Engines"), + protocol: protocol, + worker: protocol.Workers.CreatePool("Engines", workerpool.WithWorkerCount(1)), + directory: utils.NewDirectory(protocol.Options.BaseDirectory), + } + + protocol.Constructed.OnTrigger(func() { + shutdown := lo.Batch( + e.syncMainEngineFromMainChain(), + e.syncMainEngineInfoFile(), + e.injectEngineInstances(), + ) + + e.Shutdown.OnTrigger(func() { + shutdown() + + e.Stopped.Trigger() + }) + + e.Initialized.Trigger() + }) + + e.Constructed.Trigger() + + return e +} + +// ForkAtSlot creates a new engine instance that forks from the main engine at the given slot. +func (e *Engines) ForkAtSlot(slot iotago.SlotIndex) (*engine.Engine, error) { + newEngineAlias := lo.PanicOnErr(uuid.NewUUID()).String() + errorHandler := func(err error) { + e.protocol.LogError("engine error", "err", err, "name", newEngineAlias[0:8]) + } + + // copy raw data on disk. + newStorage, err := storage.Clone(e.Main.Get().Storage, e.directory.Path(newEngineAlias), DatabaseVersion, errorHandler, e.protocol.Options.StorageOptions...) 
+ if err != nil { + return nil, ierrors.Wrapf(err, "failed to copy storage from active engine instance (%s) to new engine instance (%s)", e.Main.Get().Storage.Directory(), e.directory.Path(newEngineAlias)) + } + + // remove commitments that after forking point. + latestCommitment := newStorage.Settings().LatestCommitment() + if err = newStorage.Commitments().Rollback(slot, latestCommitment.Slot()); err != nil { + return nil, ierrors.Wrap(err, "failed to rollback commitments") + } + // create temporary components and rollback their permanent state, which will be reflected on disk. + evictionState := eviction.NewState(newStorage.LatestNonEmptySlot(), newStorage.RootBlocks, newStorage.GenesisRootBlockID) + evictionState.Initialize(latestCommitment.Slot()) + + blockCache := blocks.New(evictionState, newStorage.Settings().APIProvider()) + accountsManager := accountsledger.New(newStorage.Settings().APIProvider(), blockCache.Block, newStorage.AccountDiffs, newStorage.Accounts()) + + accountsManager.SetLatestCommittedSlot(latestCommitment.Slot()) + if err = accountsManager.Rollback(slot); err != nil { + return nil, ierrors.Wrap(err, "failed to rollback accounts manager") + } + + if err = evictionState.Rollback(newStorage.Settings().LatestFinalizedSlot(), slot); err != nil { + return nil, ierrors.Wrap(err, "failed to rollback eviction state") + } + if err = newStorage.Ledger().Rollback(slot); err != nil { + return nil, err + } + + targetCommitment, err := newStorage.Commitments().Load(slot) + if err != nil { + return nil, ierrors.Wrapf(err, "error while retrieving commitment for target index %d", slot) + } + + if err = newStorage.Settings().Rollback(targetCommitment); err != nil { + return nil, err + } + + if err = newStorage.Rollback(slot); err != nil { + return nil, err + } + + candidateEngine := e.loadEngineInstanceWithStorage(newEngineAlias, newStorage) + + // rollback attestations already on created engine instance, because this action modifies the in-memory storage. + if err = candidateEngine.Attestations.Rollback(slot); err != nil { + return nil, ierrors.Wrap(err, "error while rolling back attestations storage on candidate engine") + } + + return candidateEngine, nil +} + +// loadMainEngine loads the main engine from disk or creates a new one if no engine exists. +func (e *Engines) loadMainEngine(snapshotPath string) (*engine.Engine, error) { + info := &engineInfo{} + if err := ioutils.ReadJSONFromFile(e.infoFilePath(), info); err != nil && !ierrors.Is(err, os.ErrNotExist) { + return nil, ierrors.Errorf("unable to read engine info file: %w", err) + } + + e.Main.Compute(func(mainEngine *engine.Engine) *engine.Engine { + // load previous engine as main engine if it exists. + if len(info.Name) > 0 { + if exists, isDirectory, err := ioutils.PathExists(e.directory.Path(info.Name)); err == nil && exists && isDirectory { + return e.loadEngineInstanceFromSnapshot(info.Name, snapshotPath) + } + } + + // load new engine if no previous engine exists. + return e.loadEngineInstanceFromSnapshot(lo.PanicOnErr(uuid.NewUUID()).String(), snapshotPath) + }) + + // cleanup candidates + if err := e.cleanupCandidates(); err != nil { + return nil, err + } + + return e.Main.Get(), nil +} + +// cleanupCandidates removes all engine instances that are not the main engine. 
+func (e *Engines) cleanupCandidates() error { + activeDir := filepath.Base(e.Main.Get().Storage.Directory()) + + dirs, err := e.directory.SubDirs() + if err != nil { + return ierrors.Wrapf(err, "unable to list subdirectories of %s", e.directory.Path()) + } + for _, dir := range dirs { + if dir == activeDir { + continue + } + if err := e.directory.RemoveSubdir(dir); err != nil { + return ierrors.Wrapf(err, "unable to remove subdirectory %s", dir) + } + } + + return nil +} + +// infoFilePath returns the path to the engine info file. +func (e *Engines) infoFilePath() string { + return e.directory.Path(engineInfoFile) +} + +// loadEngineInstanceFromSnapshot loads an engine instance from a snapshot. +func (e *Engines) loadEngineInstanceFromSnapshot(engineAlias string, snapshotPath string) *engine.Engine { + errorHandler := func(err error) { + e.protocol.LogError("engine error", "err", err, "name", engineAlias[0:8]) + } + + return e.loadEngineInstanceWithStorage(engineAlias, storage.Create(e.directory.Path(engineAlias), DatabaseVersion, errorHandler, e.protocol.Options.StorageOptions...), engine.WithSnapshotPath(snapshotPath)) +} + +// loadEngineInstanceWithStorage loads an engine instance with the given storage. +func (e *Engines) loadEngineInstanceWithStorage(engineAlias string, storage *storage.Storage, engineOptions ...options.Option[engine.Engine]) *engine.Engine { + return engine.New( + e.protocol.Logger, + e.protocol.Workers.CreateGroup(engineAlias), + storage, + e.protocol.Options.PreSolidFilterProvider, + e.protocol.Options.PostSolidFilterProvider, + e.protocol.Options.BlockDAGProvider, + e.protocol.Options.BookerProvider, + e.protocol.Options.ClockProvider, + e.protocol.Options.BlockGadgetProvider, + e.protocol.Options.SlotGadgetProvider, + e.protocol.Options.SybilProtectionProvider, + e.protocol.Options.NotarizationProvider, + e.protocol.Options.AttestationProvider, + e.protocol.Options.LedgerProvider, + e.protocol.Options.SchedulerProvider, + e.protocol.Options.TipManagerProvider, + e.protocol.Options.TipSelectionProvider, + e.protocol.Options.RetainerProvider, + e.protocol.Options.UpgradeOrchestratorProvider, + e.protocol.Options.SyncManagerProvider, + append(e.protocol.Options.EngineOptions, engineOptions...)..., + ) +} + +// syncMainEngineFromMainChain syncs the main engine from the main chain. +func (e *Engines) syncMainEngineFromMainChain() (shutdown func()) { + return e.protocol.Chains.Main.WithNonEmptyValue(func(mainChain *Chain) (shutdown func()) { + return e.Main.DeriveValueFrom(reactive.NewDerivedVariable(func(currentMainEngine *engine.Engine, newMainEngine *engine.Engine) *engine.Engine { + return lo.Cond(newMainEngine == nil, currentMainEngine, newMainEngine) + }, mainChain.Engine)) + }) +} + +// syncMainEngineInfoFile syncs the engine info file with the main engine. +func (e *Engines) syncMainEngineInfoFile() (shutdown func()) { + return e.Main.OnUpdate(func(_ *engine.Engine, mainEngine *engine.Engine) { + if mainEngine != nil { + if err := ioutils.WriteJSONToFile(e.infoFilePath(), &engineInfo{Name: filepath.Base(mainEngine.Storage.Directory())}, 0o644); err != nil { + e.LogError("unable to write engine info file", "err", err) + } + } + }) +} + +// injectEngineInstances injects engine instances into the chains (when requested). 
+func (e *Engines) injectEngineInstances() (shutdown func()) { + return e.protocol.Chains.WithElements(func(chain *Chain) (shutdown func()) { + return chain.StartEngine.OnUpdate(func(_ bool, startEngine bool) { + e.worker.Submit(func() { + if !startEngine { + chain.Engine.Set(nil) + + return + } + + if newEngine, err := func() (*engine.Engine, error) { + if e.Main.Get() == nil { + return e.loadMainEngine(e.protocol.Options.SnapshotPath) + } + + return e.ForkAtSlot(chain.ForkingPoint.Get().Slot() - 1) + }(); err != nil { + e.LogError("failed to create new engine instance", "err", err) + } else { + e.protocol.Network.OnShutdown(func() { newEngine.Shutdown.Trigger() }) + + chain.Engine.Set(newEngine) + } + }) + }) + }) +} + +// engineInfoFile is the name of the engine info file. +const engineInfoFile = "info" + +// engineInfo is the structure of the engine info file. +type engineInfo struct { + // Name contains the name of the engine. + Name string `json:"name"` +} diff --git a/pkg/protocol/errors.go b/pkg/protocol/errors.go new file mode 100644 index 000000000..e534d02b8 --- /dev/null +++ b/pkg/protocol/errors.go @@ -0,0 +1,13 @@ +package protocol + +import ( + "github.com/iotaledger/hive.go/ierrors" +) + +var ( + // ErrorCommitmentNotFound is returned for requests for commitments that are not available yet. + ErrorCommitmentNotFound = ierrors.New("commitment not found") + + // ErrorSlotEvicted is returned for requests for commitments that belong to evicted slots. + ErrorSlotEvicted = ierrors.New("slot evicted") +) diff --git a/pkg/protocol/events.go b/pkg/protocol/events.go index a39963221..098c143a7 100644 --- a/pkg/protocol/events.go +++ b/pkg/protocol/events.go @@ -1,34 +1,18 @@ package protocol -import ( - "github.com/iotaledger/hive.go/runtime/event" - "github.com/iotaledger/iota-core/pkg/network/protocols/core" - "github.com/iotaledger/iota-core/pkg/protocol/chainmanager" - "github.com/iotaledger/iota-core/pkg/protocol/engine" -) +import "github.com/iotaledger/iota-core/pkg/protocol/engine" +// Events exposes the Events of the main engine of the protocol at a single endpoint. +// +// TODO: It should be replaced with reactive calls to the corresponding events and be deleted but we can do this in a +// later PR (to minimize the code changes to review). type Events struct { - CandidateEngineActivated *event.Event1[*engine.Engine] - MainEngineSwitched *event.Event1[*engine.Engine] - MainEngineRestarted *event.Event1[*engine.Engine] - Error *event.Event1[error] - - Network *core.Events - Engine *engine.Events - ChainManager *chainmanager.Events - - event.Group[Events, *Events] + Engine *engine.Events } -var NewEvents = event.CreateGroupConstructor(func() (newEvents *Events) { +// NewEvents creates a new Events instance. 
+func NewEvents() *Events { return &Events{ - CandidateEngineActivated: event.New1[*engine.Engine](), - MainEngineSwitched: event.New1[*engine.Engine](), - MainEngineRestarted: event.New1[*engine.Engine](), - Error: event.New1[error](), - - Network: core.NewEvents(), - Engine: engine.NewEvents(), - ChainManager: chainmanager.NewEvents(), + Engine: engine.NewEvents(), } -}) +} diff --git a/pkg/protocol/options.go b/pkg/protocol/options.go index 51dec343b..ac9605bf8 100644 --- a/pkg/protocol/options.go +++ b/pkg/protocol/options.go @@ -1,170 +1,309 @@ package protocol import ( - "time" - + "github.com/iotaledger/hive.go/core/eventticker" "github.com/iotaledger/hive.go/runtime/module" "github.com/iotaledger/hive.go/runtime/options" - "github.com/iotaledger/iota-core/pkg/protocol/chainmanager" "github.com/iotaledger/iota-core/pkg/protocol/engine" "github.com/iotaledger/iota-core/pkg/protocol/engine/attestation" + "github.com/iotaledger/iota-core/pkg/protocol/engine/attestation/slotattestation" "github.com/iotaledger/iota-core/pkg/protocol/engine/blockdag" + "github.com/iotaledger/iota-core/pkg/protocol/engine/blockdag/inmemoryblockdag" "github.com/iotaledger/iota-core/pkg/protocol/engine/booker" + "github.com/iotaledger/iota-core/pkg/protocol/engine/booker/inmemorybooker" "github.com/iotaledger/iota-core/pkg/protocol/engine/clock" + "github.com/iotaledger/iota-core/pkg/protocol/engine/clock/blocktime" + "github.com/iotaledger/iota-core/pkg/protocol/engine/congestioncontrol/scheduler" + "github.com/iotaledger/iota-core/pkg/protocol/engine/congestioncontrol/scheduler/drr" "github.com/iotaledger/iota-core/pkg/protocol/engine/consensus/blockgadget" + "github.com/iotaledger/iota-core/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget" "github.com/iotaledger/iota-core/pkg/protocol/engine/consensus/slotgadget" + "github.com/iotaledger/iota-core/pkg/protocol/engine/consensus/slotgadget/totalweightslotgadget" "github.com/iotaledger/iota-core/pkg/protocol/engine/filter/postsolidfilter" + "github.com/iotaledger/iota-core/pkg/protocol/engine/filter/postsolidfilter/postsolidblockfilter" "github.com/iotaledger/iota-core/pkg/protocol/engine/filter/presolidfilter" + "github.com/iotaledger/iota-core/pkg/protocol/engine/filter/presolidfilter/presolidblockfilter" "github.com/iotaledger/iota-core/pkg/protocol/engine/ledger" + ledger1 "github.com/iotaledger/iota-core/pkg/protocol/engine/ledger/ledger" "github.com/iotaledger/iota-core/pkg/protocol/engine/notarization" + "github.com/iotaledger/iota-core/pkg/protocol/engine/notarization/slotnotarization" "github.com/iotaledger/iota-core/pkg/protocol/engine/syncmanager" + "github.com/iotaledger/iota-core/pkg/protocol/engine/syncmanager/trivialsyncmanager" "github.com/iotaledger/iota-core/pkg/protocol/engine/tipmanager" + tipmanagerv1 "github.com/iotaledger/iota-core/pkg/protocol/engine/tipmanager/v1" "github.com/iotaledger/iota-core/pkg/protocol/engine/tipselection" + tipselectionv1 "github.com/iotaledger/iota-core/pkg/protocol/engine/tipselection/v1" "github.com/iotaledger/iota-core/pkg/protocol/engine/upgrade" + "github.com/iotaledger/iota-core/pkg/protocol/engine/upgrade/signalingupgradeorchestrator" "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection" + "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/sybilprotectionv1" + "github.com/iotaledger/iota-core/pkg/retainer" + retainer1 "github.com/iotaledger/iota-core/pkg/retainer/retainer" "github.com/iotaledger/iota-core/pkg/storage" + iotago "github.com/iotaledger/iota.go/v4" ) +// 
Options contains the options for the Protocol. +type Options struct { + // BaseDirectory is the directory where the protocol will store its data. + BaseDirectory string + + // SnapshotPath is the path to the snapshot file that should be used to initialize the protocol. + SnapshotPath string + + // ChainSwitchingThreshold is the threshold that defines how far away a heavier chain needs to be from its forking + // point to be considered for switching. + ChainSwitchingThreshold iotago.SlotIndex + + // EngineOptions contains the options for the Engines. + EngineOptions []options.Option[engine.Engine] + + // StorageOptions contains the options for the Storage. + StorageOptions []options.Option[storage.Storage] + + CommitmentRequesterOptions []options.Option[eventticker.EventTicker[iotago.SlotIndex, iotago.CommitmentID]] + AttestationRequesterOptions []options.Option[eventticker.EventTicker[iotago.SlotIndex, iotago.CommitmentID]] + WarpSyncRequesterOptions []options.Option[eventticker.EventTicker[iotago.SlotIndex, iotago.CommitmentID]] + + // PreSolidFilterProvider contains the provider for the PreSolidFilter engine modules. + PreSolidFilterProvider module.Provider[*engine.Engine, presolidfilter.PreSolidFilter] + + // PostSolidFilterProvider contains the provider for the PostSolidFilter engine modules. + PostSolidFilterProvider module.Provider[*engine.Engine, postsolidfilter.PostSolidFilter] + + // BlockDAGProvider contains the provider for the BlockDAG engine modules. + BlockDAGProvider module.Provider[*engine.Engine, blockdag.BlockDAG] + + // TipManagerProvider contains the provider for the TipManager engine modules. + TipManagerProvider module.Provider[*engine.Engine, tipmanager.TipManager] + + // TipSelectionProvider contains the provider for the TipSelection engine modules. + TipSelectionProvider module.Provider[*engine.Engine, tipselection.TipSelection] + + // BookerProvider contains the provider for the Booker engine modules. + BookerProvider module.Provider[*engine.Engine, booker.Booker] + + // ClockProvider contains the provider for the Clock engine modules. + ClockProvider module.Provider[*engine.Engine, clock.Clock] + + // BlockGadgetProvider contains the provider for the BlockGadget engine modules. + BlockGadgetProvider module.Provider[*engine.Engine, blockgadget.Gadget] + + // SlotGadgetProvider contains the provider for the SlotGadget engine modules. + SlotGadgetProvider module.Provider[*engine.Engine, slotgadget.Gadget] + + // SybilProtectionProvider contains the provider for the SybilProtection engine modules. + SybilProtectionProvider module.Provider[*engine.Engine, sybilprotection.SybilProtection] + + // NotarizationProvider contains the provider for the Notarization engine modules. + NotarizationProvider module.Provider[*engine.Engine, notarization.Notarization] + + // AttestationProvider contains the provider for the Attestation engine modules. + AttestationProvider module.Provider[*engine.Engine, attestation.Attestations] + + // SyncManagerProvider contains the provider for the SyncManager engine modules. + SyncManagerProvider module.Provider[*engine.Engine, syncmanager.SyncManager] + + // LedgerProvider contains the provider for the Ledger engine modules. + LedgerProvider module.Provider[*engine.Engine, ledger.Ledger] + + // RetainerProvider contains the provider for the Retainer engine modules. + RetainerProvider module.Provider[*engine.Engine, retainer.Retainer] + + // SchedulerProvider contains the provider for the Scheduler engine modules. 
+ SchedulerProvider module.Provider[*engine.Engine, scheduler.Scheduler] + + // UpgradeOrchestratorProvider contains the provider for the UpgradeOrchestrator engine modules. + UpgradeOrchestratorProvider module.Provider[*engine.Engine, upgrade.Orchestrator] +} + +// NewDefaultOptions creates new default options instance for the Protocol. +func NewDefaultOptions() *Options { + return &Options{ + BaseDirectory: "", + ChainSwitchingThreshold: 3, + + PreSolidFilterProvider: presolidblockfilter.NewProvider(), + PostSolidFilterProvider: postsolidblockfilter.NewProvider(), + BlockDAGProvider: inmemoryblockdag.NewProvider(), + TipManagerProvider: tipmanagerv1.NewProvider(), + TipSelectionProvider: tipselectionv1.NewProvider(), + BookerProvider: inmemorybooker.NewProvider(), + ClockProvider: blocktime.NewProvider(), + BlockGadgetProvider: thresholdblockgadget.NewProvider(), + SlotGadgetProvider: totalweightslotgadget.NewProvider(), + SybilProtectionProvider: sybilprotectionv1.NewProvider(), + NotarizationProvider: slotnotarization.NewProvider(), + AttestationProvider: slotattestation.NewProvider(), + SyncManagerProvider: trivialsyncmanager.NewProvider(), + LedgerProvider: ledger1.NewProvider(), + RetainerProvider: retainer1.NewProvider(), + SchedulerProvider: drr.NewProvider(), + UpgradeOrchestratorProvider: signalingupgradeorchestrator.NewProvider(), + } +} + +// WithBaseDirectory is an option for the Protocol that allows to set the base directory. func WithBaseDirectory(baseDirectory string) options.Option[Protocol] { return func(p *Protocol) { - p.optsBaseDirectory = baseDirectory + p.Options.BaseDirectory = baseDirectory } } +// WithSnapshotPath is an option for the Protocol that allows to set the snapshot path. func WithSnapshotPath(snapshot string) options.Option[Protocol] { return func(p *Protocol) { - p.optsSnapshotPath = snapshot + p.Options.SnapshotPath = snapshot } } -func WithChainSwitchingThreshold(threshold int) options.Option[Protocol] { +// WithChainSwitchingThreshold is an option for the Protocol that allows to set the chain switching threshold. +func WithChainSwitchingThreshold(threshold iotago.SlotIndex) options.Option[Protocol] { return func(p *Protocol) { - p.optsChainSwitchingThreshold = threshold + p.Options.ChainSwitchingThreshold = threshold } } -func WithPreSolidFilterProvider(optsPreSolidFilterProvider module.Provider[*engine.Engine, presolidfilter.PreSolidFilter]) options.Option[Protocol] { +// WithPreSolidFilterProvider is an option for the Protocol that allows to set the PreSolidFilterProvider. +func WithPreSolidFilterProvider(optsFilterProvider module.Provider[*engine.Engine, presolidfilter.PreSolidFilter]) options.Option[Protocol] { return func(p *Protocol) { - p.optsPreSolidFilterProvider = optsPreSolidFilterProvider + p.Options.PreSolidFilterProvider = optsFilterProvider } } -func WithPostSolidFilterProvider(optsPostSolidFilterProvider module.Provider[*engine.Engine, postsolidfilter.PostSolidFilter]) options.Option[Protocol] { +// WithPostSolidFilter is an option for the Protocol that allows to set the PostSolidFilterProvider. +func WithPostSolidFilter(optsCommitmentFilterProvider module.Provider[*engine.Engine, postsolidfilter.PostSolidFilter]) options.Option[Protocol] { return func(p *Protocol) { - p.optsPostSolidFilterProvider = optsPostSolidFilterProvider + p.Options.PostSolidFilterProvider = optsCommitmentFilterProvider } } +// WithBlockDAGProvider is an option for the Protocol that allows to set the BlockDAGProvider. 
func WithBlockDAGProvider(optsBlockDAGProvider module.Provider[*engine.Engine, blockdag.BlockDAG]) options.Option[Protocol] { return func(p *Protocol) { - p.optsBlockDAGProvider = optsBlockDAGProvider + p.Options.BlockDAGProvider = optsBlockDAGProvider } } +// WithTipManagerProvider is an option for the Protocol that allows to set the TipManagerProvider. func WithTipManagerProvider(optsTipManagerProvider module.Provider[*engine.Engine, tipmanager.TipManager]) options.Option[Protocol] { return func(p *Protocol) { - p.optsTipManagerProvider = optsTipManagerProvider + p.Options.TipManagerProvider = optsTipManagerProvider } } +// WithTipSelectionProvider is an option for the Protocol that allows to set the TipSelectionProvider. func WithTipSelectionProvider(optsTipSelectionProvider module.Provider[*engine.Engine, tipselection.TipSelection]) options.Option[Protocol] { return func(p *Protocol) { - p.optsTipSelectionProvider = optsTipSelectionProvider + p.Options.TipSelectionProvider = optsTipSelectionProvider } } +// WithBookerProvider is an option for the Protocol that allows to set the BookerProvider. func WithBookerProvider(optsBookerProvider module.Provider[*engine.Engine, booker.Booker]) options.Option[Protocol] { return func(p *Protocol) { - p.optsBookerProvider = optsBookerProvider + p.Options.BookerProvider = optsBookerProvider } } +// WithClockProvider is an option for the Protocol that allows to set the ClockProvider. func WithClockProvider(optsClockProvider module.Provider[*engine.Engine, clock.Clock]) options.Option[Protocol] { return func(p *Protocol) { - p.optsClockProvider = optsClockProvider + p.Options.ClockProvider = optsClockProvider } } +// WithSybilProtectionProvider is an option for the Protocol that allows to set the SybilProtectionProvider. func WithSybilProtectionProvider(optsSybilProtectionProvider module.Provider[*engine.Engine, sybilprotection.SybilProtection]) options.Option[Protocol] { return func(p *Protocol) { - p.optsSybilProtectionProvider = optsSybilProtectionProvider + p.Options.SybilProtectionProvider = optsSybilProtectionProvider } } +// WithBlockGadgetProvider is an option for the Protocol that allows to set the BlockGadgetProvider. func WithBlockGadgetProvider(optsBlockGadgetProvider module.Provider[*engine.Engine, blockgadget.Gadget]) options.Option[Protocol] { return func(p *Protocol) { - p.optsBlockGadgetProvider = optsBlockGadgetProvider + p.Options.BlockGadgetProvider = optsBlockGadgetProvider } } +// WithSlotGadgetProvider is an option for the Protocol that allows to set the SlotGadgetProvider. func WithSlotGadgetProvider(optsSlotGadgetProvider module.Provider[*engine.Engine, slotgadget.Gadget]) options.Option[Protocol] { return func(p *Protocol) { - p.optsSlotGadgetProvider = optsSlotGadgetProvider + p.Options.SlotGadgetProvider = optsSlotGadgetProvider } } +// WithEpochGadgetProvider is an option for the Protocol that allows to set the EpochGadgetProvider. func WithEpochGadgetProvider(optsEpochGadgetProvider module.Provider[*engine.Engine, sybilprotection.SybilProtection]) options.Option[Protocol] { return func(p *Protocol) { - p.optsSybilProtectionProvider = optsEpochGadgetProvider + p.Options.SybilProtectionProvider = optsEpochGadgetProvider } } +// WithNotarizationProvider is an option for the Protocol that allows to set the NotarizationProvider. 
func WithNotarizationProvider(optsNotarizationProvider module.Provider[*engine.Engine, notarization.Notarization]) options.Option[Protocol] { return func(p *Protocol) { - p.optsNotarizationProvider = optsNotarizationProvider + p.Options.NotarizationProvider = optsNotarizationProvider } } +// WithAttestationProvider is an option for the Protocol that allows to set the AttestationProvider. func WithAttestationProvider(optsAttestationProvider module.Provider[*engine.Engine, attestation.Attestations]) options.Option[Protocol] { return func(p *Protocol) { - p.optsAttestationProvider = optsAttestationProvider + p.Options.AttestationProvider = optsAttestationProvider } } +// WithLedgerProvider is an option for the Protocol that allows to set the LedgerProvider. func WithLedgerProvider(optsLedgerProvider module.Provider[*engine.Engine, ledger.Ledger]) options.Option[Protocol] { return func(p *Protocol) { - p.optsLedgerProvider = optsLedgerProvider + p.Options.LedgerProvider = optsLedgerProvider } } +// WithUpgradeOrchestratorProvider is an option for the Protocol that allows to set the UpgradeOrchestratorProvider. func WithUpgradeOrchestratorProvider(optsUpgradeOrchestratorProvider module.Provider[*engine.Engine, upgrade.Orchestrator]) options.Option[Protocol] { return func(p *Protocol) { - p.optsUpgradeOrchestratorProvider = optsUpgradeOrchestratorProvider + p.Options.UpgradeOrchestratorProvider = optsUpgradeOrchestratorProvider } } +// WithSyncManagerProvider is an option for the Protocol that allows to set the SyncManagerProvider. func WithSyncManagerProvider(optsSyncManagerProvider module.Provider[*engine.Engine, syncmanager.SyncManager]) options.Option[Protocol] { return func(p *Protocol) { - p.optsSyncManagerProvider = optsSyncManagerProvider + p.Options.SyncManagerProvider = optsSyncManagerProvider } } +// WithEngineOptions is an option for the Protocol that allows to set the EngineOptions. func WithEngineOptions(opts ...options.Option[engine.Engine]) options.Option[Protocol] { return func(p *Protocol) { - p.optsEngineOptions = append(p.optsEngineOptions, opts...) + p.Options.EngineOptions = append(p.Options.EngineOptions, opts...) } } -func WithChainManagerOptions(opts ...options.Option[chainmanager.Manager]) options.Option[Protocol] { +// WithStorageOptions is an option for the Protocol that allows to set the StorageOptions. +func WithStorageOptions(opts ...options.Option[storage.Storage]) options.Option[Protocol] { return func(p *Protocol) { - p.optsChainManagerOptions = append(p.optsChainManagerOptions, opts...) + p.Options.StorageOptions = append(p.Options.StorageOptions, opts...) } } -func WithStorageOptions(opts ...options.Option[storage.Storage]) options.Option[Protocol] { +func WithCommitmentRequesterOptions(opts ...options.Option[eventticker.EventTicker[iotago.SlotIndex, iotago.CommitmentID]]) options.Option[Protocol] { return func(p *Protocol) { - p.optsStorageOptions = append(p.optsStorageOptions, opts...) + p.Options.CommitmentRequesterOptions = append(p.Options.CommitmentRequesterOptions, opts...) } } -func WithAttestationRequesterTryInterval(t time.Duration) options.Option[Protocol] { +func WithAttestationRequesterOptions(opts ...options.Option[eventticker.EventTicker[iotago.SlotIndex, iotago.CommitmentID]]) options.Option[Protocol] { return func(p *Protocol) { - p.optsAttestationRequesterTryInterval = t + p.Options.AttestationRequesterOptions = append(p.Options.AttestationRequesterOptions, opts...) 
} } -func WithAttestationRequesterMaxTries(n int) options.Option[Protocol] { +func WithWarpSyncRequesterOptions(opts ...options.Option[eventticker.EventTicker[iotago.SlotIndex, iotago.CommitmentID]]) options.Option[Protocol] { return func(p *Protocol) { - p.optsAttestationRequesterMaxRetries = n + p.Options.WarpSyncRequesterOptions = append(p.Options.WarpSyncRequesterOptions, opts...) } } diff --git a/pkg/protocol/protocol.go b/pkg/protocol/protocol.go index 3f6b73b1f..436ef0dcd 100644 --- a/pkg/protocol/protocol.go +++ b/pkg/protocol/protocol.go @@ -2,316 +2,216 @@ package protocol import ( "context" - "fmt" + "sync" "time" - "github.com/iotaledger/hive.go/runtime/event" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/iotaledger/hive.go/ds/reactive" + "github.com/iotaledger/hive.go/ierrors" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/log" "github.com/iotaledger/hive.go/runtime/module" "github.com/iotaledger/hive.go/runtime/options" - "github.com/iotaledger/hive.go/runtime/syncutils" "github.com/iotaledger/hive.go/runtime/workerpool" "github.com/iotaledger/iota-core/pkg/model" "github.com/iotaledger/iota-core/pkg/network" "github.com/iotaledger/iota-core/pkg/network/protocols/core" - "github.com/iotaledger/iota-core/pkg/protocol/chainmanager" "github.com/iotaledger/iota-core/pkg/protocol/engine" - "github.com/iotaledger/iota-core/pkg/protocol/engine/attestation" - "github.com/iotaledger/iota-core/pkg/protocol/engine/attestation/slotattestation" - "github.com/iotaledger/iota-core/pkg/protocol/engine/blockdag" - "github.com/iotaledger/iota-core/pkg/protocol/engine/blockdag/inmemoryblockdag" - "github.com/iotaledger/iota-core/pkg/protocol/engine/booker" - "github.com/iotaledger/iota-core/pkg/protocol/engine/booker/inmemorybooker" - "github.com/iotaledger/iota-core/pkg/protocol/engine/clock" - "github.com/iotaledger/iota-core/pkg/protocol/engine/clock/blocktime" - "github.com/iotaledger/iota-core/pkg/protocol/engine/congestioncontrol/scheduler" - "github.com/iotaledger/iota-core/pkg/protocol/engine/congestioncontrol/scheduler/drr" - "github.com/iotaledger/iota-core/pkg/protocol/engine/consensus/blockgadget" - "github.com/iotaledger/iota-core/pkg/protocol/engine/consensus/blockgadget/thresholdblockgadget" - "github.com/iotaledger/iota-core/pkg/protocol/engine/consensus/slotgadget" - "github.com/iotaledger/iota-core/pkg/protocol/engine/consensus/slotgadget/totalweightslotgadget" - "github.com/iotaledger/iota-core/pkg/protocol/engine/filter/postsolidfilter" - "github.com/iotaledger/iota-core/pkg/protocol/engine/filter/postsolidfilter/postsolidblockfilter" - "github.com/iotaledger/iota-core/pkg/protocol/engine/filter/presolidfilter" - "github.com/iotaledger/iota-core/pkg/protocol/engine/filter/presolidfilter/presolidblockfilter" - "github.com/iotaledger/iota-core/pkg/protocol/engine/ledger" - ledger1 "github.com/iotaledger/iota-core/pkg/protocol/engine/ledger/ledger" - "github.com/iotaledger/iota-core/pkg/protocol/engine/notarization" - "github.com/iotaledger/iota-core/pkg/protocol/engine/notarization/slotnotarization" - "github.com/iotaledger/iota-core/pkg/protocol/engine/syncmanager" - "github.com/iotaledger/iota-core/pkg/protocol/engine/syncmanager/trivialsyncmanager" - "github.com/iotaledger/iota-core/pkg/protocol/engine/tipmanager" - tipmanagerv1 "github.com/iotaledger/iota-core/pkg/protocol/engine/tipmanager/v1" - "github.com/iotaledger/iota-core/pkg/protocol/engine/tipselection" - tipselectionv1 
"github.com/iotaledger/iota-core/pkg/protocol/engine/tipselection/v1" - "github.com/iotaledger/iota-core/pkg/protocol/engine/upgrade" - "github.com/iotaledger/iota-core/pkg/protocol/engine/upgrade/signalingupgradeorchestrator" - "github.com/iotaledger/iota-core/pkg/protocol/enginemanager" - "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection" - "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/sybilprotectionv1" - "github.com/iotaledger/iota-core/pkg/retainer" - retainer1 "github.com/iotaledger/iota-core/pkg/retainer/retainer" - "github.com/iotaledger/iota-core/pkg/storage" iotago "github.com/iotaledger/iota.go/v4" ) +// Protocol is an implementation of the IOTA core protocol. type Protocol struct { - context context.Context - Events *Events - BlockDispatcher *BlockDispatcher - EngineManager *enginemanager.EngineManager - ChainManager *chainmanager.Manager - - Workers *workerpool.Group - networkDispatcher network.Endpoint - networkProtocol *core.Protocol - - activeEngineMutex syncutils.RWMutex - mainEngine *engine.Engine - candidateEngine *candidateEngine - - optsBaseDirectory string - optsSnapshotPath string - optsChainSwitchingThreshold int - - optsEngineOptions []options.Option[engine.Engine] - optsChainManagerOptions []options.Option[chainmanager.Manager] - optsStorageOptions []options.Option[storage.Storage] - - optsPreSolidFilterProvider module.Provider[*engine.Engine, presolidfilter.PreSolidFilter] - optsPostSolidFilterProvider module.Provider[*engine.Engine, postsolidfilter.PostSolidFilter] - optsBlockDAGProvider module.Provider[*engine.Engine, blockdag.BlockDAG] - optsTipManagerProvider module.Provider[*engine.Engine, tipmanager.TipManager] - optsTipSelectionProvider module.Provider[*engine.Engine, tipselection.TipSelection] - optsBookerProvider module.Provider[*engine.Engine, booker.Booker] - optsClockProvider module.Provider[*engine.Engine, clock.Clock] - optsBlockGadgetProvider module.Provider[*engine.Engine, blockgadget.Gadget] - optsSlotGadgetProvider module.Provider[*engine.Engine, slotgadget.Gadget] - optsSybilProtectionProvider module.Provider[*engine.Engine, sybilprotection.SybilProtection] - optsNotarizationProvider module.Provider[*engine.Engine, notarization.Notarization] - optsAttestationProvider module.Provider[*engine.Engine, attestation.Attestations] - optsSyncManagerProvider module.Provider[*engine.Engine, syncmanager.SyncManager] - optsLedgerProvider module.Provider[*engine.Engine, ledger.Ledger] - optsRetainerProvider module.Provider[*engine.Engine, retainer.Retainer] - optsSchedulerProvider module.Provider[*engine.Engine, scheduler.Scheduler] - optsUpgradeOrchestratorProvider module.Provider[*engine.Engine, upgrade.Orchestrator] - - optsAttestationRequesterTryInterval time.Duration - optsAttestationRequesterMaxRetries int - - module.Module -} - -func New(workers *workerpool.Group, dispatcher network.Endpoint, opts ...options.Option[Protocol]) (protocol *Protocol) { - return options.Apply(&Protocol{ - Events: NewEvents(), - Workers: workers, - networkDispatcher: dispatcher, - optsPreSolidFilterProvider: presolidblockfilter.NewProvider(), - optsPostSolidFilterProvider: postsolidblockfilter.NewProvider(), - optsBlockDAGProvider: inmemoryblockdag.NewProvider(), - optsTipManagerProvider: tipmanagerv1.NewProvider(), - optsTipSelectionProvider: tipselectionv1.NewProvider(), - optsBookerProvider: inmemorybooker.NewProvider(), - optsClockProvider: blocktime.NewProvider(), - optsBlockGadgetProvider: thresholdblockgadget.NewProvider(), - 
optsSlotGadgetProvider: totalweightslotgadget.NewProvider(), - optsSybilProtectionProvider: sybilprotectionv1.NewProvider(), - optsNotarizationProvider: slotnotarization.NewProvider(), - optsAttestationProvider: slotattestation.NewProvider(), - optsSyncManagerProvider: trivialsyncmanager.NewProvider(), - optsLedgerProvider: ledger1.NewProvider(), - optsRetainerProvider: retainer1.NewProvider(), - optsSchedulerProvider: drr.NewProvider(), - optsUpgradeOrchestratorProvider: signalingupgradeorchestrator.NewProvider(), - - optsBaseDirectory: "", - optsChainSwitchingThreshold: 3, - - optsAttestationRequesterTryInterval: 3 * time.Second, - optsAttestationRequesterMaxRetries: 3, - }, opts, func(p *Protocol) { - p.BlockDispatcher = NewBlockDispatcher(p) - }, (*Protocol).initEngineManager, (*Protocol).initChainManager, (*Protocol).initNetworkProtocol, (*Protocol).TriggerConstructed) -} + // Events contains a centralized access point for all events that are triggered by the main engine of the protocol. + Events *Events -// Run runs the protocol. -func (p *Protocol) Run(ctx context.Context) error { - var innerCtxCancel func() - - p.context, innerCtxCancel = context.WithCancel(ctx) - defer innerCtxCancel() + // Workers contains the worker pools that are used by the protocol. + Workers *workerpool.Group - p.linkToEngine(p.mainEngine) + // Network contains the network endpoint of the protocol. + Network *core.Protocol - rootCommitment := p.mainEngine.EarliestRootCommitment(p.mainEngine.Storage.Settings().LatestFinalizedSlot()) + // Commitments contains the commitments that are managed by the protocol. + Commitments *Commitments - // The root commitment is the earliest commitment we will ever need to know to solidify commitment chains, we can - // then initialize the chain manager with it, and identify our engine to be on such chain. - // Upon engine restart, such chain will be loaded with the latest finalized slot, and the chain manager, not needing - // persistent storage, will be able to continue from there. - p.mainEngine.SetChainID(rootCommitment.ID()) - p.ChainManager.Initialize(rootCommitment) + // Chains contains the chains that are managed by the protocol. + Chains *Chains - // Fill the chain manager with all our known commitments so that the chain is solid - for i := rootCommitment.Slot(); i <= p.mainEngine.Storage.Settings().LatestCommitment().Slot(); i++ { - if cm, err := p.mainEngine.Storage.Commitments().Load(i); err == nil { - p.ChainManager.ProcessCommitment(cm) - } - } + // BlocksProtocol contains the subcomponent that is responsible for handling block requests and responses. + BlocksProtocol *BlocksProtocol - p.runNetworkProtocol() + // CommitmentsProtocol contains the subcomponent that is responsible for handling commitment requests and responses. + CommitmentsProtocol *CommitmentsProtocol - p.TriggerInitialized() + // AttestationsProtocol contains the subcomponent that is responsible for handling attestation requests and + // responses. + AttestationsProtocol *AttestationsProtocol - <-p.context.Done() + // WarpSyncProtocol contains the subcomponent that is responsible for handling warp sync requests and responses. + WarpSyncProtocol *WarpSyncProtocol - p.TriggerShutdown() + // Engines contains the engines that are managed by the protocol. + Engines *Engines - p.shutdown() + // Options contains the options that were used to create the protocol. + Options *Options - p.TriggerStopped() + // EvictionState contains the eviction state of the protocol. 
+ reactive.EvictionState[iotago.SlotIndex] - return p.context.Err() + // ReactiveModule embeds the reactive module logic of the protocol. + *module.ReactiveModule } -func (p *Protocol) linkToEngine(engineInstance *engine.Engine) { - p.Events.Engine.LinkTo(engineInstance.Events) -} +// New creates a new protocol instance from the given parameters. +func New(logger log.Logger, workers *workerpool.Group, networkEndpoint network.Endpoint, opts ...options.Option[Protocol]) *Protocol { + return options.Apply(&Protocol{ + Events: NewEvents(), + Workers: workers, + Options: NewDefaultOptions(), + ReactiveModule: module.NewReactiveModule(logger), + EvictionState: reactive.NewEvictionState[iotago.SlotIndex](), + }, opts, func(p *Protocol) { + shutdownSubComponents := p.initSubcomponents(networkEndpoint) -func (p *Protocol) shutdown() { - if p.networkProtocol != nil { - p.networkProtocol.Shutdown() - } + p.Initialized.OnTrigger(func() { + shutdown := lo.Batch( + p.initEviction(), + p.initGlobalEventsRedirection(), + p.initNetwork(), - p.ChainManager.Shutdown() - p.Workers.Shutdown() + shutdownSubComponents, + ) - p.activeEngineMutex.RLock() - p.mainEngine.Shutdown() - if p.candidateEngine != nil { - p.candidateEngine.engine.Shutdown() - } - p.activeEngineMutex.RUnlock() -} + p.Shutdown.OnTrigger(shutdown) + }) -func (p *Protocol) initEngineManager() { - p.EngineManager = enginemanager.New( - p.Workers.CreateGroup("EngineManager"), - p.HandleError, - p.optsBaseDirectory, - DatabaseVersion, - p.optsStorageOptions, - p.optsEngineOptions, - p.optsPreSolidFilterProvider, - p.optsPostSolidFilterProvider, - p.optsBlockDAGProvider, - p.optsBookerProvider, - p.optsClockProvider, - p.optsBlockGadgetProvider, - p.optsSlotGadgetProvider, - p.optsSybilProtectionProvider, - p.optsNotarizationProvider, - p.optsAttestationProvider, - p.optsLedgerProvider, - p.optsSchedulerProvider, - p.optsTipManagerProvider, - p.optsTipSelectionProvider, - p.optsRetainerProvider, - p.optsUpgradeOrchestratorProvider, - p.optsSyncManagerProvider, - ) + p.Constructed.Trigger() - mainEngine, err := p.EngineManager.LoadActiveEngine(p.optsSnapshotPath) - if err != nil { - panic(fmt.Sprintf("could not load active engine: %s", err)) - } - p.mainEngine = mainEngine + p.waitInitialized() + }) } -func (p *Protocol) initChainManager() { - p.ChainManager = chainmanager.NewManager(p, p.HandleError, p.optsChainManagerOptions...) - p.Events.ChainManager.LinkTo(p.ChainManager.Events) - - // This needs to be hooked so that the ChainManager always knows the commitments we issued. - // Else our own BlockIssuer might use a commitment that the ChainManager does not know yet. - p.Events.Engine.Notarization.SlotCommitted.Hook(func(details *notarization.SlotCommittedDetails) { - p.ChainManager.ProcessCommitment(details.Commitment) - }) +// IssueBlock issues a block to the node. +func (p *Protocol) IssueBlock(block *model.Block) error { + p.Network.Events.BlockReceived.Trigger(block, "self") - p.Events.Engine.SlotGadget.SlotFinalized.Hook(func(slot iotago.SlotIndex) { - rootCommitment := p.MainEngineInstance().EarliestRootCommitment(slot) + return nil +} - // It is essential that we set the rootCommitment before evicting the chainManager's state, this way - // we first specify the chain's cut-off point, and only then evict the state. 
It is also important to - // note that no multiple goroutines should be allowed to perform this operation at once, hence the - // hooking worker pool should always have a single worker or these two calls should be protected by a lock. - p.ChainManager.SetRootCommitment(rootCommitment) +// Run starts the protocol. +func (p *Protocol) Run(ctx context.Context) error { + p.Initialized.Trigger() - // We want to evict just below the height of our new root commitment (so that the slot of the root commitment - // stays in memory storage and with it the root commitment itself as well). - if rootCommitment.ID().Slot() > 0 { - p.ChainManager.EvictUntil(rootCommitment.ID().Slot() - 1) - } - }) + <-ctx.Done() - wpForking := p.Workers.CreatePool("Protocol.Forking", workerpool.WithWorkerCount(1)) // Using just 1 worker to avoid contention - p.Events.ChainManager.ForkDetected.Hook(p.onForkDetected, event.WithWorkerPool(wpForking)) -} + p.Shutdown.Trigger() + p.Stopped.Trigger() -func (p *Protocol) IssueBlock(block *model.Block) error { - return p.BlockDispatcher.Dispatch(block, p.networkDispatcher.LocalPeerID()) + return ctx.Err() } -func (p *Protocol) MainEngineInstance() *engine.Engine { - p.activeEngineMutex.RLock() - defer p.activeEngineMutex.RUnlock() +// APIForVersion returns the API for the given version. +func (p *Protocol) APIForVersion(version iotago.Version) (api iotago.API, err error) { + if mainEngineInstance := p.Engines.Main.Get(); mainEngineInstance != nil { + return mainEngineInstance.APIForVersion(version) + } - return p.mainEngine + return nil, ierrors.New("no engine instance available") } -func (p *Protocol) CandidateEngineInstance() *engine.Engine { - p.activeEngineMutex.RLock() - defer p.activeEngineMutex.RUnlock() - - if p.candidateEngine == nil { - return nil - } +// APIForSlot returns the API for the given slot. +func (p *Protocol) APIForSlot(slot iotago.SlotIndex) iotago.API { + return p.Engines.Main.Get().APIForSlot(slot) +} - return p.candidateEngine.engine +// APIForEpoch returns the API for the given epoch. +func (p *Protocol) APIForEpoch(epoch iotago.EpochIndex) iotago.API { + return p.Engines.Main.Get().APIForEpoch(epoch) } -func (p *Protocol) Network() *core.Protocol { - return p.networkProtocol +// APIForTime returns the API for the given time. +func (p *Protocol) APIForTime(t time.Time) iotago.API { + return p.Engines.Main.Get().APIForTime(t) } +// CommittedAPI returns the API for the committed state. func (p *Protocol) CommittedAPI() iotago.API { - return p.MainEngineInstance().CommittedAPI() + return p.Engines.Main.Get().CommittedAPI() } +// LatestAPI returns the latest API. func (p *Protocol) LatestAPI() iotago.API { - return p.MainEngineInstance().LatestAPI() + return p.Engines.Main.Get().LatestAPI() +} + +// initSubcomponents initializes the subcomponents of the protocol and returns a function that shuts them down. 
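Note on the accessor change above: every APIFor*/CommittedAPI/LatestAPI helper now reads the current main engine through Engines.Main.Get() instead of the old mutex-guarded MainEngineInstance() getter. A minimal, stdlib-only sketch of such a hot-swappable engine reference follows; the names mainRef and Engine are hypothetical stand-ins, and the real Engines.Main is a reactive variable that additionally notifies subscribers when it changes.

package enginesketch

import "sync/atomic"

// Engine is a stand-in for *engine.Engine; only what the sketch needs.
type Engine struct {
	Name string
}

// mainRef holds whichever engine is currently considered "main" and lets the
// protocol swap it atomically when it switches to a candidate chain.
type mainRef struct {
	current atomic.Pointer[Engine]
}

// Get returns the current main engine (nil before the first Set).
func (r *mainRef) Get() *Engine {
	return r.current.Load()
}

// Set atomically replaces the main engine, e.g. after a chain switch.
func (r *mainRef) Set(engine *Engine) {
	r.current.Store(engine)
}

Call sites then simply chain off the getter, as in p.Engines.Main.Get().APIForSlot(slot) above.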
+func (p *Protocol) initSubcomponents(networkEndpoint network.Endpoint) (shutdown func()) { + p.Network = core.NewProtocol(networkEndpoint, p.Workers.CreatePool("NetworkProtocol"), p) + p.BlocksProtocol = newBlocksProtocol(p) + p.CommitmentsProtocol = newCommitmentsProtocol(p) + p.AttestationsProtocol = newAttestationsProtocol(p) + p.WarpSyncProtocol = newWarpSyncProtocol(p) + p.Commitments = newCommitments(p) + p.Chains = newChains(p) + p.Engines = newEngines(p) + + return func() { + p.BlocksProtocol.Shutdown() + p.CommitmentsProtocol.Shutdown() + p.AttestationsProtocol.Shutdown() + p.WarpSyncProtocol.Shutdown() + p.Network.Shutdown() + p.Workers.WaitChildren() + p.Engines.Shutdown.Trigger() + p.Workers.Shutdown() + } } -func (p *Protocol) APIForVersion(version iotago.Version) (iotago.API, error) { - return p.MainEngineInstance().APIForVersion(version) +// initEviction initializes the eviction of old data when the engine advances and returns a function that shuts it down. +func (p *Protocol) initEviction() (shutdown func()) { + return p.Commitments.Root.OnUpdate(func(_ *Commitment, rootCommitment *Commitment) { + // TODO: DECIDE ON DATA AVAILABILITY TIMESPAN / EVICTION STRATEGY + // p.Evict(rootCommitment.Slot() - 1) + }) } -func (p *Protocol) APIForTime(t time.Time) iotago.API { - return p.MainEngineInstance().APIForTime(t) -} +// initGlobalEventsRedirection initializes the global events redirection of the protocol and returns a function that +// shuts it down. +func (p *Protocol) initGlobalEventsRedirection() (shutdown func()) { + return p.Engines.Main.WithNonEmptyValue(func(mainEngine *engine.Engine) (shutdown func()) { + p.Events.Engine.LinkTo(mainEngine.Events) -func (p *Protocol) APIForSlot(slot iotago.SlotIndex) iotago.API { - return p.MainEngineInstance().APIForSlot(slot) + return func() { + p.Events.Engine.LinkTo(nil) + } + }) } -func (p *Protocol) APIForEpoch(epoch iotago.EpochIndex) iotago.API { - return p.MainEngineInstance().APIForEpoch(epoch) +// initNetwork initializes the network of the protocol and returns a function that shuts it down. +func (p *Protocol) initNetwork() (shutdown func()) { + return lo.Batch( + p.Network.OnError(func(err error, peer peer.ID) { p.LogError("network error", "peer", peer, "error", err) }), + p.Network.OnBlockReceived(p.BlocksProtocol.ProcessResponse), + p.Network.OnBlockRequestReceived(p.BlocksProtocol.ProcessRequest), + p.Network.OnCommitmentReceived(p.CommitmentsProtocol.ProcessResponse), + p.Network.OnCommitmentRequestReceived(p.CommitmentsProtocol.ProcessRequest), + p.Network.OnAttestationsReceived(p.AttestationsProtocol.ProcessResponse), + p.Network.OnAttestationsRequestReceived(p.AttestationsProtocol.ProcessRequest), + p.Network.OnWarpSyncResponseReceived(p.WarpSyncProtocol.ProcessResponse), + p.Network.OnWarpSyncRequestReceived(p.WarpSyncProtocol.ProcessRequest), + ) } -func (p *Protocol) HandleError(err error) { - if err != nil { - p.Events.Error.Trigger(err) - } -} +// waitInitialized waits until the main engine is initialized (published its root commitment). 
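The init* helpers above all follow the same shape: each wires something up and returns its own teardown, and Initialized.OnTrigger combines those teardowns (via lo.Batch) into the single function handed to Shutdown.OnTrigger. A stdlib-only sketch of that composition, where batch and wire are hypothetical helpers mirroring the role lo.Batch plays here:

package teardownsketch

// batch combines several teardown callbacks into one that invokes them in order.
func batch(teardowns ...func()) func() {
	return func() {
		for _, teardown := range teardowns {
			teardown()
		}
	}
}

// wire shows the shape used in the diff: every subsystem initializer returns
// its teardown, and the composed result is what the shutdown hook runs once.
func wire(initEviction, initEventsRedirection, initNetwork func() (teardown func())) (shutdown func()) {
	return batch(
		initEviction(),
		initEventsRedirection(),
		initNetwork(),
	)
}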
+func (p *Protocol) waitInitialized() { + var waitInitialized sync.WaitGroup + + waitInitialized.Add(1) + p.Commitments.Root.OnUpdateOnce(func(_ *Commitment, _ *Commitment) { + waitInitialized.Done() + }, func(_ *Commitment, rootCommitment *Commitment) bool { return rootCommitment != nil }) -var _ iotago.APIProvider = &Protocol{} + waitInitialized.Wait() +} diff --git a/pkg/protocol/protocol_attestations.go b/pkg/protocol/protocol_attestations.go new file mode 100644 index 000000000..16a7f509c --- /dev/null +++ b/pkg/protocol/protocol_attestations.go @@ -0,0 +1,260 @@ +package protocol + +import ( + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/iotaledger/hive.go/core/eventticker" + "github.com/iotaledger/hive.go/ds/shrinkingmap" + "github.com/iotaledger/hive.go/ierrors" + "github.com/iotaledger/hive.go/kvstore" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/log" + "github.com/iotaledger/hive.go/runtime/workerpool" + "github.com/iotaledger/iota-core/pkg/model" + iotago "github.com/iotaledger/iota.go/v4" + "github.com/iotaledger/iota.go/v4/merklehasher" +) + +// AttestationsProtocol is a subcomponent of the protocol that is responsible for handling attestation requests and +// responses. +type AttestationsProtocol struct { + // protocol contains a reference to the Protocol instance that this component belongs to. + protocol *Protocol + + // workerPool contains the worker pool that is used to process attestation requests and responses asynchronously. + workerPool *workerpool.WorkerPool + + // ticker contains the ticker that is used to send attestation requests. + ticker *eventticker.EventTicker[iotago.SlotIndex, iotago.CommitmentID] + + // commitmentVerifiers contains the commitment verifiers that are used to verify received attestations. + commitmentVerifiers *shrinkingmap.ShrinkingMap[iotago.CommitmentID, *CommitmentVerifier] + + // Logger embeds a logger that can be used to log messages emitted by this component. + log.Logger +} + +// newAttestationsProtocol creates a new attestation protocol instance for the given protocol. +func newAttestationsProtocol(protocol *Protocol) *AttestationsProtocol { + a := &AttestationsProtocol{ + Logger: lo.Return1(protocol.Logger.NewChildLogger("Attestations")), + protocol: protocol, + workerPool: protocol.Workers.CreatePool("Attestations"), + ticker: eventticker.New[iotago.SlotIndex, iotago.CommitmentID](protocol.Options.AttestationRequesterOptions...), + commitmentVerifiers: shrinkingmap.New[iotago.CommitmentID, *CommitmentVerifier](), + } + + a.ticker.Events.Tick.Hook(a.sendRequest) + + protocol.Constructed.OnTrigger(func() { + protocol.Chains.WithElements(func(chain *Chain) (shutdown func()) { + return chain.RequestAttestations.WithNonEmptyValue(func(requestAttestations bool) (shutdown func()) { + return a.setupCommitmentVerifier(chain) + }) + }) + + protocol.Commitments.WithElements(func(commitment *Commitment) (shutdown func()) { + return commitment.RequestAttestations.OnUpdate(func(_ bool, requestAttestations bool) { + if requestAttestations { + if commitment.CumulativeWeight() == 0 { + commitment.IsAttested.Set(true) + } else { + a.ticker.StartTicker(commitment.ID()) + } + } else { + a.ticker.StopTicker(commitment.ID()) + } + }) + }) + }) + + return a +} + +// ProcessResponse processes the given attestation response. 
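waitInitialized above blocks the constructor until Commitments.Root first carries a non-nil value: a WaitGroup is released by a one-shot subscription whose filter only accepts non-nil updates. A stdlib-only approximation of that wait-for-first-matching-value pattern; waitFirst and the update channel are hypothetical stand-ins for the reactive variable and its filtered OnUpdateOnce:

package waitsketch

import "sync"

// waitFirst blocks until accept returns true for a value seen on updates and
// then returns that value (mirroring a filtered one-shot subscription plus a
// WaitGroup, as in waitInitialized).
func waitFirst[T any](updates <-chan T, accept func(T) bool) T {
	var (
		waitGroup sync.WaitGroup
		result    T
	)
	waitGroup.Add(1)

	go func() {
		for value := range updates {
			if accept(value) {
				result = value
				waitGroup.Done()

				return
			}
		}
	}()

	waitGroup.Wait()

	return result
}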
+func (a *AttestationsProtocol) ProcessResponse(commitmentModel *model.Commitment, attestations []*iotago.Attestation, merkleProof *merklehasher.Proof[iotago.Identifier], from peer.ID) { + a.workerPool.Submit(func() { + commitment, _, err := a.protocol.Commitments.publishCommitmentModel(commitmentModel) + if err != nil { + a.LogDebug("failed to publish commitment when processing attestations", "commitmentID", commitmentModel.ID(), "peer", from, "error", err) + + return + } + + if commitment.AttestedWeight.Compute(func(currentWeight uint64) uint64 { + if !commitment.RequestAttestations.Get() { + a.LogTrace("received attestations for previously attested commitment", "commitment", commitment.LogName()) + + return currentWeight + } + + chain := commitment.Chain.Get() + if chain == nil { + a.LogDebug("failed to find chain for commitment when processing attestations", "commitment", commitment.LogName()) + + return currentWeight + } + + commitmentVerifier, exists := a.commitmentVerifiers.Get(chain.ForkingPoint.Get().ID()) + if !exists || commitmentVerifier == nil { + a.LogDebug("failed to retrieve commitment verifier", "commitment", commitment.LogName()) + + return currentWeight + } + + _, actualWeight, err := commitmentVerifier.verifyCommitment(commitment, attestations, merkleProof) + if err != nil { + a.LogError("failed to verify commitment", "commitment", commitment.LogName(), "error", err) + + return currentWeight + } + + if actualWeight > currentWeight { + a.LogDebug("received response", "commitment", commitment.LogName(), "fromPeer", from) + } + + return actualWeight + }) > 0 { + commitment.IsAttested.Set(true) + } + }) +} + +// ProcessRequest processes the given attestation request. +func (a *AttestationsProtocol) ProcessRequest(commitmentID iotago.CommitmentID, from peer.ID) { + a.workerPool.Submit(func() { + commitment, err := a.protocol.Commitments.Get(commitmentID, false) + if err != nil { + if !ierrors.Is(err, ErrorCommitmentNotFound) { + a.LogError("failed to load requested commitment", "commitmentID", commitmentID, "fromPeer", from, "err", err) + } else { + a.LogTrace("failed to load requested commitment", "commitmentID", commitmentID, "fromPeer", from, "err", err) + } + + return + } + + chain := commitment.Chain.Get() + if chain == nil { + a.LogTrace("request for unsolid commitment", "commitmentID", commitment.LogName(), "fromPeer", from) + + return + } + + targetEngine := commitment.TargetEngine() + if targetEngine == nil { + a.LogTrace("request for chain without engine", "chain", chain.LogName(), "fromPeer", from) + + return + } + + if targetEngine.Storage.Settings().LatestCommitment().Slot() < commitmentID.Slot() { + a.LogTrace("requested commitment not verified", "commitment", commitment.LogName(), "fromPeer", from) + + return + } + + commitmentModel, err := targetEngine.Storage.Commitments().Load(commitmentID.Slot()) + if err != nil { + if !ierrors.Is(err, kvstore.ErrKeyNotFound) { + a.LogError("failed to load requested commitment from engine", "commitment", commitment.LogName(), "fromPeer", from, "err", err) + } else { + a.LogTrace("requested commitment not found in engine", "commitment", commitment.LogName(), "fromPeer", from) + } + + return + } + + if commitmentModel.ID() != commitmentID { + a.LogTrace("commitment ID mismatch", "requestedCommitment", commitment.LogName(), "loadedCommitment", commitmentModel.ID(), "fromPeer", from) + + return + } + + attestations, err := targetEngine.Attestations.Get(commitmentID.Slot()) + if err != nil { + a.LogError("failed to load 
requested attestations", "commitment", commitment.LogName(), "fromPeer", from) + + return + } + + rootsStorage, err := targetEngine.Storage.Roots(commitmentID.Slot()) + if err != nil { + a.LogError("failed to load roots storage for requested attestations", "commitment", commitment.LogName(), "fromPeer", from) + + return + } + + roots, exists, err := rootsStorage.Load(commitmentID) + if err != nil { + a.LogError("failed to load roots for requested attestations", "commitment", commitment.LogName(), "err", err, "fromPeer", from) + + return + } else if !exists { + a.LogTrace("roots not found for requested attestations", "commitment", commitment.LogName(), "fromPeer", from) + + return + } + + if err = a.protocol.Network.SendAttestations(commitmentModel, attestations, roots.AttestationsProof(), from); err != nil { + a.LogError("failed to send attestations", "commitment", commitment.LogName(), "fromPeer", from, "err", err) + } else { + a.LogTrace("processed request", "commitment", commitment.LogName(), "fromPeer", from) + } + }) +} + +// Shutdown shuts down the attestation protocol. +func (a *AttestationsProtocol) Shutdown() { + a.ticker.Shutdown() + a.workerPool.Shutdown().ShutdownComplete.Wait() +} + +// setupCommitmentVerifier sets up the commitment verifier for the given chain. +func (a *AttestationsProtocol) setupCommitmentVerifier(chain *Chain) (shutdown func()) { + forkingPoint := chain.ForkingPoint.Get() + if forkingPoint == nil { + a.LogError("failed to retrieve forking point", "chain", chain.LogName()) + + return nil + } + + if forkingPoint.IsRoot.Get() { + a.LogTrace("skipping commitment verifier setup for main chain", "chain", chain.LogName()) + + return nil + } + + parentOfForkingPoint := forkingPoint.Parent.Get() + if parentOfForkingPoint == nil { + a.LogError("failed to retrieve parent of forking point", "chain", chain.LogName()) + + return nil + } + + a.commitmentVerifiers.GetOrCreate(forkingPoint.ID(), func() (commitmentVerifier *CommitmentVerifier) { + commitmentVerifier, err := newCommitmentVerifier(forkingPoint.Chain.Get().LatestEngine(), parentOfForkingPoint.Commitment) + if err != nil { + a.LogError("failed to create commitment verifier", "chain", chain.LogName(), "error", err) + } + + return commitmentVerifier + }) + + return func() { + a.commitmentVerifiers.Delete(forkingPoint.ID()) + } +} + +// sendRequest sends an attestation request for the given commitment ID. 
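setupCommitmentVerifier above keeps exactly one verifier per forking point: it is created lazily when a chain starts requesting attestations and removed again by the returned teardown once the chain stops. A stdlib-only sketch of that keyed get-or-create-with-teardown registry; the registry type and its methods are hypothetical, the diff uses shrinkingmap.ShrinkingMap for the same purpose:

package verifiersketch

import "sync"

// registry keeps one value per key and hands back a teardown that removes the
// entry again, mirroring how commitment verifiers are scoped to forking points.
type registry[K comparable, V any] struct {
	mutex sync.Mutex
	items map[K]V
}

func newRegistry[K comparable, V any]() *registry[K, V] {
	return &registry[K, V]{items: make(map[K]V)}
}

// getOrCreate returns the existing entry for key or stores the one produced by
// create; the returned teardown deletes the entry (as the chain subscription
// does in the diff when attestations are no longer requested).
func (r *registry[K, V]) getOrCreate(key K, create func() V) (value V, teardown func()) {
	r.mutex.Lock()
	defer r.mutex.Unlock()

	value, exists := r.items[key]
	if !exists {
		value = create()
		r.items[key] = value
	}

	return value, func() {
		r.mutex.Lock()
		defer r.mutex.Unlock()

		delete(r.items, key)
	}
}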
+func (a *AttestationsProtocol) sendRequest(commitmentID iotago.CommitmentID) { + a.workerPool.Submit(func() { + if commitment, err := a.protocol.Commitments.Get(commitmentID, false); err == nil { + a.protocol.Network.RequestAttestations(commitmentID) + + a.LogDebug("request", "commitment", commitment.LogName()) + } else { + a.LogError("failed to load commitment", "commitmentID", commitmentID, "err", err) + } + }) +} diff --git a/pkg/protocol/protocol_blocks.go b/pkg/protocol/protocol_blocks.go new file mode 100644 index 000000000..83bdc8fc6 --- /dev/null +++ b/pkg/protocol/protocol_blocks.go @@ -0,0 +1,131 @@ +package protocol + +import ( + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/iotaledger/hive.go/ds/types" + "github.com/iotaledger/hive.go/ierrors" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/log" + "github.com/iotaledger/hive.go/runtime/workerpool" + "github.com/iotaledger/iota-core/pkg/core/buffer" + "github.com/iotaledger/iota-core/pkg/model" + "github.com/iotaledger/iota-core/pkg/protocol/engine" + "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" + iotago "github.com/iotaledger/iota.go/v4" +) + +// BlocksProtocol is a subcomponent of the protocol that is responsible for handling block requests and responses. +type BlocksProtocol struct { + // protocol contains a reference to the Protocol instance that this component belongs to. + protocol *Protocol + + // workerPool contains the worker pool that is used to process block requests and responses asynchronously. + workerPool *workerpool.WorkerPool + + // droppedBlocksBuffer contains a buffer for dropped blocks that reference unsolid commitments that can be replayed + // at a later point in time (to make tests more reliable as we have no continuous activity). + droppedBlocksBuffer *buffer.UnsolidCommitmentBuffer[*types.Tuple[*model.Block, peer.ID]] + + // Logger embeds a logger that can be used to log messages emitted by this chain. + log.Logger +} + +// newBlocksProtocol creates a new blocks protocol instance for the given protocol. +func newBlocksProtocol(protocol *Protocol) *BlocksProtocol { + b := &BlocksProtocol{ + Logger: lo.Return1(protocol.Logger.NewChildLogger("Blocks")), + protocol: protocol, + workerPool: protocol.Workers.CreatePool("Blocks"), + droppedBlocksBuffer: buffer.NewUnsolidCommitmentBuffer[*types.Tuple[*model.Block, peer.ID]](20, 100), + } + + protocol.Constructed.OnTrigger(func() { + protocol.Commitments.WithElements(func(commitment *Commitment) (shutdown func()) { + return commitment.ReplayDroppedBlocks.OnUpdate(func(_ bool, replayBlocks bool) { + if replayBlocks { + for _, droppedBlock := range b.droppedBlocksBuffer.GetValues(commitment.ID()) { + b.LogTrace("replaying dropped block", "commitmentID", commitment.ID(), "blockID", droppedBlock.A.ID()) + + b.ProcessResponse(droppedBlock.A, droppedBlock.B) + } + } + }) + }) + + protocol.Chains.WithInitializedEngines(func(chain *Chain, engine *engine.Engine) (shutdown func()) { + return lo.Batch( + engine.Events.BlockRequester.Tick.Hook(b.SendRequest).Unhook, + engine.Events.Scheduler.BlockScheduled.Hook(func(block *blocks.Block) { b.SendResponse(block.ModelBlock()) }).Unhook, + engine.Events.Scheduler.BlockSkipped.Hook(func(block *blocks.Block) { b.SendResponse(block.ModelBlock()) }).Unhook, + ) + }) + }) + + return b +} + +// SendRequest sends a request for the given block to all peers. 
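newBlocksProtocol above parks blocks whose referenced commitment is not yet solid in droppedBlocksBuffer and replays them once the commitment's ReplayDroppedBlocks flag flips to true (ProcessResponse below is what feeds the buffer). A stdlib-only sketch of such a bounded park-and-replay buffer; parkingLot and its capacity handling are hypothetical, the diff uses buffer.UnsolidCommitmentBuffer with limits 20 and 100:

package droppedsketch

import "sync"

// parkingLot buffers a bounded number of values per key so they can be
// replayed later, the role UnsolidCommitmentBuffer plays in the diff.
type parkingLot[K comparable, V any] struct {
	mutex    sync.Mutex
	perKey   map[K][]V
	capacity int
}

func newParkingLot[K comparable, V any](capacity int) *parkingLot[K, V] {
	return &parkingLot[K, V]{perKey: make(map[K][]V), capacity: capacity}
}

// Add parks value under key; it reports false when the per-key buffer is full
// (the diff logs an error in that case).
func (p *parkingLot[K, V]) Add(key K, value V) bool {
	p.mutex.Lock()
	defer p.mutex.Unlock()

	if len(p.perKey[key]) >= p.capacity {
		return false
	}
	p.perKey[key] = append(p.perKey[key], value)

	return true
}

// Replay drains and returns everything parked under key, so the caller can
// re-process the values once the commitment became solid.
func (p *parkingLot[K, V]) Replay(key K) []V {
	p.mutex.Lock()
	defer p.mutex.Unlock()

	values := p.perKey[key]
	delete(p.perKey, key)

	return values
}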
+func (b *BlocksProtocol) SendRequest(blockID iotago.BlockID) { + b.workerPool.Submit(func() { + b.protocol.Network.RequestBlock(blockID) + + b.LogTrace("request", "blockID", blockID) + }) +} + +// SendResponse sends the given block to all peers. +func (b *BlocksProtocol) SendResponse(block *model.Block) { + b.workerPool.Submit(func() { + b.protocol.Network.SendBlock(block) + + b.LogTrace("sent", "blockID", block.ID()) + }) +} + +// ProcessResponse processes the given block response. +func (b *BlocksProtocol) ProcessResponse(block *model.Block, from peer.ID) { + b.workerPool.Submit(func() { + // abort if the commitment belongs to an evicted slot + commitment, err := b.protocol.Commitments.Get(block.ProtocolBlock().Header.SlotCommitmentID, true) + if err != nil && ierrors.Is(ErrorSlotEvicted, err) { + b.LogError("dropped block referencing unsolidifiable commitment", "commitmentID", block.ProtocolBlock().Header.SlotCommitmentID, "blockID", block.ID(), "err", err) + + return + } + + // add the block to the dropped blocks buffer if we could not dispatch it to the chain + if commitment == nil || !commitment.Chain.Get().DispatchBlock(block, from) { + if !b.droppedBlocksBuffer.Add(block.ProtocolBlock().Header.SlotCommitmentID, types.NewTuple(block, from)) { + b.LogError("failed to add dropped block referencing unsolid commitment to dropped blocks buffer", "commitmentID", block.ProtocolBlock().Header.SlotCommitmentID, "blockID", block.ID()) + } else { + b.LogTrace("dropped block referencing unsolid commitment added to dropped blocks buffer", "commitmentID", block.ProtocolBlock().Header.SlotCommitmentID, "blockID", block.ID()) + } + + return + } + + b.LogTrace("received block", "blockID", block.ID(), "commitment", commitment.LogName()) + }) +} + +// ProcessRequest processes the given block request. +func (b *BlocksProtocol) ProcessRequest(blockID iotago.BlockID, from peer.ID) { + b.workerPool.Submit(func() { + block, exists := b.protocol.Engines.Main.Get().Block(blockID) + if !exists { + b.LogTrace("requested block not found", "blockID", blockID) + + return + } + + b.protocol.Network.SendBlock(block, from) + + b.LogTrace("processed block request", "blockID", blockID) + }) +} + +// Shutdown shuts down the blocks protocol and waits for all pending requests to be finished. +func (b *BlocksProtocol) Shutdown() { + b.workerPool.Shutdown().ShutdownComplete.Wait() +} diff --git a/pkg/protocol/protocol_commitments.go b/pkg/protocol/protocol_commitments.go new file mode 100644 index 000000000..56516d18f --- /dev/null +++ b/pkg/protocol/protocol_commitments.go @@ -0,0 +1,112 @@ +package protocol + +import ( + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/iotaledger/hive.go/core/eventticker" + "github.com/iotaledger/hive.go/ierrors" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/log" + "github.com/iotaledger/hive.go/runtime/workerpool" + "github.com/iotaledger/iota-core/pkg/core/promise" + "github.com/iotaledger/iota-core/pkg/model" + iotago "github.com/iotaledger/iota.go/v4" +) + +// CommitmentsProtocol is a subcomponent of the protocol that is responsible for handling commitment requests and +// responses. +type CommitmentsProtocol struct { + // protocol contains a reference to the Protocol instance that this component belongs to. + protocol *Protocol + + // workerPool contains the worker pool that is used to process commitment requests and responses asynchronously. 
+ workerPool *workerpool.WorkerPool + + // ticker contains the ticker that is used to send commitment requests. + ticker *eventticker.EventTicker[iotago.SlotIndex, iotago.CommitmentID] + + // Logger embeds a logger that can be used to log messages emitted by this chain. + log.Logger +} + +// newCommitmentsProtocol creates a new commitment protocol instance for the given protocol. +func newCommitmentsProtocol(protocol *Protocol) *CommitmentsProtocol { + c := &CommitmentsProtocol{ + Logger: lo.Return1(protocol.Logger.NewChildLogger("Commitments")), + protocol: protocol, + workerPool: protocol.Workers.CreatePool("Commitments"), + ticker: eventticker.New[iotago.SlotIndex, iotago.CommitmentID](protocol.Options.CommitmentRequesterOptions...), + } + + c.ticker.Events.Tick.Hook(c.SendRequest) + + return c +} + +// StartTicker starts the ticker for the given commitment. +func (c *CommitmentsProtocol) StartTicker(commitmentPromise *promise.Promise[*Commitment], commitmentID iotago.CommitmentID) { + c.ticker.StartTicker(commitmentID) + + commitmentPromise.OnComplete(func() { + c.ticker.StopTicker(commitmentID) + }) +} + +// SendRequest sends a commitment request for the given commitment ID to all peers. +func (c *CommitmentsProtocol) SendRequest(commitmentID iotago.CommitmentID) { + c.workerPool.Submit(func() { + c.protocol.Network.RequestSlotCommitment(commitmentID) + + c.LogDebug("request", "commitment", commitmentID) + }) +} + +// SendResponse sends a commitment response for the given commitment to the given peer. +func (c *CommitmentsProtocol) SendResponse(commitment *Commitment, to peer.ID) { + c.workerPool.Submit(func() { + c.protocol.Network.SendSlotCommitment(commitment.Commitment, to) + + c.LogTrace("sent commitment", "commitment", commitment.LogName(), "toPeer", to) + }) +} + +// ProcessResponse processes the given commitment response. +func (c *CommitmentsProtocol) ProcessResponse(commitmentModel *model.Commitment, from peer.ID) { + c.workerPool.Submit(func() { + // Verify the commitment's version corresponds to the protocol version for the slot. + apiForSlot := c.protocol.APIForSlot(commitmentModel.Slot()) + if apiForSlot.Version() != commitmentModel.Commitment().ProtocolVersion { + c.LogDebug("received commitment with invalid protocol version", "commitment", commitmentModel.ID(), "version", commitmentModel.Commitment().ProtocolVersion, "expectedVersion", apiForSlot.Version(), "fromPeer", from) + + return + } + + if commitment, published, err := c.protocol.Commitments.publishCommitmentModel(commitmentModel); err != nil { + c.LogError("failed to process commitment", "fromPeer", from, "err", err) + } else if published { + c.LogTrace("received response", "commitment", commitment.LogName(), "fromPeer", from) + } + }) +} + +// ProcessRequest processes the given commitment request. +func (c *CommitmentsProtocol) ProcessRequest(commitmentID iotago.CommitmentID, from peer.ID) { + c.workerPool.Submit(func() { + commitment, err := c.protocol.Commitments.Get(commitmentID) + if err != nil { + logLevel := lo.Cond(ierrors.Is(err, ErrorCommitmentNotFound), log.LevelTrace, log.LevelError) + + c.Log("failed to load commitment for commitment request", logLevel, "commitmentID", commitmentID, "fromPeer", from, "error", err) + + return + } + + c.SendResponse(commitment, from) + }) +} + +// Shutdown shuts down the commitment protocol and waits for all pending requests to be processed. 
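StartTicker/SendRequest above implement the requester half of the request/response pattern: a commitment request is re-sent on every tick until the promise for that commitment completes and stops the ticker. A stdlib-only sketch of that retry loop; keepRequesting and the done channel are hypothetical stand-ins for the eventticker plus the promise's OnComplete hook:

package requestersketch

import "time"

// keepRequesting re-invokes request every interval until done is closed,
// mirroring how the event ticker keeps re-sending a commitment request until
// the corresponding promise completes.
func keepRequesting(request func(), interval time.Duration, done <-chan struct{}) {
	request() // send immediately, then retry on every tick

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			request()
		case <-done:
			return
		}
	}
}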
+func (c *CommitmentsProtocol) Shutdown() { + c.ticker.Shutdown() + c.workerPool.Shutdown().ShutdownComplete.Wait() +} diff --git a/pkg/protocol/protocol_fork.go b/pkg/protocol/protocol_fork.go deleted file mode 100644 index e463e3b28..000000000 --- a/pkg/protocol/protocol_fork.go +++ /dev/null @@ -1,344 +0,0 @@ -package protocol - -import ( - "context" - "fmt" - "time" - - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/iotaledger/hive.go/ierrors" - "github.com/iotaledger/hive.go/runtime/event" - "github.com/iotaledger/hive.go/runtime/timeutil" - "github.com/iotaledger/hive.go/runtime/workerpool" - "github.com/iotaledger/iota-core/pkg/model" - "github.com/iotaledger/iota-core/pkg/protocol/chainmanager" - "github.com/iotaledger/iota-core/pkg/protocol/engine" - iotago "github.com/iotaledger/iota.go/v4" - "github.com/iotaledger/iota.go/v4/merklehasher" -) - -func (p *Protocol) processAttestationsRequest(commitmentID iotago.CommitmentID, src peer.ID) { - mainEngine := p.MainEngineInstance() - - if mainEngine.Storage.Settings().LatestCommitment().Slot() < commitmentID.Slot() { - return - } - - commitment, err := mainEngine.Storage.Commitments().Load(commitmentID.Slot()) - if err != nil { - p.HandleError(ierrors.Wrapf(err, "failed to load commitment %s", commitmentID)) - return - } - - if commitment.ID() != commitmentID { - return - } - - attestations, err := mainEngine.Attestations.Get(commitmentID.Slot()) - if err != nil { - p.HandleError(ierrors.Wrapf(err, "failed to load attestations for commitment %s", commitmentID)) - return - } - - rootsStorage, err := mainEngine.Storage.Roots(commitmentID.Slot()) - if err != nil { - p.HandleError(ierrors.Errorf("failed to get roots storage for commitment %s", commitmentID)) - return - } - roots, _, err := rootsStorage.Load(commitmentID) - if err != nil { - p.HandleError(ierrors.Wrapf(err, "failed to load roots for commitment %s", commitmentID)) - return - } - - p.networkProtocol.SendAttestations(commitment, attestations, roots.AttestationsProof(), src) -} - -func (p *Protocol) onForkDetected(fork *chainmanager.Fork) { - if candidateEngineInstance := p.CandidateEngineInstance(); candidateEngineInstance != nil && candidateEngineInstance.ChainID() == fork.ForkingPoint.ID() { - p.HandleError(ierrors.Errorf("we are already processing the fork at forkingPoint %s", fork.ForkingPoint.ID())) - return - } - - if p.MainEngineInstance().ChainID() == fork.ForkingPoint.ID() { - p.HandleError(ierrors.Errorf("we already switched our main engine to the fork at forkingPoint %s", fork.ForkingPoint.ID())) - return - } - - blockIDs, shouldSwitch, banSrc, err := p.processFork(fork) - if err != nil { - p.HandleError(ierrors.Wrapf(err, "failed to handle fork %s at forking point %s from source %s", fork.ForkedChain.LatestCommitment().ID(), fork.ForkingPoint.ID(), fork.Source)) - return - } - - if banSrc { - fmt.Println("TODO: ban source") - // TODO: ban/drop peer - } - - if !shouldSwitch { - return - } - - // When creating the candidate engine, there are 3 possible scenarios: - // 1. The candidate engine becomes synced and its chain is heavier than the main chain -> switch to it. - // 2. The candidate engine never becomes synced or its chain is not heavier than the main chain -> discard it after a timeout. - // 3. The candidate engine is not creating the same commitments as the chain we decided to switch to -> discard it immediately. 
- snapshotTargetIndex := fork.ForkingPoint.Slot() - 1 - candidateEngineInstance, err := p.EngineManager.ForkEngineAtSlot(snapshotTargetIndex) - if err != nil { - p.HandleError(ierrors.Wrap(err, "error creating new candidate engine")) - return - } - - // Set the chain to the correct forking point - candidateEngineInstance.SetChainID(fork.ForkingPoint.ID()) - - // Attach the engine block requests to the protocol and detach as soon as we switch to that engine - detachRequestBlocks := candidateEngineInstance.Events.BlockRequester.Tick.Hook(func(blockID iotago.BlockID) { - p.networkProtocol.RequestBlock(blockID) - }, event.WithWorkerPool(candidateEngineInstance.Workers.CreatePool("CandidateBlockRequester", workerpool.WithWorkerCount(2)))).Unhook - - var detachProcessCommitment, detachMainEngineSwitched func() - candidateEngineTimeoutTimer := time.NewTimer(10 * time.Minute) - - cleanupFunc := func() { - detachRequestBlocks() - detachProcessCommitment() - detachMainEngineSwitched() - - p.activeEngineMutex.Lock() - p.candidateEngine = nil - p.activeEngineMutex.Unlock() - - candidateEngineInstance.Shutdown() - if err := candidateEngineInstance.RemoveFromFilesystem(); err != nil { - p.HandleError(ierrors.Wrapf(err, "error cleaning up candidate engine %s from file system", candidateEngineInstance.Name())) - } - } - - // Attach slot commitments to the chain manager and detach as soon as we switch to that engine - detachProcessCommitment = candidateEngineInstance.Events.Notarization.LatestCommitmentUpdated.Hook(func(commitment *model.Commitment) { - // Check whether the commitment produced by syncing the candidate engine is actually part of the forked chain. - if fork.ForkedChain.LatestCommitment().ID().Slot() >= commitment.Slot() { - forkedChainCommitmentID := fork.ForkedChain.Commitment(commitment.Slot()).ID() - if forkedChainCommitmentID != commitment.ID() { - p.HandleError(ierrors.Errorf("candidate engine %s produced a commitment %s that is not part of the forked chain %s", candidateEngineInstance.Name(), commitment.ID(), forkedChainCommitmentID)) - cleanupFunc() - - return - } - } - - p.ChainManager.ProcessCandidateCommitment(commitment) - - if candidateEngineInstance.SyncManager.IsBootstrapped() && - commitment.CumulativeWeight() > p.MainEngineInstance().Storage.Settings().LatestCommitment().CumulativeWeight() { - p.switchEngines() - } - }, event.WithWorkerPool(candidateEngineInstance.Workers.CreatePool("ProcessCandidateCommitment", workerpool.WithWorkerCount(1)))).Unhook - - // Clean up events when we switch to the candidate engine. - detachMainEngineSwitched = p.Events.MainEngineSwitched.Hook(func(_ *engine.Engine) { - candidateEngineTimeoutTimer.Stop() - detachRequestBlocks() - detachProcessCommitment() - }, event.WithMaxTriggerCount(1)).Unhook - - // Clean up candidate engine if we never switch to it. - go func() { - defer timeutil.CleanupTimer(candidateEngineTimeoutTimer) - - select { - case <-candidateEngineTimeoutTimer.C: - p.HandleError(ierrors.Errorf("timeout waiting for candidate engine %s to sync", candidateEngineInstance.Name())) - cleanupFunc() - case <-p.context.Done(): - // Nothing to do here. The candidate engine will be shutdown on protocol shutdown and cleaned up when starting the node again. 
- } - }() - - // Set the engine as the new candidate - p.activeEngineMutex.Lock() - oldCandidateEngine := p.candidateEngine - p.candidateEngine = &candidateEngine{ - engine: candidateEngineInstance, - cleanupFunc: cleanupFunc, - } - p.activeEngineMutex.Unlock() - - // Add all the blocks from the forking point attestations to the requester since those will not be passed to the engine by the protocol - candidateEngineInstance.BlockRequester.StartTickers(blockIDs) - - p.Events.CandidateEngineActivated.Trigger(candidateEngineInstance) - - if oldCandidateEngine != nil { - oldCandidateEngine.cleanupFunc() - } -} - -type commitmentVerificationResult struct { - commitment *model.Commitment - actualCumulativeWeight uint64 - blockIDs iotago.BlockIDs - err error -} - -func (p *Protocol) processFork(fork *chainmanager.Fork) (anchorBlockIDs iotago.BlockIDs, shouldSwitch bool, banSource bool, err error) { - // Flow: - // 1. request attestations starting from forking point + AttestationCommitmentOffset - // 2. request 1 by 1 - // 3. verify commitment and attestations: evaluate CW until this point - // 4. evaluate heuristic to determine if we should switch to the fork - ch := make(chan *commitmentVerificationResult) - defer close(ch) - - commitmentVerifier, err := NewCommitmentVerifier(p.MainEngineInstance(), fork.MainChain.Commitment(fork.ForkingPoint.Slot()-1).Commitment()) - if err != nil { - return nil, false, true, ierrors.Wrapf(err, "failed to create commitment verifier for %s", fork.MainChain.Commitment(fork.ForkingPoint.Slot()-1).ID()) - } - - verifyCommitmentFunc := func(commitment *model.Commitment, attestations []*iotago.Attestation, merkleProof *merklehasher.Proof[iotago.Identifier], _ peer.ID) { - blockIDs, actualCumulativeWeight, err := commitmentVerifier.verifyCommitment(commitment, attestations, merkleProof) - - result := &commitmentVerificationResult{ - commitment: commitment, - actualCumulativeWeight: actualCumulativeWeight, - blockIDs: blockIDs, - err: err, - } - ch <- result - } - - wp := p.Workers.CreatePool("AttestationsVerifier", workerpool.WithWorkerCount(1)) - unhook := p.Events.Network.AttestationsReceived.Hook( - verifyCommitmentFunc, - event.WithWorkerPool(wp), - ).Unhook - defer unhook() - defer wp.Shutdown() - - ctx, cancel := context.WithTimeout(p.context, 2*time.Minute) - defer cancel() - - // Fork-choice rule: switch if p.optsChainSwitchingThreshold slots in a row are heavier than main chain. - forkChoiceRule := func(heavierCount int) (decided bool, shouldSwitch bool) { - switch heavierCount { - case p.optsChainSwitchingThreshold: - return true, true - case -p.optsChainSwitchingThreshold: - return true, false - default: - return false, false - } - } - - var heavierCount int - // We start from the forking point to have all starting blocks for each slot. Even though the chain weight will only - // start to diverge at forking point + AttestationCommitmentOffset. 
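For context on the deleted fork handling: the removed forkChoiceRule above switched chains only once the running heavier/lighter count across the scanned slots reached ±optsChainSwitchingThreshold, with equal weights resetting the count to zero. A small self-contained restatement of that rule; decide and the comparison slice are illustrative only, the deleted code derived each comparison from the cumulative weights of the two chains (treating a missing main-chain commitment as "forked chain is heavier"):

package forkchoicesketch

// decide walks per-slot comparisons (+1 forked chain heavier, -1 lighter,
// 0 equal) and reports whether a decision was reached and, if so, whether to
// switch, once the running count hits +threshold or -threshold.
func decide(comparisons []int, threshold int) (decided bool, shouldSwitch bool) {
	counter := 0
	for _, comparison := range comparisons {
		switch {
		case comparison > 0:
			counter++
		case comparison < 0:
			counter--
		default:
			counter = 0
		}

		switch counter {
		case threshold:
			return true, true
		case -threshold:
			return true, false
		}
	}

	return false, false
}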
- start := fork.ForkingPoint.Slot() - end := fork.ForkedChain.LatestCommitment().ID().Slot() - for i := start; i <= end; i++ { - mainChainChainCommitment := fork.MainChain.Commitment(i) - if mainChainChainCommitment == nil { - // If the forked chain is longer than our main chain, we consider it to be heavier - heavierCount++ - - if decided, doSwitch := forkChoiceRule(heavierCount); decided { - return anchorBlockIDs, doSwitch, false, nil - } - - continue - } - mainChainCommitment := mainChainChainCommitment.Commitment() - - result, err := p.requestAttestation(ctx, fork.ForkedChain.Commitment(i).ID(), fork.Source, ch) - if err != nil { - return nil, false, true, ierrors.Wrapf(err, "failed to verify commitment %s", fork.ForkedChain.Commitment(i).ID()) - } - - // Count how many consecutive slots are heavier/lighter than the main chain. - switch { - case result.actualCumulativeWeight > mainChainCommitment.CumulativeWeight(): - heavierCount++ - case result.actualCumulativeWeight < mainChainCommitment.CumulativeWeight(): - heavierCount-- - default: - heavierCount = 0 - } - - if decided, doSwitch := forkChoiceRule(heavierCount); decided { - return anchorBlockIDs, doSwitch, false, nil - } - } - - // If the condition is not met in either direction, we don't switch the chain. - return nil, false, false, nil -} - -func (p *Protocol) requestAttestation(ctx context.Context, requestedID iotago.CommitmentID, src peer.ID, resultChan chan *commitmentVerificationResult) (*commitmentVerificationResult, error) { - ticker := time.NewTicker(p.optsAttestationRequesterTryInterval) - defer ticker.Stop() - - for i := 0; i < p.optsAttestationRequesterMaxRetries; i++ { - p.networkProtocol.RequestAttestations(requestedID, src) - - select { - case <-ticker.C: - continue - case result := <-resultChan: - return result, result.err - case <-ctx.Done(): - return nil, ctx.Err() - } - } - - return nil, ierrors.Errorf("request attestation exceeds max retries from src: %s", src.String()) -} - -func (p *Protocol) switchEngines() { - var oldEngine *engine.Engine - success := func() bool { - p.activeEngineMutex.Lock() - defer p.activeEngineMutex.Unlock() - - if p.candidateEngine == nil { - return false - } - - candidateEngineInstance := p.candidateEngine.engine - // Try to re-org the chain manager - if err := p.ChainManager.SwitchMainChain(candidateEngineInstance.Storage.Settings().LatestCommitment().ID()); err != nil { - p.HandleError(ierrors.Wrap(err, "switching main chain failed")) - - return false - } - - if err := p.EngineManager.SetActiveInstance(candidateEngineInstance); err != nil { - p.HandleError(ierrors.Wrap(err, "error switching engines")) - - return false - } - - p.linkToEngine(candidateEngineInstance) - - // Save a reference to the current main engine and storage so that we can shut it down and prune it after switching - oldEngine = p.mainEngine - oldEngine.Shutdown() - - p.mainEngine = candidateEngineInstance - p.candidateEngine = nil - - return true - }() - - if success { - p.Events.MainEngineSwitched.Trigger(p.MainEngineInstance()) - - // Cleanup filesystem - if err := oldEngine.RemoveFromFilesystem(); err != nil { - p.HandleError(ierrors.Wrap(err, "error removing storage directory after switching engines")) - } - } -} diff --git a/pkg/protocol/protocol_network.go b/pkg/protocol/protocol_network.go deleted file mode 100644 index 6c977bfd6..000000000 --- a/pkg/protocol/protocol_network.go +++ /dev/null @@ -1,57 +0,0 @@ -package protocol - -import ( - "github.com/libp2p/go-libp2p/core/peer" - - 
"github.com/iotaledger/hive.go/runtime/event" - "github.com/iotaledger/hive.go/runtime/workerpool" - "github.com/iotaledger/iota-core/pkg/model" - "github.com/iotaledger/iota-core/pkg/network/protocols/core" - "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" - iotago "github.com/iotaledger/iota.go/v4" -) - -func (p *Protocol) initNetworkProtocol() { - p.networkProtocol = core.NewProtocol(p.networkDispatcher, p.Workers.CreatePool("NetworkProtocol"), p) // Use max amount of workers for networking -} - -func (p *Protocol) runNetworkProtocol() { - p.Events.Network.LinkTo(p.networkProtocol.Events) - - wpBlocks := p.Workers.CreatePool("NetworkEvents.Blocks") // Use max amount of workers for sending, receiving and requesting blocks - - p.Events.Network.BlockRequestReceived.Hook(func(blockID iotago.BlockID, id peer.ID) { - if block, exists := p.MainEngineInstance().Block(blockID); exists { - p.networkProtocol.SendBlock(block, id) - } - }, event.WithWorkerPool(wpBlocks)) - - // Blocks are gossiped when they are scheduled or skipped. - p.Events.Engine.Scheduler.BlockScheduled.Hook(func(block *blocks.Block) { - p.networkProtocol.SendBlock(block.ModelBlock()) - }, event.WithWorkerPool(wpBlocks)) - p.Events.Engine.Scheduler.BlockSkipped.Hook(func(block *blocks.Block) { - p.networkProtocol.SendBlock(block.ModelBlock()) - }, event.WithWorkerPool(wpBlocks)) - - wpCommitments := p.Workers.CreatePool("NetworkEvents.SlotCommitments") - - p.Events.Network.SlotCommitmentRequestReceived.Hook(func(commitmentID iotago.CommitmentID, source peer.ID) { - // when we receive a commitment request, do not look it up in the ChainManager but in the storage, else we might answer with commitments we did not issue ourselves and for which we cannot provide attestations - if requestedCommitment, err := p.MainEngineInstance().Storage.Commitments().Load(commitmentID.Slot()); err == nil && requestedCommitment.ID() == commitmentID { - p.networkProtocol.SendSlotCommitment(requestedCommitment, source) - } - }, event.WithWorkerPool(wpCommitments)) - - p.Events.Network.SlotCommitmentReceived.Hook(func(commitment *model.Commitment, source peer.ID) { - p.ChainManager.ProcessCommitmentFromSource(commitment, source) - }, event.WithWorkerPool(wpCommitments)) - - p.Events.ChainManager.RequestCommitment.Hook(func(commitmentID iotago.CommitmentID) { - p.networkProtocol.RequestSlotCommitment(commitmentID) - }, event.WithWorkerPool(wpCommitments)) - - wpAttestations := p.Workers.CreatePool("NetworkEvents.Attestations", workerpool.WithWorkerCount(1)) // Using just 1 worker to avoid contention - - p.Events.Network.AttestationsRequestReceived.Hook(p.processAttestationsRequest, event.WithWorkerPool(wpAttestations)) -} diff --git a/pkg/protocol/protocol_warp_sync.go b/pkg/protocol/protocol_warp_sync.go new file mode 100644 index 000000000..b74bdcf33 --- /dev/null +++ b/pkg/protocol/protocol_warp_sync.go @@ -0,0 +1,356 @@ +package protocol + +import ( + "sync/atomic" + + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/iotaledger/hive.go/ads" + "github.com/iotaledger/hive.go/core/eventticker" + "github.com/iotaledger/hive.go/ds" + "github.com/iotaledger/hive.go/ds/reactive" + "github.com/iotaledger/hive.go/ierrors" + "github.com/iotaledger/hive.go/kvstore/mapdb" + "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/log" + "github.com/iotaledger/hive.go/runtime/workerpool" + "github.com/iotaledger/iota-core/pkg/protocol/engine" + iotago "github.com/iotaledger/iota.go/v4" + 
"github.com/iotaledger/iota.go/v4/merklehasher" +) + +// WarpSyncProtocol is a subcomponent of the protocol that is responsible for handling warp sync requests and responses. +type WarpSyncProtocol struct { + // protocol contains a reference to the Protocol instance that this component belongs to. + protocol *Protocol + + // workerPool contains the worker pool that is used to process warp sync requests and responses asynchronously. + workerPool *workerpool.WorkerPool + + // ticker contains the ticker that is used to send warp sync requests. + ticker *eventticker.EventTicker[iotago.SlotIndex, iotago.CommitmentID] + + // Logger embeds a logger that can be used to log messages emitted by this chain. + log.Logger +} + +// newWarpSyncProtocol creates a new warp sync protocol instance for the given protocol. +func newWarpSyncProtocol(protocol *Protocol) *WarpSyncProtocol { + c := &WarpSyncProtocol{ + Logger: lo.Return1(protocol.Logger.NewChildLogger("WarpSync")), + protocol: protocol, + workerPool: protocol.Workers.CreatePool("WarpSync", workerpool.WithWorkerCount(1)), + ticker: eventticker.New[iotago.SlotIndex, iotago.CommitmentID](protocol.Options.WarpSyncRequesterOptions...), + } + + c.ticker.Events.Tick.Hook(c.SendRequest) + + protocol.Constructed.OnTrigger(func() { + protocol.Chains.WithInitializedEngines(func(chain *Chain, engine *engine.Engine) (shutdown func()) { + return chain.WarpSyncMode.OnUpdate(func(_ bool, warpSyncModeEnabled bool) { + if warpSyncModeEnabled { + engine.Workers.WaitChildren() + engine.Reset() + } + }) + }) + + protocol.Commitments.WithElements(func(commitment *Commitment) (shutdown func()) { + return commitment.WarpSyncBlocks.OnUpdate(func(_ bool, warpSyncBlocks bool) { + if warpSyncBlocks { + c.ticker.StartTicker(commitment.ID()) + } else { + c.ticker.StopTicker(commitment.ID()) + } + }) + }) + }) + + return c +} + +// SendRequest sends a warp sync request for the given commitment ID to all peers. +func (w *WarpSyncProtocol) SendRequest(commitmentID iotago.CommitmentID) { + w.workerPool.Submit(func() { + if commitment, err := w.protocol.Commitments.Get(commitmentID, false); err == nil { + w.protocol.Network.SendWarpSyncRequest(commitmentID) + + w.LogDebug("request", "commitment", commitment.LogName()) + } + }) +} + +// SendResponse sends a warp sync response for the given commitment ID to the given peer. +func (w *WarpSyncProtocol) SendResponse(commitment *Commitment, blockIDsBySlotCommitment map[iotago.CommitmentID]iotago.BlockIDs, roots *iotago.Roots, transactionIDs iotago.TransactionIDs, to peer.ID) { + w.workerPool.Submit(func() { + w.protocol.Network.SendWarpSyncResponse(commitment.ID(), blockIDsBySlotCommitment, roots.TangleProof(), transactionIDs, roots.MutationProof(), to) + + w.LogTrace("sent response", "commitment", commitment.LogName(), "toPeer", to) + }) +} + +// ProcessResponse processes the given warp sync response. 
+func (w *WarpSyncProtocol) ProcessResponse(commitmentID iotago.CommitmentID, blockIDsBySlotCommitment map[iotago.CommitmentID]iotago.BlockIDs, proof *merklehasher.Proof[iotago.Identifier], transactionIDs iotago.TransactionIDs, mutationProof *merklehasher.Proof[iotago.Identifier], from peer.ID) { + w.workerPool.Submit(func() { + commitment, err := w.protocol.Commitments.Get(commitmentID) + if err != nil { + if !ierrors.Is(err, ErrorCommitmentNotFound) { + w.LogError("failed to load commitment for response", "commitmentID", commitmentID, "fromPeer", from, "err", err) + } else { + w.LogTrace("failed to load commitment for response", "commitmentID", commitmentID, "fromPeer", from, "err", err) + } + + return + } + + chain := commitment.Chain.Get() + if chain == nil { + w.LogTrace("failed to get chain for response", "commitment", commitment.LogName(), "fromPeer", from) + + return + } + + if !chain.WarpSyncMode.Get() { + w.LogTrace("response for chain without warp-sync", "chain", chain.LogName(), "fromPeer", from) + + return + } + + targetEngine := commitment.TargetEngine() + if targetEngine == nil { + w.LogDebug("failed to get target engine for response", "commitment", commitment.LogName()) + + return + } + + commitment.BlocksToWarpSync.Compute(func(blocksToWarpSync ds.Set[iotago.BlockID]) ds.Set[iotago.BlockID] { + if blocksToWarpSync != nil || !commitment.WarpSyncBlocks.Get() { + w.LogTrace("response for already synced commitment", "commitment", commitment.LogName(), "fromPeer", from) + + return blocksToWarpSync + } + + totalBlocks := uint32(0) + acceptedBlocks := ads.NewSet[iotago.Identifier](mapdb.NewMapDB(), iotago.Identifier.Bytes, iotago.IdentifierFromBytes, iotago.BlockID.Bytes, iotago.BlockIDFromBytes) + for _, blockIDs := range blockIDsBySlotCommitment { + for _, blockID := range blockIDs { + _ = acceptedBlocks.Add(blockID) // a mapdb can newer return an error + + totalBlocks++ + } + } + + if !iotago.VerifyProof(proof, acceptedBlocks.Root(), commitment.RootsID()) { + w.LogError("failed to verify blocks proof", "commitment", commitment.LogName(), "blockIDs", blockIDsBySlotCommitment, "proof", proof, "fromPeer", from) + + return blocksToWarpSync + } + + acceptedTransactionIDs := ads.NewSet[iotago.Identifier](mapdb.NewMapDB(), iotago.Identifier.Bytes, iotago.IdentifierFromBytes, iotago.TransactionID.Bytes, iotago.TransactionIDFromBytes) + for _, transactionID := range transactionIDs { + _ = acceptedTransactionIDs.Add(transactionID) // a mapdb can never return an error + } + + if !iotago.VerifyProof(mutationProof, acceptedTransactionIDs.Root(), commitment.RootsID()) { + w.LogError("failed to verify mutations proof", "commitment", commitment.LogName(), "transactionIDs", transactionIDs, "proof", mutationProof, "fromPeer", from) + + return blocksToWarpSync + } + + w.ticker.StopTicker(commitmentID) + + targetEngine.Workers.WaitChildren() + + if !chain.WarpSyncMode.Get() { + w.LogTrace("response for chain without warp-sync", "chain", chain.LogName(), "fromPeer", from) + + return blocksToWarpSync + } + + // Once all blocks are booked we + // 1. Mark all transactions as accepted + // 2. Mark all blocks as accepted + // 3. Force commitment of the slot + commitmentFunc := func() { + if !chain.WarpSyncMode.Get() { + return + } + + // 0. Prepare data flow + var ( + notarizedBlocksCount uint64 + allBlocksNotarized = reactive.NewEvent() + ) + + // 1. 
Mark all transactions as accepted + for _, transactionID := range transactionIDs { + targetEngine.Ledger.SpendDAG().SetAccepted(transactionID) + } + + // 2. Mark all blocks as accepted and wait for them to be notarized + if totalBlocks == 0 { + allBlocksNotarized.Trigger() + } else { + for _, blockIDs := range blockIDsBySlotCommitment { + for _, blockID := range blockIDs { + block, exists := targetEngine.BlockCache.Block(blockID) + if !exists { // this should never happen as we just booked these blocks in this slot. + continue + } + + targetEngine.BlockGadget.SetAccepted(block) + + block.Notarized().OnTrigger(func() { + if atomic.AddUint64(¬arizedBlocksCount, 1) == uint64(totalBlocks) { + allBlocksNotarized.Trigger() + } + }) + } + } + } + + allBlocksNotarized.OnTrigger(func() { + // This needs to happen in a separate worker since the trigger for block notarized while the lock in + // the notarization is still held. + w.workerPool.Submit(func() { + // 3. Force commitment of the slot + producedCommitment, err := targetEngine.Notarization.ForceCommit(commitmentID.Slot()) + if err != nil { + w.protocol.LogError("failed to force commitment", "commitmentID", commitmentID, "err", err) + + return + } + + // 4. Verify that the produced commitment is the same as the initially requested one + if producedCommitment.ID() != commitmentID { + w.protocol.LogError("commitment does not match", "expectedCommitmentID", commitmentID, "producedCommitmentID", producedCommitment.ID()) + + return + } + }) + }) + } + + // Once all blocks are fully booked we can mark the commitment that is minCommittableAge older as this + // commitment to be committable. + commitment.IsSynced.OnUpdateOnce(func(_ bool, _ bool) { + // update the flag in a worker since it can potentially cause a commit + w.workerPool.Submit(func() { + if committableCommitment, exists := chain.Commitment(commitmentID.Slot() - targetEngine.LatestAPI().ProtocolParameters().MinCommittableAge()); exists { + committableCommitment.IsCommittable.Set(true) + } + }) + }) + + // force commit one by one and wait for the parent to be verified before we commit the next one + commitment.Parent.WithNonEmptyValue(func(parent *Commitment) (teardown func()) { + return parent.IsVerified.WithNonEmptyValue(func(_ bool) (teardown func()) { + return commitment.IsCommittable.OnTrigger(commitmentFunc) + }) + }) + + if totalBlocks == 0 { + // mark empty slots as committable and synced + commitment.IsCommittable.Set(true) + commitment.IsSynced.Set(true) + + return blocksToWarpSync + } + + var bookedBlocks atomic.Uint32 + blocksToWarpSync = ds.NewSet[iotago.BlockID]() + for _, blockIDs := range blockIDsBySlotCommitment { + for _, blockID := range blockIDs { + blocksToWarpSync.Add(blockID) + + block, _ := targetEngine.BlockDAG.GetOrRequestBlock(blockID) + if block == nil { + w.protocol.LogError("failed to request block", "blockID", blockID) + + continue + } + + // We need to make sure that all blocks are fully booked and their weight propagated before we can + // move the window forward. This is in order to ensure that confirmation and finalization is correctly propagated. + block.WeightPropagated().OnUpdate(func(_ bool, _ bool) { + if bookedBlocks.Add(1) != totalBlocks { + return + } + + commitment.IsSynced.Set(true) + }) + } + } + + w.LogDebug("received response", "commitment", commitment.LogName()) + + return blocksToWarpSync + }) + }) +} + +// ProcessRequest processes the given warp sync request. 
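ProcessResponse above only applies a warp-synced slot after rebuilding authenticated sets from the received block and transaction IDs and checking them against the commitment's roots via the supplied Merkle proofs. The sketch below keeps only that verify-before-apply shape and substitutes a flat sha256-over-sorted-IDs digest for the real ads.Set plus merklehasher proof, so digestOf and the expected-root comparison are simplifications, not the protocol's actual proof format:

package warpsyncsketch

import (
	"bytes"
	"crypto/sha256"
	"sort"
)

// digestOf builds a deterministic digest over a set of IDs. The protocol uses
// an authenticated set plus a Merkle proof instead of this flat hash.
func digestOf(ids [][]byte) [32]byte {
	sorted := make([][]byte, len(ids))
	copy(sorted, ids)
	sort.Slice(sorted, func(i, j int) bool { return bytes.Compare(sorted[i], sorted[j]) < 0 })

	hasher := sha256.New()
	for _, id := range sorted {
		hasher.Write(id)
	}

	var digest [32]byte
	copy(digest[:], hasher.Sum(nil))

	return digest
}

// applyIfVerified only hands the IDs to apply() when they match the digest the
// commitment vouches for: the same "verify, then book and commit" ordering as
// in ProcessResponse.
func applyIfVerified(ids [][]byte, expected [32]byte, apply func([][]byte)) bool {
	if digestOf(ids) != expected {
		return false
	}
	apply(ids)

	return true
}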
+func (w *WarpSyncProtocol) ProcessRequest(commitmentID iotago.CommitmentID, from peer.ID) { + w.workerPool.Submit(func() { + commitment, err := w.protocol.Commitments.Get(commitmentID) + if err != nil { + if !ierrors.Is(err, ErrorCommitmentNotFound) { + w.LogError("failed to load commitment for warp-sync request", "commitmentID", commitmentID, "fromPeer", from, "err", err) + } else { + w.LogTrace("failed to load commitment for warp-sync request", "commitmentID", commitmentID, "fromPeer", from, "err", err) + } + + return + } + + chain := commitment.Chain.Get() + if chain == nil { + w.LogTrace("warp-sync request for unsolid commitment", "commitment", commitment.LogName(), "fromPeer", from) + + return + } + + targetEngine := commitment.TargetEngine() + if targetEngine == nil { + w.LogTrace("warp-sync request for chain without engine", "chain", chain.LogName(), "fromPeer", from) + + return + } + + committedSlot, err := targetEngine.CommittedSlot(commitmentID) + if err != nil { + w.LogTrace("warp-sync request for uncommitted slot", "chain", chain.LogName(), "commitment", commitment.LogName(), "fromPeer", from) + + return + } + + blockIDsBySlotCommitment, err := committedSlot.BlocksIDsBySlotCommitmentID() + if err != nil { + w.LogTrace("failed to get block ids for warp-sync request", "chain", chain.LogName(), "commitment", commitment.LogName(), "fromPeer", from, "err", err) + + return + } + + roots, err := committedSlot.Roots() + if err != nil { + w.LogTrace("failed to get roots for warp-sync request", "chain", chain.LogName(), "commitment", commitment.LogName(), "fromPeer", from, "err", err) + + return + } + + transactionIDs, err := committedSlot.TransactionIDs() + if err != nil { + w.LogTrace("failed to get transaction ids for warp-sync request", "chain", chain.LogName(), "commitment", commitment.LogName(), "fromPeer", from, "err", err) + + return + } + + w.SendResponse(commitment, blockIDsBySlotCommitment, roots, transactionIDs, from) + }) +} + +// Shutdown shuts down the warp sync protocol. 
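Also in ProcessResponse above, the forced commitment only happens once every warp-synced block reports notarized, and IsSynced is only set once every block's weight is propagated; both are tracked with an atomic counter that fires a one-shot signal when it reaches the expected total. A stdlib-only sketch of that countdown; newCountdown is a hypothetical helper, and note that the diff short-circuits the empty-slot case (totalBlocks == 0) instead of counting:

package countdownsketch

import "sync/atomic"

// newCountdown returns a callback to invoke once per completed item and a
// channel that is closed exactly once after total completions have been seen.
func newCountdown(total uint32) (done func(), allDone <-chan struct{}) {
	var completed atomic.Uint32
	finished := make(chan struct{})

	return func() {
		if completed.Add(1) == total {
			close(finished)
		}
	}, finished
}

In the diff this shape is used twice: allBlocksNotarized gates the forced commitment, and the bookedBlocks counter gates setting commitment.IsSynced.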
+func (w *WarpSyncProtocol) Shutdown() { + w.ticker.Shutdown() + w.workerPool.Shutdown().ShutdownComplete.Wait() +} diff --git a/pkg/protocol/sybilprotection/seatmanager/poa/poa.go b/pkg/protocol/sybilprotection/seatmanager/poa/poa.go index b3ef3fd7c..d211750b6 100644 --- a/pkg/protocol/sybilprotection/seatmanager/poa/poa.go +++ b/pkg/protocol/sybilprotection/seatmanager/poa/poa.go @@ -54,7 +54,7 @@ func NewProvider(opts ...options.Option[SeatManager]) module.Provider[*engine.En e.Events.SeatManager.LinkTo(s.events) - e.HookConstructed(func() { + e.Constructed.OnTrigger(func() { s.TriggerConstructed() // We need to mark validators as active upon solidity of blocks as otherwise we would not be able to diff --git a/pkg/protocol/sybilprotection/seatmanager/topstakers/topstakers.go b/pkg/protocol/sybilprotection/seatmanager/topstakers/topstakers.go index 83fc61251..cc359e617 100644 --- a/pkg/protocol/sybilprotection/seatmanager/topstakers/topstakers.go +++ b/pkg/protocol/sybilprotection/seatmanager/topstakers/topstakers.go @@ -55,7 +55,7 @@ func NewProvider(opts ...options.Option[SeatManager]) module.Provider[*engine.En e.Events.SeatManager.LinkTo(s.events) - e.HookConstructed(func() { + e.Constructed.OnTrigger(func() { s.TriggerConstructed() // We need to mark validators as active upon solidity of blocks as otherwise we would not be able to diff --git a/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go b/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go index f0d3bcc18..487c1e0f3 100644 --- a/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go +++ b/pkg/protocol/sybilprotection/sybilprotectionv1/sybilprotection.go @@ -56,7 +56,7 @@ func NewProvider(opts ...options.Option[SybilProtection]) module.Provider[*engin func(o *SybilProtection) { o.seatManager = o.optsSeatManagerProvider(e) - e.HookConstructed(func() { + e.Constructed.OnTrigger(func() { o.ledger = e.Ledger o.errHandler = e.ErrorHandler("SybilProtection") @@ -75,7 +75,7 @@ func NewProvider(opts ...options.Option[SybilProtection]) module.Provider[*engin // When the engine is triggered initialized, snapshot has been read or database has been initialized properly, // so the committee should be available in the performance manager. - e.HookInitialized(func() { + e.Initialized.OnTrigger(func() { // Mark the committee for the last committed slot as active. currentEpoch := e.CommittedAPI().TimeProvider().EpochFromSlot(e.Storage.Settings().LatestCommitment().Slot()) err := o.seatManager.InitializeCommittee(currentEpoch, e.Clock.Accepted().RelativeTime()) diff --git a/pkg/protocol/versioning.go b/pkg/protocol/versioning.go index eb2d355d2..1c543d63f 100644 --- a/pkg/protocol/versioning.go +++ b/pkg/protocol/versioning.go @@ -1,3 +1,6 @@ package protocol -const DatabaseVersion byte = 1 +const ( + // DatabaseVersion defines the current version of the database. 
+ DatabaseVersion byte = 1 +) diff --git a/pkg/retainer/retainer/retainer.go b/pkg/retainer/retainer/retainer.go index f42406547..622ab43d0 100644 --- a/pkg/retainer/retainer/retainer.go +++ b/pkg/retainer/retainer/retainer.go @@ -87,7 +87,7 @@ func NewProvider() module.Provider[*engine.Engine, retainer.Retainer] { r.RetainBlockFailure(b.ID(), api.BlockFailureDroppedDueToCongestion) }) - e.HookInitialized(func() { + e.Initialized.OnTrigger(func() { e.Ledger.MemPool().OnSignedTransactionAttached(func(signedTransactionMetadata mempool.SignedTransactionMetadata) { attachment := signedTransactionMetadata.Attachments()[0] diff --git a/pkg/storage/storage_test.go b/pkg/storage/storage_test.go index 3c79adae7..9178f7de3 100644 --- a/pkg/storage/storage_test.go +++ b/pkg/storage/storage_test.go @@ -168,6 +168,7 @@ func TestStorage_PruneBySize(t *testing.T) { func TestStorage_RestoreFromDisk(t *testing.T) { tf := NewTestFramework(t, t.TempDir(), storage.WithPruningDelay(1)) + defer tf.Shutdown() totalEpochs := 9 tf.GeneratePermanentData(5 * MB) @@ -213,6 +214,7 @@ func TestStorage_RestoreFromDisk(t *testing.T) { func TestStorage_CopyFromForkedStorageEmpty(t *testing.T) { tf1 := NewTestFramework(t, t.TempDir()) + defer tf1.Shutdown() totalEpochs := 14 // Generate data in the old storage (source). It contains data since the genesis and one epoch after the fork. diff --git a/pkg/tests/accounts_test.go b/pkg/tests/accounts_test.go index 39a6deb69..3c2b2c15a 100644 --- a/pkg/tests/accounts_test.go +++ b/pkg/tests/accounts_test.go @@ -219,7 +219,7 @@ func Test_StakeDelegateAndDelayedClaim(t *testing.T) { // CREATE NEW ACCOUNT WITH BLOCK ISSUER AND STAKING FEATURES FROM BASIC UTXO newAccountBlockIssuerKey := tpkg.RandBlockIssuerKey() // set the expiry slot of the transitioned genesis account to the latest committed + MaxCommittableAge - newAccountExpirySlot := node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Slot() + ts.API.ProtocolParameters().MaxCommittableAge() + newAccountExpirySlot := node1.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Slot() + ts.API.ProtocolParameters().MaxCommittableAge() stakedAmount := iotago.BaseToken(10000) @@ -442,7 +442,7 @@ func Test_ImplicitAccounts(t *testing.T) { ), mock.WithAccountAmount(mock.MinIssuerAccountAmount(ts.API.ProtocolParameters())), ) - block2Commitment := node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment() + block2Commitment := node1.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment() block2 := ts.IssueBasicBlockWithOptions("block2", newUserWallet, tx2, mock.WithStrongParents(latestParents...)) latestParents = ts.CommitUntilSlot(block2Slot, block2.ID()) @@ -571,7 +571,7 @@ func Test_NegativeBIC_BlockIssuerLocked(t *testing.T) { // Try to issue more blocks from each of the issuers - one succeeds in issuing a block, // the other has the block rejected in the PostSolidFilter as his account has negative BIC value. 
{ - block2Commitment := node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment() + block2Commitment := node1.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment() block21 := ts.IssueBasicBlockWithOptions("block2.1", wallet1, &iotago.TaggedData{}, mock.WithSlotCommitment(block2Commitment)) @@ -614,7 +614,7 @@ func Test_NegativeBIC_BlockIssuerLocked(t *testing.T) { Mana: iotago.Mana(allottedBIC), }}, "Genesis:0") - block3Commitment := node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment() + block3Commitment := node1.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment() // Wallet 1 whose account is not locked is issuing the block to unlock the account of wallet 2. block31 := ts.IssueBasicBlockWithOptions("block3.1", wallet1, tx1, mock.WithStrongParents(latestParents...), mock.WithSlotCommitment(block3Commitment)) @@ -646,7 +646,7 @@ func Test_NegativeBIC_BlockIssuerLocked(t *testing.T) { // Issue block from the unlocked account of wallet 2 to make sure that it's actually unlocked. { - block4Commitment := node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment() + block4Commitment := node1.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment() block4 := ts.IssueBasicBlockWithOptions("block4", wallet2, &iotago.TaggedData{}, mock.WithStrongParents(latestParents...), mock.WithSlotCommitment(block4Commitment)) @@ -803,7 +803,7 @@ func Test_NegativeBIC_AccountOutput(t *testing.T) { Mana: iotago.Mana(allottedBIC), }}, "Genesis:0") - block2Commitment := node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment() + block2Commitment := node1.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment() // Wallet 2 whose account is not locked is issuing the block to unlock the account of wallet 1. block2 := ts.IssueBasicBlockWithOptions("block2", wallet2, tx2, mock.WithStrongParents(latestParents...), mock.WithSlotCommitment(block2Commitment)) @@ -840,7 +840,7 @@ func Test_NegativeBIC_AccountOutput(t *testing.T) { mock.WithBlockIssuerExpirySlot(newExpirySlot), ) - block3Commitment := node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment() + block3Commitment := node1.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment() // Wallet 1, which already has non-negative BIC issues the block. block3 := ts.IssueBasicBlockWithOptions("block3", wallet1, tx3, mock.WithStrongParents(latestParents...), mock.WithSlotCommitment(block3Commitment)) @@ -878,7 +878,7 @@ func Test_NegativeBIC_AccountOutput(t *testing.T) { // create a transaction which destroys the genesis account. 
tx4 := wallet1.DestroyAccount("TX4", "TX3:0") - block4Commitment := node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment() + block4Commitment := node1.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment() block4 := ts.IssueBasicBlockWithOptions("block4", wallet2, tx4, mock.WithStrongParents(latestParents...), mock.WithSlotCommitment(block4Commitment)) latestParents = ts.CommitUntilSlot(block4Slot, block4.ID()) @@ -1021,7 +1021,7 @@ func Test_NegativeBIC_AccountOwnedBasicOutputLocked(t *testing.T) { // TRY TO SPEND THE BASIC OUTPUT FROM AN ACCOUNT ADDRESS { - block2Commitment := node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment() + block2Commitment := node1.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment() tx2 := wallet1.SendFundsFromAccount( "TX2", @@ -1068,7 +1068,7 @@ func Test_NegativeBIC_AccountOwnedBasicOutputLocked(t *testing.T) { Mana: iotago.Mana(allottedBIC), }}, "TX0:1") - block3Commitment := node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment() + block3Commitment := node1.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment() // Wallet 2 whose account is not locked is issuing the block to unlock the account of wallet 1. block3 := ts.IssueBasicBlockWithOptions("block3", wallet2, tx3, mock.WithStrongParents(latestParents...), mock.WithSlotCommitment(block3Commitment)) @@ -1102,7 +1102,7 @@ func Test_NegativeBIC_AccountOwnedBasicOutputLocked(t *testing.T) { block4Slot := ts.CurrentSlot() // SPEND THE BASIC OUTPUT FROM AN ACCOUNT ADDRESS { - block4Commitment := node1.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment() + block4Commitment := node1.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment() tx4 := wallet1.SendFundsFromAccount( "TX4", diff --git a/pkg/tests/booker_test.go b/pkg/tests/booker_test.go index 8f37d0f57..79654b44f 100644 --- a/pkg/tests/booker_test.go +++ b/pkg/tests/booker_test.go @@ -349,7 +349,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) { node2.Validator.AccountID, }, ts.Nodes()...) - genesisCommitment := lo.PanicOnErr(node1.Protocol.MainEngineInstance().Storage.Commitments().Load(0)).Commitment() + genesisCommitment := lo.PanicOnErr(node1.Protocol.Engines.Main.Get().Storage.Commitments().Load(0)).Commitment() // Create and issue double spends { @@ -458,7 +458,7 @@ func Test_SpendRejectedCommittedRace(t *testing.T) { ) } - commitment1 := lo.PanicOnErr(node2.Protocol.MainEngineInstance().Storage.Commitments().Load(1)).Commitment() + commitment1 := lo.PanicOnErr(node2.Protocol.Engines.Main.Get().Storage.Commitments().Load(1)).Commitment() // This should be booked on the rejected tx1 conflict tx4 := wallet.CreateBasicOutputsEquallyFromInput("tx4", 1, "tx1:0") @@ -622,7 +622,7 @@ func Test_SpendPendingCommittedRace(t *testing.T) { node2.Validator.AccountID, }, ts.Nodes()...) 
- genesisCommitment := lo.PanicOnErr(node1.Protocol.MainEngineInstance().Storage.Commitments().Load(0)).Commitment() + genesisCommitment := lo.PanicOnErr(node1.Protocol.Engines.Main.Get().Storage.Commitments().Load(0)).Commitment() // Create and issue double spends { @@ -707,7 +707,7 @@ func Test_SpendPendingCommittedRace(t *testing.T) { ) } - commitment1 := lo.PanicOnErr(node2.Protocol.MainEngineInstance().Storage.Commitments().Load(1)).Commitment() + commitment1 := lo.PanicOnErr(node2.Protocol.Engines.Main.Get().Storage.Commitments().Load(1)).Commitment() // Issue a block booked on a pending conflict on node2 { diff --git a/pkg/tests/confirmation_state_test.go b/pkg/tests/confirmation_state_test.go index 6cc530e59..78c6cc293 100644 --- a/pkg/tests/confirmation_state_test.go +++ b/pkg/tests/confirmation_state_test.go @@ -83,7 +83,7 @@ func TestConfirmationFlags(t *testing.T) { testsuite.WithChainID(genesisCommitment.MustID()), testsuite.WithStorageCommitments([]*iotago.Commitment{genesisCommitment}), testsuite.WithSybilProtectionCommittee(0, expectedCommittee), - testsuite.WithSybilProtectionOnlineCommittee(lo.Return1(lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeA.Validator.AccountID))), + testsuite.WithSybilProtectionOnlineCommittee(lo.Return1(lo.Return1(nodeA.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeA.Validator.AccountID))), testsuite.WithEvictedSlot(0), testsuite.WithActiveRootBlocks(ts.Blocks("Genesis")), testsuite.WithStorageRootBlocks(ts.Blocks("Genesis")), @@ -123,7 +123,7 @@ func TestConfirmationFlags(t *testing.T) { ) // Issue in the next slot so that slot 2 becomes committed. - slot1Commitment := lo.PanicOnErr(nodeA.Protocol.MainEngineInstance().Storage.Commitments().Load(1)).Commitment() + slot1Commitment := lo.PanicOnErr(nodeA.Protocol.Engines.Main.Get().Storage.Commitments().Load(1)).Commitment() slot2CommittableSlot := slot1CommittableSlot + 1 ts.SetCurrentSlot(slot2CommittableSlot) alias2A0 := fmt.Sprintf("A.%d.0", slot2CommittableSlot) @@ -144,14 +144,14 @@ func TestConfirmationFlags(t *testing.T) { testsuite.WithEqualStoredCommitmentAtIndex(2), testsuite.WithSybilProtectionCommittee(ts.API.TimeProvider().EpochFromSlot(slot2CommittableSlot), expectedCommittee), testsuite.WithSybilProtectionOnlineCommittee( - lo.Return1(lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeA.Validator.AccountID)), - lo.Return1(lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeB.Validator.AccountID)), + lo.Return1(lo.Return1(nodeA.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeA.Validator.AccountID)), + lo.Return1(lo.Return1(nodeA.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeB.Validator.AccountID)), ), testsuite.WithEvictedSlot(2), ) // Confirm aliasA0 by pre-confirming a block a 3rd validator in the next slot. 
- slot2Commitment := lo.PanicOnErr(nodeA.Protocol.MainEngineInstance().Storage.Commitments().Load(2)).Commitment() + slot2Commitment := lo.PanicOnErr(nodeA.Protocol.Engines.Main.Get().Storage.Commitments().Load(2)).Commitment() slot3CommittableSlot := slot2CommittableSlot + 1 ts.SetCurrentSlot(slot3CommittableSlot) alias3C0 := fmt.Sprintf("C.%d.0", slot3CommittableSlot) @@ -183,9 +183,9 @@ func TestConfirmationFlags(t *testing.T) { testsuite.WithEqualStoredCommitmentAtIndex(2), testsuite.WithSybilProtectionCommittee(ts.API.TimeProvider().EpochFromSlot(slot3CommittableSlot), expectedCommittee), testsuite.WithSybilProtectionOnlineCommittee( - lo.Return1(lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeA.Validator.AccountID)), - lo.Return1(lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeB.Validator.AccountID)), - lo.Return1(lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeC.Validator.AccountID)), + lo.Return1(lo.Return1(nodeA.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeA.Validator.AccountID)), + lo.Return1(lo.Return1(nodeA.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeB.Validator.AccountID)), + lo.Return1(lo.Return1(nodeA.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeC.Validator.AccountID)), ), testsuite.WithEvictedSlot(2), ) @@ -239,9 +239,9 @@ func TestConfirmationFlags(t *testing.T) { testsuite.WithEqualStoredCommitmentAtIndex(3), testsuite.WithSybilProtectionCommittee(ts.API.TimeProvider().EpochFromSlot(slot4CommittableSlot), expectedCommittee), testsuite.WithSybilProtectionOnlineCommittee( - lo.Return1(lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeA.Validator.AccountID)), - lo.Return1(lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeB.Validator.AccountID)), - lo.Return1(lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeC.Validator.AccountID)), + lo.Return1(lo.Return1(nodeA.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeA.Validator.AccountID)), + lo.Return1(lo.Return1(nodeA.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeB.Validator.AccountID)), + lo.Return1(lo.Return1(nodeA.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeC.Validator.AccountID)), ), testsuite.WithEvictedSlot(3), ) diff --git a/pkg/tests/loss_of_acceptance_test.go b/pkg/tests/loss_of_acceptance_test.go index 8cbd6cb42..2baef78b1 100644 --- a/pkg/tests/loss_of_acceptance_test.go +++ b/pkg/tests/loss_of_acceptance_test.go @@ -44,7 +44,7 @@ func TestLossOfAcceptanceFromGenesis(t *testing.T) { // Create snapshot to use later. snapshotPath := ts.Directory.Path(fmt.Sprintf("%d_snapshot", time.Now().Unix())) - require.NoError(t, ts.Node("node0").Protocol.MainEngineInstance().WriteSnapshot(snapshotPath)) + require.NoError(t, ts.Node("node0").Protocol.Engines.Main.Get().WriteSnapshot(snapshotPath)) // Revive chain on node0. 
{ @@ -140,7 +140,7 @@ func TestLossOfAcceptanceFromSnapshot(t *testing.T) { var node0restarted *mock.Node { snapshotPath := ts.Directory.Path(fmt.Sprintf("%d_snapshot", time.Now().Unix())) - require.NoError(t, ts.Node("node0").Protocol.MainEngineInstance().WriteSnapshot(snapshotPath)) + require.NoError(t, ts.Node("node0").Protocol.Engines.Main.Get().WriteSnapshot(snapshotPath)) node0restarted = ts.AddNode("node0-restarted") node0restarted.Validator = node0.Validator diff --git a/pkg/tests/protocol_engine_rollback_test.go b/pkg/tests/protocol_engine_rollback_test.go index 60a491111..639932e65 100644 --- a/pkg/tests/protocol_engine_rollback_test.go +++ b/pkg/tests/protocol_engine_rollback_test.go @@ -13,7 +13,6 @@ import ( "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/iota-core/pkg/core/account" "github.com/iotaledger/iota-core/pkg/protocol" - "github.com/iotaledger/iota-core/pkg/protocol/chainmanager" "github.com/iotaledger/iota-core/pkg/protocol/engine" "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" "github.com/iotaledger/iota-core/pkg/protocol/sybilprotection/seatmanager" @@ -69,12 +68,6 @@ func TestProtocol_EngineRollbackFinalization(t *testing.T) { nodeOptions := make(map[string][]options.Option[protocol.Protocol]) for _, node := range ts.Nodes() { nodeOptions[node.Name] = []options.Option[protocol.Protocol]{ - protocol.WithChainManagerOptions( - chainmanager.WithCommitmentRequesterOptions( - eventticker.RetryInterval[iotago.SlotIndex, iotago.CommitmentID](1*time.Second), - eventticker.RetryJitter[iotago.SlotIndex, iotago.CommitmentID](500*time.Millisecond), - ), - ), protocol.WithSybilProtectionProvider( sybilprotectionv1.NewProvider( sybilprotectionv1.WithSeatManagerProvider( @@ -105,14 +98,14 @@ func TestProtocol_EngineRollbackFinalization(t *testing.T) { node3.Validator.AccountID, } expectedOnlineCommitteeFull := []account.SeatIndex{ - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node2.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node3.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node2.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node3.Validator.AccountID)), } for _, node := range ts.Nodes() { - node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA).SetOnline("node0", "node1", "node2", "node3") + node.Protocol.Engines.Main.Get().SybilProtection.SeatManager().(*mock2.ManualPOA).SetOnline("node0", "node1", "node2", "node3") } { @@ -178,7 +171,7 @@ func TestProtocol_EngineRollbackFinalization(t *testing.T) { ts.AssertBlocksExist(ts.BlocksWithPrefix("P0"), true, ts.Nodes()...) 
} - newEngine, err := node3.Protocol.EngineManager.ForkEngineAtSlot(13) + newEngine, err := node3.Protocol.Engines.ForkAtSlot(13) require.NoError(t, err) // Assert state of the forked engine after rollback. @@ -201,7 +194,7 @@ func TestProtocol_EngineRollbackFinalization(t *testing.T) { for slot := 1; slot <= 13; slot++ { copiedCommitment, err := newEngine.Storage.Commitments().Load(iotago.SlotIndex(slot)) require.NoError(t, err) - sourceCommitment, err := node1.Protocol.MainEngineInstance().Storage.Commitments().Load(iotago.SlotIndex(slot)) + sourceCommitment, err := node1.Protocol.Engines.Main.Get().Storage.Commitments().Load(iotago.SlotIndex(slot)) require.NoError(t, err) require.Equal(t, sourceCommitment.ID(), copiedCommitment.ID()) } @@ -257,12 +250,6 @@ func TestProtocol_EngineRollbackNoFinalization(t *testing.T) { nodeOptions := make(map[string][]options.Option[protocol.Protocol]) for _, node := range ts.Nodes() { nodeOptions[node.Name] = []options.Option[protocol.Protocol]{ - protocol.WithChainManagerOptions( - chainmanager.WithCommitmentRequesterOptions( - eventticker.RetryInterval[iotago.SlotIndex, iotago.CommitmentID](1*time.Second), - eventticker.RetryJitter[iotago.SlotIndex, iotago.CommitmentID](500*time.Millisecond), - ), - ), protocol.WithSybilProtectionProvider( sybilprotectionv1.NewProvider( sybilprotectionv1.WithSeatManagerProvider( @@ -293,19 +280,19 @@ func TestProtocol_EngineRollbackNoFinalization(t *testing.T) { node3.Validator.AccountID, } expectedOnlineCommitteeFull := []account.SeatIndex{ - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node2.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node3.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node2.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node3.Validator.AccountID)), } expectedOnlineCommitteeHalf := []account.SeatIndex{ - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountID)), } for _, node := range ts.Nodes() { - node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA).SetOnline("node0", "node1", "node2", "node3") + 
node.Protocol.Engines.Main.Get().SybilProtection.SeatManager().(*mock2.ManualPOA).SetOnline("node0", "node1", "node2", "node3") } { @@ -357,7 +344,7 @@ func TestProtocol_EngineRollbackNoFinalization(t *testing.T) { // Update online committee. for _, node := range ts.Nodes() { - manualPOA := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA) + manualPOA := node.Protocol.Engines.Main.Get().SybilProtection.SeatManager().(*mock2.ManualPOA) manualPOA.SetOnline("node0", "node1") manualPOA.SetOffline("node2", "node3") } @@ -378,7 +365,7 @@ func TestProtocol_EngineRollbackNoFinalization(t *testing.T) { ts.AssertBlocksExist(ts.BlocksWithPrefix("P0"), true, ts.Nodes()...) } - newEngine, err := node3.Protocol.EngineManager.ForkEngineAtSlot(13) + newEngine, err := node3.Protocol.Engines.ForkAtSlot(13) require.NoError(t, err) // Assert state of the forked engine after rollback. @@ -401,7 +388,7 @@ func TestProtocol_EngineRollbackNoFinalization(t *testing.T) { for slot := 1; slot <= 13; slot++ { copiedCommitment, err := newEngine.Storage.Commitments().Load(iotago.SlotIndex(slot)) require.NoError(t, err) - sourceCommitment, err := node1.Protocol.MainEngineInstance().Storage.Commitments().Load(iotago.SlotIndex(slot)) + sourceCommitment, err := node1.Protocol.Engines.Main.Get().Storage.Commitments().Load(iotago.SlotIndex(slot)) require.NoError(t, err) require.Equal(t, sourceCommitment.ID(), copiedCommitment.ID()) } @@ -457,12 +444,6 @@ func TestProtocol_EngineRollbackNoFinalizationLastSlot(t *testing.T) { nodeOptions := make(map[string][]options.Option[protocol.Protocol]) for _, node := range ts.Nodes() { nodeOptions[node.Name] = []options.Option[protocol.Protocol]{ - protocol.WithChainManagerOptions( - chainmanager.WithCommitmentRequesterOptions( - eventticker.RetryInterval[iotago.SlotIndex, iotago.CommitmentID](1*time.Second), - eventticker.RetryJitter[iotago.SlotIndex, iotago.CommitmentID](500*time.Millisecond), - ), - ), protocol.WithSybilProtectionProvider( sybilprotectionv1.NewProvider( sybilprotectionv1.WithSeatManagerProvider( @@ -493,19 +474,19 @@ func TestProtocol_EngineRollbackNoFinalizationLastSlot(t *testing.T) { node3.Validator.AccountID, } expectedOnlineCommitteeFull := []account.SeatIndex{ - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node2.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node3.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node2.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node3.Validator.AccountID)), } expectedOnlineCommitteeHalf := []account.SeatIndex{ - 
lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountID)), } for _, node := range ts.Nodes() { - node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA).SetOnline("node0", "node1", "node2", "node3") + node.Protocol.Engines.Main.Get().SybilProtection.SeatManager().(*mock2.ManualPOA).SetOnline("node0", "node1", "node2", "node3") } { @@ -557,7 +538,7 @@ func TestProtocol_EngineRollbackNoFinalizationLastSlot(t *testing.T) { // Update online committee. for _, node := range ts.Nodes() { - manualPOA := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA) + manualPOA := node.Protocol.Engines.Main.Get().SybilProtection.SeatManager().(*mock2.ManualPOA) manualPOA.SetOnline("node0", "node1") manualPOA.SetOffline("node2", "node3") } @@ -578,7 +559,7 @@ func TestProtocol_EngineRollbackNoFinalizationLastSlot(t *testing.T) { ts.AssertBlocksExist(ts.BlocksWithPrefix("P0"), true, ts.Nodes()...) } - newEngine, err := node3.Protocol.EngineManager.ForkEngineAtSlot(15) + newEngine, err := node3.Protocol.Engines.ForkAtSlot(15) require.NoError(t, err) // Assert state of the forked engine after rollback. @@ -601,7 +582,7 @@ func TestProtocol_EngineRollbackNoFinalizationLastSlot(t *testing.T) { for slot := 1; slot <= 15; slot++ { copiedCommitment, err := newEngine.Storage.Commitments().Load(iotago.SlotIndex(slot)) require.NoError(t, err) - sourceCommitment, err := node1.Protocol.MainEngineInstance().Storage.Commitments().Load(iotago.SlotIndex(slot)) + sourceCommitment, err := node1.Protocol.Engines.Main.Get().Storage.Commitments().Load(iotago.SlotIndex(slot)) require.NoError(t, err) require.Equal(t, sourceCommitment.ID(), copiedCommitment.ID()) } @@ -657,12 +638,6 @@ func TestProtocol_EngineRollbackNoFinalizationBeforePointOfNoReturn(t *testing.T nodeOptions := make(map[string][]options.Option[protocol.Protocol]) for _, node := range ts.Nodes() { nodeOptions[node.Name] = []options.Option[protocol.Protocol]{ - protocol.WithChainManagerOptions( - chainmanager.WithCommitmentRequesterOptions( - eventticker.RetryInterval[iotago.SlotIndex, iotago.CommitmentID](1*time.Second), - eventticker.RetryJitter[iotago.SlotIndex, iotago.CommitmentID](500*time.Millisecond), - ), - ), protocol.WithSybilProtectionProvider( sybilprotectionv1.NewProvider( sybilprotectionv1.WithSeatManagerProvider( @@ -693,19 +668,19 @@ func TestProtocol_EngineRollbackNoFinalizationBeforePointOfNoReturn(t *testing.T node3.Validator.AccountID, } expectedOnlineCommitteeFull := []account.SeatIndex{ - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node2.Validator.AccountID)), - 
lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node3.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node2.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node3.Validator.AccountID)), } expectedOnlineCommitteeHalf := []account.SeatIndex{ - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountID)), } for _, node := range ts.Nodes() { - node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA).SetOnline("node0", "node1", "node2", "node3") + node.Protocol.Engines.Main.Get().SybilProtection.SeatManager().(*mock2.ManualPOA).SetOnline("node0", "node1", "node2", "node3") } { @@ -757,7 +732,7 @@ func TestProtocol_EngineRollbackNoFinalizationBeforePointOfNoReturn(t *testing.T // Update online committee. for _, node := range ts.Nodes() { - manualPOA := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA) + manualPOA := node.Protocol.Engines.Main.Get().SybilProtection.SeatManager().(*mock2.ManualPOA) manualPOA.SetOnline("node0", "node1") manualPOA.SetOffline("node2", "node3") } @@ -778,7 +753,7 @@ func TestProtocol_EngineRollbackNoFinalizationBeforePointOfNoReturn(t *testing.T ts.AssertBlocksExist(ts.BlocksWithPrefix("P0"), true, ts.Nodes()...) } - newEngine, err := node3.Protocol.EngineManager.ForkEngineAtSlot(9) + newEngine, err := node3.Protocol.Engines.ForkAtSlot(9) require.NoError(t, err) // Assert state of the forked engine after rollback. 
@@ -801,7 +776,7 @@ func TestProtocol_EngineRollbackNoFinalizationBeforePointOfNoReturn(t *testing.T for slot := 1; slot <= 9; slot++ { copiedCommitment, err := newEngine.Storage.Commitments().Load(iotago.SlotIndex(slot)) require.NoError(t, err) - sourceCommitment, err := node1.Protocol.MainEngineInstance().Storage.Commitments().Load(iotago.SlotIndex(slot)) + sourceCommitment, err := node1.Protocol.Engines.Main.Get().Storage.Commitments().Load(iotago.SlotIndex(slot)) require.NoError(t, err) require.Equal(t, sourceCommitment.ID(), copiedCommitment.ID()) } diff --git a/pkg/tests/protocol_engine_switching_test.go b/pkg/tests/protocol_engine_switching_test.go index 81fcd675a..046ffbd99 100644 --- a/pkg/tests/protocol_engine_switching_test.go +++ b/pkg/tests/protocol_engine_switching_test.go @@ -13,7 +13,6 @@ import ( "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/iota-core/pkg/core/account" "github.com/iotaledger/iota-core/pkg/protocol" - "github.com/iotaledger/iota-core/pkg/protocol/chainmanager" "github.com/iotaledger/iota-core/pkg/protocol/engine" "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" "github.com/iotaledger/iota-core/pkg/protocol/engine/notarization/slotnotarization" @@ -61,7 +60,7 @@ func TestProtocol_EngineSwitching(t *testing.T) { node8 := ts.AddNode("node8") ts.AddDefaultWallet(node0) - const expectedCommittedSlotAfterPartitionMerge = 19 + const expectedCommittedSlotAfterPartitionMerge = 18 nodesP1 := []*mock.Node{node0, node1, node2, node3, node4, node5} nodesP2 := []*mock.Node{node6, node7, node8} @@ -83,12 +82,6 @@ func TestProtocol_EngineSwitching(t *testing.T) { nodeOptions := make(map[string][]options.Option[protocol.Protocol]) for _, node := range ts.Nodes() { nodeOptions[node.Name] = []options.Option[protocol.Protocol]{ - protocol.WithChainManagerOptions( - chainmanager.WithCommitmentRequesterOptions( - eventticker.RetryInterval[iotago.SlotIndex, iotago.CommitmentID](1*time.Second), - eventticker.RetryJitter[iotago.SlotIndex, iotago.CommitmentID](500*time.Millisecond), - ), - ), protocol.WithSybilProtectionProvider( sybilprotectionv1.NewProvider( sybilprotectionv1.WithSeatManagerProvider( @@ -127,20 +120,20 @@ func TestProtocol_EngineSwitching(t *testing.T) { node7.Validator.AccountID, } expectedP1OnlineCommittee := []account.SeatIndex{ - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node2.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node3.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node4.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node0.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node1.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node2.Validator.AccountID)), + 
lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node3.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node4.Validator.AccountID)), } expectedP2OnlineCommittee := []account.SeatIndex{ - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node6.Validator.AccountID)), - lo.Return1(lo.Return1(node0.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node7.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node6.Validator.AccountID)), + lo.Return1(lo.Return1(node0.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(node7.Validator.AccountID)), } expectedOnlineCommittee := append(expectedP1OnlineCommittee, expectedP2OnlineCommittee...) for _, node := range ts.Nodes() { - node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA).SetOnline("node0", "node1", "node2", "node3", "node4", "node6", "node7") + node.Protocol.Engines.Main.Get().SybilProtection.SeatManager().(*mock2.ManualPOA).SetOnline("node0", "node1", "node2", "node3", "node4", "node6", "node7") } // Verify that nodes have the expected states. @@ -205,7 +198,7 @@ func TestProtocol_EngineSwitching(t *testing.T) { // Set online committee for each partition. for _, node := range ts.Nodes() { - manualPOA := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA) + manualPOA := node.Protocol.Engines.Main.Get().SybilProtection.SeatManager().(*mock2.ManualPOA) if node.Partition == "P1" { manualPOA.SetOnline("node0", "node1", "node2", "node3", "node4") manualPOA.SetOffline("node6", "node7") @@ -327,7 +320,7 @@ func TestProtocol_EngineSwitching(t *testing.T) { } for _, node := range ts.Nodes() { - manualPOA := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().(*mock2.ManualPOA) + manualPOA := node.Protocol.Engines.Main.Get().SybilProtection.SeatManager().(*mock2.ManualPOA) manualPOA.SetOnline("node0", "node1", "node2", "node3", "node4", "node6", "node7") } // Merge the partitions @@ -384,7 +377,7 @@ func TestProtocol_EngineSwitching(t *testing.T) { func TestProtocol_EngineSwitching_CommitteeRotation(t *testing.T) { ts := testsuite.NewTestSuite(t, - testsuite.WithWaitFor(30*time.Second), + testsuite.WithWaitFor(15*time.Second), testsuite.WithProtocolParametersOptions( iotago.WithTimeProviderOptions( @@ -409,7 +402,7 @@ func TestProtocol_EngineSwitching_CommitteeRotation(t *testing.T) { node2 := ts.AddValidatorNode("node2") node3 := ts.AddValidatorNode("node3") - const expectedCommittedSlotAfterPartitionMerge = 19 + const expectedCommittedSlotAfterPartitionMerge = 18 nodesP1 := []*mock.Node{node0, node1, node2} nodesP2 := []*mock.Node{node3} @@ -612,6 +605,9 @@ func TestProtocol_EngineSwitching_CommitteeRotation(t *testing.T) { // Here we need to let enough time pass for the nodes to sync up the candidate engines and switch them ts.AssertMainEngineSwitchedCount(1, nodesP2...) + // Make sure that enough activity messages are issued so that a block in slot 21 gets accepted and triggers commitment of slot 18. 
+ time.Sleep(3 * time.Second) + ctxP1Cancel() wg.Wait() } @@ -620,12 +616,10 @@ func TestProtocol_EngineSwitching_CommitteeRotation(t *testing.T) { // Those nodes should also have all the blocks from the target fork P1 and should not have blocks from P2. // This is to make sure that the storage was copied correctly during engine switching. ts.AssertBlocksExist(ts.BlocksWithPrefix("P0"), true, ts.Nodes()...) - ts.AssertBlocksExist(ts.BlocksWithPrefix("P1"), true, ts.Nodes()...) // not all blocks of slot 19 are available on node3 (buffer issue?) + ts.AssertBlocksExist(ts.BlocksWithPrefix("P1"), true, ts.Nodes()...) ts.AssertBlocksExist(ts.BlocksWithPrefix("P2"), false, ts.Nodes()...) - ts.AssertNodeState(ts.Nodes(), - testsuite.WithEqualStoredCommitmentAtIndex(expectedCommittedSlotAfterPartitionMerge), - ) + ts.AssertEqualStoredCommitmentAtIndex(expectedCommittedSlotAfterPartitionMerge, ts.Nodes()...) // Assert committee in epoch 1. ts.AssertSybilProtectionCandidates(0, ts.AccountsOfNodes("node1", "node2"), ts.Nodes()...) diff --git a/pkg/tests/protocol_startup_test.go b/pkg/tests/protocol_startup_test.go index c2596c178..c40c8421a 100644 --- a/pkg/tests/protocol_startup_test.go +++ b/pkg/tests/protocol_startup_test.go @@ -60,7 +60,7 @@ func Test_BookInCommittedSlot(t *testing.T) { } expectedOnlineCommittee := []account.SeatIndex{ - lo.Return1(lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeA.Validator.AccountID)), + lo.Return1(lo.Return1(nodeA.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeA.Validator.AccountID)), } // Verify that nodes have the expected states. @@ -116,7 +116,7 @@ func Test_BookInCommittedSlot(t *testing.T) { ts.AssertAttestationsForSlot(slot, ts.Blocks(aliases...), ts.Nodes()...) } ts.SetCurrentSlot(5) - commitment := lo.PanicOnErr(nodeA.Protocol.MainEngineInstance().Storage.Commitments().Load(3)).Commitment() + commitment := lo.PanicOnErr(nodeA.Protocol.Engines.Main.Get().Storage.Commitments().Load(3)).Commitment() ts.IssueValidationBlockWithHeaderOptions("5*", ts.Node("nodeA"), mock.WithSlotCommitment(commitment), mock.WithStrongParents(ts.BlockIDsWithPrefix("4.3-")...)) ts.AssertBlocksExist(ts.Blocks("5*"), false, ts.Nodes("nodeA")...) @@ -173,8 +173,8 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) { } expectedOnlineCommittee := []account.SeatIndex{ - lo.Return1(lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeA.Validator.AccountID)), - lo.Return1(lo.Return1(nodeA.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeB.Validator.AccountID)), + lo.Return1(lo.Return1(nodeA.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeA.Validator.AccountID)), + lo.Return1(lo.Return1(nodeA.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(1)).GetSeat(nodeB.Validator.AccountID)), } // Verify that nodes have the expected states. @@ -328,7 +328,7 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) { { // Create snapshot. 
snapshotPath := ts.Directory.Path(fmt.Sprintf("%d_snapshot", time.Now().Unix())) - require.NoError(t, ts.Node("nodeA").Protocol.MainEngineInstance().WriteSnapshot(snapshotPath)) + require.NoError(t, ts.Node("nodeA").Protocol.Engines.Main.Get().WriteSnapshot(snapshotPath)) nodeD := ts.AddNode("nodeD") nodeD.Initialize(true, append(nodeOptions, @@ -349,7 +349,7 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) { ts.AssertStorageRootBlocks(expectedStorageRootBlocksFrom9, ts.Nodes("nodeD")...) } - slot7Commitment := lo.PanicOnErr(nodeA.Protocol.MainEngineInstance().Storage.Commitments().Load(7)) + slot7Commitment := lo.PanicOnErr(nodeA.Protocol.Engines.Main.Get().Storage.Commitments().Load(7)) ts.AssertNodeState(ts.Nodes("nodeC-restarted", "nodeD"), testsuite.WithSnapshotImported(true), @@ -478,7 +478,7 @@ func Test_StartNodeFromSnapshotAndDisk(t *testing.T) { { // Create snapshot. snapshotPath := ts.Directory.Path(fmt.Sprintf("%d_snapshot", time.Now().Unix())) - require.NoError(t, ts.Node("nodeA").Protocol.MainEngineInstance().WriteSnapshot(snapshotPath)) + require.NoError(t, ts.Node("nodeA").Protocol.Engines.Main.Get().WriteSnapshot(snapshotPath)) nodeD := ts.AddNode("nodeE") nodeD.Initialize(true, append(nodeOptions, diff --git a/pkg/tests/reward_test.go b/pkg/tests/reward_test.go index 67af8e176..e09d64c69 100644 --- a/pkg/tests/reward_test.go +++ b/pkg/tests/reward_test.go @@ -113,7 +113,7 @@ func Test_Delegation_DelayedClaimingDestroyOutputWithoutRewards(t *testing.T) { block1 := ts.IssueBasicBlockWithOptions("block1", ts.DefaultWallet(), tx1) // TRANSITION TO DELAYED CLAIMING (IN THE SAME SLOT) - latestCommitment := ts.DefaultWallet().Node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment() + latestCommitment := ts.DefaultWallet().Node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment() apiForSlot := ts.DefaultWallet().Node.Protocol.APIForSlot(block1_2Slot) futureBoundedSlotIndex := latestCommitment.Slot() + apiForSlot.ProtocolParameters().MinCommittableAge() @@ -243,7 +243,7 @@ func Test_RewardInputCannotPointToNFTOutput(t *testing.T) { 0, ), mock.WithCommitmentInput(&iotago.CommitmentInput{ - CommitmentID: ts.DefaultWallet().Node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment().MustID(), + CommitmentID: ts.DefaultWallet().Node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment().MustID(), })) ts.IssueBasicBlockWithOptions("block2", ts.DefaultWallet(), tx2, mock.WithStrongParents(latestParents...)) diff --git a/pkg/tests/upgrade_signaling_test.go b/pkg/tests/upgrade_signaling_test.go index 0a627d5e1..511ca05c0 100644 --- a/pkg/tests/upgrade_signaling_test.go +++ b/pkg/tests/upgrade_signaling_test.go @@ -14,7 +14,6 @@ import ( "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/iota-core/pkg/model" "github.com/iotaledger/iota-core/pkg/protocol" - "github.com/iotaledger/iota-core/pkg/protocol/chainmanager" "github.com/iotaledger/iota-core/pkg/protocol/engine" "github.com/iotaledger/iota-core/pkg/protocol/engine/accounts" "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" @@ -58,12 +57,6 @@ func Test_Upgrade_Signaling(t *testing.T) { ) nodeOptionsWithoutV5 := []options.Option[protocol.Protocol]{ - protocol.WithChainManagerOptions( - chainmanager.WithCommitmentRequesterOptions( - eventticker.RetryInterval[iotago.SlotIndex, iotago.CommitmentID](1*time.Second), - eventticker.RetryJitter[iotago.SlotIndex, iotago.CommitmentID](500*time.Millisecond), - ), - ), 
protocol.WithEngineOptions( engine.WithBlockRequesterOptions( eventticker.RetryInterval[iotago.SlotIndex, iotago.BlockID](1*time.Second), @@ -224,7 +217,7 @@ func Test_Upgrade_Signaling(t *testing.T) { }, ts.Nodes()...) // check that rollback is correct - pastAccounts, err := ts.Node("nodeA").Protocol.MainEngineInstance().Ledger.PastAccounts(iotago.AccountIDs{ts.Node("nodeA").Validator.AccountID}, 7) + pastAccounts, err := ts.Node("nodeA").Protocol.Engines.Main.Get().Ledger.PastAccounts(iotago.AccountIDs{ts.Node("nodeA").Validator.AccountID}, 7) require.NoError(t, err) require.Contains(t, pastAccounts, ts.Node("nodeA").Validator.AccountID) require.Equal(t, model.VersionAndHash{Version: 4, Hash: hash2}, pastAccounts[ts.Node("nodeA").Validator.AccountID].LatestSupportedProtocolVersionAndHash) @@ -272,7 +265,7 @@ func Test_Upgrade_Signaling(t *testing.T) { // Create snapshot. snapshotPath := ts.Directory.Path(fmt.Sprintf("%d_snapshot", time.Now().Unix())) - require.NoError(t, ts.Node("nodeA").Protocol.MainEngineInstance().WriteSnapshot(snapshotPath)) + require.NoError(t, ts.Node("nodeA").Protocol.Engines.Main.Get().WriteSnapshot(snapshotPath)) { nodeG := ts.AddNode("nodeG") @@ -331,7 +324,7 @@ func Test_Upgrade_Signaling(t *testing.T) { // Create snapshot and start new nodeH from it. { snapshotPath := ts.Directory.Path(fmt.Sprintf("%d_snapshot", time.Now().Unix())) - require.NoError(t, ts.Node("nodeE2").Protocol.MainEngineInstance().WriteSnapshot(snapshotPath)) + require.NoError(t, ts.Node("nodeE2").Protocol.Engines.Main.Get().WriteSnapshot(snapshotPath)) nodeG := ts.AddNode("nodeH") nodeG.Initialize(true, diff --git a/pkg/testsuite/accounts.go b/pkg/testsuite/accounts.go index 30faf1a0c..a10f9833f 100644 --- a/pkg/testsuite/accounts.go +++ b/pkg/testsuite/accounts.go @@ -13,12 +13,12 @@ import ( func (t *TestSuite) AssertAccountData(accountData *accounts.AccountData, nodes ...*mock.Node) { for _, node := range nodes { t.Eventually(func() error { - actualAccountData, exists, err := node.Protocol.MainEngineInstance().Ledger.Account(accountData.ID, node.Protocol.MainEngineInstance().SyncManager.LatestCommitment().Slot()) + actualAccountData, exists, err := node.Protocol.Engines.Main.Get().Ledger.Account(accountData.ID, node.Protocol.Engines.Main.Get().SyncManager.LatestCommitment().Slot()) if err != nil { return ierrors.Wrap(err, "AssertAccountData: failed to load account data") } if !exists { - return ierrors.Errorf("AssertAccountData: %s: account %s does not exist with latest committed slot %d", node.Name, accountData.ID, node.Protocol.MainEngineInstance().SyncManager.LatestCommitment().Slot()) + return ierrors.Errorf("AssertAccountData: %s: account %s does not exist with latest committed slot %d", node.Name, accountData.ID, node.Protocol.Engines.Main.Get().SyncManager.LatestCommitment().Slot()) } if accountData.ID != actualAccountData.ID { @@ -74,7 +74,7 @@ func (t *TestSuite) AssertAccountDiff(accountID iotago.AccountID, index iotago.S for _, node := range nodes { t.Eventually(func() error { - accountsDiffStorage, err := node.Protocol.MainEngineInstance().Storage.AccountDiffs(index) + accountsDiffStorage, err := node.Protocol.Engines.Main.Get().Storage.AccountDiffs(index) if err != nil { return ierrors.Wrapf(err, "AssertAccountDiff: %s: failed to load accounts diff for slot %d", node.Name, index) } diff --git a/pkg/testsuite/attestations.go b/pkg/testsuite/attestations.go index 4b4670252..af29c4525 100644 --- a/pkg/testsuite/attestations.go +++ b/pkg/testsuite/attestations.go @@ -23,7 +23,7 
@@ func (t *TestSuite) AssertAttestationsForSlot(slot iotago.SlotIndex, blocks []*b for _, node := range nodes { t.Eventually(func() error { - attestationTree, err := node.Protocol.MainEngineInstance().Attestations.GetMap(slot) + attestationTree, err := node.Protocol.Engines.Main.Get().Attestations.GetMap(slot) if err != nil { return ierrors.Wrapf(err, "AssertAttestationsForSlot: %s: error loading attestation tree for slot %d", node.Name, slot) } diff --git a/pkg/testsuite/blocks.go b/pkg/testsuite/blocks.go index e89412277..40bdd265b 100644 --- a/pkg/testsuite/blocks.go +++ b/pkg/testsuite/blocks.go @@ -16,7 +16,7 @@ func (t *TestSuite) AssertBlock(block *blocks.Block, node *mock.Node) *model.Blo var loadedBlock *model.Block t.Eventually(func() error { var exists bool - loadedBlock, exists = node.Protocol.MainEngineInstance().Block(block.ID()) + loadedBlock, exists = node.Protocol.Engines.Main.Get().Block(block.ID()) if !exists { return ierrors.Errorf("AssertBlock: %s: block %s does not exist", node.Name, block.ID()) } @@ -47,7 +47,7 @@ func (t *TestSuite) AssertBlocksExist(blocks []*blocks.Block, expectedExist bool t.AssertBlock(block, node) } else { t.Eventually(func() error { - if lo.Return2(node.Protocol.MainEngineInstance().Block(block.ID())) { + if lo.Return2(node.Protocol.Engines.Main.Get().Block(block.ID())) { return ierrors.Errorf("AssertBlocksExist: %s: block %s exists but should not", node.Name, block) } @@ -82,7 +82,7 @@ func (t *TestSuite) assertBlocksInCacheWithFunc(expectedBlocks []*blocks.Block, for _, node := range nodes { for _, block := range expectedBlocks { t.Eventually(func() error { - blockFromCache, exists := node.Protocol.MainEngineInstance().BlockFromCache(block.ID()) + blockFromCache, exists := node.Protocol.Engines.Main.Get().BlockFromCache(block.ID()) if !exists { return ierrors.Errorf("assertBlocksInCacheWithFunc[%s]: %s: block %s does not exist", propertyName, node.Name, block.ID()) } @@ -139,7 +139,7 @@ func (t *TestSuite) AssertBlocksInCacheConflicts(blockConflicts map[*blocks.Bloc for _, node := range nodes { for block, conflictAliases := range blockConflicts { t.Eventually(func() error { - blockFromCache, exists := node.Protocol.MainEngineInstance().BlockFromCache(block.ID()) + blockFromCache, exists := node.Protocol.Engines.Main.Get().BlockFromCache(block.ID()) if !exists { return ierrors.Errorf("AssertBlocksInCacheConflicts: %s: block %s does not exist", node.Name, block.ID()) } diff --git a/pkg/testsuite/chainmanager.go b/pkg/testsuite/chainmanager.go index 5ed7c0afb..2f9b02101 100644 --- a/pkg/testsuite/chainmanager.go +++ b/pkg/testsuite/chainmanager.go @@ -10,20 +10,16 @@ func (t *TestSuite) AssertChainManagerIsSolid(nodes ...*mock.Node) { for _, node := range nodes { t.Eventually(func() error { - rootCommitment := node.Protocol.ChainManager.RootCommitment() - chain := node.Protocol.ChainManager.Chain(rootCommitment.ID()) + chain := node.Protocol.Chains.Main.Get() if chain == nil { return ierrors.Errorf("AssertChainManagerIsSolid: %s: chain is nil", node.Name) } - latestChainCommitment := chain.LatestCommitment() - latestCommitment := node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment() + latestChainCommitment := chain.LatestCommitment.Get() + latestCommitment := node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment() if latestCommitment.ID() != latestChainCommitment.ID() { - return ierrors.Errorf("AssertChainManagerIsSolid: %s: latest commitment is not equal, expected %d, got %d", node.Name, 
latestCommitment.ID(), latestChainCommitment.ID()) - } - if !latestChainCommitment.SolidEvent().WasTriggered() { - return ierrors.Errorf("AssertChainManagerIsSolid: %s: is not solid", node.Name) + return ierrors.Errorf("AssertChainManagerIsSolid: %s: latest commitment is not equal, expected %s, got %s", node.Name, latestCommitment.ID(), latestChainCommitment.ID()) } return nil diff --git a/pkg/testsuite/eviction.go b/pkg/testsuite/eviction.go index e45584bcd..8b9500a1b 100644 --- a/pkg/testsuite/eviction.go +++ b/pkg/testsuite/eviction.go @@ -20,7 +20,7 @@ func (t *TestSuite) AssertActiveRootBlocks(expectedBlocks []*blocks.Block, nodes for _, node := range nodes { t.Eventually(func() error { - activeRootBlocks := node.Protocol.MainEngineInstance().EvictionState.ActiveRootBlocks() + activeRootBlocks := node.Protocol.Engines.Main.Get().EvictionState.ActiveRootBlocks() if !assert.Equal(t.fakeTesting, expectedRootBlocks, activeRootBlocks) { return ierrors.Errorf("AssertActiveRootBlocks: %s: expected %v, got %v", node.Name, expectedRootBlocks, activeRootBlocks) @@ -36,8 +36,8 @@ func (t *TestSuite) AssertEvictedSlot(expectedIndex iotago.SlotIndex, nodes ...* for _, node := range nodes { t.Eventually(func() error { - if expectedIndex != lo.Return1(node.Protocol.MainEngineInstance().EvictionState.LastEvictedSlot()) { - return ierrors.Errorf("AssertEvictedSlot: %s: expected %d, got %d", node.Name, expectedIndex, lo.Return1(node.Protocol.MainEngineInstance().EvictionState.LastEvictedSlot())) + if expectedIndex != lo.Return1(node.Protocol.Engines.Main.Get().EvictionState.LastEvictedSlot()) { + return ierrors.Errorf("AssertEvictedSlot: %s: expected %d, got %d", node.Name, expectedIndex, lo.Return1(node.Protocol.Engines.Main.Get().EvictionState.LastEvictedSlot())) } return nil diff --git a/pkg/testsuite/mock/blockissuer.go b/pkg/testsuite/mock/blockissuer.go index 0f87ecf00..10b848f9f 100644 --- a/pkg/testsuite/mock/blockissuer.go +++ b/pkg/testsuite/mock/blockissuer.go @@ -197,7 +197,7 @@ func (i *BlockIssuer) IssueValidationBlock(ctx context.Context, alias string, no validationBlock, _ := block.ValidationBlock() - fmt.Printf("Issued ValidationBlock: %s - slot %d - commitment %s %d - latest finalized slot %d - version: %d - highestSupportedVersion: %d, hash: %s\n", block.ID(), block.ID().Slot(), block.SlotCommitmentID(), block.SlotCommitmentID().Slot(), block.ProtocolBlock().Header.LatestFinalizedSlot, block.ProtocolBlock().Header.ProtocolVersion, validationBlock.HighestSupportedVersion, validationBlock.ProtocolParametersHash) + node.Protocol.Engines.Main.Get().LogTrace("issued validation block", "blockID", block.ID(), "slot", block.ID().Slot(), "commitment", block.SlotCommitmentID(), "latestFinalizedSlot", block.ProtocolBlock().Header.LatestFinalizedSlot, "version", block.ProtocolBlock().Header.ProtocolVersion, "highestSupportedVersion", validationBlock.HighestSupportedVersion, "hash", validationBlock.ProtocolParametersHash) return block } @@ -264,7 +264,7 @@ func (i *BlockIssuer) CreateBasicBlock(ctx context.Context, alias string, node * if err != nil { rmcSlot = 0 } - rmc, err := node.Protocol.MainEngineInstance().Ledger.RMCManager().RMC(rmcSlot) + rmc, err := node.Protocol.Engines.Main.Get().Ledger.RMCManager().RMC(rmcSlot) require.NoError(i.Testing, err) // only set the burned Mana as the last step before signing, so workscore calculation is correct. 
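Reviewer note, not part of the patch: the change running through these test and mock hunks is the same mechanical substitution everywhere. The engine is no longer reached through the removed MainEngineInstance() accessor but through the reactive Engines.Main variable, and the engine lifecycle hooks move to the corresponding reactive events. Condensed from the hunks above and below:

// before
commitment := node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment()
e.HookConstructed(func() { /* ... */ })

// after
commitment := node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment()
e.Constructed.OnTrigger(func() { /* ... */ })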
@@ -292,7 +292,7 @@ func (i *BlockIssuer) IssueBasicBlock(ctx context.Context, alias string, node *N require.NoErrorf(i.Testing, i.IssueBlock(block.ModelBlock(), node), "%s > failed to issue block with alias %s", i.Name, alias) - fmt.Printf("%s > Issued block: %s - slot %d - commitment %s %d - latest finalized slot %d\n", i.Name, block.ID(), block.ID().Slot(), block.SlotCommitmentID(), block.SlotCommitmentID().Slot(), block.ProtocolBlock().Header.LatestFinalizedSlot) + node.Protocol.LogTrace("issued block", "blockID", block.ID(), "slot", block.ID().Slot(), "commitment", block.SlotCommitmentID(), "latestFinalizedSlot", block.ProtocolBlock().Header.LatestFinalizedSlot, "version", block.ProtocolBlock().Header.ProtocolVersion) return block } @@ -387,8 +387,8 @@ func (i *BlockIssuer) AttachBlock(ctx context.Context, iotaBlock *iotago.Block, } if iotaBlock.Header.SlotCommitmentID == iotago.EmptyCommitmentID { - iotaBlock.Header.SlotCommitmentID = node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment().MustID() - iotaBlock.Header.LatestFinalizedSlot = node.Protocol.MainEngineInstance().Storage.Settings().LatestFinalizedSlot() + iotaBlock.Header.SlotCommitmentID = node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment().MustID() + iotaBlock.Header.LatestFinalizedSlot = node.Protocol.Engines.Main.Get().Storage.Settings().LatestFinalizedSlot() resign = true } @@ -445,7 +445,7 @@ func (i *BlockIssuer) AttachBlock(ctx context.Context, iotaBlock *iotago.Block, if err != nil { rmcSlot = 0 } - rmc, err := node.Protocol.MainEngineInstance().Ledger.RMCManager().RMC(rmcSlot) + rmc, err := node.Protocol.Engines.Main.Get().Ledger.RMCManager().RMC(rmcSlot) if err != nil { return iotago.EmptyBlockID, ierrors.Wrapf(err, "error loading commitment of slot %d from storage to get RMC", rmcSlot) } @@ -484,7 +484,7 @@ func (i *BlockIssuer) AttachBlock(ctx context.Context, iotaBlock *iotago.Block, return iotago.EmptyBlockID, ierrors.Wrap(err, "error serializing block to model block") } - if !i.optsRateSetterEnabled || node.Protocol.MainEngineInstance().Scheduler.IsBlockIssuerReady(modelBlock.ProtocolBlock().Header.IssuerID) { + if !i.optsRateSetterEnabled || node.Protocol.Engines.Main.Get().Scheduler.IsBlockIssuerReady(modelBlock.ProtocolBlock().Header.IssuerID) { i.events.BlockConstructed.Trigger(modelBlock) if err = i.IssueBlockAndAwaitEvent(ctx, modelBlock, node, node.Protocol.Events.Engine.BlockDAG.BlockAttached); err != nil { @@ -511,7 +511,7 @@ func (i *BlockIssuer) setDefaultBlockParams(blockParams *BlockHeaderParams, node } if blockParams.LatestFinalizedSlot == nil { - latestFinalizedSlot := node.Protocol.MainEngineInstance().Storage.Settings().LatestFinalizedSlot() + latestFinalizedSlot := node.Protocol.Engines.Main.Get().Storage.Settings().LatestFinalizedSlot() blockParams.LatestFinalizedSlot = &latestFinalizedSlot } @@ -534,7 +534,7 @@ func (i *BlockIssuer) getAddressableCommitment(currentAPI iotago.API, blockIssui protoParams := currentAPI.ProtocolParameters() blockSlot := currentAPI.TimeProvider().SlotFromTime(blockIssuingTime) - commitment := node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment() + commitment := node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment() if blockSlot > commitment.Slot+protoParams.MaxCommittableAge() { return nil, ierrors.Wrapf(ErrBlockTooRecent, "can't issue block: block slot %d is too far in the future, latest commitment is %d", blockSlot, commitment.Slot) @@ 
-546,7 +546,7 @@ func (i *BlockIssuer) getAddressableCommitment(currentAPI iotago.API, blockIssui } commitmentSlot := commitment.Slot - protoParams.MinCommittableAge() - loadedCommitment, err := node.Protocol.MainEngineInstance().Storage.Commitments().Load(commitmentSlot) + loadedCommitment, err := node.Protocol.Engines.Main.Get().Storage.Commitments().Load(commitmentSlot) if err != nil { return nil, ierrors.Wrapf(err, "error loading valid commitment of slot %d according to minCommittableAge from storage", commitmentSlot) } @@ -577,7 +577,7 @@ func (i *BlockIssuer) getReferencesValidationBlock(ctx context.Context, node *No func (i *BlockIssuer) validateReferences(issuingTime time.Time, slotCommitmentIndex iotago.SlotIndex, references model.ParentReferences, node *Node) error { for _, parent := range lo.Flatten(lo.Map(lo.Values(references), func(ds iotago.BlockIDs) []iotago.BlockID { return ds })) { - b, exists := node.Protocol.MainEngineInstance().BlockFromCache(parent) + b, exists := node.Protocol.Engines.Main.Get().BlockFromCache(parent) if !exists { return ierrors.Errorf("cannot issue block if the parents are not known: %s", parent) } @@ -599,7 +599,7 @@ func (i *BlockIssuer) IssueBlock(block *model.Block, node *Node) error { } if _, isValidationBlock := block.ValidationBlock(); isValidationBlock { - _ = node.Protocol.MainEngineInstance().Storage.Settings().SetLatestIssuedValidationBlock(block) + _ = node.Protocol.Engines.Main.Get().Storage.Settings().SetLatestIssuedValidationBlock(block) } i.events.BlockIssued.Trigger(block) @@ -623,7 +623,7 @@ func (i *BlockIssuer) getReferencesWithRetry(ctx context.Context, parentsCount i defer timeutil.CleanupTicker(interval) for { - references = node.Protocol.MainEngineInstance().TipSelection.SelectTips(parentsCount) + references = node.Protocol.Engines.Main.Get().TipSelection.SelectTips(parentsCount) if len(references[iotago.StrongParentType]) > 0 { return references, nil } diff --git a/pkg/testsuite/mock/blockissuer_acceptance_loss.go b/pkg/testsuite/mock/blockissuer_acceptance_loss.go index 8e5909c7e..e295819b5 100644 --- a/pkg/testsuite/mock/blockissuer_acceptance_loss.go +++ b/pkg/testsuite/mock/blockissuer_acceptance_loss.go @@ -8,12 +8,12 @@ import ( ) func (i *BlockIssuer) reviveChain(issuingTime time.Time, node *Node) (*iotago.Commitment, iotago.BlockID, error) { - lastCommittedSlot := node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Slot() + lastCommittedSlot := node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Slot() apiForSlot := node.Protocol.APIForSlot(lastCommittedSlot) // Get a rootblock as recent as possible for the parent. 
parentBlockID := iotago.EmptyBlockID - for rootBlock := range node.Protocol.MainEngineInstance().EvictionState.ActiveRootBlocks() { + for rootBlock := range node.Protocol.Engines.Main.Get().EvictionState.ActiveRootBlocks() { if rootBlock.Slot() > parentBlockID.Slot() { parentBlockID = rootBlock } @@ -33,11 +33,11 @@ func (i *BlockIssuer) reviveChain(issuingTime time.Time, node *Node) (*iotago.Co } commitUntilSlot := issuingSlot - apiForSlot.ProtocolParameters().MinCommittableAge() - if err := node.Protocol.MainEngineInstance().Notarization.ForceCommitUntil(commitUntilSlot); err != nil { + if err := node.Protocol.Engines.Main.Get().Notarization.ForceCommitUntil(commitUntilSlot); err != nil { return nil, iotago.EmptyBlockID, ierrors.Wrapf(err, "failed to force commit until slot %d", commitUntilSlot) } - commitment, err := node.Protocol.MainEngineInstance().Storage.Commitments().Load(commitUntilSlot) + commitment, err := node.Protocol.Engines.Main.Get().Storage.Commitments().Load(commitUntilSlot) if err != nil { return nil, iotago.EmptyBlockID, ierrors.Wrapf(err, "failed to commit until slot %d to revive chain", commitUntilSlot) } diff --git a/pkg/testsuite/mock/network.go b/pkg/testsuite/mock/network.go index 42b363b7b..bb20c3579 100644 --- a/pkg/testsuite/mock/network.go +++ b/pkg/testsuite/mock/network.go @@ -115,6 +115,9 @@ func (e *Endpoint) LocalPeerID() peer.ID { } func (e *Endpoint) RegisterProtocol(_ func() proto.Message, handler func(peer.ID, proto.Message) error) { + e.network.dispatchersMutex.Lock() + defer e.network.dispatchersMutex.Unlock() + e.handler = handler } @@ -153,8 +156,10 @@ func (e *Endpoint) Send(packet proto.Message, to ...peer.ID) { e.network.dispatchersMutex.RLock() defer e.network.dispatchersMutex.RUnlock() - if err := dispatcher.handler(e.id, packet); err != nil { - fmt.Println(e.id, "ERROR: ", err) + if dispatcher.handler != nil { + if err := dispatcher.handler(e.id, packet); err != nil { + fmt.Println(e.id, "ERROR: ", err) + } } }() } diff --git a/pkg/testsuite/mock/node.go b/pkg/testsuite/mock/node.go index 827507c27..71c897207 100644 --- a/pkg/testsuite/mock/node.go +++ b/pkg/testsuite/mock/node.go @@ -3,6 +3,7 @@ package mock import ( "context" "fmt" + "log/slog" "sync/atomic" "testing" "time" @@ -13,13 +14,13 @@ import ( "golang.org/x/crypto/blake2b" "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/log" "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/hive.go/runtime/syncutils" "github.com/iotaledger/hive.go/runtime/workerpool" "github.com/iotaledger/iota-core/pkg/core/account" "github.com/iotaledger/iota-core/pkg/model" "github.com/iotaledger/iota-core/pkg/protocol" - "github.com/iotaledger/iota-core/pkg/protocol/chainmanager" "github.com/iotaledger/iota-core/pkg/protocol/engine" "github.com/iotaledger/iota-core/pkg/protocol/engine/blocks" "github.com/iotaledger/iota-core/pkg/protocol/engine/filter/postsolidfilter" @@ -27,7 +28,6 @@ import ( "github.com/iotaledger/iota-core/pkg/protocol/engine/mempool" "github.com/iotaledger/iota-core/pkg/protocol/engine/notarization" iotago "github.com/iotaledger/iota.go/v4" - "github.com/iotaledger/iota.go/v4/merklehasher" "github.com/iotaledger/iota.go/v4/wallet" ) @@ -74,6 +74,9 @@ type Node struct { candidateEngineActivatedCount atomic.Uint32 mainEngineSwitchedCount atomic.Uint32 + logHandler slog.Handler + enableEngineLogging bool + mutex syncutils.RWMutex attachedBlocks []*blocks.Block currentSlot iotago.SlotIndex @@ -81,7 +84,7 @@ type Node struct { invalidTransactionEvents 
map[iotago.SignedTransactionID]InvalidSignedTransactionEvent } -func NewNode(t *testing.T, net *Network, partition string, name string, validator bool) *Node { +func NewNode(t *testing.T, net *Network, partition string, name string, validator bool, logHandler slog.Handler) *Node { keyManager := lo.PanicOnErr(wallet.NewKeyManagerFromRandom(wallet.DefaultIOTAPath)) priv, pub := keyManager.KeyPair() @@ -112,6 +115,9 @@ func NewNode(t *testing.T, net *Network, partition string, name string, validato Endpoint: net.JoinWithEndpointID(peerID, partition), Workers: workerpool.NewGroup(name), + logHandler: logHandler, + enableEngineLogging: true, + attachedBlocks: make([]*blocks.Block, 0), invalidTransactionEvents: make(map[iotago.SignedTransactionID]InvalidSignedTransactionEvent), } @@ -126,25 +132,28 @@ func (n *Node) IsValidator() bool { } func (n *Node) Initialize(failOnBlockFiltered bool, opts ...options.Option[protocol.Protocol]) { - n.Protocol = protocol.New(n.Workers.CreateGroup("Protocol"), + n.Protocol = protocol.New( + log.NewLogger(n.Name, n.logHandler), + n.Workers.CreateGroup("Protocol"), n.Endpoint, opts..., ) n.hookEvents() - n.hookLogging(failOnBlockFiltered) + + if n.enableEngineLogging { + n.hookLogging(failOnBlockFiltered) + } n.ctx, n.ctxCancel = context.WithCancel(context.Background()) started := make(chan struct{}, 1) - n.Protocol.HookInitialized(func() { + n.Protocol.Initialized.OnTrigger(func() { close(started) }) go func() { - defer n.ctxCancel() - if err := n.Protocol.Run(n.ctx); err != nil { fmt.Printf("%s > Run finished with error: %s\n", n.Name, err.Error()) } @@ -154,13 +163,21 @@ func (n *Node) Initialize(failOnBlockFiltered bool, opts ...options.Option[proto } func (n *Node) hookEvents() { - events := n.Protocol.Events - - events.ChainManager.ForkDetected.Hook(func(fork *chainmanager.Fork) { n.forkDetectedCount.Add(1) }) + n.Protocol.Chains.HeaviestAttestedCandidate.OnUpdate(func(_ *protocol.Chain, heaviestAttestedCandidate *protocol.Chain) { + if heaviestAttestedCandidate != nil { + n.forkDetectedCount.Add(1) - events.CandidateEngineActivated.Hook(func(e *engine.Engine) { n.candidateEngineActivatedCount.Add(1) }) + heaviestAttestedCandidate.Engine.OnUpdate(func(prevEngine *engine.Engine, newEngine *engine.Engine) { + n.candidateEngineActivatedCount.Add(1) + }) + } + }) - events.MainEngineSwitched.Hook(func(e *engine.Engine) { n.mainEngineSwitchedCount.Add(1) }) + n.Protocol.Chains.Main.OnUpdate(func(prevChain *protocol.Chain, newChain *protocol.Chain) { + if prevChain != nil { + n.mainEngineSwitchedCount.Add(1) + } + }) n.Protocol.Events.Engine.PostSolidFilter.BlockFiltered.Hook(func(event *postsolidfilter.BlockFilteredEvent) { n.mutex.Lock() @@ -169,7 +186,7 @@ func (n *Node) hookEvents() { n.filteredBlockEvents = append(n.filteredBlockEvents, event) }) - n.Protocol.MainEngineInstance().Ledger.MemPool().OnSignedTransactionAttached( + n.Protocol.Engines.Main.Get().Ledger.MemPool().OnSignedTransactionAttached( func(signedTransactionMetadata mempool.SignedTransactionMetadata) { signedTxID := signedTransactionMetadata.ID() @@ -198,86 +215,20 @@ func (n *Node) hookEvents() { } func (n *Node) hookLogging(failOnBlockFiltered bool) { - events := n.Protocol.Events - - n.attachEngineLogs(failOnBlockFiltered, n.Protocol.MainEngineInstance()) - - events.Network.BlockReceived.Hook(func(block *model.Block, source peer.ID) { - fmt.Printf("%s > Network.BlockReceived: from %s %s - %d\n", n.Name, source, block.ID(), block.ID().Slot()) - }) - - 
events.Network.BlockRequestReceived.Hook(func(blockID iotago.BlockID, source peer.ID) { - fmt.Printf("%s > Network.BlockRequestReceived: from %s %s\n", n.Name, source, blockID) - }) - - events.Network.SlotCommitmentReceived.Hook(func(commitment *model.Commitment, source peer.ID) { - fmt.Printf("%s > Network.SlotCommitmentReceived: from %s %s\n", n.Name, source, commitment.ID()) - }) - - events.Network.SlotCommitmentRequestReceived.Hook(func(commitmentID iotago.CommitmentID, source peer.ID) { - fmt.Printf("%s > Network.SlotCommitmentRequestReceived: from %s %s\n", n.Name, source, commitmentID) - }) - - events.Network.AttestationsReceived.Hook(func(commitment *model.Commitment, attestations []*iotago.Attestation, merkleProof *merklehasher.Proof[iotago.Identifier], source peer.ID) { - fmt.Printf("%s > Network.AttestationsReceived: from %s %s number of attestations: %d with merkleProof: %s - %s\n", n.Name, source, commitment.ID(), len(attestations), lo.PanicOnErr(merkleProof.JSONEncode()), lo.Map(attestations, func(a *iotago.Attestation) iotago.BlockID { - return lo.PanicOnErr(a.BlockID()) - })) - }) - - events.Network.AttestationsRequestReceived.Hook(func(id iotago.CommitmentID, source peer.ID) { - fmt.Printf("%s > Network.AttestationsRequestReceived: from %s %s\n", n.Name, source, id) - }) - - events.Network.WarpSyncResponseReceived.Hook(func(id iotago.CommitmentID, ds map[iotago.CommitmentID]iotago.BlockIDs, m *merklehasher.Proof[iotago.Identifier], ds2 iotago.TransactionIDs, m2 *merklehasher.Proof[iotago.Identifier], id2 peer.ID) { - fmt.Printf("%s > Network.WarpSyncResponseReceived: from %s %s\n", n.Name, id2, id) - }) - - events.Network.WarpSyncRequestReceived.Hook(func(id iotago.CommitmentID, id2 peer.ID) { - fmt.Printf("%s > Network.WarpSyncRequestReceived: from %s %s\n", n.Name, id2, id) - }) - - // events.ChainManager.CommitmentBelowRoot.Hook(func(commitmentID iotago.CommitmentID) { - // fmt.Printf("%s > ChainManager.CommitmentBelowRoot: %s\n", n.Name, commitmentID) - // }) - - events.ChainManager.ForkDetected.Hook(func(fork *chainmanager.Fork) { - fmt.Printf("%s > ChainManager.ForkDetected: %s\n", n.Name, fork) - }) - - // events.Engine.TipManager.BlockAdded.Hook(func(tipMetadata tipmanager.TipMetadata) { - // fmt.Printf("%s > TipManager.BlockAdded: %s in pool %d\n", n.Name, tipMetadata.ID(), tipMetadata.TipPool().Get()) - // }) - - events.CandidateEngineActivated.Hook(func(e *engine.Engine) { - fmt.Printf("%s > CandidateEngineActivated: %s, ChainID:%s Slot:%s\n", n.Name, e.Name(), e.ChainID(), e.ChainID().Slot()) - - n.attachEngineLogs(failOnBlockFiltered, e) - }) - - events.MainEngineSwitched.Hook(func(e *engine.Engine) { - fmt.Printf("%s > MainEngineSwitched: %s, ChainID:%s Slot:%s\n", n.Name, e.Name(), e.ChainID(), e.ChainID().Slot()) - }) - - events.MainEngineRestarted.Hook(func(e *engine.Engine) { - fmt.Printf("%s > MainEngineRestarted: %s, ChainID:%s Slot:%s\n", n.Name, e.Name(), e.ChainID(), e.ChainID().Slot()) - - n.attachEngineLogsWithName(failOnBlockFiltered, e, fmt.Sprintf("Main2 - %s", e.Name()[:8])) - }) - - events.Network.Error.Hook(func(err error, id peer.ID) { - fmt.Printf("%s > Network.Error: from %s %s\n", n.Name, id, err) - }) - - events.Error.Hook(func(err error) { - fmt.Printf("%s > Protocol.Error: %s\n", n.Name, err.Error()) + n.Protocol.Chains.WithElements(func(chain *protocol.Chain) (teardown func()) { + return chain.Engine.OnUpdate(func(_ *engine.Engine, newEngine *engine.Engine) { + if newEngine != nil { + n.attachEngineLogs(failOnBlockFiltered, newEngine) 
+ } + }) }) } -func (n *Node) attachEngineLogsWithName(failOnBlockFiltered bool, instance *engine.Engine, engineName string) { +func (n *Node) attachEngineLogsWithName(failOnBlockFiltered bool, instance *engine.Engine) { events := instance.Events events.BlockDAG.BlockAttached.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] BlockDAG.BlockAttached: %s\n", n.Name, engineName, block.ID()) + instance.LogTrace("BlockDAG.BlockAttached", "block", block.ID()) n.mutex.Lock() defer n.mutex.Unlock() @@ -285,78 +236,80 @@ func (n *Node) attachEngineLogsWithName(failOnBlockFiltered bool, instance *engi }) events.BlockDAG.BlockSolid.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] BlockDAG.BlockSolid: %s\n", n.Name, engineName, block.ID()) + instance.LogTrace("BlockDAG.BlockSolid", "block", block.ID()) }) events.BlockDAG.BlockInvalid.Hook(func(block *blocks.Block, err error) { - fmt.Printf("%s > [%s] BlockDAG.BlockInvalid: %s - %s\n", n.Name, engineName, block.ID(), err) + instance.LogTrace("BlockDAG.BlockInvalid", "block", block.ID(), "err", err) }) events.BlockDAG.BlockMissing.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] BlockDAG.BlockMissing: %s\n", n.Name, engineName, block.ID()) + instance.LogTrace("BlockDAG.BlockMissing", "block", block.ID()) }) events.BlockDAG.MissingBlockAttached.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] BlockDAG.MissingBlockAttached: %s\n", n.Name, engineName, block.ID()) + instance.LogTrace("BlockDAG.MissingBlockAttached", "block", block.ID()) }) events.SeatManager.BlockProcessed.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] SybilProtection.BlockProcessed: %s\n", n.Name, engineName, block.ID()) + instance.LogTrace("SeatManager.BlockProcessed", "block", block.ID()) }) events.Booker.BlockBooked.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] Booker.BlockBooked: %s\n", n.Name, engineName, block.ID()) + instance.LogTrace("Booker.BlockBooked", "block", block.ID()) }) events.Booker.BlockInvalid.Hook(func(block *blocks.Block, err error) { - fmt.Printf("%s > [%s] Booker.BlockInvalid: %s - %s\n", n.Name, engineName, block.ID(), err.Error()) + instance.LogTrace("Booker.BlockInvalid", "block", block.ID(), "err", err) }) events.Booker.TransactionInvalid.Hook(func(metadata mempool.TransactionMetadata, err error) { - fmt.Printf("%s > [%s] Booker.TransactionInvalid: %s - %s\n", n.Name, engineName, metadata.ID(), err.Error()) + instance.LogTrace("Booker.TransactionInvalid", "tx", metadata.ID(), "err", err) }) events.Scheduler.BlockScheduled.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] Scheduler.BlockScheduled: %s\n", n.Name, engineName, block.ID()) + instance.LogTrace("Scheduler.BlockScheduled", "block", block.ID()) }) events.Scheduler.BlockEnqueued.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] Scheduler.BlockEnqueued: %s\n", n.Name, engineName, block.ID()) + instance.LogTrace("Scheduler.BlockEnqueued", "block", block.ID()) }) events.Scheduler.BlockSkipped.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] Scheduler.BlockSkipped: %s\n", n.Name, engineName, block.ID()) + instance.LogTrace("Scheduler.BlockSkipped", "block", block.ID()) }) events.Scheduler.BlockDropped.Hook(func(block *blocks.Block, err error) { - fmt.Printf("%s > [%s] Scheduler.BlockDropped: %s - %s\n", n.Name, engineName, block.ID(), err.Error()) + instance.LogTrace("Scheduler.BlockDropped", "block", block.ID(), "err", err) }) events.Clock.AcceptedTimeUpdated.Hook(func(newTime time.Time) { - fmt.Printf("%s > [%s] 
Clock.AcceptedTimeUpdated: %s [Slot %d]\n", n.Name, engineName, newTime, instance.LatestAPI().TimeProvider().SlotFromTime(newTime)) + instance.LogTrace("Clock.AcceptedTimeUpdated", "time", newTime, "slot", instance.LatestAPI().TimeProvider().SlotFromTime(newTime)) }) events.Clock.ConfirmedTimeUpdated.Hook(func(newTime time.Time) { - fmt.Printf("%s > [%s] Clock.ConfirmedTimeUpdated: %s [Slot %d]\n", n.Name, engineName, newTime, instance.LatestAPI().TimeProvider().SlotFromTime(newTime)) + instance.LogTrace("Clock.ConfirmedTimeUpdated", "time", newTime, "slot", instance.LatestAPI().TimeProvider().SlotFromTime(newTime)) }) events.PreSolidFilter.BlockPreAllowed.Hook(func(block *model.Block) { - fmt.Printf("%s > [%s] PreSolidFilter.BlockPreAllowed: %s\n", n.Name, engineName, block.ID()) + instance.LogTrace("PreSolidFilter.BlockPreAllowed", "block", block.ID()) }) events.PreSolidFilter.BlockPreFiltered.Hook(func(event *presolidfilter.BlockPreFilteredEvent) { - fmt.Printf("%s > [%s] PreSolidFilter.BlockPreFiltered: %s - %s\n", n.Name, engineName, event.Block.ID(), event.Reason.Error()) + instance.LogTrace("PreSolidFilter.BlockPreFiltered", "block", event.Block.ID(), "err", event.Reason) + if failOnBlockFiltered { n.Testing.Fatal("no blocks should be prefiltered") } }) events.PostSolidFilter.BlockAllowed.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] PostSolidFilter.BlockAllowed: %s\n", n.Name, engineName, block.ID()) + instance.LogTrace("PostSolidFilter.BlockAllowed", "block", block.ID()) }) events.PostSolidFilter.BlockFiltered.Hook(func(event *postsolidfilter.BlockFilteredEvent) { - fmt.Printf("%s > [%s] PostSolidFilter.BlockFiltered: %s - %s\n", n.Name, engineName, event.Block.ID(), event.Reason.Error()) + instance.LogTrace("PostSolidFilter.BlockFiltered", "block", event.Block.ID(), "err", event.Reason) + if failOnBlockFiltered { n.Testing.Fatal("no blocks should be filtered") } @@ -367,11 +320,11 @@ func (n *Node) attachEngineLogsWithName(failOnBlockFiltered bool, instance *engi }) events.BlockRequester.Tick.Hook(func(blockID iotago.BlockID) { - fmt.Printf("%s > [%s] BlockRequester.Tick: %s\n", n.Name, engineName, blockID) + instance.LogTrace("BlockRequester.Tick", "block", blockID) }) events.BlockProcessed.Hook(func(blockID iotago.BlockID) { - fmt.Printf("%s > [%s] Engine.BlockProcessed: %s\n", n.Name, engineName, blockID) + instance.LogTrace("BlockProcessed", "block", blockID) }) events.Notarization.SlotCommitted.Hook(func(details *notarization.SlotCommittedDetails) { @@ -398,117 +351,116 @@ func (n *Node) attachEngineLogsWithName(failOnBlockFiltered bool, instance *engi require.NoError(n.Testing, err) } - fmt.Printf("%s > [%s] NotarizationManager.SlotCommitted: %s %s Accepted Blocks: %s\n %s\n Attestations: %s\n", n.Name, engineName, details.Commitment.ID(), details.Commitment, acceptedBlocks, roots, attestationBlockIDs) + instance.LogTrace("NotarizationManager.SlotCommitted", "commitment", details.Commitment.ID(), "acceptedBlocks", acceptedBlocks, "roots", roots, "attestations", attestationBlockIDs) }) events.Notarization.LatestCommitmentUpdated.Hook(func(commitment *model.Commitment) { - fmt.Printf("%s > [%s] NotarizationManager.LatestCommitmentUpdated: %s\n", n.Name, engineName, commitment.ID()) + instance.LogTrace("NotarizationManager.LatestCommitmentUpdated", "commitment", commitment.ID()) }) events.BlockGadget.BlockPreAccepted.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockPreAccepted: %s %s\n", n.Name, engineName, block.ID(), 
block.ProtocolBlock().Header.SlotCommitmentID) + instance.LogTrace("BlockGadget.BlockPreAccepted", "block", block.ID(), "slotCommitmentID", block.ProtocolBlock().Header.SlotCommitmentID) }) events.BlockGadget.BlockAccepted.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockAccepted: %s @ slot %s committing to %s\n", n.Name, engineName, block.ID(), block.ID().Slot(), block.ProtocolBlock().Header.SlotCommitmentID) + instance.LogTrace("BlockGadget.BlockAccepted", "block", block.ID(), "slotCommitmentID", block.ProtocolBlock().Header.SlotCommitmentID) }) events.BlockGadget.BlockPreConfirmed.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockPreConfirmed: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().Header.SlotCommitmentID) + instance.LogTrace("BlockGadget.BlockPreConfirmed", "block", block.ID(), "slotCommitmentID", block.ProtocolBlock().Header.SlotCommitmentID) }) events.BlockGadget.BlockConfirmed.Hook(func(block *blocks.Block) { - fmt.Printf("%s > [%s] Consensus.BlockGadget.BlockConfirmed: %s %s\n", n.Name, engineName, block.ID(), block.ProtocolBlock().Header.SlotCommitmentID) + instance.LogTrace("BlockGadget.BlockConfirmed", "block", block.ID(), "slotCommitmentID", block.ProtocolBlock().Header.SlotCommitmentID) }) events.SlotGadget.SlotFinalized.Hook(func(slot iotago.SlotIndex) { - fmt.Printf("%s > [%s] Consensus.SlotGadget.SlotFinalized: %s\n", n.Name, engineName, slot) + instance.LogTrace("SlotGadget.SlotFinalized", "slot", slot) }) events.SeatManager.OnlineCommitteeSeatAdded.Hook(func(seat account.SeatIndex, accountID iotago.AccountID) { - fmt.Printf("%s > [%s] SybilProtection.OnlineCommitteeSeatAdded: %d - %s\n", n.Name, engineName, seat, accountID) + instance.LogTrace("SybilProtection.OnlineCommitteeSeatAdded", "seat", seat, "accountID", accountID) }) events.SeatManager.OnlineCommitteeSeatRemoved.Hook(func(seat account.SeatIndex) { - fmt.Printf("%s > [%s] SybilProtection.OnlineCommitteeSeatRemoved: %d\n", n.Name, engineName, seat) + instance.LogTrace("SybilProtection.OnlineCommitteeSeatRemoved", "seat", seat) }) events.SybilProtection.CommitteeSelected.Hook(func(committee *account.Accounts, epoch iotago.EpochIndex) { - fmt.Printf("%s > [%s] SybilProtection.CommitteeSelected: epoch %d - %s\n", n.Name, engineName, epoch, committee.IDs()) + instance.LogTrace("SybilProtection.CommitteeSelected", "epoch", epoch, "committee", committee.IDs()) }) - events.SpendDAG.SpenderCreated.Hook(func(spenderID iotago.TransactionID) { - fmt.Printf("%s > [%s] SpendDAG.SpendCreated: %s\n", n.Name, engineName, spenderID) + events.SpendDAG.SpenderCreated.Hook(func(conflictID iotago.TransactionID) { + instance.LogTrace("SpendDAG.SpenderCreated", "conflictID", conflictID) }) - events.SpendDAG.SpenderEvicted.Hook(func(spenderID iotago.TransactionID) { - fmt.Printf("%s > [%s] SpendDAG.SpendEvicted: %s\n", n.Name, engineName, spenderID) + events.SpendDAG.SpenderEvicted.Hook(func(conflictID iotago.TransactionID) { + instance.LogTrace("SpendDAG.SpenderEvicted", "conflictID", conflictID) }) - events.SpendDAG.SpenderRejected.Hook(func(spenderID iotago.TransactionID) { - fmt.Printf("%s > [%s] SpendDAG.SpendRejected: %s\n", n.Name, engineName, spenderID) + + events.SpendDAG.SpenderRejected.Hook(func(conflictID iotago.TransactionID) { + instance.LogTrace("SpendDAG.SpenderRejected", "conflictID", conflictID) }) - events.SpendDAG.SpenderAccepted.Hook(func(spenderID iotago.TransactionID) { - fmt.Printf("%s > [%s] SpendDAG.SpendAccepted: 
%s\n", n.Name, engineName, spenderID) + events.SpendDAG.SpenderAccepted.Hook(func(conflictID iotago.TransactionID) { + instance.LogTrace("SpendDAG.SpenderAccepted", "conflictID", conflictID) }) instance.Ledger.MemPool().OnSignedTransactionAttached( func(signedTransactionMetadata mempool.SignedTransactionMetadata) { signedTransactionMetadata.OnSignaturesInvalid(func(err error) { - fmt.Printf("%s > [%s] MemPool.SignedTransactionSignaturesInvalid(%s): %s\n", n.Name, engineName, err, signedTransactionMetadata.ID()) + instance.LogTrace("MemPool.SignedTransactionSignaturesInvalid", "tx", signedTransactionMetadata.ID(), "err", err) }) }, ) instance.Ledger.OnTransactionAttached(func(transactionMetadata mempool.TransactionMetadata) { - fmt.Printf("%s > [%s] Ledger.TransactionAttached: %s\n", n.Name, engineName, transactionMetadata.ID()) + instance.LogTrace("Ledger.TransactionAttached", "tx", transactionMetadata.ID()) transactionMetadata.OnSolid(func() { - fmt.Printf("%s > [%s] MemPool.TransactionSolid: %s\n", n.Name, engineName, transactionMetadata.ID()) + instance.LogTrace("MemPool.TransactionSolid", "tx", transactionMetadata.ID()) }) transactionMetadata.OnExecuted(func() { - fmt.Printf("%s > [%s] MemPool.TransactionExecuted: %s\n", n.Name, engineName, transactionMetadata.ID()) + instance.LogTrace("MemPool.TransactionExecuted", "tx", transactionMetadata.ID()) }) transactionMetadata.OnBooked(func() { - fmt.Printf("%s > [%s] MemPool.TransactionBooked: %s\n", n.Name, engineName, transactionMetadata.ID()) + instance.LogTrace("MemPool.TransactionBooked", "tx", transactionMetadata.ID()) }) transactionMetadata.OnConflicting(func() { - fmt.Printf("%s > [%s] MemPool.TransactionConflicting: %s\n", n.Name, engineName, transactionMetadata.ID()) + instance.LogTrace("MemPool.TransactionConflicting", "tx", transactionMetadata.ID()) }) transactionMetadata.OnAccepted(func() { - fmt.Printf("%s > [%s] MemPool.TransactionAccepted: %s\n", n.Name, engineName, transactionMetadata.ID()) + instance.LogTrace("MemPool.TransactionAccepted", "tx", transactionMetadata.ID()) }) transactionMetadata.OnRejected(func() { - fmt.Printf("%s > [%s] MemPool.TransactionRejected: %s\n", n.Name, engineName, transactionMetadata.ID()) + instance.LogTrace("MemPool.TransactionRejected", "tx", transactionMetadata.ID()) }) transactionMetadata.OnInvalid(func(err error) { - fmt.Printf("%s > [%s] MemPool.TransactionInvalid(%s): %s\n", n.Name, engineName, err, transactionMetadata.ID()) + instance.LogTrace("MemPool.TransactionInvalid", "tx", transactionMetadata.ID(), "err", err) }) transactionMetadata.OnOrphanedSlotUpdated(func(slot iotago.SlotIndex) { - fmt.Printf("%s > [%s] MemPool.TransactionOrphanedSlotUpdated in slot %d: %s\n", n.Name, engineName, slot, transactionMetadata.ID()) + instance.LogTrace("MemPool.TransactionOrphanedSlotUpdated", "tx", transactionMetadata.ID(), "slot", slot) }) transactionMetadata.OnCommittedSlotUpdated(func(slot iotago.SlotIndex) { - fmt.Printf("%s > [%s] MemPool.TransactionCommittedSlotUpdated in slot %d: %s\n", n.Name, engineName, slot, transactionMetadata.ID()) + instance.LogTrace("MemPool.TransactionCommittedSlotUpdated", "tx", transactionMetadata.ID(), "slot", slot) }) transactionMetadata.OnPending(func() { - fmt.Printf("%s > [%s] MemPool.TransactionPending: %s\n", n.Name, engineName, transactionMetadata.ID()) + instance.LogTrace("MemPool.TransactionPending", "tx", transactionMetadata.ID()) }) }) } func (n *Node) attachEngineLogs(failOnBlockFiltered bool, instance *engine.Engine) { - engineName := fmt.Sprintf("%s 
- %s", lo.Cond(n.Protocol.MainEngineInstance() != instance, "Candidate", "Main"), instance.Name()[:8]) - - n.attachEngineLogsWithName(failOnBlockFiltered, instance, engineName) + n.attachEngineLogsWithName(failOnBlockFiltered, instance) } func (n *Node) Wait() { @@ -519,7 +471,7 @@ func (n *Node) Shutdown() { stopped := make(chan struct{}, 1) if n.Protocol != nil { - n.Protocol.HookStopped(func() { + n.Protocol.Stopped.OnTrigger(func() { close(stopped) }) } else { diff --git a/pkg/testsuite/mock/wallet_transactions.go b/pkg/testsuite/mock/wallet_transactions.go index f39e400ff..ab88d479a 100644 --- a/pkg/testsuite/mock/wallet_transactions.go +++ b/pkg/testsuite/mock/wallet_transactions.go @@ -44,7 +44,7 @@ func (w *Wallet) CreateAccountFromInput(transactionName string, inputName string signedTransaction := w.createSignedTransactionWithOptions( transactionName, WithCommitmentInput(&iotago.CommitmentInput{ - CommitmentID: w.Node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment().MustID(), + CommitmentID: w.Node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment().MustID(), }), WithInputs(utxoledger.Outputs{input}), WithOutputs(outputStates), @@ -90,7 +90,7 @@ func (w *Wallet) CreateDelegationFromInput(transactionName string, inputName str signedTransaction := w.createSignedTransactionWithOptions( transactionName, WithCommitmentInput(&iotago.CommitmentInput{ - CommitmentID: w.Node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment().MustID(), + CommitmentID: w.Node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment().MustID(), }), WithInputs(utxoledger.Outputs{input}), WithOutputs(outputStates), @@ -122,7 +122,7 @@ func (w *Wallet) DelayedClaimingTransition(transactionName string, inputName str signedTransaction := w.createSignedTransactionWithOptions( transactionName, WithCommitmentInput(&iotago.CommitmentInput{ - CommitmentID: w.Node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment().MustID(), + CommitmentID: w.Node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment().MustID(), }), WithInputs(utxoledger.Outputs{input}), WithOutputs(iotago.Outputs[iotago.Output]{delegationOutput}), @@ -152,7 +152,7 @@ func (w *Wallet) TransitionAccount(transactionName string, inputName string, opt AccountID: accountOutput.AccountID, }), WithCommitmentInput(&iotago.CommitmentInput{ - CommitmentID: w.Node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment().MustID(), + CommitmentID: w.Node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment().MustID(), }), WithOutputs(iotago.Outputs[iotago.Output]{accountOutput}), ) @@ -182,7 +182,7 @@ func (w *Wallet) DestroyAccount(transactionName string, inputName string) *iotag AccountID: inputAccount.AccountID, }), WithCommitmentInput(&iotago.CommitmentInput{ - CommitmentID: w.Node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment().MustID(), + CommitmentID: w.Node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment().MustID(), }), WithAccountInput(input), WithOutputs(destructionOutputs), @@ -251,7 +251,7 @@ func (w *Wallet) TransitionImplicitAccountToAccountOutput(transactionName string AccountID: implicitAccountID, }), WithCommitmentInput(&iotago.CommitmentInput{ - CommitmentID: w.Node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment().MustID(), + CommitmentID: 
w.Node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment().MustID(), }), WithInputs(utxoledger.Outputs{input}), WithOutputs(iotago.Outputs[iotago.Output]{accountOutput}), @@ -262,7 +262,7 @@ func (w *Wallet) TransitionImplicitAccountToAccountOutput(transactionName string } func (w *Wallet) CreateBasicOutputsEquallyFromInput(transactionName string, outputCount int, inputName string) *iotago.SignedTransaction { - apiForSlot := w.Node.Protocol.MainEngineInstance().APIForSlot(w.currentSlot) + apiForSlot := w.Node.Protocol.Engines.Main.Get().APIForSlot(w.currentSlot) manaDecayProvider := apiForSlot.ManaDecayProvider() storageScoreStructure := apiForSlot.StorageScoreStructure() @@ -322,7 +322,7 @@ func (w *Wallet) RemoveFeatureFromAccount(featureType iotago.FeatureType, transa AccountID: accountOutput.AccountID, }), WithCommitmentInput(&iotago.CommitmentInput{ - CommitmentID: w.Node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment().MustID(), + CommitmentID: w.Node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment().MustID(), }), WithOutputs(iotago.Outputs[iotago.Output]{accountOutput}), ) @@ -413,7 +413,7 @@ func (w *Wallet) ClaimValidatorRewards(transactionName string, inputName string) panic(fmt.Sprintf("output with alias %s is not *iotago.AccountOutput", inputName)) } - rewardMana, _, _, err := w.Node.Protocol.MainEngineInstance().SybilProtection.ValidatorReward( + rewardMana, _, _, err := w.Node.Protocol.Engines.Main.Get().SybilProtection.ValidatorReward( inputAccount.AccountID, inputAccount.FeatureSet().Staking().StakedAmount, inputAccount.FeatureSet().Staking().StartEpoch, @@ -443,7 +443,7 @@ func (w *Wallet) ClaimValidatorRewards(transactionName string, inputName string) AccountID: accountOutput.AccountID, }), WithCommitmentInput(&iotago.CommitmentInput{ - CommitmentID: w.Node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment().MustID(), + CommitmentID: w.Node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment().MustID(), }), WithOutputs(iotago.Outputs[iotago.Output]{accountOutput}), ) @@ -509,7 +509,7 @@ func (w *Wallet) ClaimDelegatorRewards(transactionName string, inputName string) delegationEnd = apiForSlot.TimeProvider().EpochFromSlot(futureBoundedSlotIndex) - iotago.EpochIndex(1) } - rewardMana, _, _, err := w.Node.Protocol.MainEngineInstance().SybilProtection.DelegatorReward( + rewardMana, _, _, err := w.Node.Protocol.Engines.Main.Get().SybilProtection.DelegatorReward( inputDelegation.ValidatorAddress.AccountID(), inputDelegation.DelegatedAmount, inputDelegation.StartEpoch, @@ -539,7 +539,7 @@ func (w *Wallet) ClaimDelegatorRewards(transactionName string, inputName string) rewardMana, ), WithCommitmentInput(&iotago.CommitmentInput{ - CommitmentID: w.Node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment().MustID(), + CommitmentID: w.Node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment().MustID(), }), WithOutputs(outputStates), ) diff --git a/pkg/testsuite/snapshotcreator/snapshotcreator.go b/pkg/testsuite/snapshotcreator/snapshotcreator.go index ef66af742..beb9e47c4 100644 --- a/pkg/testsuite/snapshotcreator/snapshotcreator.go +++ b/pkg/testsuite/snapshotcreator/snapshotcreator.go @@ -7,6 +7,7 @@ import ( "github.com/iotaledger/hive.go/ierrors" "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/log" "github.com/iotaledger/hive.go/runtime/options" 
"github.com/iotaledger/hive.go/runtime/workerpool" "github.com/iotaledger/iota-core/pkg/model" @@ -94,8 +95,9 @@ func CreateSnapshot(opts ...options.Option[Options]) error { } } - engineInstance := engine.New(workers.CreateGroup("Engine"), - errorHandler, + engineInstance := engine.New( + log.NewLogger("snapshot-creator"), + workers.CreateGroup("Engine"), s, presolidblockfilter.NewProvider(), postsolidblockfilter.NewProvider(), @@ -116,7 +118,7 @@ func CreateSnapshot(opts ...options.Option[Options]) error { trivialsyncmanager.NewProvider(), engine.WithSnapshotPath(""), // magic to disable loading snapshot ) - defer engineInstance.Shutdown() + defer engineInstance.Shutdown.Trigger() if opt.AddGenesisRootBlock { engineInstance.EvictionState.AddRootBlock(api.ProtocolParameters().GenesisBlockID(), genesisCommitment.ID()) diff --git a/pkg/testsuite/spenders.go b/pkg/testsuite/spenders.go index f2c41ec99..fceb808ae 100644 --- a/pkg/testsuite/spenders.go +++ b/pkg/testsuite/spenders.go @@ -13,7 +13,7 @@ func (t *TestSuite) AssertSpendersInCacheAcceptanceState(expectedConflictAliases for _, node := range nodes { for _, conflictAlias := range expectedConflictAliases { t.Eventually(func() error { - acceptanceState := node.Protocol.MainEngineInstance().Ledger.SpendDAG().AcceptanceState(ds.NewSet(t.DefaultWallet().TransactionID(conflictAlias))) + acceptanceState := node.Protocol.Engines.Main.Get().Ledger.SpendDAG().AcceptanceState(ds.NewSet(t.DefaultWallet().TransactionID(conflictAlias))) if acceptanceState != expectedState { return ierrors.Errorf("assertTransactionsInCacheWithFunc: %s: conflict %s is %s, but expected %s", node.Name, conflictAlias, acceptanceState, expectedState) diff --git a/pkg/testsuite/storage_accountdiffs.go b/pkg/testsuite/storage_accountdiffs.go index 75f79e9fa..e3d9ec9b3 100644 --- a/pkg/testsuite/storage_accountdiffs.go +++ b/pkg/testsuite/storage_accountdiffs.go @@ -15,7 +15,7 @@ func (t *TestSuite) AssertStorageAccountDiffs(slot iotago.SlotIndex, accountDiff for _, node := range nodes { for accountID, diffChange := range accountDiffs { t.Eventually(func() error { - store, err := node.Protocol.MainEngineInstance().Storage.AccountDiffs(slot) + store, err := node.Protocol.Engines.Main.Get().Storage.AccountDiffs(slot) if err != nil { return ierrors.Wrapf(err, "AssertStorageAccountDiffs: %s: failed to load accounts diff for slot %d", node.Name, slot) } diff --git a/pkg/testsuite/storage_blocks.go b/pkg/testsuite/storage_blocks.go index c96fb87f7..09c1afe30 100644 --- a/pkg/testsuite/storage_blocks.go +++ b/pkg/testsuite/storage_blocks.go @@ -10,7 +10,7 @@ import ( func (t *TestSuite) AssertStorageBlock(block *model.Block, node *mock.Node) { t.Eventually(func() error { - storage, err := node.Protocol.MainEngineInstance().Storage.Blocks(block.ID().Slot()) + storage, err := node.Protocol.Engines.Main.Get().Storage.Blocks(block.ID().Slot()) if err != nil { return ierrors.Errorf("AssertStorageBlock: %s: storage for %s is nil", node.Name, block.ID().Slot()) } @@ -37,7 +37,7 @@ func (t *TestSuite) AssertStorageBlockExist(block *model.Block, expectedExist bo t.AssertStorageBlock(block, node) } else { t.Eventually(func() error { - storage, err := node.Protocol.MainEngineInstance().Storage.Blocks(block.ID().Slot()) + storage, err := node.Protocol.Engines.Main.Get().Storage.Blocks(block.ID().Slot()) if err != nil { //nolint:nilerr // expected behavior return nil diff --git a/pkg/testsuite/storage_commitments.go b/pkg/testsuite/storage_commitments.go index aed96efb2..c515a6972 100644 --- 
a/pkg/testsuite/storage_commitments.go +++ b/pkg/testsuite/storage_commitments.go @@ -15,7 +15,7 @@ func (t *TestSuite) AssertStorageCommitments(commitments []*iotago.Commitment, n for _, node := range nodes { for _, commitment := range commitments { t.Eventually(func() error { - storedCommitment, err := node.Protocol.MainEngineInstance().Storage.Commitments().Load(commitment.Slot) + storedCommitment, err := node.Protocol.Engines.Main.Get().Storage.Commitments().Load(commitment.Slot) if err != nil { return ierrors.Wrapf(err, "AssertStorageCommitments: %s: error loading commitment: %s", node.Name, commitment.MustID()) } @@ -37,7 +37,7 @@ func (t *TestSuite) AssertEqualStoredCommitmentAtIndex(index iotago.SlotIndex, n var commitment *model.Commitment var commitmentNode *mock.Node for _, node := range nodes { - storedCommitment, err := node.Protocol.MainEngineInstance().Storage.Commitments().Load(index) + storedCommitment, err := node.Protocol.Engines.Main.Get().Storage.Commitments().Load(index) if err != nil { return ierrors.Wrapf(err, "AssertEqualStoredCommitmentAtIndex: %s: error loading commitment for slot: %d", node.Name, index) } @@ -63,12 +63,12 @@ func (t *TestSuite) AssertStorageCommitmentBlocks(slot iotago.SlotIndex, expecte for _, node := range nodes { t.Eventually(func() error { - storedCommitment, err := node.Protocol.MainEngineInstance().Storage.Commitments().Load(slot) + storedCommitment, err := node.Protocol.Engines.Main.Get().Storage.Commitments().Load(slot) if err != nil { return ierrors.Wrapf(err, "AssertStorageCommitmentBlocks: %s: error loading commitment for slot: %d", node.Name, slot) } - committedSlot, err := node.Protocol.MainEngineInstance().CommittedSlot(storedCommitment.ID()) + committedSlot, err := node.Protocol.Engines.Main.Get().CommittedSlot(storedCommitment.ID()) if err != nil { return ierrors.Wrapf(err, "AssertStorageCommitmentBlocks: %s: error getting committed slot for commitment: %s", node.Name, storedCommitment.ID()) } diff --git a/pkg/testsuite/storage_prunable.go b/pkg/testsuite/storage_prunable.go index 8d7598bf6..f5fa36271 100644 --- a/pkg/testsuite/storage_prunable.go +++ b/pkg/testsuite/storage_prunable.go @@ -26,7 +26,7 @@ func (t *TestSuite) AssertPrunedUntil(expectedStorage *types.Tuple[int, bool], for _, node := range nodes { t.Eventually(func() error { - if err := t.assertPrunedUntil(node.Protocol.MainEngineInstance().Storage, expectedStorage, expectedDecidedUpgrades, expectedPoolStats, expectedCommittee, expectedRewards); err != nil { + if err := t.assertPrunedUntil(node.Protocol.Engines.Main.Get().Storage, expectedStorage, expectedDecidedUpgrades, expectedPoolStats, expectedCommittee, expectedRewards); err != nil { return ierrors.Wrapf(err, "AssertPrunedSlot: %s", node.Name) } diff --git a/pkg/testsuite/storage_rootblocks.go b/pkg/testsuite/storage_rootblocks.go index 60b721ddf..0884ba71f 100644 --- a/pkg/testsuite/storage_rootblocks.go +++ b/pkg/testsuite/storage_rootblocks.go @@ -12,7 +12,7 @@ func (t *TestSuite) AssertStorageRootBlocks(blocks []*blocks.Block, nodes ...*mo for _, node := range nodes { for _, block := range blocks { t.Eventually(func() error { - storage, err := node.Protocol.MainEngineInstance().Storage.RootBlocks(block.ID().Slot()) + storage, err := node.Protocol.Engines.Main.Get().Storage.RootBlocks(block.ID().Slot()) if err != nil { return ierrors.Errorf("AssertStorageRootBlocks: %s: error loading root blocks for %s: %v", node.Name, block.ID().Slot(), err) } diff --git a/pkg/testsuite/storage_settings.go 
b/pkg/testsuite/storage_settings.go index 19e244ed6..7345b1f52 100644 --- a/pkg/testsuite/storage_settings.go +++ b/pkg/testsuite/storage_settings.go @@ -11,8 +11,8 @@ func (t *TestSuite) AssertSnapshotImported(imported bool, nodes ...*mock.Node) { for _, node := range nodes { t.Eventually(func() error { - if imported != node.Protocol.MainEngineInstance().Storage.Settings().IsSnapshotImported() { - return ierrors.Errorf("AssertSnapshotImported: %s: expected %v, got %v", node.Name, imported, node.Protocol.MainEngineInstance().Storage.Settings().IsSnapshotImported()) + if imported != node.Protocol.Engines.Main.Get().Storage.Settings().IsSnapshotImported() { + return ierrors.Errorf("AssertSnapshotImported: %s: expected %v, got %v", node.Name, imported, node.Protocol.Engines.Main.Get().Storage.Settings().IsSnapshotImported()) } return nil @@ -39,8 +39,8 @@ func (t *TestSuite) AssertLatestCommitment(commitment *iotago.Commitment, nodes for _, node := range nodes { t.Eventually(func() error { - if !commitment.Equals(node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment()) { - return ierrors.Errorf("AssertLatestCommitment: %s: expected %s, got %s", node.Name, commitment, node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment()) + if !commitment.Equals(node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment()) { + return ierrors.Errorf("AssertLatestCommitment: %s: expected %s, got %s", node.Name, commitment, node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment()) } return nil @@ -53,11 +53,11 @@ func (t *TestSuite) AssertCommitmentSlotIndexExists(slot iotago.SlotIndex, nodes for _, node := range nodes { t.Eventually(func() error { - if node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().ID().Slot() < slot { + if node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().ID().Slot() < slot { return ierrors.Errorf("AssertCommitmentSlotIndexExists: %s: commitment with at least %v not found in settings.LatestCommitment()", node.Name, slot) } - cm, err := node.Protocol.MainEngineInstance().Storage.Commitments().Load(slot) + cm, err := node.Protocol.Engines.Main.Get().Storage.Commitments().Load(slot) if err != nil { return ierrors.Errorf("AssertCommitmentSlotIndexExists: %s: expected %v, got error %v", node.Name, slot, err) } @@ -67,7 +67,7 @@ func (t *TestSuite) AssertCommitmentSlotIndexExists(slot iotago.SlotIndex, nodes } // Make sure the commitment is also available in the ChainManager. 
- if node.Protocol.ChainManager.RootCommitment().Chain().LatestCommitment().ID().Slot() < slot { + if node.Protocol.Chains.Main.Get().LatestCommitment.Get().ID().Slot() < slot { return ierrors.Errorf("AssertCommitmentSlotIndexExists: %s: commitment at index %v not found in ChainManager", node.Name, slot) } @@ -81,7 +81,7 @@ func (t *TestSuite) AssertLatestCommitmentSlotIndex(slot iotago.SlotIndex, nodes for _, node := range nodes { t.Eventually(func() error { - latestCommittedSlot := node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Slot() + latestCommittedSlot := node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Slot() if slot != latestCommittedSlot { return ierrors.Errorf("AssertLatestCommitmentSlotIndex: %s: expected %v, got %v", node.Name, slot, latestCommittedSlot) } @@ -96,8 +96,8 @@ func (t *TestSuite) AssertLatestCommitmentCumulativeWeight(cw uint64, nodes ...* for _, node := range nodes { t.Eventually(func() error { - if cw != node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().CumulativeWeight() { - return ierrors.Errorf("AssertLatestCommitmentCumulativeWeight: %s: expected %v, got %v", node.Name, cw, node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().CumulativeWeight()) + if cw != node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().CumulativeWeight() { + return ierrors.Errorf("AssertLatestCommitmentCumulativeWeight: %s: expected %v, got %v", node.Name, cw, node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().CumulativeWeight()) } return nil @@ -110,8 +110,8 @@ func (t *TestSuite) AssertLatestFinalizedSlot(slot iotago.SlotIndex, nodes ...*m for _, node := range nodes { t.Eventually(func() error { - if slot != node.Protocol.MainEngineInstance().Storage.Settings().LatestFinalizedSlot() { - return ierrors.Errorf("AssertLatestFinalizedSlot: %s: expected %d, got %d from settings", node.Name, slot, node.Protocol.MainEngineInstance().Storage.Settings().LatestFinalizedSlot()) + if slot != node.Protocol.Engines.Main.Get().Storage.Settings().LatestFinalizedSlot() { + return ierrors.Errorf("AssertLatestFinalizedSlot: %s: expected %d, got %d from settings", node.Name, slot, node.Protocol.Engines.Main.Get().Storage.Settings().LatestFinalizedSlot()) } return nil @@ -124,8 +124,9 @@ func (t *TestSuite) AssertChainID(expectedChainID iotago.CommitmentID, nodes ... 
for _, node := range nodes { t.Eventually(func() error { - actualChainID := node.Protocol.MainEngineInstance().ChainID() - if expectedChainID != node.Protocol.MainEngineInstance().ChainID() { + actualChainID := node.Protocol.Chains.Main.Get().ForkingPoint.Get().ID() + + if expectedChainID != actualChainID { return ierrors.Errorf("AssertChainID: %s: expected %s (index: %d), got %s (index: %d)", node.Name, expectedChainID, expectedChainID.Slot(), actualChainID, actualChainID.Slot()) } diff --git a/pkg/testsuite/sybilprotection.go b/pkg/testsuite/sybilprotection.go index 292b04ef9..187a7d001 100644 --- a/pkg/testsuite/sybilprotection.go +++ b/pkg/testsuite/sybilprotection.go @@ -17,7 +17,7 @@ func (t *TestSuite) AssertSybilProtectionCommittee(epoch iotago.EpochIndex, expe for _, node := range nodes { t.Eventually(func() error { - committeeInEpoch, exists := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInEpoch(epoch) + committeeInEpoch, exists := node.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInEpoch(epoch) if !exists { return ierrors.Errorf("AssertSybilProtectionCommittee: %s: failed to get committee in epoch %d", node.Name, epoch) } @@ -46,7 +46,7 @@ func (t *TestSuite) AssertSybilProtectionCandidates(epoch iotago.EpochIndex, exp for _, node := range nodes { t.Eventually(func() error { - candidates, err := node.Protocol.MainEngineInstance().SybilProtection.EligibleValidators(epoch) + candidates, err := node.Protocol.Engines.Main.Get().SybilProtection.EligibleValidators(epoch) candidateIDs := lo.Map(candidates, func(candidate *accounts.AccountData) iotago.AccountID { return candidate.ID }) @@ -70,7 +70,7 @@ func (t *TestSuite) AssertSybilProtectionOnlineCommittee(expectedSeats []account for _, node := range nodes { t.Eventually(func() error { - seats := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().OnlineCommittee().ToSlice() + seats := node.Protocol.Engines.Main.Get().SybilProtection.SeatManager().OnlineCommittee().ToSlice() if !assert.ElementsMatch(t.fakeTesting, expectedSeats, seats) { return ierrors.Errorf("AssertSybilProtectionOnlineCommittee: %s: expected %v, got %v", node.Name, expectedSeats, seats) } diff --git a/pkg/testsuite/testsuite.go b/pkg/testsuite/testsuite.go index 76d7ecc75..fc8d74c88 100644 --- a/pkg/testsuite/testsuite.go +++ b/pkg/testsuite/testsuite.go @@ -2,6 +2,8 @@ package testsuite import ( "fmt" + "log/slog" + "os" "strings" "sync/atomic" "testing" @@ -14,6 +16,7 @@ import ( "github.com/iotaledger/hive.go/ds/orderedmap" "github.com/iotaledger/hive.go/ds/shrinkingmap" "github.com/iotaledger/hive.go/lo" + "github.com/iotaledger/hive.go/log" "github.com/iotaledger/hive.go/runtime/options" "github.com/iotaledger/hive.go/runtime/syncutils" "github.com/iotaledger/iota-core/pkg/core/account" @@ -110,6 +113,7 @@ type TestSuite struct { optsSnapshotOptions []options.Option[snapshotcreator.Options] optsWaitFor time.Duration optsTick time.Duration + optsLogHandler slog.Handler uniqueBlockTimeCounter atomic.Int64 automaticTransactionIssuingCounters shrinkingmap.ShrinkingMap[string, int] @@ -131,8 +135,9 @@ func NewTestSuite(testingT *testing.T, opts ...options.Option[TestSuite]) *TestS blocks: shrinkingmap.New[string, *blocks.Block](), automaticTransactionIssuingCounters: *shrinkingmap.New[string, int](), - optsWaitFor: durationFromEnvOrDefault(5*time.Second, "CI_UNIT_TESTS_WAIT_FOR"), - optsTick: durationFromEnvOrDefault(2*time.Millisecond, "CI_UNIT_TESTS_TICK"), + optsWaitFor: 
durationFromEnvOrDefault(5*time.Second, "CI_UNIT_TESTS_WAIT_FOR"), + optsTick: durationFromEnvOrDefault(2*time.Millisecond, "CI_UNIT_TESTS_TICK"), + optsLogHandler: log.NewTextHandler(os.Stdout), }, opts, func(t *TestSuite) { fmt.Println("Setup TestSuite -", testingT.Name(), " @ ", time.Now()) @@ -322,7 +327,7 @@ func (t *TestSuite) SeatOfNodes(slot iotago.SlotIndex, names ...string) []accoun nodes := t.Nodes(names...) return lo.Map(nodes, func(node *mock.Node) account.SeatIndex { - seatedAccounts, exists := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(slot) + seatedAccounts, exists := node.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(slot) require.True(t.Testing, exists, "node %s: committee at slot %d does not exist", node.Name, slot) seat, exists := seatedAccounts.GetSeat(node.Validator.AccountID) @@ -371,7 +376,7 @@ func (t *TestSuite) addNodeToPartition(name string, partition string, validator panic(fmt.Sprintf("cannot add validator node %s to partition %s: framework already running", name, partition)) } - node := mock.NewNode(t.Testing, t.network, partition, name, validator) + node := mock.NewNode(t.Testing, t.network, partition, name, validator, t.optsLogHandler) t.nodes.Set(name, node) node.SetCurrentSlot(t.currentSlot) @@ -559,7 +564,7 @@ func (t *TestSuite) Run(failOnBlockFiltered bool, nodesOptions ...map[string][]o if _, firstNode, exists := t.nodes.Head(); exists { t.wallets.ForEach(func(_ string, wallet *mock.Wallet) bool { - if err := firstNode.Protocol.MainEngineInstance().Ledger.ForEachUnspentOutput(func(output *utxoledger.Output) bool { + if err := firstNode.Protocol.Engines.Main.Get().Ledger.ForEachUnspentOutput(func(output *utxoledger.Output) bool { wallet.AddOutput(fmt.Sprintf("Genesis:%d", output.OutputID().Index()), output) return true }); err != nil { diff --git a/pkg/testsuite/testsuite_issue_blocks.go b/pkg/testsuite/testsuite_issue_blocks.go index 920baa482..65d9e59f4 100644 --- a/pkg/testsuite/testsuite_issue_blocks.go +++ b/pkg/testsuite/testsuite_issue_blocks.go @@ -149,7 +149,7 @@ func (t *TestSuite) issueBlockRow(prefix string, row int, parentsPrefix string, var b *blocks.Block // Only issue validator blocks if account has staking feature and is part of committee. - if node.Validator != nil && lo.Return1(node.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(t.currentSlot)).HasAccount(node.Validator.AccountID) { + if node.Validator != nil && lo.Return1(node.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(t.currentSlot)).HasAccount(node.Validator.AccountID) { blockHeaderOptions := append(issuingOptionsCopy[node.Name], mock.WithIssuingTime(issuingTime)) t.assertParentsCommitmentExistFromBlockOptions(blockHeaderOptions, node) t.assertParentsExistFromBlockOptions(blockHeaderOptions, node) @@ -220,7 +220,7 @@ func (t *TestSuite) IssueBlocksAtSlots(prefix string, slots []iotago.SlotIndex, if useCommitmentAtMinCommittableAge { // Make sure that all nodes create blocks throughout the slot that commit to the same commitment at slot-minCommittableAge-1. 
 				for _, node := range nodes {
-					commitment, err := node.Protocol.MainEngineInstance().Storage.Commitments().Load(commitmentSlot)
+					commitment, err := node.Protocol.Engines.Main.Get().Storage.Commitments().Load(commitmentSlot)
 					require.NoError(t.Testing, err)
 					issuingOptions[node.Name] = []options.Option[mock.BlockHeaderParams]{
@@ -263,7 +263,7 @@ func (t *TestSuite) CommitUntilSlot(slot iotago.SlotIndex, parents ...iotago.Blo
 	// then issue one more block to accept the last in the chain which will trigger commitment of the second last in the chain
 	activeValidators := t.Validators()
-	latestCommittedSlot := activeValidators[0].Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Slot()
+	latestCommittedSlot := activeValidators[0].Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Slot()
 	if latestCommittedSlot >= slot {
 		return parents
 	}
@@ -274,21 +274,21 @@ func (t *TestSuite) CommitUntilSlot(slot iotago.SlotIndex, parents ...iotago.Blo
 		// preacceptance of nextBlockSlot
 		for _, node := range activeValidators {
 			require.True(t.Testing, node.IsValidator(), "node: %s: is not a validator node", node.Name)
-			committeeAtBlockSlot, exists := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(t.currentSlot)
+			committeeAtBlockSlot, exists := node.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(t.currentSlot)
 			require.True(t.Testing, exists, "node: %s: does not have committee selected for slot %d", node.Name, t.currentSlot)
 			if committeeAtBlockSlot.HasAccount(node.Validator.AccountID) {
 				blockName := fmt.Sprintf("chain-%s-%d-%s", parents[0].Alias(), chainIndex, node.Name)
-				latestCommitment := node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment()
+				latestCommitment := node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment()
 				tips = []iotago.BlockID{t.IssueValidationBlockWithHeaderOptions(blockName, node, mock.WithSlotCommitment(latestCommitment), mock.WithStrongParents(tips...)).ID()}
 			}
 		}
 		// acceptance of nextBlockSlot
 		for _, node := range activeValidators {
-			committeeAtBlockSlot, exists := node.Protocol.MainEngineInstance().SybilProtection.SeatManager().CommitteeInSlot(t.currentSlot)
+			committeeAtBlockSlot, exists := node.Protocol.Engines.Main.Get().SybilProtection.SeatManager().CommitteeInSlot(t.currentSlot)
 			require.True(t.Testing, exists, "node: %s: does not have committee selected for slot %d", node.Name, t.currentSlot)
 			if committeeAtBlockSlot.HasAccount(node.Validator.AccountID) {
 				blockName := fmt.Sprintf("chain-%s-%d-%s", parents[0].Alias(), chainIndex+1, node.Name)
-				latestCommitment := node.Protocol.MainEngineInstance().Storage.Settings().LatestCommitment().Commitment()
+				latestCommitment := node.Protocol.Engines.Main.Get().Storage.Settings().LatestCommitment().Commitment()
 				tips = []iotago.BlockID{t.IssueValidationBlockWithHeaderOptions(blockName, node, mock.WithSlotCommitment(latestCommitment), mock.WithStrongParents(tips...)).ID()}
 			}
 		}
diff --git a/pkg/testsuite/testsuite_options.go b/pkg/testsuite/testsuite_options.go
index 0f3991013..5f5d9aab9 100644
--- a/pkg/testsuite/testsuite_options.go
+++ b/pkg/testsuite/testsuite_options.go
@@ -1,6 +1,7 @@
 package testsuite
 import (
+	"log/slog"
 	"os"
 	"time"
@@ -39,6 +40,12 @@ func WithProtocolParametersOptions(protocolParameterOptions ...options.Option[io
 	}
 }
+func WithLogHandler(logHandler slog.Handler) options.Option[TestSuite] {
+	return func(t *TestSuite) {
+		t.optsLogHandler = logHandler
+	}
+}
+
 func GenesisTimeWithOffsetBySlots(slots iotago.SlotIndex, slotDurationInSeconds uint8) int64 {
 	return time.Now().Truncate(time.Duration(slotDurationInSeconds)*time.Second).Unix() - int64(slotDurationInSeconds)*int64(slots)
 }
diff --git a/pkg/testsuite/tips.go b/pkg/testsuite/tips.go
index 9abd91a96..cc5b4f5ff 100644
--- a/pkg/testsuite/tips.go
+++ b/pkg/testsuite/tips.go
@@ -17,7 +17,7 @@ func (t *TestSuite) AssertStrongTips(expectedBlocks []*blocks.Block, nodes ...*m
 	for _, node := range nodes {
 		t.Eventually(func() error {
-			storedTipsBlocks := node.Protocol.MainEngineInstance().TipManager.StrongTips()
+			storedTipsBlocks := node.Protocol.Engines.Main.Get().TipManager.StrongTips()
 			storedTipsBlockIDs := lo.Map(storedTipsBlocks, tipmanager.TipMetadata.ID)
 			if !assert.ElementsMatch(t.fakeTesting, expectedBlockIDs, storedTipsBlockIDs) {
diff --git a/pkg/testsuite/transactions.go b/pkg/testsuite/transactions.go
index b3f01789e..26bd32a82 100644
--- a/pkg/testsuite/transactions.go
+++ b/pkg/testsuite/transactions.go
@@ -19,7 +19,7 @@ func (t *TestSuite) AssertTransaction(transaction *iotago.Transaction, node *moc
 	t.Eventually(func() error {
 		var exists bool
-		loadedTransactionMetadata, exists = node.Protocol.MainEngineInstance().Ledger.TransactionMetadata(transactionID)
+		loadedTransactionMetadata, exists = node.Protocol.Engines.Main.Get().Ledger.TransactionMetadata(transactionID)
 		if !exists {
 			return ierrors.Errorf("AssertTransaction: %s: transaction %s does not exist", node.Name, transactionID)
 		}
@@ -61,7 +61,7 @@ func (t *TestSuite) AssertTransactionsExist(transactions []*iotago.Transaction,
 				t.AssertTransaction(transaction, node)
 			} else {
 				t.Eventually(func() error {
-					if lo.Return2(node.Protocol.MainEngineInstance().Ledger.TransactionMetadata(transactionID)) {
+					if lo.Return2(node.Protocol.Engines.Main.Get().Ledger.TransactionMetadata(transactionID)) {
 						return ierrors.Errorf("AssertTransactionsExist: %s: transaction %s exists but should not", node.Name, transactionID)
 					}
@@ -84,7 +84,7 @@ func (t *TestSuite) assertTransactionsInCacheWithFunc(expectedTransactions []*io
 		require.NoError(t.Testing, err)
 		t.Eventually(func() error {
-			blockFromCache, exists := node.Protocol.MainEngineInstance().Ledger.TransactionMetadata(transactionID)
+			blockFromCache, exists := node.Protocol.Engines.Main.Get().Ledger.TransactionMetadata(transactionID)
 			if !exists {
 				return ierrors.Errorf("assertTransactionsInCacheWithFunc: %s: transaction %s does not exist", node.Name, transactionID)
 			}
@@ -136,7 +136,7 @@ func (t *TestSuite) AssertTransactionInCacheConflicts(transactionConflicts map[*
 		require.NoError(t.Testing, err)
 		t.Eventually(func() error {
-			transactionFromCache, exists := node.Protocol.MainEngineInstance().Ledger.TransactionMetadata(transactionID)
+			transactionFromCache, exists := node.Protocol.Engines.Main.Get().Ledger.TransactionMetadata(transactionID)
 			if !exists {
 				return ierrors.Errorf("AssertTransactionInCacheConflicts: %s: block %s does not exist", node.Name, transactionID)
 			}
diff --git a/pkg/testsuite/upgrades.go b/pkg/testsuite/upgrades.go
index cb79bfa36..5ecfa1a33 100644
--- a/pkg/testsuite/upgrades.go
+++ b/pkg/testsuite/upgrades.go
@@ -14,7 +14,7 @@ func (t *TestSuite) AssertEpochVersions(epochVersions map[iotago.Version]iotago.
 		t.Eventually(func() error {
 			for version, expectedEpoch := range epochVersions {
-				epochForVersion, exists := node.Protocol.MainEngineInstance().Storage.Settings().APIProvider().EpochForVersion(version)
+				epochForVersion, exists := node.Protocol.Engines.Main.Get().Storage.Settings().APIProvider().EpochForVersion(version)
 				if !exists {
 					return ierrors.Errorf("AssertEpochVersions: %s: version %d not found", node.Name, version)
 				}
@@ -36,7 +36,7 @@ func (t *TestSuite) AssertVersionAndProtocolParameters(versionsAndProtocolParame
 		t.Eventually(func() error {
 			for version, expectedProtocolParameters := range versionsAndProtocolParameters {
-				protocolParameters := node.Protocol.MainEngineInstance().Storage.Settings().APIProvider().ProtocolParameters(version)
+				protocolParameters := node.Protocol.Engines.Main.Get().Storage.Settings().APIProvider().ProtocolParameters(version)
 				if expectedProtocolParameters == nil {
 					if protocolParameters != nil {
@@ -71,7 +71,7 @@ func (t *TestSuite) AssertVersionAndProtocolParametersHashes(versionsAndProtocol
 		t.Eventually(func() error {
 			for version, expectedProtocolParametersHash := range versionsAndProtocolParametersHashes {
-				protocolParametersHash := node.Protocol.MainEngineInstance().Storage.Settings().APIProvider().ProtocolParametersHash(version)
+				protocolParametersHash := node.Protocol.Engines.Main.Get().Storage.Settings().APIProvider().ProtocolParametersHash(version)
 				if expectedProtocolParametersHash == iotago.EmptyIdentifier {
 					if protocolParametersHash != iotago.EmptyIdentifier {
diff --git a/tools/gendoc/go.mod b/tools/gendoc/go.mod
index 0cd85a09e..371d83dfa 100644
--- a/tools/gendoc/go.mod
+++ b/tools/gendoc/go.mod
@@ -5,7 +5,7 @@ go 1.21
 replace github.com/iotaledger/iota-core => ../../
 require (
-	github.com/iotaledger/hive.go/app v0.0.0-20231127134220-90b88e35bdb2
+	github.com/iotaledger/hive.go/app v0.0.0-20231130155327-398db92f09a3
 	github.com/iotaledger/hive.go/apputils v0.0.0-20230829152614-7afc7a4d89b3
 	github.com/iotaledger/iota-core v0.0.0-00010101000000-000000000000
 )
@@ -18,7 +18,6 @@ require (
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/containerd/cgroups v1.1.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
@@ -57,19 +56,19 @@ require (
 	github.com/huin/goupnp v1.3.0 // indirect
 	github.com/iancoleman/orderedmap v0.3.0 // indirect
 	github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 // indirect
-	github.com/iotaledger/hive.go/ads v0.0.0-20231127134220-90b88e35bdb2 // indirect
-	github.com/iotaledger/hive.go/constraints v0.0.0-20231128121006-331a9e522dfe // indirect
-	github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231127134220-90b88e35bdb2 // indirect
-	github.com/iotaledger/hive.go/crypto v0.0.0-20231128121006-331a9e522dfe // indirect
-	github.com/iotaledger/hive.go/ds v0.0.0-20231127134220-90b88e35bdb2 // indirect
-	github.com/iotaledger/hive.go/ierrors v0.0.0-20231128121006-331a9e522dfe // indirect
-	github.com/iotaledger/hive.go/kvstore v0.0.0-20231127134220-90b88e35bdb2 // indirect
-	github.com/iotaledger/hive.go/lo v0.0.0-20231128121006-331a9e522dfe // indirect
-	github.com/iotaledger/hive.go/log v0.0.0-20231127134220-90b88e35bdb2 // indirect
-	github.com/iotaledger/hive.go/logger v0.0.0-20231127134220-90b88e35bdb2 // indirect
-	github.com/iotaledger/hive.go/runtime v0.0.0-20231128121006-331a9e522dfe // indirect
-	github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231127134220-90b88e35bdb2 // indirect
-	github.com/iotaledger/hive.go/stringify v0.0.0-20231128121006-331a9e522dfe // indirect
+	github.com/iotaledger/hive.go/ads v0.0.0-20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/constraints v0.0.0-20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/crypto v0.0.0-20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/ds v0.0.0-20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/ierrors v0.0.0-20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/kvstore v0.0.0-20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/lo v0.0.0-20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/log v0.0.0-20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/logger v0.0.0-20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/runtime v0.0.0-20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/stringify v0.0.0-20231130155327-398db92f09a3 // indirect
 	github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231201123347-1c44b3f24221 // indirect
 	github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231201114846-3bb5c3fd5665 // indirect
 	github.com/iotaledger/iota.go/v4 v4.0.0-20231204142547-416c9a87403d // indirect
@@ -134,7 +133,6 @@ require (
 	github.com/pelletier/go-toml/v2 v2.1.0 // indirect
 	github.com/petermattis/goid v0.0.0-20230904192822-1876fd5063bc // indirect
 	github.com/pkg/errors v0.9.1 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/pokt-network/smt v0.6.1 // indirect
 	github.com/polydawn/refmt v0.89.0 // indirect
 	github.com/prometheus/client_golang v1.17.0 // indirect
@@ -150,7 +148,6 @@ require (
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/spf13/cast v1.5.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/stretchr/testify v1.8.4 // indirect
 	github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e // indirect
 	github.com/valyala/bytebufferpool v1.0.0 // indirect
 	github.com/valyala/fasttemplate v1.2.2 // indirect
@@ -182,6 +179,5 @@ require (
 	google.golang.org/grpc v1.59.0 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
 	lukechampine.com/blake3 v1.2.1 // indirect
 )
diff --git a/tools/gendoc/go.sum b/tools/gendoc/go.sum
index 547b95422..9852401fd 100644
--- a/tools/gendoc/go.sum
+++ b/tools/gendoc/go.sum
@@ -277,36 +277,36 @@ github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJ
 github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
 github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 h1:dTrD7X2PTNgli6EbS4tV9qu3QAm/kBU3XaYZV2xdzys=
 github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7/go.mod h1:ZRdPu684P0fQ1z8sXz4dj9H5LWHhz4a9oCtvjunkSrw=
-github.com/iotaledger/hive.go/ads v0.0.0-20231127134220-90b88e35bdb2 h1:0FynHsnJTZgxQuXk3/maXNgzyvbwQ+TnuiwY48kYSr4=
-github.com/iotaledger/hive.go/ads v0.0.0-20231127134220-90b88e35bdb2/go.mod h1:gbUvr01B5ha530GnNm8K2OsHXOd2BtzBYOMxyTX3iDg=
-github.com/iotaledger/hive.go/app v0.0.0-20231127134220-90b88e35bdb2 h1:WI6MQCxeANDyO7fOTovefuIusma+wT8VUJ3BisQLZEA=
-github.com/iotaledger/hive.go/app v0.0.0-20231127134220-90b88e35bdb2/go.mod h1:+riYmeLApkLlj4+EpuJpEJAsj/KGfD7cqLGy7oTsPOM=
+github.com/iotaledger/hive.go/ads v0.0.0-20231130155327-398db92f09a3 h1:VLm18jYzB0wFceCXIfsbKzTwl3TDvCgRp4wJ/xMgXWM=
+github.com/iotaledger/hive.go/ads v0.0.0-20231130155327-398db92f09a3/go.mod h1:gbUvr01B5ha530GnNm8K2OsHXOd2BtzBYOMxyTX3iDg=
+github.com/iotaledger/hive.go/app v0.0.0-20231130155327-398db92f09a3 h1:O7okRQP8g8a9gvUDG/lzZ47MgaSGqbOqr98rvkT+NFE=
+github.com/iotaledger/hive.go/app v0.0.0-20231130155327-398db92f09a3/go.mod h1:+riYmeLApkLlj4+EpuJpEJAsj/KGfD7cqLGy7oTsPOM=
 github.com/iotaledger/hive.go/apputils v0.0.0-20230829152614-7afc7a4d89b3 h1:4aVJTc0KS77uEw0Tny4r0n1ORwcbAQDECaCclgf/6lE=
 github.com/iotaledger/hive.go/apputils v0.0.0-20230829152614-7afc7a4d89b3/go.mod h1:TZeAqieDu+xDOZp2e9+S+8pZp1PrfgcwLUnxmd8IgLU=
-github.com/iotaledger/hive.go/constraints v0.0.0-20231128121006-331a9e522dfe h1:vgJj9kXM1IkLjbjWOV565Vil+RlzJwVhxG/KebMmrKE=
-github.com/iotaledger/hive.go/constraints v0.0.0-20231128121006-331a9e522dfe/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s=
-github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231127134220-90b88e35bdb2 h1:ZL4cGO4zy7IwCIfHQgpvu3yMbNnFFRvSvTqaZM5Uj5U=
-github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231127134220-90b88e35bdb2/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko=
-github.com/iotaledger/hive.go/crypto v0.0.0-20231128121006-331a9e522dfe h1:/+Qw7fe5oSE5Jxm8RPzcfBKHbwjJ/zwHu3UWdSOvKW8=
-github.com/iotaledger/hive.go/crypto v0.0.0-20231128121006-331a9e522dfe/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI=
-github.com/iotaledger/hive.go/ds v0.0.0-20231127134220-90b88e35bdb2 h1:8YQlcFMexyYvjh3V/YSYzldeYjaDZd+1mHt8SUh8Uqs=
-github.com/iotaledger/hive.go/ds v0.0.0-20231127134220-90b88e35bdb2/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc=
-github.com/iotaledger/hive.go/ierrors v0.0.0-20231128121006-331a9e522dfe h1:kVkjbdBANpA8tyu9RM4/GeyVoyRfGcb4LT96PqHTIWc=
-github.com/iotaledger/hive.go/ierrors v0.0.0-20231128121006-331a9e522dfe/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8=
-github.com/iotaledger/hive.go/kvstore v0.0.0-20231127134220-90b88e35bdb2 h1:b+AHpClIb7YAjpXgRHCsr+DRdBMuN4Q6k/wpFmT1wok=
-github.com/iotaledger/hive.go/kvstore v0.0.0-20231127134220-90b88e35bdb2/go.mod h1:ytfKoHr/nF8u0y0G4mamfG0yjFtJiJVk0kgjnPOtsSY=
-github.com/iotaledger/hive.go/lo v0.0.0-20231128121006-331a9e522dfe h1:dQBQ+ZOVwC6KJxTABHMMHjJto70gNU5Cn4dXeJp5xmM=
-github.com/iotaledger/hive.go/lo v0.0.0-20231128121006-331a9e522dfe/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg=
-github.com/iotaledger/hive.go/log v0.0.0-20231127134220-90b88e35bdb2 h1:3B6UFIJ+IJEiGmUbLt5+Zokv0i8RUs70IuSR9sB60DA=
-github.com/iotaledger/hive.go/log v0.0.0-20231127134220-90b88e35bdb2/go.mod h1:vzO4/wRkEJDEZb/9fD10oKU9k1bj4qLir2Uhl5U1FkM=
-github.com/iotaledger/hive.go/logger v0.0.0-20231127134220-90b88e35bdb2 h1:1r4fY+R9p2q5CzEkiXMuFr/UCM8RX3yPUllXkjm5/Fk=
-github.com/iotaledger/hive.go/logger v0.0.0-20231127134220-90b88e35bdb2/go.mod h1:w1psHM2MuKsen1WdsPKrpqElYH7ZOQ+YdQIgJZg4HTo=
-github.com/iotaledger/hive.go/runtime v0.0.0-20231128121006-331a9e522dfe h1:jewR0RJ7oTGWjzhTROdIwhMeBH4//frUHizKs/6Em+s=
-github.com/iotaledger/hive.go/runtime v0.0.0-20231128121006-331a9e522dfe/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q=
-github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231127134220-90b88e35bdb2 h1:vTx/tPH+//CQcDjdC8DZv3s6x9KCqAfTqn3VTjYUUlw=
-github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231127134220-90b88e35bdb2/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8=
-github.com/iotaledger/hive.go/stringify v0.0.0-20231128121006-331a9e522dfe h1:RcFUqhnJ+86+sA0XMrZ0q+086ULrdWQkWrjUt2OnJK4=
-github.com/iotaledger/hive.go/stringify v0.0.0-20231128121006-331a9e522dfe/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs=
+github.com/iotaledger/hive.go/constraints v0.0.0-20231130155327-398db92f09a3 h1:qJe/BR6FpAM2tsoJBTbLSeQERjnxidNBZuJasvYhR7s=
+github.com/iotaledger/hive.go/constraints v0.0.0-20231130155327-398db92f09a3/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s=
+github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231130155327-398db92f09a3 h1:MsvSJAMWne3QUuU6Vo0jeVEt91foSZiuEM4xEyV79X4=
+github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231130155327-398db92f09a3/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko=
+github.com/iotaledger/hive.go/crypto v0.0.0-20231130155327-398db92f09a3 h1:MNkwHDOeIUb62KbbsSCKwiWYHdzJM+6sNZ45AVhPmiA=
+github.com/iotaledger/hive.go/crypto v0.0.0-20231130155327-398db92f09a3/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI=
+github.com/iotaledger/hive.go/ds v0.0.0-20231130155327-398db92f09a3 h1:gWSJjaIIRvYG778ITT4V9MVqxEuGy1ohE10Awx6HiRg=
+github.com/iotaledger/hive.go/ds v0.0.0-20231130155327-398db92f09a3/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc=
+github.com/iotaledger/hive.go/ierrors v0.0.0-20231130155327-398db92f09a3 h1:4YjvERr9WQVwPjKirblj2grnwTzKBNLrT5KaHMZRFBQ=
+github.com/iotaledger/hive.go/ierrors v0.0.0-20231130155327-398db92f09a3/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8=
+github.com/iotaledger/hive.go/kvstore v0.0.0-20231130155327-398db92f09a3 h1:k9EO005mzPj+2atTByhUdco3rDtLx3mY7ZW2B/dLKOA=
+github.com/iotaledger/hive.go/kvstore v0.0.0-20231130155327-398db92f09a3/go.mod h1:ytfKoHr/nF8u0y0G4mamfG0yjFtJiJVk0kgjnPOtsSY=
+github.com/iotaledger/hive.go/lo v0.0.0-20231130155327-398db92f09a3 h1:6rnSXCbIyYUVol3ihMoMFvxSNJsTdXgf1A1kqfa7FC8=
+github.com/iotaledger/hive.go/lo v0.0.0-20231130155327-398db92f09a3/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg=
+github.com/iotaledger/hive.go/log v0.0.0-20231130155327-398db92f09a3 h1:+GEZHE+oCj4PkH2S3BS9BlyDz1kUlw3eWKtzhCKS3ds=
+github.com/iotaledger/hive.go/log v0.0.0-20231130155327-398db92f09a3/go.mod h1:vzO4/wRkEJDEZb/9fD10oKU9k1bj4qLir2Uhl5U1FkM=
+github.com/iotaledger/hive.go/logger v0.0.0-20231130155327-398db92f09a3 h1:FBRbpEkEtfmymy4XUda72feIP81pRcr8zjXMmHRF7Lc=
+github.com/iotaledger/hive.go/logger v0.0.0-20231130155327-398db92f09a3/go.mod h1:w1psHM2MuKsen1WdsPKrpqElYH7ZOQ+YdQIgJZg4HTo=
+github.com/iotaledger/hive.go/runtime v0.0.0-20231130155327-398db92f09a3 h1:ibIJnyoBAbNDlGpRXZpSIcLLb5vbTorCxQPr+dLeGO8=
+github.com/iotaledger/hive.go/runtime v0.0.0-20231130155327-398db92f09a3/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q=
+github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231130155327-398db92f09a3 h1:fF2gn/vkBZjLTbvf/vALLJ0wrPVUQH7Fg4cPd40RecY=
+github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231130155327-398db92f09a3/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8=
+github.com/iotaledger/hive.go/stringify v0.0.0-20231130155327-398db92f09a3 h1:n0D5PTekFS1Vuktamz9e21AQ7kNcspr0Kv3Ob9u48Q0=
+github.com/iotaledger/hive.go/stringify v0.0.0-20231130155327-398db92f09a3/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs=
 github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231201123347-1c44b3f24221 h1:+ozrau44uPy2kYv2fuj2Wks8+VkXR62WB9zONOJgzdE=
 github.com/iotaledger/inx-app v1.0.0-rc.3.0.20231201123347-1c44b3f24221/go.mod h1:6cLX3gnhP0WL+Q+mf3/rIqfACe5fWKVR8luPXWh2xiY=
 github.com/iotaledger/inx/go v1.0.0-rc.2.0.20231201114846-3bb5c3fd5665 h1:XdhojOpZ0t0pJFyNO0zlBogSAUrhEI67eCpTC9H6sGM=
diff --git a/tools/genesis-snapshot/go.mod b/tools/genesis-snapshot/go.mod
index 3514c522b..264db9107 100644
--- a/tools/genesis-snapshot/go.mod
+++ b/tools/genesis-snapshot/go.mod
@@ -5,10 +5,10 @@ go 1.21
 replace github.com/iotaledger/iota-core => ../../
 require (
-	github.com/iotaledger/hive.go/crypto v0.0.0-20231128121006-331a9e522dfe
-	github.com/iotaledger/hive.go/ierrors v0.0.0-20231128121006-331a9e522dfe
-	github.com/iotaledger/hive.go/lo v0.0.0-20231128121006-331a9e522dfe
-	github.com/iotaledger/hive.go/runtime v0.0.0-20231128121006-331a9e522dfe
+	github.com/iotaledger/hive.go/crypto v0.0.0-20231130155327-398db92f09a3
+	github.com/iotaledger/hive.go/ierrors v0.0.0-20231130155327-398db92f09a3
+	github.com/iotaledger/hive.go/lo v0.0.0-20231130155327-398db92f09a3
+	github.com/iotaledger/hive.go/runtime v0.0.0-20231130155327-398db92f09a3
 	github.com/iotaledger/iota-core v0.0.0-00010101000000-000000000000
 	github.com/iotaledger/iota.go/v4 v4.0.0-20231204142547-416c9a87403d
 	github.com/mr-tron/base58 v1.2.0
@@ -26,14 +26,14 @@ require (
 	github.com/holiman/uint256 v1.2.4 // indirect
 	github.com/iancoleman/orderedmap v0.3.0 // indirect
 	github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 // indirect
-	github.com/iotaledger/hive.go/ads v0.0.0-20231127134220-90b88e35bdb2 // indirect
-	github.com/iotaledger/hive.go/constraints v0.0.0-20231128121006-331a9e522dfe // indirect
-	github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231127134220-90b88e35bdb2 // indirect
-	github.com/iotaledger/hive.go/ds v0.0.0-20231127134220-90b88e35bdb2 // indirect
-	github.com/iotaledger/hive.go/kvstore v0.0.0-20231127134220-90b88e35bdb2 // indirect
-	github.com/iotaledger/hive.go/log v0.0.0-20231127134220-90b88e35bdb2 // indirect
-	github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231127134220-90b88e35bdb2 // indirect
-	github.com/iotaledger/hive.go/stringify v0.0.0-20231128121006-331a9e522dfe // indirect
+	github.com/iotaledger/hive.go/ads v0.0.0-20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/constraints v0.0.0-20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/ds v0.0.0-20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/kvstore v0.0.0-20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/log v0.0.0-20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231130155327-398db92f09a3 // indirect
+	github.com/iotaledger/hive.go/stringify v0.0.0-20231130155327-398db92f09a3 // indirect
 	github.com/ipfs/go-cid v0.4.1 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
 	github.com/kr/text v0.2.0 // indirect
diff --git a/tools/genesis-snapshot/go.sum b/tools/genesis-snapshot/go.sum
index 9374066ac..c92c919b1 100644
--- a/tools/genesis-snapshot/go.sum
+++ b/tools/genesis-snapshot/go.sum
@@ -28,30 +28,30 @@ github.com/iancoleman/orderedmap v0.3.0 h1:5cbR2grmZR/DiVt+VJopEhtVs9YGInGIxAoMJ
 github.com/iancoleman/orderedmap v0.3.0/go.mod h1:XuLcCUkdL5owUCQeF2Ue9uuw1EptkJDkXXS7VoV7XGE=
 github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7 h1:dTrD7X2PTNgli6EbS4tV9qu3QAm/kBU3XaYZV2xdzys=
 github.com/iotaledger/grocksdb v1.7.5-0.20230220105546-5162e18885c7/go.mod h1:ZRdPu684P0fQ1z8sXz4dj9H5LWHhz4a9oCtvjunkSrw=
-github.com/iotaledger/hive.go/ads v0.0.0-20231127134220-90b88e35bdb2 h1:0FynHsnJTZgxQuXk3/maXNgzyvbwQ+TnuiwY48kYSr4=
-github.com/iotaledger/hive.go/ads v0.0.0-20231127134220-90b88e35bdb2/go.mod h1:gbUvr01B5ha530GnNm8K2OsHXOd2BtzBYOMxyTX3iDg=
-github.com/iotaledger/hive.go/constraints v0.0.0-20231128121006-331a9e522dfe h1:vgJj9kXM1IkLjbjWOV565Vil+RlzJwVhxG/KebMmrKE=
-github.com/iotaledger/hive.go/constraints v0.0.0-20231128121006-331a9e522dfe/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s=
-github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231127134220-90b88e35bdb2 h1:ZL4cGO4zy7IwCIfHQgpvu3yMbNnFFRvSvTqaZM5Uj5U=
-github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231127134220-90b88e35bdb2/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko=
-github.com/iotaledger/hive.go/crypto v0.0.0-20231128121006-331a9e522dfe h1:/+Qw7fe5oSE5Jxm8RPzcfBKHbwjJ/zwHu3UWdSOvKW8=
-github.com/iotaledger/hive.go/crypto v0.0.0-20231128121006-331a9e522dfe/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI=
-github.com/iotaledger/hive.go/ds v0.0.0-20231127134220-90b88e35bdb2 h1:8YQlcFMexyYvjh3V/YSYzldeYjaDZd+1mHt8SUh8Uqs=
-github.com/iotaledger/hive.go/ds v0.0.0-20231127134220-90b88e35bdb2/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc=
-github.com/iotaledger/hive.go/ierrors v0.0.0-20231128121006-331a9e522dfe h1:kVkjbdBANpA8tyu9RM4/GeyVoyRfGcb4LT96PqHTIWc=
-github.com/iotaledger/hive.go/ierrors v0.0.0-20231128121006-331a9e522dfe/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8=
-github.com/iotaledger/hive.go/kvstore v0.0.0-20231127134220-90b88e35bdb2 h1:b+AHpClIb7YAjpXgRHCsr+DRdBMuN4Q6k/wpFmT1wok=
-github.com/iotaledger/hive.go/kvstore v0.0.0-20231127134220-90b88e35bdb2/go.mod h1:ytfKoHr/nF8u0y0G4mamfG0yjFtJiJVk0kgjnPOtsSY=
-github.com/iotaledger/hive.go/lo v0.0.0-20231128121006-331a9e522dfe h1:dQBQ+ZOVwC6KJxTABHMMHjJto70gNU5Cn4dXeJp5xmM=
-github.com/iotaledger/hive.go/lo v0.0.0-20231128121006-331a9e522dfe/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg=
-github.com/iotaledger/hive.go/log v0.0.0-20231127134220-90b88e35bdb2 h1:3B6UFIJ+IJEiGmUbLt5+Zokv0i8RUs70IuSR9sB60DA=
-github.com/iotaledger/hive.go/log v0.0.0-20231127134220-90b88e35bdb2/go.mod h1:vzO4/wRkEJDEZb/9fD10oKU9k1bj4qLir2Uhl5U1FkM=
-github.com/iotaledger/hive.go/runtime v0.0.0-20231128121006-331a9e522dfe h1:jewR0RJ7oTGWjzhTROdIwhMeBH4//frUHizKs/6Em+s=
-github.com/iotaledger/hive.go/runtime v0.0.0-20231128121006-331a9e522dfe/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q=
-github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231127134220-90b88e35bdb2 h1:vTx/tPH+//CQcDjdC8DZv3s6x9KCqAfTqn3VTjYUUlw=
-github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231127134220-90b88e35bdb2/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8=
-github.com/iotaledger/hive.go/stringify v0.0.0-20231128121006-331a9e522dfe h1:RcFUqhnJ+86+sA0XMrZ0q+086ULrdWQkWrjUt2OnJK4=
-github.com/iotaledger/hive.go/stringify v0.0.0-20231128121006-331a9e522dfe/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs=
+github.com/iotaledger/hive.go/ads v0.0.0-20231130155327-398db92f09a3 h1:VLm18jYzB0wFceCXIfsbKzTwl3TDvCgRp4wJ/xMgXWM=
+github.com/iotaledger/hive.go/ads v0.0.0-20231130155327-398db92f09a3/go.mod h1:gbUvr01B5ha530GnNm8K2OsHXOd2BtzBYOMxyTX3iDg=
+github.com/iotaledger/hive.go/constraints v0.0.0-20231130155327-398db92f09a3 h1:qJe/BR6FpAM2tsoJBTbLSeQERjnxidNBZuJasvYhR7s=
+github.com/iotaledger/hive.go/constraints v0.0.0-20231130155327-398db92f09a3/go.mod h1:dOBOM2s4se3HcWefPe8sQLUalGXJ8yVXw58oK8jke3s=
+github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231130155327-398db92f09a3 h1:MsvSJAMWne3QUuU6Vo0jeVEt91foSZiuEM4xEyV79X4=
+github.com/iotaledger/hive.go/core v1.0.0-rc.3.0.20231130155327-398db92f09a3/go.mod h1:CdixkrB7VdQzEDlVuwsxPtsiJL/WXrQgz3PELIqlLko=
+github.com/iotaledger/hive.go/crypto v0.0.0-20231130155327-398db92f09a3 h1:MNkwHDOeIUb62KbbsSCKwiWYHdzJM+6sNZ45AVhPmiA=
+github.com/iotaledger/hive.go/crypto v0.0.0-20231130155327-398db92f09a3/go.mod h1:OQ9EVTTQT1mkO/16BgwSIyQlAhEg+Cptud/yutevWsI=
+github.com/iotaledger/hive.go/ds v0.0.0-20231130155327-398db92f09a3 h1:gWSJjaIIRvYG778ITT4V9MVqxEuGy1ohE10Awx6HiRg=
+github.com/iotaledger/hive.go/ds v0.0.0-20231130155327-398db92f09a3/go.mod h1:JE8cbZSvzbB5TrwXibg6M0B7ck35YxF30ItHBzQRlgc=
+github.com/iotaledger/hive.go/ierrors v0.0.0-20231130155327-398db92f09a3 h1:4YjvERr9WQVwPjKirblj2grnwTzKBNLrT5KaHMZRFBQ=
+github.com/iotaledger/hive.go/ierrors v0.0.0-20231130155327-398db92f09a3/go.mod h1:HcE8B5lP96enc/OALTb2/rIIi+yOLouRoHOKRclKmC8=
+github.com/iotaledger/hive.go/kvstore v0.0.0-20231130155327-398db92f09a3 h1:k9EO005mzPj+2atTByhUdco3rDtLx3mY7ZW2B/dLKOA=
+github.com/iotaledger/hive.go/kvstore v0.0.0-20231130155327-398db92f09a3/go.mod h1:ytfKoHr/nF8u0y0G4mamfG0yjFtJiJVk0kgjnPOtsSY=
+github.com/iotaledger/hive.go/lo v0.0.0-20231130155327-398db92f09a3 h1:6rnSXCbIyYUVol3ihMoMFvxSNJsTdXgf1A1kqfa7FC8=
+github.com/iotaledger/hive.go/lo v0.0.0-20231130155327-398db92f09a3/go.mod h1:6Ee7i6b4tuTHuRYnPP8VUb0wr9XFI5qlqtnttBd9jRg=
+github.com/iotaledger/hive.go/log v0.0.0-20231130155327-398db92f09a3 h1:+GEZHE+oCj4PkH2S3BS9BlyDz1kUlw3eWKtzhCKS3ds=
+github.com/iotaledger/hive.go/log v0.0.0-20231130155327-398db92f09a3/go.mod h1:vzO4/wRkEJDEZb/9fD10oKU9k1bj4qLir2Uhl5U1FkM=
+github.com/iotaledger/hive.go/runtime v0.0.0-20231130155327-398db92f09a3 h1:ibIJnyoBAbNDlGpRXZpSIcLLb5vbTorCxQPr+dLeGO8=
+github.com/iotaledger/hive.go/runtime v0.0.0-20231130155327-398db92f09a3/go.mod h1:DrZPvUvLarK8C2qb+3H2vdypp/MuhpQmB3iMJbDCr/Q=
+github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231130155327-398db92f09a3 h1:fF2gn/vkBZjLTbvf/vALLJ0wrPVUQH7Fg4cPd40RecY=
+github.com/iotaledger/hive.go/serializer/v2 v2.0.0-rc.1.0.20231130155327-398db92f09a3/go.mod h1:FoH3T6yKlZJp8xm8K+zsQiibSynp32v21CpWx8xkek8=
+github.com/iotaledger/hive.go/stringify v0.0.0-20231130155327-398db92f09a3 h1:n0D5PTekFS1Vuktamz9e21AQ7kNcspr0Kv3Ob9u48Q0=
+github.com/iotaledger/hive.go/stringify v0.0.0-20231130155327-398db92f09a3/go.mod h1:FTo/UWzNYgnQ082GI9QVM9HFDERqf9rw9RivNpqrnTs=
 github.com/iotaledger/iota.go/v4 v4.0.0-20231204142547-416c9a87403d h1:MPklxa8jW4/EgDm/LEzf6orxjik7U+vMUW/ToGT1Zqg=
 github.com/iotaledger/iota.go/v4 v4.0.0-20231204142547-416c9a87403d/go.mod h1:lCk9rhP3B5pX9BKhzR+Jobq4xPd+GHlqgF4Ga+eQfWA=
 github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=