diff --git a/aggoracle/oracle.go b/aggoracle/oracle.go
index 27383f43..df5b0297 100644
--- a/aggoracle/oracle.go
+++ b/aggoracle/oracle.go
@@ -63,12 +63,15 @@ func (a *AggOracle) Start(ctx context.Context) {
 		case <-a.ticker.C:
 			blockNumToFetch, gerToInject, err = a.getLastFinalisedGER(ctx, blockNumToFetch)
 			if err != nil {
-				if errors.Is(err, l1infotreesync.ErrBlockNotProcessed) {
+				switch {
+				case errors.Is(err, l1infotreesync.ErrBlockNotProcessed):
 					log.Debugf("syncer is not ready for the block %d", blockNumToFetch)
-				} else if errors.Is(err, l1infotreesync.ErrNotFound) {
+
+				case errors.Is(err, l1infotreesync.ErrNotFound):
 					blockNumToFetch = 0
 					log.Debugf("syncer has not found any GER until block %d", blockNumToFetch)
-				} else {
+
+				default:
 					log.Error("error calling getLastFinalisedGER: ", err)
 				}
 			}
diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go
index 2f5bd147..cc69dac1 100644
--- a/aggregator/aggregator.go
+++ b/aggregator/aggregator.go
@@ -375,6 +375,7 @@ func (a *Aggregator) handleRollbackBatches(rollbackData synchronizer.RollbackBat
 	}
 
 	marshalledBookMark, err = proto.Marshal(bookMark)
+	//nolint:gocritic
 	if err != nil {
 		log.Error("failed to marshal bookmark: %v", err)
 	} else {
@@ -1151,6 +1152,7 @@ func (a *Aggregator) validateEligibleFinalProof(
 	batchNumberToVerify := lastVerifiedBatchNum + 1
 
 	if proof.BatchNumber != batchNumberToVerify {
+		//nolint:gocritic
 		if proof.BatchNumber < batchNumberToVerify && proof.BatchNumberFinal >= batchNumberToVerify {
 			// We have a proof that contains some batches below the last batch verified, anyway can be eligible as final proof
 			log.Warnf(
@@ -1764,6 +1766,7 @@ func (a *Aggregator) buildInputProver(
 	l1InfoTreeData := map[uint32]*prover.L1Data{}
 	forcedBlockhashL1 := common.Hash{}
 	l1InfoRoot := batchToVerify.L1InfoRoot.Bytes()
+	//nolint:gocritic
 	if !isForcedBatch {
 		tree, err := l1infotree.NewL1InfoTree(32, [][32]byte{}) //nolint:mnd
 		if err != nil {
diff --git a/claimsponsor/claimsponsor.go b/claimsponsor/claimsponsor.go
index bad29c86..4537888b 100644
--- a/claimsponsor/claimsponsor.go
+++ b/claimsponsor/claimsponsor.go
@@ -271,15 +271,19 @@ func (c *ClaimSponsor) AddClaimToQueue(ctx context.Context, claim *Claim) error
 	var queuePosition uint64
 
 	lastQueuePosition, _, err := getLastQueueIndex(tx)
-	if errors.Is(err, ErrNotFound) {
+	switch {
+	case errors.Is(err, ErrNotFound):
 		queuePosition = 0
-	} else if err != nil {
+
+	case err != nil:
 		tx.Rollback()
 
 		return err
-	} else {
+
+	default:
 		queuePosition = lastQueuePosition + 1
 	}
+
 	err = tx.Put(queueTable, dbCommon.Uint64ToBytes(queuePosition), claim.Key())
 	if err != nil {
 		tx.Rollback()
diff --git a/lastgersync/processor.go b/lastgersync/processor.go
index 049b2847..628ea04a 100644
--- a/lastgersync/processor.go
+++ b/lastgersync/processor.go
@@ -141,13 +141,15 @@ func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error {
 	var lastIndex int64
 
 	if lenEvents > 0 {
 		li, err := p.getLastIndexWithTx(tx)
-		if errors.Is(err, ErrNotFound) {
+		switch {
+		case errors.Is(err, ErrNotFound):
 			lastIndex = -1
-		} else if err != nil {
-			tx.Rollback()
+		case err != nil:
+			tx.Rollback()
 			return err
-		} else {
+
+		default:
 			lastIndex = int64(li)
 		}
 	}
diff --git a/rpc/bridge.go b/rpc/bridge.go
index eb6da780..d2ffddba 100644
--- a/rpc/bridge.go
+++ b/rpc/bridge.go
@@ -170,12 +170,14 @@ func (b *BridgeEndpoints) ClaimProof(
 		return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get rollup exit proof, error: %s", err))
 	}
 	var proofLocalExitRoot [32]common.Hash
-	if networkID == 0 {
+	switch {
+	case networkID == 0:
 		proofLocalExitRoot, err = b.bridgeL1.GetProof(ctx, depositCount, info.MainnetExitRoot)
 		if err != nil {
 			return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get local exit proof, error: %s", err))
 		}
-	} else if networkID == b.networkID {
+
+	case networkID == b.networkID:
 		localExitRoot, err := b.l1InfoTree.GetLocalExitRoot(ctx, networkID, info.RollupExitRoot)
 		if err != nil {
 			return zeroHex, rpc.NewRPCError(
@@ -190,12 +192,14 @@ func (b *BridgeEndpoints) ClaimProof(
 				fmt.Sprintf("failed to get local exit proof, error: %s", err),
 			)
 		}
-	} else {
+
+	default:
 		return zeroHex, rpc.NewRPCError(
 			rpc.DefaultErrorCode,
 			fmt.Sprintf("this client does not support network %d", networkID),
 		)
 	}
+
 	return ClaimProof{
 		ProofLocalExitRoot:  proofLocalExitRoot,
 		ProofRollupExitRoot: proofRollupExitRoot,
diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go
index 2f799eb3..1a0dcdbb 100644
--- a/sequencesender/sequencesender.go
+++ b/sequencesender/sequencesender.go
@@ -425,7 +425,8 @@ func (s *SequenceSender) getResultAndUpdateEthTx(ctx context.Context, txHash com
 	}
 
 	txResult, err := s.ethTxManager.Result(ctx, txHash)
-	if errors.Is(err, ethtxmanager.ErrNotFound) {
+	switch {
+	case errors.Is(err, ethtxmanager.ErrNotFound):
 		log.Infof("transaction %v does not exist in ethtxmanager. Marking it", txHash)
 		txData.OnMonitor = false
 		// Resend tx
@@ -433,10 +434,12 @@ func (s *SequenceSender) getResultAndUpdateEthTx(ctx context.Context, txHash com
 		if errSend == nil {
 			txData.OnMonitor = false
 		}
-	} else if err != nil {
+
+	case err != nil:
 		log.Errorf("error getting result for tx %v: %v", txHash, err)
 		return err
-	} else {
+
+	default:
 		s.updateEthTxResult(txData, txResult)
 	}
 
@@ -879,16 +882,19 @@ func (s *SequenceSender) handleReceivedDataStream(
 			}
 		}
 
-	// Already virtualized
-	if l2Block.BatchNumber <= s.fromStreamBatch {
+	switch {
+	case l2Block.BatchNumber <= s.fromStreamBatch:
+		// Already virtualized
 		if l2Block.BatchNumber != s.latestStreamBatch {
 			log.Infof("skipped! batch already virtualized, number %d", l2Block.BatchNumber)
 		}
-	} else if !s.validStream && l2Block.BatchNumber == s.fromStreamBatch+1 {
+
+	case !s.validStream && l2Block.BatchNumber == s.fromStreamBatch+1:
 		// Initial case after startup
 		s.addNewSequenceBatch(l2Block)
 		s.validStream = true
-	} else if l2Block.BatchNumber > s.wipBatch {
+
+	case l2Block.BatchNumber > s.wipBatch:
 		// Handle whether it's only a new block or also a new batch
 		// Create new sequential batch
 		s.addNewSequenceBatch(l2Block)
@@ -1021,28 +1027,27 @@ func (s *SequenceSender) closeSequenceBatch() error {
 
 		data.batch.SetL2Data(batchL2Data)
 	} else {
-		log.Fatalf("wipBatch %d not found in sequenceData slice", s.wipBatch)
+		return fmt.Errorf("pending batch %d not found in sequence data", s.wipBatch)
 	}
 
 	// Sanity Check
 	if s.cfg.SanityCheckRPCURL != "" {
 		rpcNumberOfBlocks, batchL2Data, err := s.getBatchFromRPC(s.wipBatch)
 		if err != nil {
-			log.Fatalf("error getting batch number from RPC while trying to perform sanity check: %v", err)
+			return fmt.Errorf("error getting batch number from RPC while trying to perform sanity check: %w", err)
 		} else {
 			dsNumberOfBlocks := len(s.sequenceData[s.wipBatch].batchRaw.Blocks)
 			if rpcNumberOfBlocks != dsNumberOfBlocks {
-				log.Fatalf(
-					"number of blocks in batch %d (%d) does not match the number of blocks in the batch from the RPC (%d)",
-					s.wipBatch, dsNumberOfBlocks, rpcNumberOfBlocks,
-				)
+				return fmt.Errorf("number of blocks in batch %d (%d) does not match "+
+					"the number of blocks in the batch from the RPC (%d)",
+					s.wipBatch, dsNumberOfBlocks, rpcNumberOfBlocks)
 			}
 
 			if data.batchType == datastream.BatchType_BATCH_TYPE_REGULAR &&
 				common.Bytes2Hex(data.batch.L2Data()) != batchL2Data {
 				log.Infof("datastream batchL2Data: %s", common.Bytes2Hex(data.batch.L2Data()))
 				log.Infof("RPC batchL2Data: %s", batchL2Data)
-				log.Fatalf("batchL2Data in batch %d does not match batchL2Data from the RPC (%d)", s.wipBatch)
+				return fmt.Errorf("batchL2Data in batch %d does not match batchL2Data from the RPC", s.wipBatch)
 			}
 
 			log.Infof("sanity check of batch %d against RPC successful", s.wipBatch)
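
Reviewer note: the hunks above make two mechanical changes. First, if/else-if/else error chains become tagless switch statements, the shape gocritic's ifElseChain check prefers (//nolint:gocritic marks the chains that were deliberately kept). Second, the log.Fatalf calls in closeSequenceBatch become returned errors, so the caller decides how to fail instead of the process exiting. A minimal, self-contained sketch of the switch pattern follows; ErrNotFound, fetch, and the messages are illustrative placeholders, not identifiers from this repository.

package main

import (
	"errors"
	"fmt"
)

// ErrNotFound is a hypothetical sentinel, standing in for sentinels such as
// l1infotreesync.ErrNotFound or ethtxmanager.ErrNotFound in the hunks above.
var ErrNotFound = errors.New("not found")

// fetch is an illustrative helper that wraps the sentinel on failure.
func fetch(i int) (int, error) {
	if i < 0 {
		return 0, fmt.Errorf("lookup failed: %w", ErrNotFound)
	}
	return i, nil
}

func main() {
	for _, i := range []int{-1, 7} {
		v, err := fetch(i)
		// Tagless switch: each case is a boolean expression, evaluated top
		// to bottom; the first true case wins, so the errors.Is check must
		// precede the generic err != nil catch-all.
		switch {
		case errors.Is(err, ErrNotFound):
			fmt.Println("not found, falling back to a default")
		case err != nil:
			fmt.Println("unexpected error:", err)
		default:
			fmt.Println("value:", v)
		}
	}
}

Because the cases short-circuit in order, swapping the errors.Is case and the err != nil case would route sentinel errors into the generic branch; every converted hunk above keeps the sentinel checks first for this reason.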