Merge branch 'master' into update-gethpin-v1.14.4
amsanghi authored Dec 12, 2024
2 parents 418a1f8 + 2b3b823 commit be6ba29
Showing 2 changed files with 96 additions and 53 deletions.
131 changes: 87 additions & 44 deletions cmd/nitro/nitro.go
@@ -616,6 +616,46 @@ func mainImpl() int {
}
}
}

+	// Before starting the node, wait until the transaction that deployed the rollup is finalized
+	if nodeConfig.EnsureRollupDeployment &&
+		nodeConfig.Node.ParentChainReader.Enable &&
+		rollupAddrs.DeployedAt > 0 {
+		currentFinalized, err := l1Reader.LatestFinalizedBlockNr(ctx)
+		if err != nil && errors.Is(err, headerreader.ErrBlockNumberNotSupported) {
+			log.Info("Finality not supported by parent chain, disabling the check to verify if the rollup deployment tx was finalized", "err", err)
+		} else {
+			newHeaders, unsubscribe := l1Reader.Subscribe(false)
+			retriesOnError := 10
+			sigint := make(chan os.Signal, 1)
+			signal.Notify(sigint, os.Interrupt, syscall.SIGTERM)
+			for currentFinalized < rollupAddrs.DeployedAt && retriesOnError > 0 {
+				select {
+				case <-newHeaders:
+					if finalized, err := l1Reader.LatestFinalizedBlockNr(ctx); err != nil {
+						if errors.Is(err, headerreader.ErrBlockNumberNotSupported) {
+							log.Error("Finality support was removed from parent chain midway, disabling the check to verify if the rollup deployment tx was finalized", "err", err)
+							retriesOnError = 0 // Break out of the for loop as well
+							break
+						}
+						log.Error("Error getting latestFinalizedBlockNr from l1Reader", "err", err)
+						retriesOnError--
+					} else {
+						currentFinalized = finalized
+						log.Debug("Finalized block number updated", "finalized", finalized)
+					}
+				case <-ctx.Done():
+					log.Error("Context done while checking if the rollup deployment tx was finalized")
+					return 1
+				case <-sigint:
+					log.Info("shutting down because of sigint")
+					return 0
+				}
+			}
+			unsubscribe()
+		}
+	}
+
gqlConf := nodeConfig.GraphQL
if gqlConf.Enable {
if err := graphql.New(stack, execNode.Backend.APIBackend(), execNode.FilterSystem, gqlConf.CORSDomain, gqlConf.VHosts); err != nil {
@@ -675,53 +715,55 @@ func mainImpl() int {
}

type NodeConfig struct {
-	Conf genericconf.ConfConfig `koanf:"conf" reload:"hot"`
-	Node arbnode.Config `koanf:"node" reload:"hot"`
-	Execution gethexec.Config `koanf:"execution" reload:"hot"`
-	Validation valnode.Config `koanf:"validation" reload:"hot"`
-	ParentChain conf.ParentChainConfig `koanf:"parent-chain" reload:"hot"`
-	Chain conf.L2Config `koanf:"chain"`
-	LogLevel string `koanf:"log-level" reload:"hot"`
-	LogType string `koanf:"log-type" reload:"hot"`
-	FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"`
-	Persistent conf.PersistentConfig `koanf:"persistent"`
-	HTTP genericconf.HTTPConfig `koanf:"http"`
-	WS genericconf.WSConfig `koanf:"ws"`
-	IPC genericconf.IPCConfig `koanf:"ipc"`
-	Auth genericconf.AuthRPCConfig `koanf:"auth"`
-	GraphQL genericconf.GraphQLConfig `koanf:"graphql"`
-	Metrics bool `koanf:"metrics"`
-	MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"`
-	PProf bool `koanf:"pprof"`
-	PprofCfg genericconf.PProf `koanf:"pprof-cfg"`
-	Init conf.InitConfig `koanf:"init"`
-	Rpc genericconf.RpcConfig `koanf:"rpc"`
-	BlocksReExecutor blocksreexecutor.Config `koanf:"blocks-reexecutor"`
+	Conf genericconf.ConfConfig `koanf:"conf" reload:"hot"`
+	Node arbnode.Config `koanf:"node" reload:"hot"`
+	Execution gethexec.Config `koanf:"execution" reload:"hot"`
+	Validation valnode.Config `koanf:"validation" reload:"hot"`
+	ParentChain conf.ParentChainConfig `koanf:"parent-chain" reload:"hot"`
+	Chain conf.L2Config `koanf:"chain"`
+	LogLevel string `koanf:"log-level" reload:"hot"`
+	LogType string `koanf:"log-type" reload:"hot"`
+	FileLogging genericconf.FileLoggingConfig `koanf:"file-logging" reload:"hot"`
+	Persistent conf.PersistentConfig `koanf:"persistent"`
+	HTTP genericconf.HTTPConfig `koanf:"http"`
+	WS genericconf.WSConfig `koanf:"ws"`
+	IPC genericconf.IPCConfig `koanf:"ipc"`
+	Auth genericconf.AuthRPCConfig `koanf:"auth"`
+	GraphQL genericconf.GraphQLConfig `koanf:"graphql"`
+	Metrics bool `koanf:"metrics"`
+	MetricsServer genericconf.MetricsServerConfig `koanf:"metrics-server"`
+	PProf bool `koanf:"pprof"`
+	PprofCfg genericconf.PProf `koanf:"pprof-cfg"`
+	Init conf.InitConfig `koanf:"init"`
+	Rpc genericconf.RpcConfig `koanf:"rpc"`
+	BlocksReExecutor blocksreexecutor.Config `koanf:"blocks-reexecutor"`
+	EnsureRollupDeployment bool `koanf:"ensure-rollup-deployment" reload:"hot"`
}

var NodeConfigDefault = NodeConfig{
-	Conf: genericconf.ConfConfigDefault,
-	Node: arbnode.ConfigDefault,
-	Execution: gethexec.ConfigDefault,
-	Validation: valnode.DefaultValidationConfig,
-	ParentChain: conf.L1ConfigDefault,
-	Chain: conf.L2ConfigDefault,
-	LogLevel: "INFO",
-	LogType: "plaintext",
-	FileLogging: genericconf.DefaultFileLoggingConfig,
-	Persistent: conf.PersistentConfigDefault,
-	HTTP: genericconf.HTTPConfigDefault,
-	WS: genericconf.WSConfigDefault,
-	IPC: genericconf.IPCConfigDefault,
-	Auth: genericconf.AuthRPCConfigDefault,
-	GraphQL: genericconf.GraphQLConfigDefault,
-	Metrics: false,
-	MetricsServer: genericconf.MetricsServerConfigDefault,
-	Init: conf.InitConfigDefault,
-	Rpc: genericconf.DefaultRpcConfig,
-	PProf: false,
-	PprofCfg: genericconf.PProfDefault,
-	BlocksReExecutor: blocksreexecutor.DefaultConfig,
+	Conf: genericconf.ConfConfigDefault,
+	Node: arbnode.ConfigDefault,
+	Execution: gethexec.ConfigDefault,
+	Validation: valnode.DefaultValidationConfig,
+	ParentChain: conf.L1ConfigDefault,
+	Chain: conf.L2ConfigDefault,
+	LogLevel: "INFO",
+	LogType: "plaintext",
+	FileLogging: genericconf.DefaultFileLoggingConfig,
+	Persistent: conf.PersistentConfigDefault,
+	HTTP: genericconf.HTTPConfigDefault,
+	WS: genericconf.WSConfigDefault,
+	IPC: genericconf.IPCConfigDefault,
+	Auth: genericconf.AuthRPCConfigDefault,
+	GraphQL: genericconf.GraphQLConfigDefault,
+	Metrics: false,
+	MetricsServer: genericconf.MetricsServerConfigDefault,
+	Init: conf.InitConfigDefault,
+	Rpc: genericconf.DefaultRpcConfig,
+	PProf: false,
+	PprofCfg: genericconf.PProfDefault,
+	BlocksReExecutor: blocksreexecutor.DefaultConfig,
+	EnsureRollupDeployment: true,
}

func NodeConfigAddOptions(f *flag.FlagSet) {
@@ -748,6 +790,7 @@ func NodeConfigAddOptions(f *flag.FlagSet) {
conf.InitConfigAddOptions("init", f)
genericconf.RpcConfigAddOptions("rpc", f)
blocksreexecutor.ConfigAddOptions("blocks-reexecutor", f)
f.Bool("ensure-rollup-deployment", NodeConfigDefault.EnsureRollupDeployment, "before starting the node, wait until the transaction that deployed rollup is finalized")
}

func (c *NodeConfig) ResolveDirectoryNames() error {
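
With this change, nodes block at startup until the rollup deployment transaction is finalized on the parent chain; the default is true, as set in NodeConfigDefault above. Operators who do not want that behaviour can turn the check off. A hypothetical invocation and the equivalent JSON config entry (only the new option is shown; the binary name and config-file layout are assumed from cmd/nitro, not part of this commit):

nitro --ensure-rollup-deployment=false

{
  "ensure-rollup-deployment": false
}
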
18 changes: 9 additions & 9 deletions das/aggregator.go
@@ -254,7 +254,7 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64)
var sigs []blsSignatures.Signature
var aggSignersMask uint64
var successfullyStoredCount int
-var returned bool
+var returned int // 0 = no status yet, 1 = succeeded, 2 = failed
for i := 0; i < len(a.services); i++ {
select {
case <-ctx.Done():
@@ -276,26 +276,26 @@ func (a *Aggregator) Store(ctx context.Context, message []byte, timeout uint64)
// certDetailsChan, so the Store function can return, but also continue
// running until all responses are received (or the context is canceled)
// in order to produce accurate logs/metrics.
-if !returned {
+if returned == 0 {
if successfullyStoredCount >= a.requiredServicesForStore {
cd := certDetails{}
cd.pubKeys = append(cd.pubKeys, pubKeys...)
cd.sigs = append(cd.sigs, sigs...)
cd.aggSignersMask = aggSignersMask
certDetailsChan <- cd
-returned = true
-if a.maxAllowedServiceStoreFailures > 0 && // Ignore the case where AssumedHonest = 1, probably a testnet
-int(storeFailures.Load())+1 > a.maxAllowedServiceStoreFailures {
-log.Error("das.Aggregator: storing the batch data succeeded to enough DAS committee members to generate the Data Availability Cert, but if one more had failed then the cert would not have been able to be generated. Look for preceding logs with \"Error from backend\"")
-}
+returned = 1
} else if int(storeFailures.Load()) > a.maxAllowedServiceStoreFailures {
cd := certDetails{}
cd.err = fmt.Errorf("aggregator failed to store message to at least %d out of %d DASes (assuming %d are honest). %w", a.requiredServicesForStore, len(a.services), a.config.AssumedHonest, daprovider.ErrBatchToDasFailed)
certDetailsChan <- cd
-returned = true
+returned = 2
}
}

}
+if returned == 1 &&
+a.maxAllowedServiceStoreFailures > 0 && // Ignore the case where AssumedHonest = 1, probably a testnet
+int(storeFailures.Load())+1 > a.maxAllowedServiceStoreFailures {
+log.Error("das.Aggregator: storing the batch data succeeded to enough DAS committee members to generate the Data Availability Cert, but if one more had failed then the cert would not have been able to be generated. Look for preceding logs with \"Error from backend\"")
+}
}()
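
The three integer states that replace the old boolean could also be written as named constants; a minimal sketch (the constant names are hypothetical and not part of this commit):

const (
	storeStatusPending   = 0 // nothing sent on certDetailsChan yet
	storeStatusSucceeded = 1 // a cert was sent on certDetailsChan
	storeStatusFailed    = 2 // an error was sent on certDetailsChan
)

A comparison such as returned == 1 in the code above would then read returned == storeStatusSucceeded.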

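
Taken together, the startup check added in cmd/nitro/nitro.go follows a reusable pattern: subscribe to parent-chain headers and re-query the finalized block number until it reaches a target, tolerating a bounded number of read errors. Below is a simplified, self-contained sketch of that pattern; the finalityReader interface, the sentinel error, and the helper name are hypothetical stand-ins for the headerreader API used in the real code.

package waitfinality

import (
	"context"
	"errors"
)

// errFinalityNotSupported stands in for headerreader.ErrBlockNumberNotSupported.
var errFinalityNotSupported = errors.New("finality not supported by chain")

// finalityReader is a hypothetical subset of the header-reader API used in the diff.
type finalityReader interface {
	LatestFinalizedBlockNr(ctx context.Context) (uint64, error)
	Subscribe(requireBlockNrUpdates bool) (<-chan struct{}, func())
}

// waitUntilFinalized blocks until the finalized block number reaches target.
// Like the code in the diff, it returns without error if the chain does not
// report finality, gives up after maxRetries failed reads, and only returns
// an error when ctx is canceled.
func waitUntilFinalized(ctx context.Context, r finalityReader, target uint64, maxRetries int) error {
	current, err := r.LatestFinalizedBlockNr(ctx)
	if err != nil && errors.Is(err, errFinalityNotSupported) {
		return nil // nothing to wait for
	}
	newHeaders, unsubscribe := r.Subscribe(false)
	defer unsubscribe()
	for current < target && maxRetries > 0 {
		select {
		case <-newHeaders:
			finalized, err := r.LatestFinalizedBlockNr(ctx)
			if err != nil {
				if errors.Is(err, errFinalityNotSupported) {
					return nil
				}
				maxRetries-- // transient read error: try again on the next header
				continue
			}
			current = finalized
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return nil
}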
