diff --git a/chain/consensus/filcns/upgrades.go b/chain/consensus/filcns/upgrades.go
index 26c93a0f37e..c6b7a896752 100644
--- a/chain/consensus/filcns/upgrades.go
+++ b/chain/consensus/filcns/upgrades.go
@@ -6,7 +6,6 @@ import (
 	_ "embed"
 	"fmt"
 	"os"
-	"runtime"
 	"strconv"
 	"time"
 
@@ -72,7 +71,7 @@ var (
 
 func init() {
 	// the default calculation used for migration worker count
-	MigrationMaxWorkerCount = runtime.NumCPU()
+	MigrationMaxWorkerCount = 8
 	// check if an alternative value was request by environment
 	if mwcs := os.Getenv(EnvMigrationMaxWorkerCount); mwcs != "" {
 		mwc, err := strconv.ParseInt(mwcs, 10, 32)
diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go
index 93f53c63ff0..d8d1bfb8972 100644
--- a/chain/stmgr/forks.go
+++ b/chain/stmgr/forks.go
@@ -235,7 +235,7 @@ func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc,
 	height := ts.Height()
 	parent := ts.ParentState()
 
-	if disabled := os.Getenv(EnvDisablePreMigrations); strings.TrimSpace(disabled) == "1" {
+	if val, isSet := os.LookupEnv(EnvDisablePreMigrations); !isSet || strings.TrimSpace(val) != "0" {
 		log.Warnw("SKIPPING pre-migration", "height", height)
 		return
 	}
diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go
index bf8793488b6..531559978c4 100644
--- a/chain/stmgr/forks_test.go
+++ b/chain/stmgr/forks_test.go
@@ -377,6 +377,9 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
 func TestForkPreMigration(t *testing.T) {
 	//stm: @CHAIN_GEN_NEXT_TIPSET_001,
 	//stm: @CHAIN_STATE_RESOLVE_TO_KEY_ADDR_001, @CHAIN_STATE_SET_VM_CONSTRUCTOR_001
+	if err := os.Setenv("LOTUS_DISABLE_PRE_MIGRATIONS", "0"); err != nil {
+		t.Fatalf("failed to force LOTUS_DISABLE_PRE_MIGRATIONS: %v", err)
+	}
 	logging.SetAllLoggers(logging.LevelInfo)
 
 	cg, err := gen.NewGenerator()
diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml
index 8d3c6a427e8..a26da4631bf 100644
--- a/documentation/en/default-lotus-config.toml
+++ b/documentation/en/default-lotus-config.toml
@@ -175,7 +175,7 @@
 [Chainstore]
   # type: bool
   # env var: LOTUS_CHAINSTORE_ENABLESPLITSTORE
-  EnableSplitstore = true
+  EnableSplitstore = false
 
   [Chainstore.Splitstore]
     # ColdStoreType specifies the type of the coldstore.
@@ -212,7 +212,7 @@
     #
     # type: uint64
    # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREFULLGCFREQUENCY
-    #HotStoreFullGCFrequency = 20
+    #HotStoreFullGCFrequency = 1
 
     # HotStoreMaxSpaceTarget sets a target max disk size for the hotstore. Splitstore GC
     # will run moving GC if disk utilization gets within a threshold (150 GB) of the target.
diff --git a/node/config/def.go b/node/config/def.go
index 1972b76371f..2281cc7fe0f 100644
--- a/node/config/def.go
+++ b/node/config/def.go
@@ -94,13 +94,13 @@ func DefaultFullNode() *FullNode {
 			SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers,
 		},
 		Chainstore: Chainstore{
-			EnableSplitstore: true,
+			EnableSplitstore: false,
 			Splitstore: Splitstore{
 				ColdStoreType: "discard",
 				HotStoreType:  "badger",
 				MarkSetType:   "badger",
 
-				HotStoreFullGCFrequency:      20,
+				HotStoreFullGCFrequency:      1,
 				HotStoreMaxSpaceTarget:       650_000_000_000,
 				HotStoreMaxSpaceThreshold:    150_000_000_000,
 				HotstoreMaxSpaceSafetyBuffer: 50_000_000_000,
diff --git a/node/modules/core.go b/node/modules/core.go
index a0d52c291bc..0d232acb1b2 100644
--- a/node/modules/core.go
+++ b/node/modules/core.go
@@ -70,7 +70,7 @@ func MemoryConstraints() system.MemoryConstraints {
 // MemoryWatchdog starts the memory watchdog, applying the computed resource
 // constraints.
 func MemoryWatchdog(lr repo.LockedRepo, lc fx.Lifecycle, constraints system.MemoryConstraints) {
-	if os.Getenv(EnvWatchdogDisabled) == "1" {
+	if val, isSet := os.LookupEnv(EnvWatchdogDisabled); !isSet || val != "0" {
 		log.Infof("memory watchdog is disabled via %s", EnvWatchdogDisabled)
 		return
 	}
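
Note: the sketch below is not part of the patch. It is a minimal standalone illustration of the os.LookupEnv gating pattern the diff switches to in forks.go and core.go: LookupEnv distinguishes an unset variable from one set to a value, so the guarded code path is skipped by default and only runs when the variable is explicitly set to "0". The skipFeature helper is hypothetical and does not exist in the Lotus tree.

// Standalone sketch (not Lotus code) of the "skip unless explicitly '0'"
// gating used by the patched runPreMigration and MemoryWatchdog checks.
package main

import (
	"fmt"
	"os"
	"strings"
)

// skipFeature (hypothetical helper) mirrors the patched condition: skip
// when the variable is unset, or set to anything other than "0".
func skipFeature(envVar string) bool {
	val, isSet := os.LookupEnv(envVar)
	return !isSet || strings.TrimSpace(val) != "0"
}

func main() {
	const envVar = "LOTUS_DISABLE_PRE_MIGRATIONS"

	fmt.Println(skipFeature(envVar)) // true: unset, so the feature is skipped

	os.Setenv(envVar, "0")
	fmt.Println(skipFeature(envVar)) // false: explicitly "0", so the feature runs

	os.Setenv(envVar, "1")
	fmt.Println(skipFeature(envVar)) // true: any other value still skips
}

Compared with the old os.Getenv(...) == "1" check, this inverts the default: the code path previously ran unless explicitly disabled, and after the patch it is skipped unless explicitly enabled with "0". That inversion is why the forks_test.go hunk must now force LOTUS_DISABLE_PRE_MIGRATIONS=0 before TestForkPreMigration can exercise pre-migrations at all.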