From 156dda9c609199a617002f793eb348dcafc42c5a Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Tue, 18 Jun 2024 22:00:54 -0500 Subject: [PATCH 01/27] proofparams alternate --- build/parameters.go | 17 +++-------------- build/proof-params/parameters.go | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+), 14 deletions(-) create mode 100644 build/proof-params/parameters.go diff --git a/build/parameters.go b/build/parameters.go index 9e60f12a6a3..152c2d86e8a 100644 --- a/build/parameters.go +++ b/build/parameters.go @@ -1,19 +1,8 @@ package build import ( - _ "embed" + proofparams "github.com/filecoin-project/lotus/build/proof-params" ) -//go:embed proof-params/parameters.json -var params []byte - -//go:embed proof-params/srs-inner-product.json -var srs []byte - -func ParametersJSON() []byte { - return params -} - -func SrsJSON() []byte { - return srs -} +var ParametersJSON = proofparams.ParametersJSON +var SrsJSON = proofparams.SrsJSON diff --git a/build/proof-params/parameters.go b/build/proof-params/parameters.go new file mode 100644 index 00000000000..68158391e99 --- /dev/null +++ b/build/proof-params/parameters.go @@ -0,0 +1,19 @@ +package build + +import ( + _ "embed" +) + +//go:embed parameters.json +var params []byte + +//go:embed srs-inner-product.json +var srs []byte + +func ParametersJSON() []byte { + return params +} + +func SrsJSON() []byte { + return srs +} From f6d4075a12350f29a659f45fdbc4f4b22413e92f Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Tue, 18 Jun 2024 22:53:57 -0500 Subject: [PATCH 02/27] createminer --- cli/spcli/actor.go | 117 +---------------------- cli/spcli/createminer/create_miner.go | 131 ++++++++++++++++++++++++++ 2 files changed, 133 insertions(+), 115 deletions(-) create mode 100644 cli/spcli/createminer/create_miner.go diff --git a/cli/spcli/actor.go b/cli/spcli/actor.go index 5db3f7e7dc2..0bafb4af837 100644 --- a/cli/spcli/actor.go +++ b/cli/spcli/actor.go @@ -2,7 +2,6 @@ package spcli import ( 
"bytes" - "context" "fmt" "strconv" @@ -21,19 +20,16 @@ import ( "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/network" - power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power" - power6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/power" lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/cli/spcli/createminer" cliutil "github.com/filecoin-project/lotus/cli/util" "github.com/filecoin-project/lotus/node/impl" ) @@ -1320,119 +1316,10 @@ var ActorNewMinerCmd = &cli.Command{ } ssize := abi.SectorSize(sectorSizeInt) - _, err = CreateStorageMiner(ctx, full, owner, worker, sender, ssize, cctx.Uint64("confidence")) + _, err = createminer.CreateStorageMiner(ctx, full, owner, worker, sender, ssize, cctx.Uint64("confidence")) if err != nil { return err } return nil }, } - -func CreateStorageMiner(ctx context.Context, fullNode v1api.FullNode, owner, worker, sender address.Address, ssize abi.SectorSize, confidence uint64) (address.Address, error) { - // make sure the sender account exists on chain - _, err := fullNode.StateLookupID(ctx, owner, types.EmptyTSK) - if err != nil { - return address.Undef, xerrors.Errorf("sender must exist on chain: %w", err) - } - - // make sure the worker account exists on chain - _, err = fullNode.StateLookupID(ctx, worker, types.EmptyTSK) - if err != nil { - signed, err := fullNode.MpoolPushMessage(ctx, 
&types.Message{ - From: sender, - To: worker, - Value: types.NewInt(0), - }, nil) - if err != nil { - return address.Undef, xerrors.Errorf("push worker init: %w", err) - } - - fmt.Printf("Initializing worker account %s, message: %s\n", worker, signed.Cid()) - fmt.Println("Waiting for confirmation") - - mw, err := fullNode.StateWaitMsg(ctx, signed.Cid(), confidence, 2000, true) - if err != nil { - return address.Undef, xerrors.Errorf("waiting for worker init: %w", err) - } - if mw.Receipt.ExitCode != 0 { - return address.Undef, xerrors.Errorf("initializing worker account failed: exit code %d", mw.Receipt.ExitCode) - } - } - - // make sure the owner account exists on chain - _, err = fullNode.StateLookupID(ctx, owner, types.EmptyTSK) - if err != nil { - signed, err := fullNode.MpoolPushMessage(ctx, &types.Message{ - From: sender, - To: owner, - Value: types.NewInt(0), - }, nil) - if err != nil { - return address.Undef, xerrors.Errorf("push owner init: %w", err) - } - - fmt.Printf("Initializing owner account %s, message: %s\n", worker, signed.Cid()) - fmt.Println("Waiting for confirmation") - - mw, err := fullNode.StateWaitMsg(ctx, signed.Cid(), confidence, 2000, true) - if err != nil { - return address.Undef, xerrors.Errorf("waiting for owner init: %w", err) - } - if mw.Receipt.ExitCode != 0 { - return address.Undef, xerrors.Errorf("initializing owner account failed: exit code %d", mw.Receipt.ExitCode) - } - } - - // Note: the correct thing to do would be to call SealProofTypeFromSectorSize if actors version is v3 or later, but this still works - nv, err := fullNode.StateNetworkVersion(ctx, types.EmptyTSK) - if err != nil { - return address.Undef, xerrors.Errorf("failed to get network version: %w", err) - } - spt, err := lminer.WindowPoStProofTypeFromSectorSize(ssize, nv) - if err != nil { - return address.Undef, xerrors.Errorf("getting post proof type: %w", err) - } - - params, err := actors.SerializeParams(&power6.CreateMinerParams{ - Owner: owner, - Worker: 
worker, - WindowPoStProofType: spt, - }) - if err != nil { - return address.Undef, err - } - - createStorageMinerMsg := &types.Message{ - To: power.Address, - From: sender, - Value: big.Zero(), - - Method: power.Methods.CreateMiner, - Params: params, - } - - signed, err := fullNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil) - if err != nil { - return address.Undef, xerrors.Errorf("pushing createMiner message: %w", err) - } - - fmt.Printf("Pushed CreateMiner message: %s\n", signed.Cid()) - fmt.Println("Waiting for confirmation") - - mw, err := fullNode.StateWaitMsg(ctx, signed.Cid(), confidence, 2000, true) - if err != nil { - return address.Undef, xerrors.Errorf("waiting for createMiner message: %w", err) - } - - if mw.Receipt.ExitCode != 0 { - return address.Undef, xerrors.Errorf("create miner failed: exit code %d", mw.Receipt.ExitCode) - } - - var retval power2.CreateMinerReturn - if err := retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)); err != nil { - return address.Undef, err - } - - fmt.Printf("New miners address is: %s (%s)\n", retval.IDAddress, retval.RobustAddress) - return retval.IDAddress, nil -} diff --git a/cli/spcli/createminer/create_miner.go b/cli/spcli/createminer/create_miner.go new file mode 100644 index 00000000000..0743527ebf6 --- /dev/null +++ b/cli/spcli/createminer/create_miner.go @@ -0,0 +1,131 @@ +package createminer + +import ( + "bytes" + "context" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "golang.org/x/xerrors" + + power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power" + power6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/power" + + lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + 
"github.com/filecoin-project/lotus/chain/types" +) + +func CreateStorageMiner(ctx context.Context, fullNode v1api.FullNode, owner, worker, sender address.Address, ssize abi.SectorSize, confidence uint64) (address.Address, error) { + // make sure the sender account exists on chain + _, err := fullNode.StateLookupID(ctx, owner, types.EmptyTSK) + if err != nil { + return address.Undef, xerrors.Errorf("sender must exist on chain: %w", err) + } + + // make sure the worker account exists on chain + _, err = fullNode.StateLookupID(ctx, worker, types.EmptyTSK) + if err != nil { + signed, err := fullNode.MpoolPushMessage(ctx, &types.Message{ + From: sender, + To: worker, + Value: types.NewInt(0), + }, nil) + if err != nil { + return address.Undef, xerrors.Errorf("push worker init: %w", err) + } + + fmt.Printf("Initializing worker account %s, message: %s\n", worker, signed.Cid()) + fmt.Println("Waiting for confirmation") + + mw, err := fullNode.StateWaitMsg(ctx, signed.Cid(), confidence, 2000, true) + if err != nil { + return address.Undef, xerrors.Errorf("waiting for worker init: %w", err) + } + if mw.Receipt.ExitCode != 0 { + return address.Undef, xerrors.Errorf("initializing worker account failed: exit code %d", mw.Receipt.ExitCode) + } + } + + // make sure the owner account exists on chain + _, err = fullNode.StateLookupID(ctx, owner, types.EmptyTSK) + if err != nil { + signed, err := fullNode.MpoolPushMessage(ctx, &types.Message{ + From: sender, + To: owner, + Value: types.NewInt(0), + }, nil) + if err != nil { + return address.Undef, xerrors.Errorf("push owner init: %w", err) + } + + fmt.Printf("Initializing owner account %s, message: %s\n", worker, signed.Cid()) + fmt.Println("Waiting for confirmation") + + mw, err := fullNode.StateWaitMsg(ctx, signed.Cid(), confidence, 2000, true) + if err != nil { + return address.Undef, xerrors.Errorf("waiting for owner init: %w", err) + } + if mw.Receipt.ExitCode != 0 { + return address.Undef, xerrors.Errorf("initializing owner 
account failed: exit code %d", mw.Receipt.ExitCode) + } + } + + // Note: the correct thing to do would be to call SealProofTypeFromSectorSize if actors version is v3 or later, but this still works + nv, err := fullNode.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return address.Undef, xerrors.Errorf("failed to get network version: %w", err) + } + spt, err := lminer.WindowPoStProofTypeFromSectorSize(ssize, nv) + if err != nil { + return address.Undef, xerrors.Errorf("getting post proof type: %w", err) + } + + params, err := actors.SerializeParams(&power6.CreateMinerParams{ + Owner: owner, + Worker: worker, + WindowPoStProofType: spt, + }) + if err != nil { + return address.Undef, err + } + + createStorageMinerMsg := &types.Message{ + To: power.Address, + From: sender, + Value: big.Zero(), + + Method: power.Methods.CreateMiner, + Params: params, + } + + signed, err := fullNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil) + if err != nil { + return address.Undef, xerrors.Errorf("pushing createMiner message: %w", err) + } + + fmt.Printf("Pushed CreateMiner message: %s\n", signed.Cid()) + fmt.Println("Waiting for confirmation") + + mw, err := fullNode.StateWaitMsg(ctx, signed.Cid(), confidence, 2000, true) + if err != nil { + return address.Undef, xerrors.Errorf("waiting for createMiner message: %w", err) + } + + if mw.Receipt.ExitCode != 0 { + return address.Undef, xerrors.Errorf("create miner failed: exit code %d", mw.Receipt.ExitCode) + } + + var retval power2.CreateMinerReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)); err != nil { + return address.Undef, err + } + + fmt.Printf("New miners address is: %s (%s)\n", retval.IDAddress, retval.RobustAddress) + return retval.IDAddress, nil +} From b10c5ff79e864d7b8d91da837b3bc265a3f6fa59 Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Wed, 19 Jun 2024 23:23:55 -0500 Subject: [PATCH 03/27] const factored from /build and types updated to use it --- 
build/buildconstants/drand.go | 79 ++++++++++++ build/buildconstants/params.go | 25 ++++ build/{ => buildconstants}/params_2k.go | 4 +- .../{ => buildconstants}/params_butterfly.go | 4 +- build/{ => buildconstants}/params_calibnet.go | 4 +- build/{ => buildconstants}/params_debug.go | 2 +- build/{ => buildconstants}/params_interop.go | 5 +- build/{ => buildconstants}/params_mainnet.go | 6 +- build/buildconstants/params_shared_vals.go | 112 ++++++++++++++++++ .../{ => buildconstants}/params_testground.go | 18 +-- build/buildconstants/shared_funcs.go | 32 +++++ build/drand.go | 82 ++----------- build/parameters.go | 78 ++++++++++++ build/params_shared_funcs.go | 26 +--- build/params_shared_vals.go | 96 +++++---------- build/params_testground_vals.go | 10 ++ build/version.go | 22 ++-- chain/messagepool/selection_test.go | 2 +- chain/types/bigint.go | 6 +- chain/types/electionproof.go | 6 +- chain/types/ethtypes/eth_transactions.go | 6 +- chain/types/ethtypes/eth_types.go | 4 +- chain/types/fil.go | 6 +- chain/types/message.go | 8 +- chain/types/mock/chain.go | 6 +- cmd/curio/guidedsetup/guidedsetup.go | 4 +- itests/curio_test.go | 6 +- 27 files changed, 436 insertions(+), 223 deletions(-) create mode 100644 build/buildconstants/drand.go create mode 100644 build/buildconstants/params.go rename build/{ => buildconstants}/params_2k.go (97%) rename build/{ => buildconstants}/params_butterfly.go (95%) rename build/{ => buildconstants}/params_calibnet.go (96%) rename build/{ => buildconstants}/params_debug.go (87%) rename build/{ => buildconstants}/params_interop.go (97%) rename build/{ => buildconstants}/params_mainnet.go (95%) create mode 100644 build/buildconstants/params_shared_vals.go rename build/{ => buildconstants}/params_testground.go (91%) create mode 100644 build/buildconstants/shared_funcs.go create mode 100644 build/params_testground_vals.go diff --git a/build/buildconstants/drand.go b/build/buildconstants/drand.go new file mode 100644 index 
00000000000..0cff7e92ef6 --- /dev/null +++ b/build/buildconstants/drand.go @@ -0,0 +1,79 @@ +package buildconstants + +import ( + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +type DrandEnum int + +const ( + DrandMainnet DrandEnum = iota + 1 + DrandTestnet + DrandDevnet + DrandLocalnet + DrandIncentinet + DrandQuicknet +) + +var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ + DrandMainnet: { + Servers: []string{ + "https://api.drand.sh", + "https://api2.drand.sh", + "https://api3.drand.sh", + "https://drand.cloudflare.com", + "https://api.drand.secureweb3.com:6875", // Storswift + }, + Relays: []string{ + "/dnsaddr/api.drand.sh/", + "/dnsaddr/api2.drand.sh/", + "/dnsaddr/api3.drand.sh/", + }, + IsChained: true, + ChainInfoJSON: `{"public_key":"868f005eb8e6e4ca0a47c8a77ceaa5309a47978a7c71bc5cce96366b5d7a569937c529eeda66c7293784a9402801af31","period":30,"genesis_time":1595431050,"hash":"8990e7a9aaed2ffed73dbd7092123d6f289930540d7651336225dc172e51b2ce","groupHash":"176f93498eac9ca337150b46d21dd58673ea4e3581185f869672e59fa4cb390a"}`, + }, + DrandQuicknet: { + Servers: []string{ + "https://api.drand.sh", + "https://api2.drand.sh", + "https://api3.drand.sh", + "https://drand.cloudflare.com", + "https://api.drand.secureweb3.com:6875", // Storswift + }, + Relays: []string{ + "/dnsaddr/api.drand.sh/", + "/dnsaddr/api2.drand.sh/", + "/dnsaddr/api3.drand.sh/", + }, + IsChained: false, + ChainInfoJSON: `{"public_key":"83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a","period":3,"genesis_time":1692803367,"hash":"52db9ba70e0cc0f6eaf7803dd07447a1f5477735fd3f661792ba94600c84e971","groupHash":"f477d5c89f21a17c863a7f937c6a6d15859414d2be09cd448d4279af331c5d3e","schemeID":"bls-unchained-g1-rfc9380","metadata":{"beaconID":"quicknet"}}`, + }, + DrandTestnet: { + Servers: []string{ + "https://pl-eu.testnet.drand.sh", + 
"https://pl-us.testnet.drand.sh", + }, + Relays: []string{ + "/dnsaddr/pl-eu.testnet.drand.sh/", + "/dnsaddr/pl-us.testnet.drand.sh/", + }, + IsChained: true, + ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`, + }, + DrandDevnet: { + Servers: []string{ + "https://dev1.drand.sh", + "https://dev2.drand.sh", + }, + Relays: []string{ + "/dnsaddr/dev1.drand.sh/", + "/dnsaddr/dev2.drand.sh/", + }, + IsChained: true, + ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`, + }, + DrandIncentinet: { + IsChained: true, + ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`, + }, +} diff --git a/build/buildconstants/params.go b/build/buildconstants/params.go new file mode 100644 index 00000000000..bdee1add2ad --- /dev/null +++ b/build/buildconstants/params.go @@ -0,0 +1,25 @@ +package buildconstants + +import "github.com/filecoin-project/go-state-types/network" + +var BuildType int + +const ( + BuildDefault = 0 + BuildMainnet = 0x1 + Build2k = 0x2 + BuildDebug = 0x3 + BuildCalibnet = 0x4 + BuildInteropnet = 0x5 + BuildButterflynet = 0x7 +) + +var Devnet = true + +// Used by tests and some obscure tooling +/* inline-gen template +const TestNetworkVersion = 
network.Version{{.latestNetworkVersion}} +/* inline-gen start */ +const TestNetworkVersion = network.Version23 + +/* inline-gen end */ diff --git a/build/params_2k.go b/build/buildconstants/params_2k.go similarity index 97% rename from build/params_2k.go rename to build/buildconstants/params_2k.go index 7c754611eb4..1eb8a167dcc 100644 --- a/build/params_2k.go +++ b/build/buildconstants/params_2k.go @@ -1,7 +1,7 @@ //go:build debug || 2k // +build debug 2k -package build +package buildconstants import ( "os" @@ -10,7 +10,6 @@ import ( "github.com/ipfs/go-cid" "github.com/filecoin-project/go-state-types/abi" - actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/chain/actors/policy" @@ -20,7 +19,6 @@ const BootstrappersFile = "" const GenesisFile = "" var NetworkBundle = "devnet" -var BundleOverrides map[actorstypes.Version]string var ActorDebugging = true var GenesisNetworkVersion = network.Version22 diff --git a/build/params_butterfly.go b/build/buildconstants/params_butterfly.go similarity index 95% rename from build/params_butterfly.go rename to build/buildconstants/params_butterfly.go index fee1a434326..3db522117f9 100644 --- a/build/params_butterfly.go +++ b/build/buildconstants/params_butterfly.go @@ -1,14 +1,13 @@ //go:build butterflynet // +build butterflynet -package build +package buildconstants import ( "github.com/ipfs/go-cid" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/network" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -23,7 +22,6 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ const GenesisNetworkVersion = network.Version21 var NetworkBundle = "butterflynet" -var BundleOverrides map[actorstypes.Version]string var ActorDebugging = false const BootstrappersFile = 
"butterflynet.pi" diff --git a/build/params_calibnet.go b/build/buildconstants/params_calibnet.go similarity index 96% rename from build/params_calibnet.go rename to build/buildconstants/params_calibnet.go index 889f6519635..c27230545d0 100644 --- a/build/params_calibnet.go +++ b/build/buildconstants/params_calibnet.go @@ -1,7 +1,7 @@ //go:build calibnet // +build calibnet -package build +package buildconstants import ( "os" @@ -11,7 +11,6 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/network" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -26,7 +25,6 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ const GenesisNetworkVersion = network.Version0 var NetworkBundle = "calibrationnet" -var BundleOverrides map[actorstypes.Version]string var ActorDebugging = false const BootstrappersFile = "calibnet.pi" diff --git a/build/params_debug.go b/build/buildconstants/params_debug.go similarity index 87% rename from build/params_debug.go rename to build/buildconstants/params_debug.go index e977cda0562..29facea2ac5 100644 --- a/build/params_debug.go +++ b/build/buildconstants/params_debug.go @@ -1,7 +1,7 @@ //go:build debug // +build debug -package build +package buildconstants func init() { InsecurePoStValidation = true diff --git a/build/params_interop.go b/build/buildconstants/params_interop.go similarity index 97% rename from build/params_interop.go rename to build/buildconstants/params_interop.go index 18e4a464be0..577238f7128 100644 --- a/build/params_interop.go +++ b/build/buildconstants/params_interop.go @@ -1,9 +1,10 @@ //go:build interopnet // +build interopnet -package build +package buildconstants import ( + "log" "os" "strconv" @@ -11,7 +12,6 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - actorstypes 
"github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/network" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -19,7 +19,6 @@ import ( ) var NetworkBundle = "caterpillarnet" -var BundleOverrides map[actorstypes.Version]string var ActorDebugging = false const BootstrappersFile = "interopnet.pi" diff --git a/build/params_mainnet.go b/build/buildconstants/params_mainnet.go similarity index 95% rename from build/params_mainnet.go rename to build/buildconstants/params_mainnet.go index e79acdca33d..1be7639771f 100644 --- a/build/params_mainnet.go +++ b/build/buildconstants/params_mainnet.go @@ -1,7 +1,7 @@ //go:build !debug && !2k && !testground && !calibnet && !butterflynet && !interopnet // +build !debug,!2k,!testground,!calibnet,!butterflynet,!interopnet -package build +package buildconstants import ( "math" @@ -10,7 +10,6 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/network" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" ) @@ -23,9 +22,6 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ var NetworkBundle = "mainnet" -// NOTE: DO NOT change this unless you REALLY know what you're doing. This is consensus critical. -var BundleOverrides map[actorstypes.Version]string - // NOTE: DO NOT change this unless you REALLY know what you're doing. This is consensus critical. 
const ActorDebugging = false diff --git a/build/buildconstants/params_shared_vals.go b/build/buildconstants/params_shared_vals.go new file mode 100644 index 00000000000..d2367aae6f5 --- /dev/null +++ b/build/buildconstants/params_shared_vals.go @@ -0,0 +1,112 @@ +//go:build !testground +// +build !testground + +package buildconstants + +import ( + "math/big" + "os" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + + "github.com/filecoin-project/lotus/chain/actors/policy" +) + +// ///// +// Storage + +const UnixfsChunkSize uint64 = 1 << 20 +const UnixfsLinksPerLevel = 1024 + +// ///// +// Consensus / Network + +const AllowableClockDriftSecs = uint64(1) + +// Blocks (e) +var BlocksPerEpoch = uint64(builtin2.ExpectedLeadersPerEpoch) + +// Epochs +const MessageConfidence = uint64(5) + +// constants for Weight calculation +// The ratio of weight contributed by short-term vs long-term factors in a given round +const WRatioNum = int64(1) +const WRatioDen = uint64(2) + +// ///// +// Proofs + +// Epochs +// TODO: unused +const SealRandomnessLookback = policy.SealRandomnessLookback + +// ///// +// Mining + +// Epochs +const TicketRandomnessLookback = abi.ChainEpoch(1) + +// ///// +// Address + +const AddressMainnetEnvVar = "_mainnet_" + +// the 'f' prefix doesn't matter +var ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a") + +const FilBase = uint64(2_000_000_000) +const FilAllocStorageMining = uint64(1_100_000_000) + +const FilecoinPrecision = uint64(1_000_000_000_000_000_000) +const FilReserved = uint64(300_000_000) + +var InitialRewardBalance *big.Int +var InitialFilReserved *big.Int + +// TODO: Move other important consts here + +func init() { + InitialRewardBalance = big.NewInt(int64(FilAllocStorageMining)) + InitialRewardBalance = InitialRewardBalance.Mul(InitialRewardBalance, 
big.NewInt(int64(FilecoinPrecision))) + + InitialFilReserved = big.NewInt(int64(FilReserved)) + InitialFilReserved = InitialFilReserved.Mul(InitialFilReserved, big.NewInt(int64(FilecoinPrecision))) + + if os.Getenv("LOTUS_ADDRESS_TYPE") == AddressMainnetEnvVar { + SetAddressNetwork(address.Mainnet) + } +} + +// Sync +const BadBlockCacheSize = 1 << 15 + +// assuming 4000 messages per round, this lets us not lose any messages across a +// 10 block reorg. +const BlsSignatureCacheSize = 40000 + +// Size of signature verification cache +// 32k keeps the cache around 10MB in size, max +const VerifSigCacheSize = 32000 + +// /////// +// Limits + +// TODO: If this is gonna stay, it should move to specs-actors +const BlockMessageLimit = 10000 + +var BlockGasLimit = int64(10_000_000_000) +var BlockGasTarget = BlockGasLimit / 2 + +const BaseFeeMaxChangeDenom = 8 // 12.5% +const InitialBaseFee = 100e6 +const MinimumBaseFee = 100 +const PackingEfficiencyNum = 4 +const PackingEfficiencyDenom = 5 + +// revive:disable-next-line:exported +// Actor consts +// TODO: pieceSize unused from actors +var MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0) diff --git a/build/params_testground.go b/build/buildconstants/params_testground.go similarity index 91% rename from build/params_testground.go rename to build/buildconstants/params_testground.go index beb296bd92e..b2d657cffac 100644 --- a/build/params_testground.go +++ b/build/buildconstants/params_testground.go @@ -5,7 +5,7 @@ // // Its purpose is to unlock various degrees of flexibility and parametrization // when writing Testground plans for Lotus. 
-package build +package buildconstants import ( "math/big" @@ -13,7 +13,6 @@ import ( "github.com/ipfs/go-cid" "github.com/filecoin-project/go-state-types/abi" - actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/network" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -55,8 +54,6 @@ var ( BlsSignatureCacheSize = 40000 VerifSigCacheSize = 32000 - SealRandomnessLookback = policy.SealRandomnessLookback - TicketRandomnessLookback = abi.ChainEpoch(1) FilBase uint64 = 2_000_000_000 @@ -77,10 +74,6 @@ var ( return v }() - // Actor consts - // TODO: pieceSize unused from actors - MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0) - PackingEfficiencyNum int64 = 4 PackingEfficiencyDenom int64 = 5 @@ -124,13 +117,11 @@ var ( GenesisNetworkVersion = network.Version0 NetworkBundle = "devnet" - BundleOverrides map[actorstypes.Version]string ActorDebugging = true NewestNetworkVersion = network.Version16 ActorUpgradeNetworkVersion = network.Version16 - Devnet = true ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a") WhitelistedBlock = cid.Undef @@ -138,11 +129,14 @@ var ( GenesisFile = "" ) -const Finality = policy.ChainFinality -const ForkLengthThreshold = Finality +func init() { + Devnet = true +} const BootstrapPeerThreshold = 1 // ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint. 
// As per https://github.com/ethereum-lists/chains const Eip155ChainId = 31415926 + +var MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0) diff --git a/build/buildconstants/shared_funcs.go b/build/buildconstants/shared_funcs.go new file mode 100644 index 00000000000..13682879e93 --- /dev/null +++ b/build/buildconstants/shared_funcs.go @@ -0,0 +1,32 @@ +package buildconstants + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" +) + +// moved from now-defunct build/paramfetch.go +var log = logging.Logger("build/buildtypes") + +func SetAddressNetwork(n address.Network) { + address.CurrentNetwork = n +} + +func MustParseAddress(addr string) address.Address { + ret, err := address.NewFromString(addr) + if err != nil { + panic(err) + } + + return ret +} + +func MustParseCid(c string) cid.Cid { + ret, err := cid.Decode(c) + if err != nil { + panic(err) + } + + return ret +} diff --git a/build/drand.go b/build/drand.go index c4ba4b3b7af..35e7e2d9c27 100644 --- a/build/drand.go +++ b/build/drand.go @@ -3,15 +3,16 @@ package build import ( "sort" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/node/modules/dtypes" ) -type DrandEnum int +var DrandSchedule = buildconstants.DrandSchedule func DrandConfigSchedule() dtypes.DrandSchedule { out := dtypes.DrandSchedule{} for start, network := range DrandSchedule { - out = append(out, dtypes.DrandPoint{Start: start, Config: DrandConfigs[network]}) + out = append(out, dtypes.DrandPoint{Start: start, Config: buildconstants.DrandConfigs[network]}) } sort.Slice(out, func(i, j int) bool { @@ -21,74 +22,15 @@ func DrandConfigSchedule() dtypes.DrandSchedule { return out } +type DrandEnum = buildconstants.DrandEnum + const ( - DrandMainnet DrandEnum = iota + 1 - DrandTestnet - DrandDevnet - DrandLocalnet - DrandIncentinet - DrandQuicknet + DrandMainnet = buildconstants.DrandMainnet + DrandTestnet = 
buildconstants.DrandTestnet + DrandDevnet = buildconstants.DrandDevnet + DrandLocalnet = buildconstants.DrandLocalnet + DrandIncentinet = buildconstants.DrandIncentinet + DrandQuicknet = buildconstants.DrandQuicknet ) -var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ - DrandMainnet: { - Servers: []string{ - "https://api.drand.sh", - "https://api2.drand.sh", - "https://api3.drand.sh", - "https://drand.cloudflare.com", - "https://api.drand.secureweb3.com:6875", // Storswift - }, - Relays: []string{ - "/dnsaddr/api.drand.sh/", - "/dnsaddr/api2.drand.sh/", - "/dnsaddr/api3.drand.sh/", - }, - IsChained: true, - ChainInfoJSON: `{"public_key":"868f005eb8e6e4ca0a47c8a77ceaa5309a47978a7c71bc5cce96366b5d7a569937c529eeda66c7293784a9402801af31","period":30,"genesis_time":1595431050,"hash":"8990e7a9aaed2ffed73dbd7092123d6f289930540d7651336225dc172e51b2ce","groupHash":"176f93498eac9ca337150b46d21dd58673ea4e3581185f869672e59fa4cb390a"}`, - }, - DrandQuicknet: { - Servers: []string{ - "https://api.drand.sh", - "https://api2.drand.sh", - "https://api3.drand.sh", - "https://drand.cloudflare.com", - "https://api.drand.secureweb3.com:6875", // Storswift - }, - Relays: []string{ - "/dnsaddr/api.drand.sh/", - "/dnsaddr/api2.drand.sh/", - "/dnsaddr/api3.drand.sh/", - }, - IsChained: false, - ChainInfoJSON: `{"public_key":"83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a","period":3,"genesis_time":1692803367,"hash":"52db9ba70e0cc0f6eaf7803dd07447a1f5477735fd3f661792ba94600c84e971","groupHash":"f477d5c89f21a17c863a7f937c6a6d15859414d2be09cd448d4279af331c5d3e","schemeID":"bls-unchained-g1-rfc9380","metadata":{"beaconID":"quicknet"}}`, - }, - DrandTestnet: { - Servers: []string{ - "https://pl-eu.testnet.drand.sh", - "https://pl-us.testnet.drand.sh", - }, - Relays: []string{ - "/dnsaddr/pl-eu.testnet.drand.sh/", - 
"/dnsaddr/pl-us.testnet.drand.sh/", - }, - IsChained: true, - ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`, - }, - DrandDevnet: { - Servers: []string{ - "https://dev1.drand.sh", - "https://dev2.drand.sh", - }, - Relays: []string{ - "/dnsaddr/dev1.drand.sh/", - "/dnsaddr/dev2.drand.sh/", - }, - IsChained: true, - ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`, - }, - DrandIncentinet: { - IsChained: true, - ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`, - }, -} +var DrandConfigs = buildconstants.DrandConfigs diff --git a/build/parameters.go b/build/parameters.go index 152c2d86e8a..6f029fa3602 100644 --- a/build/parameters.go +++ b/build/parameters.go @@ -1,8 +1,86 @@ package build import ( + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + + "github.com/filecoin-project/lotus/build/buildconstants" proofparams "github.com/filecoin-project/lotus/build/proof-params" + "github.com/filecoin-project/lotus/chain/actors/policy" ) var ParametersJSON = proofparams.ParametersJSON var SrsJSON = proofparams.SrsJSON + +// NOTE: DO NOT change this unless you REALLY know what you're doing. 
This is consensus critical. +var BundleOverrides map[actorstypes.Version]string + +var BootstrappersFile = buildconstants.BootstrappersFile + +var GenesisFile = buildconstants.GenesisFile + +var NetworkBundle = buildconstants.NetworkBundle +var ActorDebugging = buildconstants.ActorDebugging + +var GenesisNetworkVersion = buildconstants.GenesisNetworkVersion + +var UpgradeBreezeHeight abi.ChainEpoch = buildconstants.UpgradeBreezeHeight + +var BreezeGasTampingDuration abi.ChainEpoch = buildconstants.BreezeGasTampingDuration + +// upgrade heights +var UpgradeSmokeHeight abi.ChainEpoch = buildconstants.UpgradeSmokeHeight +var UpgradeIgnitionHeight abi.ChainEpoch = buildconstants.UpgradeIgnitionHeight +var UpgradeRefuelHeight abi.ChainEpoch = buildconstants.UpgradeRefuelHeight +var UpgradeTapeHeight abi.ChainEpoch = buildconstants.UpgradeTapeHeight +var UpgradeAssemblyHeight abi.ChainEpoch = buildconstants.UpgradeAssemblyHeight +var UpgradeLiftoffHeight abi.ChainEpoch = buildconstants.UpgradeLiftoffHeight +var UpgradeKumquatHeight abi.ChainEpoch = buildconstants.UpgradeKumquatHeight +var UpgradeCalicoHeight abi.ChainEpoch = buildconstants.UpgradeCalicoHeight +var UpgradePersianHeight abi.ChainEpoch = buildconstants.UpgradePersianHeight +var UpgradeOrangeHeight abi.ChainEpoch = buildconstants.UpgradeOrangeHeight +var UpgradeClausHeight abi.ChainEpoch = buildconstants.UpgradeClausHeight +var UpgradeTrustHeight abi.ChainEpoch = buildconstants.UpgradeTrustHeight +var UpgradeNorwegianHeight abi.ChainEpoch = buildconstants.UpgradeNorwegianHeight +var UpgradeTurboHeight abi.ChainEpoch = buildconstants.UpgradeTurboHeight +var UpgradeHyperdriveHeight abi.ChainEpoch = buildconstants.UpgradeHyperdriveHeight +var UpgradeChocolateHeight abi.ChainEpoch = buildconstants.UpgradeChocolateHeight +var UpgradeOhSnapHeight abi.ChainEpoch = buildconstants.UpgradeOhSnapHeight +var UpgradeSkyrHeight abi.ChainEpoch = buildconstants.UpgradeSkyrHeight +var UpgradeSharkHeight abi.ChainEpoch = 
buildconstants.UpgradeSharkHeight +var UpgradeHyggeHeight abi.ChainEpoch = buildconstants.UpgradeHyggeHeight +var UpgradeLightningHeight abi.ChainEpoch = buildconstants.UpgradeLightningHeight +var UpgradeThunderHeight abi.ChainEpoch = buildconstants.UpgradeThunderHeight +var UpgradeWatermelonHeight abi.ChainEpoch = buildconstants.UpgradeWatermelonHeight +var UpgradeDragonHeight abi.ChainEpoch = buildconstants.UpgradeDragonHeight +var UpgradePhoenixHeight abi.ChainEpoch = buildconstants.UpgradePhoenixHeight +var UpgradeAussieHeight abi.ChainEpoch = buildconstants.UpgradeAussieHeight + +// This fix upgrade only ran on calibrationnet +var UpgradeWatermelonFixHeight abi.ChainEpoch = buildconstants.UpgradeWatermelonFixHeight + +// This fix upgrade only ran on calibrationnet +var UpgradeWatermelonFix2Height abi.ChainEpoch = buildconstants.UpgradeWatermelonFix2Height + +// This fix upgrade only ran on calibrationnet +var UpgradeCalibrationDragonFixHeight abi.ChainEpoch = buildconstants.UpgradeCalibrationDragonFixHeight + +var SupportedProofTypes = buildconstants.SupportedProofTypes +var ConsensusMinerMinPower = buildconstants.ConsensusMinerMinPower +var PreCommitChallengeDelay = buildconstants.PreCommitChallengeDelay + +var BlockDelaySecs = buildconstants.BlockDelaySecs + +var PropagationDelaySecs = buildconstants.PropagationDelaySecs + +var EquivocationDelaySecs = buildconstants.EquivocationDelaySecs + +const BootstrapPeerThreshold = buildconstants.BootstrapPeerThreshold + +// ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint. 
+// As per https://github.com/ethereum-lists/chains +const Eip155ChainId = buildconstants.Eip155ChainId + +var WhitelistedBlock = buildconstants.WhitelistedBlock + +const Finality = policy.ChainFinality diff --git a/build/params_shared_funcs.go b/build/params_shared_funcs.go index d117264ab6f..4d5aa5bf327 100644 --- a/build/params_shared_funcs.go +++ b/build/params_shared_funcs.go @@ -1,11 +1,9 @@ package build import ( - "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p/core/protocol" - "github.com/filecoin-project/go-address" - + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/node/modules/dtypes" ) @@ -28,24 +26,8 @@ func DhtProtocolName(netName dtypes.NetworkName) protocol.ID { return protocol.ID("/fil/kad/" + string(netName)) } -func SetAddressNetwork(n address.Network) { - address.CurrentNetwork = n -} - -func MustParseAddress(addr string) address.Address { - ret, err := address.NewFromString(addr) - if err != nil { - panic(err) - } - - return ret -} +var SetAddressNetwork = buildconstants.SetAddressNetwork -func MustParseCid(c string) cid.Cid { - ret, err := cid.Decode(c) - if err != nil { - panic(err) - } +var MustParseAddress = buildconstants.MustParseAddress - return ret -} +var MustParseCid = buildconstants.MustParseCid diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index 25cfdd2ea32..399ab79ec2b 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -1,85 +1,55 @@ -//go:build !testground -// +build !testground - package build import ( "math/big" - "os" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/network" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/build/buildconstants" ) // ///// // Storage -const UnixfsChunkSize uint64 = 1 << 20 -const 
UnixfsLinksPerLevel = 1024 +var UnixfsChunkSize uint64 = buildconstants.UnixfsChunkSize +var UnixfsLinksPerLevel = buildconstants.UnixfsLinksPerLevel // ///// // Consensus / Network -const AllowableClockDriftSecs = uint64(1) - -// Used by tests and some obscure tooling -/* inline-gen template -const TestNetworkVersion = network.Version{{.latestNetworkVersion}} -/* inline-gen start */ -const TestNetworkVersion = network.Version23 - -/* inline-gen end */ +var AllowableClockDriftSecs = buildconstants.AllowableClockDriftSecs // Epochs -const ForkLengthThreshold = Finality +var ForkLengthThreshold = Finality // Blocks (e) -var BlocksPerEpoch = uint64(builtin2.ExpectedLeadersPerEpoch) +var BlocksPerEpoch = buildconstants.BlocksPerEpoch // Epochs -const Finality = policy.ChainFinality -const MessageConfidence = uint64(5) +var MessageConfidence = buildconstants.MessageConfidence // constants for Weight calculation // The ratio of weight contributed by short-term vs long-term factors in a given round -const WRatioNum = int64(1) -const WRatioDen = uint64(2) - -// ///// -// Proofs - -// Epochs -// TODO: unused -const SealRandomnessLookback = policy.SealRandomnessLookback +var WRatioNum = buildconstants.WRatioNum +var WRatioDen = buildconstants.WRatioDen // ///// // Mining // Epochs -const TicketRandomnessLookback = abi.ChainEpoch(1) - -// ///// -// Address - -const AddressMainnetEnvVar = "_mainnet_" +var TicketRandomnessLookback = buildconstants.TicketRandomnessLookback // the 'f' prefix doesn't matter -var ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a") +var ZeroAddress = buildconstants.ZeroAddress // ///// // Devnet settings -var Devnet = true +var Devnet = buildconstants.Devnet -const FilBase = uint64(2_000_000_000) -const FilAllocStorageMining = uint64(1_100_000_000) +var FilBase = buildconstants.FilBase +var FilAllocStorageMining = buildconstants.FilAllocStorageMining -const FilecoinPrecision = 
uint64(1_000_000_000_000_000_000) -const FilReserved = uint64(300_000_000) +var FilecoinPrecision = buildconstants.FilecoinPrecision +var FilReserved = buildconstants.FilReserved var InitialRewardBalance *big.Int var InitialFilReserved *big.Int @@ -92,39 +62,35 @@ func init() { InitialFilReserved = big.NewInt(int64(FilReserved)) InitialFilReserved = InitialFilReserved.Mul(InitialFilReserved, big.NewInt(int64(FilecoinPrecision))) - - if os.Getenv("LOTUS_ADDRESS_TYPE") == AddressMainnetEnvVar { - SetAddressNetwork(address.Mainnet) - } } // Sync -const BadBlockCacheSize = 1 << 15 +var BadBlockCacheSize = buildconstants.BadBlockCacheSize // assuming 4000 messages per round, this lets us not lose any messages across a // 10 block reorg. -const BlsSignatureCacheSize = 40000 +var BlsSignatureCacheSize = buildconstants.BlsSignatureCacheSize // Size of signature verification cache // 32k keeps the cache around 10MB in size, max -const VerifSigCacheSize = 32000 +var VerifSigCacheSize = buildconstants.VerifSigCacheSize // /////// // Limits // TODO: If this is gonna stay, it should move to specs-actors -const BlockMessageLimit = 10000 +var BlockMessageLimit = buildconstants.BlockMessageLimit + +var BlockGasLimit = buildconstants.BlockGasLimit +var BlockGasTarget = buildconstants.BlockGasTarget -var BlockGasLimit = int64(10_000_000_000) -var BlockGasTarget = BlockGasLimit / 2 +var BaseFeeMaxChangeDenom int64 = buildconstants.BaseFeeMaxChangeDenom +var InitialBaseFee int64 = buildconstants.InitialBaseFee +var MinimumBaseFee int64 = buildconstants.MinimumBaseFee +var PackingEfficiencyNum int64 = buildconstants.PackingEfficiencyNum +var PackingEfficiencyDenom int64 = buildconstants.PackingEfficiencyDenom -const BaseFeeMaxChangeDenom = 8 // 12.5% -const InitialBaseFee = 100e6 -const MinimumBaseFee = 100 -const PackingEfficiencyNum = 4 -const PackingEfficiencyDenom = 5 +var MinDealDuration = buildconstants.MinDealDuration +var MaxDealDuration = buildconstants.MaxDealDuration -// 
revive:disable-next-line:exported -// Actor consts -// TODO: pieceSize unused from actors -var MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0) +const TestNetworkVersion = buildconstants.TestNetworkVersion diff --git a/build/params_testground_vals.go b/build/params_testground_vals.go new file mode 100644 index 00000000000..8b5c140a3a1 --- /dev/null +++ b/build/params_testground_vals.go @@ -0,0 +1,10 @@ +//go:build testground +// +build testground + +package build + +import "github.com/filecoin-project/lotus/chain/actors/policy" + +// Actor consts +// TODO: pieceSize unused from actors +var MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0) diff --git a/build/version.go b/build/version.go index 2244c98d871..d1b19bd0de3 100644 --- a/build/version.go +++ b/build/version.go @@ -1,18 +1,22 @@ package build -import "os" +import ( + "os" + + "github.com/filecoin-project/lotus/build/buildconstants" +) var CurrentCommit string -var BuildType int +var BuildType = buildconstants.BuildType const ( - BuildDefault = 0 - BuildMainnet = 0x1 - Build2k = 0x2 - BuildDebug = 0x3 - BuildCalibnet = 0x4 - BuildInteropnet = 0x5 - BuildButterflynet = 0x7 + BuildDefault = buildconstants.BuildDefault + BuildMainnet = buildconstants.BuildMainnet + Build2k = buildconstants.Build2k + BuildDebug = buildconstants.BuildDebug + BuildCalibnet = buildconstants.BuildCalibnet + BuildInteropnet = buildconstants.BuildInteropnet + BuildButterflynet = buildconstants.BuildButterflynet ) func BuildTypeString() string { diff --git a/chain/messagepool/selection_test.go b/chain/messagepool/selection_test.go index 5b46fadfbc6..ff98383633e 100644 --- a/chain/messagepool/selection_test.go +++ b/chain/messagepool/selection_test.go @@ -1598,7 +1598,7 @@ readLoop: mp, tma := makeTestMpool() - block := tma.nextBlockWithHeight(build.UpgradeBreezeHeight + 10) + block := tma.nextBlockWithHeight(uint64(build.UpgradeBreezeHeight + 10)) ts := mock.TipSet(block) tma.applyBlock(t, block) diff --git 
a/chain/types/bigint.go b/chain/types/bigint.go index 72ef5212862..ca274b7e11f 100644 --- a/chain/types/bigint.go +++ b/chain/types/bigint.go @@ -6,12 +6,12 @@ import ( big2 "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" ) const BigIntMaxSerializedLen = 128 // is this big enough? or too big? -var TotalFilecoinInt = FromFil(build.FilBase) +var TotalFilecoinInt = FromFil(buildconstants.FilBase) var EmptyInt = BigInt{} @@ -22,7 +22,7 @@ func NewInt(i uint64) BigInt { } func FromFil(i uint64) BigInt { - return BigMul(NewInt(i), NewInt(build.FilecoinPrecision)) + return BigMul(NewInt(i), NewInt(buildconstants.FilecoinPrecision)) } func BigFromBytes(b []byte) BigInt { diff --git a/chain/types/electionproof.go b/chain/types/electionproof.go index f3168becb8f..1d447e4cfcf 100644 --- a/chain/types/electionproof.go +++ b/chain/types/electionproof.go @@ -5,7 +5,7 @@ import ( "github.com/minio/blake2b-simd" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" ) type ElectionProof struct { @@ -100,14 +100,14 @@ func polyval(p []*big.Int, x *big.Int) *big.Int { // computes lambda in Q.256 func lambda(power, totalPower *big.Int) *big.Int { - blocksPerEpoch := NewInt(build.BlocksPerEpoch) + blocksPerEpoch := NewInt(buildconstants.BlocksPerEpoch) lam := new(big.Int).Mul(power, blocksPerEpoch.Int) // Q.0 lam = lam.Lsh(lam, precision) // Q.256 lam = lam.Div(lam /* Q.256 */, totalPower /* Q.0 */) // Q.256 return lam } -var MaxWinCount = 3 * int64(build.BlocksPerEpoch) +var MaxWinCount = 3 * int64(buildconstants.BlocksPerEpoch) type poiss struct { lam *big.Int diff --git a/chain/types/ethtypes/eth_transactions.go b/chain/types/ethtypes/eth_transactions.go index a3b1d01502a..fd680244ca7 100644 --- a/chain/types/ethtypes/eth_transactions.go +++ b/chain/types/ethtypes/eth_transactions.go @@ -17,7 +17,7 @@ import ( builtintypes 
"github.com/filecoin-project/go-state-types/builtin" typescrypto "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/types" ) @@ -151,7 +151,7 @@ func EthTxArgsFromUnsignedEthMessage(msg *types.Message) (EthTxArgs, error) { } return EthTxArgs{ - ChainID: build.Eip155ChainId, + ChainID: buildconstants.Eip155ChainId, Nonce: int(msg.Nonce), To: to, Value: msg.Value, @@ -163,7 +163,7 @@ func EthTxArgsFromUnsignedEthMessage(msg *types.Message) (EthTxArgs, error) { } func (tx *EthTxArgs) ToUnsignedMessage(from address.Address) (*types.Message, error) { - if tx.ChainID != build.Eip155ChainId { + if tx.ChainID != buildconstants.Eip155ChainId { return nil, xerrors.Errorf("unsupported chain id: %d", tx.ChainID) } diff --git a/chain/types/ethtypes/eth_types.go b/chain/types/ethtypes/eth_types.go index 893c0721c85..a408885b762 100644 --- a/chain/types/ethtypes/eth_types.go +++ b/chain/types/ethtypes/eth_types.go @@ -22,7 +22,7 @@ import ( "github.com/filecoin-project/go-state-types/big" builtintypes "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/lib/must" ) @@ -208,7 +208,7 @@ func NewEthBlock(hasTransactions bool, tipsetLen int) EthBlock { Extradata: []byte{}, MixHash: EmptyEthHash, Nonce: EmptyEthNonce, - GasLimit: EthUint64(build.BlockGasLimit * int64(tipsetLen)), + GasLimit: EthUint64(buildconstants.BlockGasLimit * int64(tipsetLen)), Uncles: []EthHash{}, Transactions: []interface{}{}, } diff --git a/chain/types/fil.go b/chain/types/fil.go index 960a42f2879..ed9115fe7e1 100644 --- a/chain/types/fil.go +++ b/chain/types/fil.go @@ -8,7 +8,7 @@ import ( "github.com/invopop/jsonschema" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" ) type 
FIL BigInt @@ -21,7 +21,7 @@ func (f FIL) String() string { } func (f FIL) Unitless() string { - r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(build.FilecoinPrecision))) + r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(buildconstants.FilecoinPrecision))) if r.Sign() == 0 { return "0" } @@ -117,7 +117,7 @@ func ParseFIL(s string) (FIL, error) { } if !attofil { - r = r.Mul(r, big.NewRat(int64(build.FilecoinPrecision), 1)) + r = r.Mul(r, big.NewRat(int64(buildconstants.FilecoinPrecision), 1)) } if !r.IsInt() { diff --git a/chain/types/message.go b/chain/types/message.go index 473289ead45..2f3606dbe47 100644 --- a/chain/types/message.go +++ b/chain/types/message.go @@ -14,7 +14,7 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" ) const MessageVersion = 0 @@ -155,7 +155,7 @@ func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version) return xerrors.New("'To' address cannot be empty") } - if m.To == build.ZeroAddress && version >= network.Version7 { + if m.To == buildconstants.ZeroAddress && version >= network.Version7 { return xerrors.New("invalid 'To' address") } @@ -203,8 +203,8 @@ func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version) return xerrors.New("'GasFeeCap' less than 'GasPremium'") } - if m.GasLimit > build.BlockGasLimit { - return xerrors.Errorf("'GasLimit' field cannot be greater than a block's gas limit (%d > %d)", m.GasLimit, build.BlockGasLimit) + if m.GasLimit > buildconstants.BlockGasLimit { + return xerrors.Errorf("'GasLimit' field cannot be greater than a block's gas limit (%d > %d)", m.GasLimit, buildconstants.BlockGasLimit) } if m.GasLimit <= 0 { diff --git a/chain/types/mock/chain.go b/chain/types/mock/chain.go index dcbcd85362c..e4e80890bce 100644 --- a/chain/types/mock/chain.go +++ b/chain/types/mock/chain.go @@ -12,7 +12,7 @@ 
import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet" ) @@ -58,7 +58,7 @@ func MkBlock(parents *types.TipSet, weightInc uint64, ticketNonce uint64) *types if parents != nil { pcids = parents.Cids() height = parents.Height() + 1 - timestamp = parents.MinTimestamp() + build.BlockDelaySecs + timestamp = parents.MinTimestamp() + buildconstants.BlockDelaySecs weight = types.BigAdd(parents.Blocks()[0].ParentWeight, weight) } @@ -79,7 +79,7 @@ func MkBlock(parents *types.TipSet, weightInc uint64, ticketNonce uint64) *types Timestamp: timestamp, ParentStateRoot: pstateRoot, BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! im a signature")}, - ParentBaseFee: types.NewInt(uint64(build.MinimumBaseFee)), + ParentBaseFee: types.NewInt(uint64(buildconstants.MinimumBaseFee)), } } diff --git a/cmd/curio/guidedsetup/guidedsetup.go b/cmd/curio/guidedsetup/guidedsetup.go index af505966dee..4587beff060 100644 --- a/cmd/curio/guidedsetup/guidedsetup.go +++ b/cmd/curio/guidedsetup/guidedsetup.go @@ -42,7 +42,7 @@ import ( "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/cli/spcli" + "github.com/filecoin-project/lotus/cli/spcli/createminer" cliutil "github.com/filecoin-project/lotus/cli/util" "github.com/filecoin-project/lotus/cmd/curio/deps" _ "github.com/filecoin-project/lotus/cmd/curio/internal/translations" @@ -694,7 +694,7 @@ func stepCreateActor(d *MigrationData) { } minerInit: - miner, err := spcli.CreateStorageMiner(d.ctx, d.full, d.owner, d.worker, d.sender, d.ssize, d.confidence) + miner, err := createminer.CreateStorageMiner(d.ctx, d.full, d.owner, d.worker, d.sender, d.ssize, d.confidence) 
if err != nil { d.say(notice, "Failed to create the miner actor: %s", err.Error()) os.Exit(1) diff --git a/itests/curio_test.go b/itests/curio_test.go index 9a9693e1ed3..bea14e63d5a 100644 --- a/itests/curio_test.go +++ b/itests/curio_test.go @@ -29,7 +29,7 @@ import ( "github.com/filecoin-project/lotus/api/v1api" miner2 "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/cli/spcli" + "github.com/filecoin-project/lotus/cli/spcli/createminer" "github.com/filecoin-project/lotus/cmd/curio/deps" "github.com/filecoin-project/lotus/cmd/curio/rpc" "github.com/filecoin-project/lotus/cmd/curio/tasks" @@ -70,7 +70,7 @@ func TestCurioNewActor(t *testing.T) { sectorSizeInt, err := units.RAMInBytes("8MiB") require.NoError(t, err) - maddr, err := spcli.CreateStorageMiner(ctx, full, addr, addr, addr, abi.SectorSize(sectorSizeInt), 0) + maddr, err := createminer.CreateStorageMiner(ctx, full, addr, addr, addr, abi.SectorSize(sectorSizeInt), 0) require.NoError(t, err) err = deps.CreateMinerConfig(ctx, full, db, []string{maddr.String()}, "FULL NODE API STRING") @@ -133,7 +133,7 @@ func TestCurioHappyPath(t *testing.T) { sectorSizeInt, err := units.RAMInBytes("2KiB") require.NoError(t, err) - maddr, err := spcli.CreateStorageMiner(ctx, full, addr, addr, addr, abi.SectorSize(sectorSizeInt), 0) + maddr, err := createminer.CreateStorageMiner(ctx, full, addr, addr, addr, abi.SectorSize(sectorSizeInt), 0) require.NoError(t, err) err = deps.CreateMinerConfig(ctx, full, db, []string{maddr.String()}, fapi) From f4cf2505a55841fa04739900f5f99466f8eda84e Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Wed, 19 Jun 2024 23:31:30 -0500 Subject: [PATCH 04/27] buildconstants for more places --- cli/util/epoch.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cli/util/epoch.go b/cli/util/epoch.go index 81c92a7e3ed..55d385c8a52 100644 --- a/cli/util/epoch.go +++ 
b/cli/util/epoch.go @@ -8,18 +8,18 @@ import ( "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/types" ) func EpochTime(curr, e abi.ChainEpoch) string { switch { case curr > e: - return fmt.Sprintf("%d (%s ago)", e, durafmt.Parse(time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e))).LimitFirstN(2)) + return fmt.Sprintf("%d (%s ago)", e, durafmt.Parse(time.Second*time.Duration(int64(buildconstants.BlockDelaySecs)*int64(curr-e))).LimitFirstN(2)) case curr == e: return fmt.Sprintf("%d (now)", e) case curr < e: - return fmt.Sprintf("%d (in %s)", e, durafmt.Parse(time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr))).LimitFirstN(2)) + return fmt.Sprintf("%d (in %s)", e, durafmt.Parse(time.Second*time.Duration(int64(buildconstants.BlockDelaySecs)*int64(e-curr))).LimitFirstN(2)) } panic("math broke") @@ -31,15 +31,15 @@ func EpochTime(curr, e abi.ChainEpoch) string { // // Example output: `1944975 (01 Jul 22 08:07 CEST, 10 hours 29 minutes ago)` func EpochTimeTs(curr, e abi.ChainEpoch, ts *types.TipSet) string { - timeStr := time.Unix(int64(ts.MinTimestamp()+(uint64(e-ts.Height())*build.BlockDelaySecs)), 0).Format(time.RFC822) + timeStr := time.Unix(int64(ts.MinTimestamp()+(uint64(e-ts.Height())*buildconstants.BlockDelaySecs)), 0).Format(time.RFC822) switch { case curr > e: - return fmt.Sprintf("%d (%s, %s ago)", e, timeStr, durafmt.Parse(time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e))).LimitFirstN(2)) + return fmt.Sprintf("%d (%s, %s ago)", e, timeStr, durafmt.Parse(time.Second*time.Duration(int64(buildconstants.BlockDelaySecs)*int64(curr-e))).LimitFirstN(2)) case curr == e: return fmt.Sprintf("%d (%s, now)", e, timeStr) case curr < e: - return fmt.Sprintf("%d (%s, in %s)", e, timeStr, 
durafmt.Parse(time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr))).LimitFirstN(2)) + return fmt.Sprintf("%d (%s, in %s)", e, timeStr, durafmt.Parse(time.Second*time.Duration(int64(buildconstants.BlockDelaySecs)*int64(e-curr))).LimitFirstN(2)) } panic("math broke") From 08f126cad9beb134d3f4982fea154ec3c51ceccc Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Thu, 20 Jun 2024 11:29:44 -0500 Subject: [PATCH 05/27] deprecate msg --- build/parameters.go | 92 ++++++++++++++++++------------------ build/params_shared_funcs.go | 3 ++ build/params_shared_vals.go | 58 +++++++++++------------ 3 files changed, 78 insertions(+), 75 deletions(-) diff --git a/build/parameters.go b/build/parameters.go index 6f029fa3602..20baaa3d2df 100644 --- a/build/parameters.go +++ b/build/parameters.go @@ -15,72 +15,72 @@ var SrsJSON = proofparams.SrsJSON // NOTE: DO NOT change this unless you REALLY know what you're doing. This is consensus critical. var BundleOverrides map[actorstypes.Version]string -var BootstrappersFile = buildconstants.BootstrappersFile +var BootstrappersFile = buildconstants.BootstrappersFile // Deprecated: Use buildconstants.BootstrappersFile instead -var GenesisFile = buildconstants.GenesisFile +var GenesisFile = buildconstants.GenesisFile // Deprecated: Use buildconstants.GenesisFile instead -var NetworkBundle = buildconstants.NetworkBundle -var ActorDebugging = buildconstants.ActorDebugging +var NetworkBundle = buildconstants.NetworkBundle // Deprecated: Use buildconstants.NetworkBundle instead +var ActorDebugging = buildconstants.ActorDebugging // Deprecated: Use buildconstants.ActorDebugging instead -var GenesisNetworkVersion = buildconstants.GenesisNetworkVersion +var GenesisNetworkVersion = buildconstants.GenesisNetworkVersion // Deprecated: Use buildconstants.GenesisNetworkVersion instead -var UpgradeBreezeHeight abi.ChainEpoch = buildconstants.UpgradeBreezeHeight +var UpgradeBreezeHeight abi.ChainEpoch = 
buildconstants.UpgradeBreezeHeight // Deprecated: Use buildconstants.UpgradeBreezeHeight instead -var BreezeGasTampingDuration abi.ChainEpoch = buildconstants.BreezeGasTampingDuration +var BreezeGasTampingDuration abi.ChainEpoch = buildconstants.BreezeGasTampingDuration // Deprecated: Use buildconstants.BreezeGasTampingDuration instead // upgrade heights -var UpgradeSmokeHeight abi.ChainEpoch = buildconstants.UpgradeSmokeHeight -var UpgradeIgnitionHeight abi.ChainEpoch = buildconstants.UpgradeIgnitionHeight -var UpgradeRefuelHeight abi.ChainEpoch = buildconstants.UpgradeRefuelHeight -var UpgradeTapeHeight abi.ChainEpoch = buildconstants.UpgradeTapeHeight -var UpgradeAssemblyHeight abi.ChainEpoch = buildconstants.UpgradeAssemblyHeight -var UpgradeLiftoffHeight abi.ChainEpoch = buildconstants.UpgradeLiftoffHeight -var UpgradeKumquatHeight abi.ChainEpoch = buildconstants.UpgradeKumquatHeight -var UpgradeCalicoHeight abi.ChainEpoch = buildconstants.UpgradeCalicoHeight -var UpgradePersianHeight abi.ChainEpoch = buildconstants.UpgradePersianHeight -var UpgradeOrangeHeight abi.ChainEpoch = buildconstants.UpgradeOrangeHeight -var UpgradeClausHeight abi.ChainEpoch = buildconstants.UpgradeClausHeight -var UpgradeTrustHeight abi.ChainEpoch = buildconstants.UpgradeTrustHeight -var UpgradeNorwegianHeight abi.ChainEpoch = buildconstants.UpgradeNorwegianHeight -var UpgradeTurboHeight abi.ChainEpoch = buildconstants.UpgradeTurboHeight -var UpgradeHyperdriveHeight abi.ChainEpoch = buildconstants.UpgradeHyperdriveHeight -var UpgradeChocolateHeight abi.ChainEpoch = buildconstants.UpgradeChocolateHeight -var UpgradeOhSnapHeight abi.ChainEpoch = buildconstants.UpgradeOhSnapHeight -var UpgradeSkyrHeight abi.ChainEpoch = buildconstants.UpgradeSkyrHeight -var UpgradeSharkHeight abi.ChainEpoch = buildconstants.UpgradeSharkHeight -var UpgradeHyggeHeight abi.ChainEpoch = buildconstants.UpgradeHyggeHeight -var UpgradeLightningHeight abi.ChainEpoch = buildconstants.UpgradeLightningHeight -var 
UpgradeThunderHeight abi.ChainEpoch = buildconstants.UpgradeThunderHeight -var UpgradeWatermelonHeight abi.ChainEpoch = buildconstants.UpgradeWatermelonHeight -var UpgradeDragonHeight abi.ChainEpoch = buildconstants.UpgradeDragonHeight -var UpgradePhoenixHeight abi.ChainEpoch = buildconstants.UpgradePhoenixHeight -var UpgradeAussieHeight abi.ChainEpoch = buildconstants.UpgradeAussieHeight +var UpgradeSmokeHeight abi.ChainEpoch = buildconstants.UpgradeSmokeHeight // Deprecated: Use buildconstants.UpgradeSmokeHeight instead +var UpgradeIgnitionHeight abi.ChainEpoch = buildconstants.UpgradeIgnitionHeight // Deprecated: Use buildconstants.UpgradeIgnitionHeight instead +var UpgradeRefuelHeight abi.ChainEpoch = buildconstants.UpgradeRefuelHeight // Deprecated: Use buildconstants.UpgradeRefuelHeight instead +var UpgradeTapeHeight abi.ChainEpoch = buildconstants.UpgradeTapeHeight // Deprecated: Use buildconstants.UpgradeTapeHeight instead +var UpgradeAssemblyHeight abi.ChainEpoch = buildconstants.UpgradeAssemblyHeight // Deprecated: Use buildconstants.UpgradeAssemblyHeight instead +var UpgradeLiftoffHeight abi.ChainEpoch = buildconstants.UpgradeLiftoffHeight // Deprecated: Use buildconstants.UpgradeLiftoffHeight instead +var UpgradeKumquatHeight abi.ChainEpoch = buildconstants.UpgradeKumquatHeight // Deprecated: Use buildconstants.UpgradeKumquatHeight instead +var UpgradeCalicoHeight abi.ChainEpoch = buildconstants.UpgradeCalicoHeight // Deprecated: Use buildconstants.UpgradeCalicoHeight instead +var UpgradePersianHeight abi.ChainEpoch = buildconstants.UpgradePersianHeight // Deprecated: Use buildconstants.UpgradePersianHeight instead +var UpgradeOrangeHeight abi.ChainEpoch = buildconstants.UpgradeOrangeHeight // Deprecated: Use buildconstants.UpgradeOrangeHeight instead +var UpgradeClausHeight abi.ChainEpoch = buildconstants.UpgradeClausHeight // Deprecated: Use buildconstants.UpgradeClausHeight instead +var UpgradeTrustHeight abi.ChainEpoch = 
buildconstants.UpgradeTrustHeight // Deprecated: Use buildconstants.UpgradeTrustHeight instead +var UpgradeNorwegianHeight abi.ChainEpoch = buildconstants.UpgradeNorwegianHeight // Deprecated: Use buildconstants.UpgradeNorwegianHeight instead +var UpgradeTurboHeight abi.ChainEpoch = buildconstants.UpgradeTurboHeight // Deprecated: Use buildconstants.UpgradeTurboHeight instead +var UpgradeHyperdriveHeight abi.ChainEpoch = buildconstants.UpgradeHyperdriveHeight // Deprecated: Use buildconstants.UpgradeHyperdriveHeight instead +var UpgradeChocolateHeight abi.ChainEpoch = buildconstants.UpgradeChocolateHeight // Deprecated: Use buildconstants.UpgradeChocolateHeight instead +var UpgradeOhSnapHeight abi.ChainEpoch = buildconstants.UpgradeOhSnapHeight // Deprecated: Use buildconstants.UpgradeOhSnapHeight instead +var UpgradeSkyrHeight abi.ChainEpoch = buildconstants.UpgradeSkyrHeight // Deprecated: Use buildconstants.UpgradeSkyrHeight instead +var UpgradeSharkHeight abi.ChainEpoch = buildconstants.UpgradeSharkHeight // Deprecated: Use buildconstants.UpgradeSharkHeight instead +var UpgradeHyggeHeight abi.ChainEpoch = buildconstants.UpgradeHyggeHeight // Deprecated: Use buildconstants.UpgradeHyggeHeight instead +var UpgradeLightningHeight abi.ChainEpoch = buildconstants.UpgradeLightningHeight // Deprecated: Use buildconstants.UpgradeLightningHeight instead +var UpgradeThunderHeight abi.ChainEpoch = buildconstants.UpgradeThunderHeight // Deprecated: Use buildconstants.UpgradeThunderHeight instead +var UpgradeWatermelonHeight abi.ChainEpoch = buildconstants.UpgradeWatermelonHeight // Deprecated: Use buildconstants.UpgradeWatermelonHeight instead +var UpgradeDragonHeight abi.ChainEpoch = buildconstants.UpgradeDragonHeight // Deprecated: Use buildconstants.UpgradeDragonHeight instead +var UpgradePhoenixHeight abi.ChainEpoch = buildconstants.UpgradePhoenixHeight // Deprecated: Use buildconstants.UpgradePhoenixHeight instead +var UpgradeAussieHeight abi.ChainEpoch = 
buildconstants.UpgradeAussieHeight // Deprecated: Use buildconstants.UpgradeAussieHeight instead // This fix upgrade only ran on calibrationnet -var UpgradeWatermelonFixHeight abi.ChainEpoch = buildconstants.UpgradeWatermelonFixHeight +var UpgradeWatermelonFixHeight abi.ChainEpoch = buildconstants.UpgradeWatermelonFixHeight // Deprecated: Use buildconstants.UpgradeWatermelonFixHeight instead // This fix upgrade only ran on calibrationnet -var UpgradeWatermelonFix2Height abi.ChainEpoch = buildconstants.UpgradeWatermelonFix2Height +var UpgradeWatermelonFix2Height abi.ChainEpoch = buildconstants.UpgradeWatermelonFix2Height // Deprecated: Use buildconstants.UpgradeWatermelonFix2Height instead // This fix upgrade only ran on calibrationnet -var UpgradeCalibrationDragonFixHeight abi.ChainEpoch = buildconstants.UpgradeCalibrationDragonFixHeight +var UpgradeCalibrationDragonFixHeight abi.ChainEpoch = buildconstants.UpgradeCalibrationDragonFixHeight // Deprecated: Use buildconstants.UpgradeCalibrationDragonFixHeight instead -var SupportedProofTypes = buildconstants.SupportedProofTypes -var ConsensusMinerMinPower = buildconstants.ConsensusMinerMinPower -var PreCommitChallengeDelay = buildconstants.PreCommitChallengeDelay +var SupportedProofTypes = buildconstants.SupportedProofTypes // Deprecated: Use buildconstants.SupportedProofTypes instead +var ConsensusMinerMinPower = buildconstants.ConsensusMinerMinPower // Deprecated: Use buildconstants.ConsensusMinerMinPower instead +var PreCommitChallengeDelay = buildconstants.PreCommitChallengeDelay // Deprecated: Use buildconstants.PreCommitChallengeDelay instead -var BlockDelaySecs = buildconstants.BlockDelaySecs +var BlockDelaySecs = buildconstants.BlockDelaySecs // Deprecated: Use buildconstants.BlockDelaySecs instead -var PropagationDelaySecs = buildconstants.PropagationDelaySecs +var PropagationDelaySecs = buildconstants.PropagationDelaySecs // Deprecated: Use buildconstants.PropagationDelaySecs instead -var 
EquivocationDelaySecs = buildconstants.EquivocationDelaySecs +var EquivocationDelaySecs = buildconstants.EquivocationDelaySecs // Deprecated: Use buildconstants.EquivocationDelaySecs instead -const BootstrapPeerThreshold = buildconstants.BootstrapPeerThreshold +const BootstrapPeerThreshold = buildconstants.BootstrapPeerThreshold // Deprecated: Use buildconstants.BootstrapPeerThreshold instead // ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint. // As per https://github.com/ethereum-lists/chains -const Eip155ChainId = buildconstants.Eip155ChainId +const Eip155ChainId = buildconstants.Eip155ChainId // Deprecated: Use buildconstants.Eip155ChainId instead -var WhitelistedBlock = buildconstants.WhitelistedBlock +var WhitelistedBlock = buildconstants.WhitelistedBlock // Deprecated: Use buildconstants.WhitelistedBlock instead -const Finality = policy.ChainFinality +const Finality = policy.ChainFinality // Deprecated: Use policy.ChainFinality instead diff --git a/build/params_shared_funcs.go b/build/params_shared_funcs.go index 4d5aa5bf327..6f78f4bee85 100644 --- a/build/params_shared_funcs.go +++ b/build/params_shared_funcs.go @@ -26,8 +26,11 @@ func DhtProtocolName(netName dtypes.NetworkName) protocol.ID { return protocol.ID("/fil/kad/" + string(netName)) } +// Deprecated: Use buildconstants.SetAddressNetwork instead. var SetAddressNetwork = buildconstants.SetAddressNetwork +// Deprecated: Use buildconstants.MustParseAddress instead. var MustParseAddress = buildconstants.MustParseAddress +// Deprecated: Use buildconstants.MustParseCid instead. 
var MustParseCid = buildconstants.MustParseCid diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index 399ab79ec2b..e94a43dfdbc 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -9,47 +9,47 @@ import ( // ///// // Storage -var UnixfsChunkSize uint64 = buildconstants.UnixfsChunkSize -var UnixfsLinksPerLevel = buildconstants.UnixfsLinksPerLevel +var UnixfsChunkSize uint64 = buildconstants.UnixfsChunkSize // Deprecated: Use buildconstants.UnixfsChunkSize instead +var UnixfsLinksPerLevel = buildconstants.UnixfsLinksPerLevel // Deprecated: Use buildconstants.UnixfsLinksPerLevel instead // ///// // Consensus / Network -var AllowableClockDriftSecs = buildconstants.AllowableClockDriftSecs +var AllowableClockDriftSecs = buildconstants.AllowableClockDriftSecs // Deprecated: Use buildconstants.AllowableClockDriftSecs instead // Epochs -var ForkLengthThreshold = Finality +var ForkLengthThreshold = Finality // Deprecated: Use Finality instead // Blocks (e) -var BlocksPerEpoch = buildconstants.BlocksPerEpoch +var BlocksPerEpoch = buildconstants.BlocksPerEpoch // Deprecated: Use buildconstants.BlocksPerEpoch instead // Epochs -var MessageConfidence = buildconstants.MessageConfidence +var MessageConfidence = buildconstants.MessageConfidence // Deprecated: Use buildconstants.MessageConfidence instead // constants for Weight calculation // The ratio of weight contributed by short-term vs long-term factors in a given round -var WRatioNum = buildconstants.WRatioNum -var WRatioDen = buildconstants.WRatioDen +var WRatioNum = buildconstants.WRatioNum // Deprecated: Use buildconstants.WRatioNum instead +var WRatioDen = buildconstants.WRatioDen // Deprecated: Use buildconstants.WRatioDen instead // ///// // Mining // Epochs -var TicketRandomnessLookback = buildconstants.TicketRandomnessLookback +var TicketRandomnessLookback = buildconstants.TicketRandomnessLookback // Deprecated: Use buildconstants.TicketRandomnessLookback instead // the 'f' 
prefix doesn't matter -var ZeroAddress = buildconstants.ZeroAddress +var ZeroAddress = buildconstants.ZeroAddress // Deprecated: Use buildconstants.ZeroAddress instead // ///// // Devnet settings -var Devnet = buildconstants.Devnet +var Devnet = buildconstants.Devnet // Deprecated: Use buildconstants.Devnet instead -var FilBase = buildconstants.FilBase -var FilAllocStorageMining = buildconstants.FilAllocStorageMining +var FilBase = buildconstants.FilBase // Deprecated: Use buildconstants.FilBase instead +var FilAllocStorageMining = buildconstants.FilAllocStorageMining // Deprecated: Use buildconstants.FilAllocStorageMining instead -var FilecoinPrecision = buildconstants.FilecoinPrecision -var FilReserved = buildconstants.FilReserved +var FilecoinPrecision = buildconstants.FilecoinPrecision // Deprecated: Use buildconstants.FilecoinPrecision instead +var FilReserved = buildconstants.FilReserved // Deprecated: Use buildconstants.FilReserved instead var InitialRewardBalance *big.Int var InitialFilReserved *big.Int @@ -65,32 +65,32 @@ func init() { } // Sync -var BadBlockCacheSize = buildconstants.BadBlockCacheSize +var BadBlockCacheSize = buildconstants.BadBlockCacheSize // Deprecated: Use buildconstants.BadBlockCacheSize instead // assuming 4000 messages per round, this lets us not lose any messages across a // 10 block reorg. 
-var BlsSignatureCacheSize = buildconstants.BlsSignatureCacheSize +var BlsSignatureCacheSize = buildconstants.BlsSignatureCacheSize // Deprecated: Use buildconstants.BlsSignatureCacheSize instead // Size of signature verification cache // 32k keeps the cache around 10MB in size, max -var VerifSigCacheSize = buildconstants.VerifSigCacheSize +var VerifSigCacheSize = buildconstants.VerifSigCacheSize // Deprecated: Use buildconstants.VerifSigCacheSize instead // /////// // Limits // TODO: If this is gonna stay, it should move to specs-actors -var BlockMessageLimit = buildconstants.BlockMessageLimit +var BlockMessageLimit = buildconstants.BlockMessageLimit // Deprecated: Use buildconstants.BlockMessageLimit instead -var BlockGasLimit = buildconstants.BlockGasLimit -var BlockGasTarget = buildconstants.BlockGasTarget +var BlockGasLimit = buildconstants.BlockGasLimit // Deprecated: Use buildconstants.BlockGasLimit instead +var BlockGasTarget = buildconstants.BlockGasTarget // Deprecated: Use buildconstants.BlockGasTarget instead -var BaseFeeMaxChangeDenom int64 = buildconstants.BaseFeeMaxChangeDenom -var InitialBaseFee int64 = buildconstants.InitialBaseFee -var MinimumBaseFee int64 = buildconstants.MinimumBaseFee -var PackingEfficiencyNum int64 = buildconstants.PackingEfficiencyNum -var PackingEfficiencyDenom int64 = buildconstants.PackingEfficiencyDenom +var BaseFeeMaxChangeDenom int64 = buildconstants.BaseFeeMaxChangeDenom // Deprecated: Use buildconstants.BaseFeeMaxChangeDenom instead +var InitialBaseFee int64 = buildconstants.InitialBaseFee // Deprecated: Use buildconstants.InitialBaseFee instead +var MinimumBaseFee int64 = buildconstants.MinimumBaseFee // Deprecated: Use buildconstants.MinimumBaseFee instead +var PackingEfficiencyNum int64 = buildconstants.PackingEfficiencyNum // Deprecated: Use buildconstants.PackingEfficiencyNum instead +var PackingEfficiencyDenom int64 = buildconstants.PackingEfficiencyDenom // Deprecated: Use buildconstants.PackingEfficiencyDenom 
instead -var MinDealDuration = buildconstants.MinDealDuration -var MaxDealDuration = buildconstants.MaxDealDuration +var MinDealDuration = buildconstants.MinDealDuration // Deprecated: Use buildconstants.MinDealDuration instead +var MaxDealDuration = buildconstants.MaxDealDuration // Deprecated: Use buildconstants.MaxDealDuration instead -const TestNetworkVersion = buildconstants.TestNetworkVersion +const TestNetworkVersion = buildconstants.TestNetworkVersion // Deprecated: Use buildconstants.TestNetworkVersion instead From 36772b56cfc7f700481d7626588546758cbf1fd5 Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Thu, 20 Jun 2024 11:58:56 -0500 Subject: [PATCH 06/27] itest cleanup --- itests/api_test.go | 3 +++ itests/kit/ensemble.go | 2 ++ 2 files changed, 5 insertions(+) diff --git a/itests/api_test.go b/itests/api_test.go index ff43bd5c02e..c00dc52012f 100644 --- a/itests/api_test.go +++ b/itests/api_test.go @@ -18,6 +18,7 @@ import ( lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/itests/kit" ) @@ -168,8 +169,10 @@ func (ts *apiSuite) testOutOfGasError(t *testing.T) { // Lowering it to 2 will cause it to run out of gas, testing the failure case we want originalLimit := build.BlockGasLimit build.BlockGasLimit = 2 + buildconstants.BlockGasTarget = 2 defer func() { build.BlockGasLimit = originalLimit + buildconstants.BlockGasTarget = originalLimit }() msg := &types.Message{ diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go index 207ccef59af..97a222b178a 100644 --- a/itests/kit/ensemble.go +++ b/itests/kit/ensemble.go @@ -36,6 +36,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" 
"github.com/filecoin-project/lotus/chain" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -178,6 +179,7 @@ func NewEnsemble(t *testing.T, opts ...EnsembleOpt) *Ensemble { } build.EquivocationDelaySecs = 0 + buildconstants.EquivocationDelaySecs = 0 return n } From 66c9252bc8c1c59c2f6b19d8a2d5b126243b41ed Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Thu, 20 Jun 2024 12:19:57 -0500 Subject: [PATCH 07/27] alerting interface --- storage/paths/alertinginterface/ai.go | 11 +++++++++++ storage/paths/db_index.go | 10 +++++----- 2 files changed, 16 insertions(+), 5 deletions(-) create mode 100644 storage/paths/alertinginterface/ai.go diff --git a/storage/paths/alertinginterface/ai.go b/storage/paths/alertinginterface/ai.go new file mode 100644 index 00000000000..c8d592efc90 --- /dev/null +++ b/storage/paths/alertinginterface/ai.go @@ -0,0 +1,11 @@ +package alertinginterface + +type AlertingInterface interface { + AddAlertType(name, id string) AlertType + Raise(alert AlertType, metadata map[string]interface{}) + IsRaised(alert AlertType) bool + Resolve(alert AlertType, metadata map[string]string) +} +type AlertType struct { + System, Subsystem string +} diff --git a/storage/paths/db_index.go b/storage/paths/db_index.go index e6def455112..67b8c8a7d30 100644 --- a/storage/paths/db_index.go +++ b/storage/paths/db_index.go @@ -17,9 +17,9 @@ import ( "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/journal/alerting" "github.com/filecoin-project/lotus/lib/harmony/harmonydb" "github.com/filecoin-project/lotus/metrics" + "github.com/filecoin-project/lotus/storage/paths/alertinginterface" "github.com/filecoin-project/lotus/storage/sealer/fsutil" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -31,18 +31,18 @@ const URLSeparator = "," var errAlreadyLocked = errors.New("already locked") type DBIndex struct { - alerting *alerting.Alerting - pathAlerts 
map[storiface.ID]alerting.AlertType + alerting alertinginterface.AlertingInterface + pathAlerts map[storiface.ID]alertinginterface.AlertType harmonyDB *harmonydb.DB } -func NewDBIndex(al *alerting.Alerting, db *harmonydb.DB) *DBIndex { +func NewDBIndex(al alertinginterface.AlertingInterface, db *harmonydb.DB) *DBIndex { return &DBIndex{ harmonyDB: db, alerting: al, - pathAlerts: map[storiface.ID]alerting.AlertType{}, + pathAlerts: map[storiface.ID]alertinginterface.AlertType{}, } } From 883469ad6e59efb5af99c2fa07a19fe54faf49ae Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Thu, 20 Jun 2024 15:49:00 -0500 Subject: [PATCH 08/27] house cleaning --- Makefile | 79 +- cmd/curio/cli.go | 249 --- cmd/curio/config.go | 441 ----- cmd/curio/config_new.go | 57 - cmd/curio/config_test.go | 438 ----- cmd/curio/deps/apiinfo.go | 94 - cmd/curio/deps/deps.go | 529 ------ cmd/curio/ffi.go | 71 - cmd/curio/guidedsetup/guidedsetup.go | 896 --------- cmd/curio/guidedsetup/shared.go | 430 ----- cmd/curio/internal/translations/catalog.go | 486 ----- .../internal/translations/knowns/main.go | 82 - .../translations/locales/en/out.gotext.json | 1684 ----------------- .../locales/ko/messages.gotext.json | 1130 ----------- .../translations/locales/ko/out.gotext.json | 4 - .../locales/zh/messages.gotext.json | 1100 ----------- .../translations/locales/zh/out.gotext.json | 4 - .../internal/translations/translations.go | 27 - cmd/curio/internal/translations/updateLang.sh | 8 - cmd/curio/log.go | 105 - cmd/curio/main.go | 190 -- cmd/curio/market.go | 201 -- cmd/curio/pipeline.go | 216 --- cmd/curio/proving.go | 204 -- cmd/curio/rpc/rpc.go | 339 ---- cmd/curio/run.go | 200 -- cmd/curio/stop.go | 30 - cmd/curio/storage.go | 499 ----- cmd/curio/tasks/tasks.go | 248 --- curiosrc/address.go | 64 - curiosrc/alertmanager/alerts.go | 573 ------ curiosrc/alertmanager/task_alert.go | 234 --- curiosrc/build/build.go | 9 - curiosrc/builder.go | 45 - curiosrc/chainsched/chain_sched.go | 136 -- 
curiosrc/docker/.env | 5 - curiosrc/docker/.gitignore | 1 - curiosrc/docker/curio/Dockerfile | 30 - curiosrc/docker/curio/entrypoint.sh | 60 - curiosrc/docker/docker-compose.yaml | 101 - curiosrc/docker/lotus-miner/Dockerfile | 33 - curiosrc/docker/lotus-miner/entrypoint.sh | 16 - curiosrc/docker/lotus/Dockerfile | 35 - curiosrc/docker/lotus/entrypoint.sh | 33 - curiosrc/docker/yugabyte/Dockerfile | 12 - curiosrc/ffi/piece_funcs.go | 76 - curiosrc/ffi/sdr_funcs.go | 661 ------- curiosrc/ffi/task_storage.go | 232 --- curiosrc/gc/storage_endpoint_gc.go | 288 --- curiosrc/market/deal_ingest.go | 547 ------ curiosrc/market/fakelm/iface.go | 33 - curiosrc/market/fakelm/lmimpl.go | 381 ---- curiosrc/market/lmrpc/lmrpc.go | 620 ------ curiosrc/message/sender.go | 396 ---- curiosrc/message/watch.go | 214 --- curiosrc/multictladdr/multiaddresses.go | 81 - curiosrc/piece/task_cleanup_piece.go | 135 -- curiosrc/piece/task_park_piece.go | 239 --- curiosrc/proof/treed_build.go | 292 --- curiosrc/proof/treed_build_test.go | 516 ----- curiosrc/seal/README.md | 28 - curiosrc/seal/finalize_pieces.go | 51 - curiosrc/seal/poller.go | 304 --- curiosrc/seal/poller_commit_msg.go | 108 -- curiosrc/seal/poller_precommit_msg.go | 119 -- curiosrc/seal/sector_num_alloc.go | 127 -- curiosrc/seal/task_finalize.go | 156 -- curiosrc/seal/task_movestorage.go | 177 -- curiosrc/seal/task_porep.go | 177 -- curiosrc/seal/task_sdr.go | 279 --- curiosrc/seal/task_submit_commit.go | 423 ----- curiosrc/seal/task_submit_precommit.go | 297 --- curiosrc/seal/task_treed.go | 366 ---- curiosrc/seal/task_treed_test.go | 74 - curiosrc/seal/task_treerc.go | 200 -- curiosrc/web/api/apihelper/apihelper.go | 19 - curiosrc/web/api/config/config.go | 202 -- curiosrc/web/api/routes.go | 17 - curiosrc/web/api/sector/sector.go | 375 ---- curiosrc/web/api/webrpc/routes.go | 37 - curiosrc/web/api/webrpc/sync_state.go | 183 -- curiosrc/web/hapi/robust_rpc.go | 102 - curiosrc/web/hapi/routes.go | 58 - 
curiosrc/web/hapi/simpleinfo.go | 962 ---------- .../web/hapi/simpleinfo_pipeline_porep.go | 195 -- curiosrc/web/hapi/watch_actor.go | 286 --- curiosrc/web/hapi/web/actor_summary.gohtml | 30 - curiosrc/web/hapi/web/chain_rpcs.gohtml | 15 - curiosrc/web/hapi/web/cluster_machines.gohtml | 15 - .../web/hapi/web/cluster_task_history.gohtml | 19 - curiosrc/web/hapi/web/cluster_tasks.gohtml | 10 - curiosrc/web/hapi/web/node_info.gohtml | 100 - .../hapi/web/pipeline_porep_sectors.gohtml | 200 -- curiosrc/web/hapi/web/pipline_porep.gohtml | 15 - curiosrc/web/hapi/web/root.gohtml | 28 - curiosrc/web/hapi/web/sector_info.gohtml | 105 - curiosrc/web/srv.go | 82 - curiosrc/web/static/chain-connectivity.mjs | 63 - curiosrc/web/static/config/edit.html | 164 -- curiosrc/web/static/config/index.html | 125 -- curiosrc/web/static/favicon.svg | 1 - curiosrc/web/static/index.html | 204 -- curiosrc/web/static/lib/jsonrpc.mjs | 96 - curiosrc/web/static/pipeline_porep.html | 98 - curiosrc/web/static/sector/index.html | 129 -- curiosrc/web/static/ux/curio-ux.mjs | 99 - .../web/static/ux/fonts/Metropolis-Bold.woff | Bin 17788 -> 0 bytes .../ux/fonts/Metropolis-ExtraLight.woff | Bin 17340 -> 0 bytes .../web/static/ux/fonts/Metropolis-Light.woff | Bin 17488 -> 0 bytes .../static/ux/fonts/Metropolis-Medium.woff | Bin 17524 -> 0 bytes .../static/ux/fonts/Metropolis-Regular.woff | Bin 17376 -> 0 bytes .../web/static/ux/fonts/Metropolis-Thin.woff | Bin 17128 -> 0 bytes curiosrc/web/static/ux/main.css | 108 -- curiosrc/window/compute_do.go | 547 ------ curiosrc/window/compute_task.go | 453 ----- curiosrc/window/faults_simple.go | 152 -- curiosrc/window/recover_task.go | 324 ---- curiosrc/window/submit_task.go | 307 --- curiosrc/winning/winning_task.go | 725 ------- go.mod | 13 +- go.sum | 11 - itests/curio_test.go | 50 +- itests/kit/ensemble.go | 56 +- itests/kit/ensemble_presets.go | 15 - itests/kit/node_full.go | 12 - lib/ffiselect/ffidirect/ffi-direct.go | 71 - lib/ffiselect/ffiselect.go | 262 
--- lib/ffiselect/logparse.go | 88 - lib/ffiselect/testffi.go | 27 - 129 files changed, 22 insertions(+), 26258 deletions(-) delete mode 100644 cmd/curio/cli.go delete mode 100644 cmd/curio/config.go delete mode 100644 cmd/curio/config_new.go delete mode 100644 cmd/curio/config_test.go delete mode 100644 cmd/curio/deps/apiinfo.go delete mode 100644 cmd/curio/deps/deps.go delete mode 100644 cmd/curio/ffi.go delete mode 100644 cmd/curio/guidedsetup/guidedsetup.go delete mode 100644 cmd/curio/guidedsetup/shared.go delete mode 100644 cmd/curio/internal/translations/catalog.go delete mode 100644 cmd/curio/internal/translations/knowns/main.go delete mode 100644 cmd/curio/internal/translations/locales/en/out.gotext.json delete mode 100644 cmd/curio/internal/translations/locales/ko/messages.gotext.json delete mode 100644 cmd/curio/internal/translations/locales/ko/out.gotext.json delete mode 100644 cmd/curio/internal/translations/locales/zh/messages.gotext.json delete mode 100644 cmd/curio/internal/translations/locales/zh/out.gotext.json delete mode 100644 cmd/curio/internal/translations/translations.go delete mode 100755 cmd/curio/internal/translations/updateLang.sh delete mode 100644 cmd/curio/log.go delete mode 100644 cmd/curio/main.go delete mode 100644 cmd/curio/market.go delete mode 100644 cmd/curio/pipeline.go delete mode 100644 cmd/curio/proving.go delete mode 100644 cmd/curio/rpc/rpc.go delete mode 100644 cmd/curio/run.go delete mode 100644 cmd/curio/stop.go delete mode 100644 cmd/curio/storage.go delete mode 100644 cmd/curio/tasks/tasks.go delete mode 100644 curiosrc/address.go delete mode 100644 curiosrc/alertmanager/alerts.go delete mode 100644 curiosrc/alertmanager/task_alert.go delete mode 100644 curiosrc/build/build.go delete mode 100644 curiosrc/builder.go delete mode 100644 curiosrc/chainsched/chain_sched.go delete mode 100644 curiosrc/docker/.env delete mode 100644 curiosrc/docker/.gitignore delete mode 100644 curiosrc/docker/curio/Dockerfile delete mode 
100755 curiosrc/docker/curio/entrypoint.sh delete mode 100644 curiosrc/docker/docker-compose.yaml delete mode 100644 curiosrc/docker/lotus-miner/Dockerfile delete mode 100755 curiosrc/docker/lotus-miner/entrypoint.sh delete mode 100644 curiosrc/docker/lotus/Dockerfile delete mode 100755 curiosrc/docker/lotus/entrypoint.sh delete mode 100644 curiosrc/docker/yugabyte/Dockerfile delete mode 100644 curiosrc/ffi/piece_funcs.go delete mode 100644 curiosrc/ffi/sdr_funcs.go delete mode 100644 curiosrc/ffi/task_storage.go delete mode 100644 curiosrc/gc/storage_endpoint_gc.go delete mode 100644 curiosrc/market/deal_ingest.go delete mode 100644 curiosrc/market/fakelm/iface.go delete mode 100644 curiosrc/market/fakelm/lmimpl.go delete mode 100644 curiosrc/market/lmrpc/lmrpc.go delete mode 100644 curiosrc/message/sender.go delete mode 100644 curiosrc/message/watch.go delete mode 100644 curiosrc/multictladdr/multiaddresses.go delete mode 100644 curiosrc/piece/task_cleanup_piece.go delete mode 100644 curiosrc/piece/task_park_piece.go delete mode 100644 curiosrc/proof/treed_build.go delete mode 100644 curiosrc/proof/treed_build_test.go delete mode 100644 curiosrc/seal/README.md delete mode 100644 curiosrc/seal/finalize_pieces.go delete mode 100644 curiosrc/seal/poller.go delete mode 100644 curiosrc/seal/poller_commit_msg.go delete mode 100644 curiosrc/seal/poller_precommit_msg.go delete mode 100644 curiosrc/seal/sector_num_alloc.go delete mode 100644 curiosrc/seal/task_finalize.go delete mode 100644 curiosrc/seal/task_movestorage.go delete mode 100644 curiosrc/seal/task_porep.go delete mode 100644 curiosrc/seal/task_sdr.go delete mode 100644 curiosrc/seal/task_submit_commit.go delete mode 100644 curiosrc/seal/task_submit_precommit.go delete mode 100644 curiosrc/seal/task_treed.go delete mode 100644 curiosrc/seal/task_treed_test.go delete mode 100644 curiosrc/seal/task_treerc.go delete mode 100644 curiosrc/web/api/apihelper/apihelper.go delete mode 100644 
curiosrc/web/api/config/config.go delete mode 100644 curiosrc/web/api/routes.go delete mode 100644 curiosrc/web/api/sector/sector.go delete mode 100644 curiosrc/web/api/webrpc/routes.go delete mode 100644 curiosrc/web/api/webrpc/sync_state.go delete mode 100644 curiosrc/web/hapi/robust_rpc.go delete mode 100644 curiosrc/web/hapi/routes.go delete mode 100644 curiosrc/web/hapi/simpleinfo.go delete mode 100644 curiosrc/web/hapi/simpleinfo_pipeline_porep.go delete mode 100644 curiosrc/web/hapi/watch_actor.go delete mode 100644 curiosrc/web/hapi/web/actor_summary.gohtml delete mode 100644 curiosrc/web/hapi/web/chain_rpcs.gohtml delete mode 100644 curiosrc/web/hapi/web/cluster_machines.gohtml delete mode 100644 curiosrc/web/hapi/web/cluster_task_history.gohtml delete mode 100644 curiosrc/web/hapi/web/cluster_tasks.gohtml delete mode 100644 curiosrc/web/hapi/web/node_info.gohtml delete mode 100644 curiosrc/web/hapi/web/pipeline_porep_sectors.gohtml delete mode 100644 curiosrc/web/hapi/web/pipline_porep.gohtml delete mode 100644 curiosrc/web/hapi/web/root.gohtml delete mode 100644 curiosrc/web/hapi/web/sector_info.gohtml delete mode 100644 curiosrc/web/srv.go delete mode 100644 curiosrc/web/static/chain-connectivity.mjs delete mode 100644 curiosrc/web/static/config/edit.html delete mode 100644 curiosrc/web/static/config/index.html delete mode 100644 curiosrc/web/static/favicon.svg delete mode 100644 curiosrc/web/static/index.html delete mode 100644 curiosrc/web/static/lib/jsonrpc.mjs delete mode 100644 curiosrc/web/static/pipeline_porep.html delete mode 100644 curiosrc/web/static/sector/index.html delete mode 100644 curiosrc/web/static/ux/curio-ux.mjs delete mode 100644 curiosrc/web/static/ux/fonts/Metropolis-Bold.woff delete mode 100644 curiosrc/web/static/ux/fonts/Metropolis-ExtraLight.woff delete mode 100644 curiosrc/web/static/ux/fonts/Metropolis-Light.woff delete mode 100644 curiosrc/web/static/ux/fonts/Metropolis-Medium.woff delete mode 100644 
curiosrc/web/static/ux/fonts/Metropolis-Regular.woff delete mode 100644 curiosrc/web/static/ux/fonts/Metropolis-Thin.woff delete mode 100644 curiosrc/web/static/ux/main.css delete mode 100644 curiosrc/window/compute_do.go delete mode 100644 curiosrc/window/compute_task.go delete mode 100644 curiosrc/window/faults_simple.go delete mode 100644 curiosrc/window/recover_task.go delete mode 100644 curiosrc/window/submit_task.go delete mode 100644 curiosrc/winning/winning_task.go delete mode 100644 lib/ffiselect/ffidirect/ffi-direct.go delete mode 100644 lib/ffiselect/ffiselect.go delete mode 100644 lib/ffiselect/logparse.go delete mode 100644 lib/ffiselect/testffi.go diff --git a/Makefile b/Makefile index 11e839e5fac..ac047827cd8 100644 --- a/Makefile +++ b/Makefile @@ -66,7 +66,7 @@ CLEAN+=build/.update-modules deps: $(BUILD_DEPS) .PHONY: deps -build-devnets: build lotus-seed lotus-shed curio sptool +build-devnets: build lotus-seed lotus-shed sptool .PHONY: build-devnets debug: GOFLAGS+=-tags=debug @@ -97,18 +97,6 @@ lotus-miner: $(BUILD_DEPS) .PHONY: lotus-miner BINS+=lotus-miner -curio: $(BUILD_DEPS) - rm -f curio - $(GOCC) build $(GOFLAGS) -o curio -ldflags " \ - -X github.com/filecoin-project/lotus/curiosrc/build.IsOpencl=$(FFI_USE_OPENCL) \ - -X github.com/filecoin-project/lotus/curiosrc/build.Commit=`git log -1 --format=%h_%cI`" \ - ./cmd/curio -.PHONY: curio -BINS+=curio - -cu2k: GOFLAGS+=-tags=2k -cu2k: curio - sptool: $(BUILD_DEPS) rm -f sptool $(GOCC) build $(GOFLAGS) -o sptool ./cmd/sptool @@ -133,13 +121,13 @@ lotus-gateway: $(BUILD_DEPS) .PHONY: lotus-gateway BINS+=lotus-gateway -build: lotus lotus-miner lotus-worker curio sptool +build: lotus lotus-miner lotus-worker sptool @[[ $$(type -P "lotus") ]] && echo "Caution: you have \ an existing lotus binary in your PATH. 
This may cause problems if you don't run 'sudo make install'" || true .PHONY: build -install: install-daemon install-miner install-worker install-curio install-sptool +install: install-daemon install-miner install-worker install-sptool install-daemon: install -C ./lotus /usr/local/bin/lotus @@ -147,9 +135,6 @@ install-daemon: install-miner: install -C ./lotus-miner /usr/local/bin/lotus-miner -install-curio: - install -C ./curio /usr/local/bin/curio - install-sptool: install -C ./sptool /usr/local/bin/sptool @@ -168,9 +153,6 @@ uninstall-daemon: uninstall-miner: rm -f /usr/local/bin/lotus-miner -uninstall-curio: - rm -f /usr/local/bin/curio - uninstall-sptool: rm -f /usr/local/bin/sptool @@ -275,14 +257,6 @@ install-miner-service: install-miner install-daemon-service @echo "To start the service, run: 'sudo systemctl start lotus-miner'" @echo "To enable the service on startup, run: 'sudo systemctl enable lotus-miner'" -install-curio-service: install-curio install-sptool install-daemon-service - mkdir -p /etc/systemd/system - mkdir -p /var/log/lotus - install -C -m 0644 ./scripts/curio.service /etc/systemd/system/curio.service - systemctl daemon-reload - @echo - @echo "Curio service installed. Don't forget to run 'sudo systemctl start curio' to start it and 'sudo systemctl enable curio' for it to be enabled on startup." 
- install-main-services: install-miner-service install-all-services: install-main-services @@ -301,12 +275,6 @@ clean-miner-service: rm -f /etc/systemd/system/lotus-miner.service systemctl daemon-reload -clean-curio-service: - -systemctl stop curio - -systemctl disable curio - rm -f /etc/systemd/system/curio.service - systemctl daemon-reload - clean-main-services: clean-daemon-service clean-all-services: clean-main-services @@ -381,7 +349,7 @@ docsgen-md-bin: api-gen actors-gen docsgen-openrpc-bin: api-gen actors-gen $(GOCC) build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd -docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker docsgen-md-curio +docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker docsgen-md-full: docsgen-md-bin ./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md @@ -390,8 +358,6 @@ docsgen-md-storage: docsgen-md-bin ./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md docsgen-md-worker: docsgen-md-bin ./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md -docsgen-md-curio: docsgen-md-bin - ./docgen-md "api/api_curio.go" "Curio" "api" "./api" > documentation/en/api-v0-methods-curio.md docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker docsgen-openrpc-gateway @@ -416,47 +382,16 @@ gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen jen: gen -snap: lotus lotus-miner lotus-worker curio sptool +snap: lotus lotus-miner lotus-worker sptool snapcraft # snapcraft upload ./lotus_*.snap # separate from gen because it needs binaries -docsgen-cli: lotus lotus-miner lotus-worker curio sptool +docsgen-cli: lotus lotus-miner lotus-worker sptool python3 ./scripts/generate-lotus-cli.py ./lotus config default > documentation/en/default-lotus-config.toml ./lotus-miner config default > documentation/en/default-lotus-miner-config.toml - ./curio config 
default > documentation/en/default-curio-config.toml .PHONY: docsgen-cli print-%: - @echo $*=$($*) - -### Curio devnet images -curio_docker_user?=curio -curio_base_image=$(curio_docker_user)/curio-all-in-one:latest-debug -ffi_from_source?=0 - -curio-devnet: lotus lotus-miner lotus-shed lotus-seed curio sptool -.PHONY: curio-devnet - -curio_docker_build_cmd=docker build --build-arg CURIO_TEST_IMAGE=$(curio_base_image) \ - --build-arg FFI_BUILD_FROM_SOURCE=$(ffi_from_source) $(docker_args) - -docker/curio-all-in-one: - $(curio_docker_build_cmd) -f Dockerfile.curio --target curio-all-in-one \ - -t $(curio_base_image) --build-arg GOFLAGS=-tags=debug . -.PHONY: docker/curio-all-in-one - -docker/%: - cd curiosrc/docker/$* && DOCKER_BUILDKIT=1 $(curio_docker_build_cmd) -t $(curio_docker_user)/$*-dev:dev \ - --build-arg BUILD_VERSION=dev . - -docker/curio-devnet: $(lotus_build_cmd) \ - docker/curio-all-in-one docker/lotus docker/lotus-miner docker/curio docker/yugabyte -.PHONY: docker/curio-devnet - -curio-devnet/up: - rm -rf ./curiosrc/docker/data && docker compose -f ./curiosrc/docker/docker-compose.yaml up -d - -curio-devnet/down: - docker compose -f ./curiosrc/docker/docker-compose.yaml down --rmi=local && sleep 2 && rm -rf ./curiosrc/docker/data + @echo $*=$($*) \ No newline at end of file diff --git a/cmd/curio/cli.go b/cmd/curio/cli.go deleted file mode 100644 index 6c9cb7ec67b..00000000000 --- a/cmd/curio/cli.go +++ /dev/null @@ -1,249 +0,0 @@ -package main - -import ( - "bufio" - "context" - "encoding/base64" - "errors" - "fmt" - "net" - "os" - "time" - - "github.com/BurntSushi/toml" - "github.com/gbrlsnchs/jwt/v3" - manet "github.com/multiformats/go-multiaddr/net" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-jsonrpc/auth" - - "github.com/filecoin-project/lotus/api" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/cmd/curio/rpc" -) - 
-const providerEnvVar = "CURIO_API_INFO" - -var cliCmd = &cli.Command{ - Name: "cli", - Usage: "Execute cli commands", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "machine", - Usage: "machine host:port (curio run --listen address)", - }, - }, - Before: func(cctx *cli.Context) error { - if os.Getenv(providerEnvVar) != "" { - // set already - return nil - } - if os.Getenv("LOTUS_DOCS_GENERATION") == "1" { - return nil - } - - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - - ctx := lcli.ReqContext(cctx) - - machine := cctx.String("machine") - if machine == "" { - // interactive picker - var machines []struct { - HostAndPort string `db:"host_and_port"` - LastContact time.Time `db:"last_contact"` - } - - err := db.Select(ctx, &machines, "select host_and_port, last_contact from harmony_machines") - if err != nil { - return xerrors.Errorf("getting machine list: %w", err) - } - - now := time.Now() - fmt.Println("Available machines:") - for i, m := range machines { - // A machine is healthy if contacted not longer than 2 minutes ago - healthStatus := "unhealthy" - if now.Sub(m.LastContact) <= 2*time.Minute { - healthStatus = "healthy" - } - fmt.Printf("%d. 
%s %s\n", i+1, m.HostAndPort, healthStatus) - } - - fmt.Print("Select: ") - reader := bufio.NewReader(os.Stdin) - input, err := reader.ReadString('\n') - if err != nil { - return xerrors.Errorf("reading selection: %w", err) - } - - var selection int - _, err = fmt.Sscanf(input, "%d", &selection) - if err != nil { - return xerrors.Errorf("parsing selection: %w", err) - } - - if selection < 1 || selection > len(machines) { - return xerrors.New("invalid selection") - } - - machine = machines[selection-1].HostAndPort - } - - var apiKeys []string - { - var dbconfigs []struct { - Config string `db:"config"` - Title string `db:"title"` - } - - err := db.Select(ctx, &dbconfigs, "select config from harmony_config") - if err != nil { - return xerrors.Errorf("getting configs: %w", err) - } - - var seen = make(map[string]struct{}) - - for _, config := range dbconfigs { - var layer struct { - Apis struct { - StorageRPCSecret string - } - } - - if _, err := toml.Decode(config.Config, &layer); err != nil { - return xerrors.Errorf("decode config layer %s: %w", config.Title, err) - } - - if layer.Apis.StorageRPCSecret != "" { - if _, ok := seen[layer.Apis.StorageRPCSecret]; ok { - continue - } - seen[layer.Apis.StorageRPCSecret] = struct{}{} - apiKeys = append(apiKeys, layer.Apis.StorageRPCSecret) - } - } - } - - if len(apiKeys) == 0 { - return xerrors.New("no api keys found in the database") - } - if len(apiKeys) > 1 { - return xerrors.Errorf("multiple api keys found in the database, not supported yet") - } - - var apiToken []byte - { - type jwtPayload struct { - Allow []auth.Permission - } - - p := jwtPayload{ - Allow: api.AllPermissions, - } - - sk, err := base64.StdEncoding.DecodeString(apiKeys[0]) - if err != nil { - return xerrors.Errorf("decode secret: %w", err) - } - - apiToken, err = jwt.Sign(&p, jwt.NewHS256(sk)) - if err != nil { - return xerrors.Errorf("signing token: %w", err) - } - } - - { - - laddr, err := net.ResolveTCPAddr("tcp", machine) - if err != nil { - return 
xerrors.Errorf("net resolve: %w", err) - } - - if len(laddr.IP) == 0 { - // set localhost - laddr.IP = net.IPv4(127, 0, 0, 1) - } - - ma, err := manet.FromNetAddr(laddr) - if err != nil { - return xerrors.Errorf("net from addr (%v): %w", laddr, err) - } - - token := fmt.Sprintf("%s:%s", string(apiToken), ma) - if err := os.Setenv(providerEnvVar, token); err != nil { - return xerrors.Errorf("setting env var: %w", err) - } - } - - { - api, closer, err := rpc.GetCurioAPI(cctx) - if err != nil { - return err - } - defer closer() - - v, err := api.Version(ctx) - if err != nil { - return xerrors.Errorf("querying version: %w", err) - } - - fmt.Println("remote node version:", v.String()) - } - - return nil - }, - Subcommands: []*cli.Command{ - storageCmd, - logCmd, - waitApiCmd, - }, -} - -var waitApiCmd = &cli.Command{ - Name: "wait-api", - Usage: "Wait for Curio api to come online", - Flags: []cli.Flag{ - &cli.DurationFlag{ - Name: "timeout", - Usage: "duration to wait till fail", - Value: time.Second * 30, - }, - }, - Action: func(cctx *cli.Context) error { - ctx := lcli.ReqContext(cctx) - ctx, cancel := context.WithTimeout(ctx, cctx.Duration("timeout")) - defer cancel() - for { - if ctx.Err() != nil { - break - } - - api, closer, err := rpc.GetCurioAPI(cctx) - if err != nil { - fmt.Printf("Not online yet... 
(%s)\n", err) - time.Sleep(time.Second) - continue - } - defer closer() - - _, err = api.Version(ctx) - if err != nil { - return err - } - - return nil - } - - if errors.Is(ctx.Err(), context.DeadlineExceeded) { - return fmt.Errorf("timed out waiting for api to come online") - } - - return ctx.Err() - }, -} diff --git a/cmd/curio/config.go b/cmd/curio/config.go deleted file mode 100644 index 727e6211759..00000000000 --- a/cmd/curio/config.go +++ /dev/null @@ -1,441 +0,0 @@ -package main - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "os/exec" - "path" - "strings" - - "github.com/BurntSushi/toml" - "github.com/fatih/color" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/node/config" -) - -var configCmd = &cli.Command{ - Name: "config", - Usage: "Manage node config by layers. The layer 'base' will always be applied at Curio start-up.", - Subcommands: []*cli.Command{ - configDefaultCmd, - configSetCmd, - configGetCmd, - configListCmd, - configViewCmd, - configRmCmd, - configEditCmd, - configNewCmd, - }, -} - -var configDefaultCmd = &cli.Command{ - Name: "default", - Aliases: []string{"defaults"}, - Usage: "Print default node config", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "no-comment", - Usage: "don't comment default values", - }, - }, - Action: func(cctx *cli.Context) error { - comment := !cctx.Bool("no-comment") - cfg, err := deps.GetDefaultConfig(comment) - if err != nil { - return err - } - fmt.Print(cfg) - - return nil - }, -} - -var configSetCmd = &cli.Command{ - Name: "set", - Aliases: []string{"add", "update", "create"}, - Usage: "Set a config layer or the base by providing a filename or stdin.", - ArgsUsage: "a layer's file name", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "title", - Usage: "title of the config layer (req'd for stdin)", - }, - }, - Action: func(cctx *cli.Context) 
error { - args := cctx.Args() - - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - - name := cctx.String("title") - var stream io.Reader = os.Stdin - if args.Len() != 1 { - if cctx.String("title") == "" { - return errors.New("must have a title for stdin, or a file name") - } - } else { - stream, err = os.Open(args.First()) - if err != nil { - return fmt.Errorf("cannot open file %s: %w", args.First(), err) - } - if name == "" { - name = strings.Split(path.Base(args.First()), ".")[0] - } - } - bytes, err := io.ReadAll(stream) - if err != nil { - return fmt.Errorf("cannot read stream/file %w", err) - } - - curioConfig := config.DefaultCurioConfig() // ensure it's toml - _, err = deps.LoadConfigWithUpgrades(string(bytes), curioConfig) - if err != nil { - return fmt.Errorf("cannot decode file: %w", err) - } - _ = curioConfig - - err = setConfig(db, name, string(bytes)) - - if err != nil { - return fmt.Errorf("unable to save config layer: %w", err) - } - - fmt.Println("Layer " + name + " created/updated") - return nil - }, -} - -func setConfig(db *harmonydb.DB, name, config string) error { - _, err := db.Exec(context.Background(), - `INSERT INTO harmony_config (title, config) VALUES ($1, $2) - ON CONFLICT (title) DO UPDATE SET config = excluded.config`, name, config) - return err -} - -var configGetCmd = &cli.Command{ - Name: "get", - Aliases: []string{"cat", "show"}, - Usage: "Get a config layer by name. 
You may want to pipe the output to a file, or use 'less'", - ArgsUsage: "layer name", - Action: func(cctx *cli.Context) error { - args := cctx.Args() - if args.Len() != 1 { - return fmt.Errorf("want 1 layer arg, got %d", args.Len()) - } - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - - cfg, err := getConfig(db, args.First()) - if err != nil { - return err - } - fmt.Println(cfg) - - return nil - }, -} - -func getConfig(db *harmonydb.DB, layer string) (string, error) { - var cfg string - err := db.QueryRow(context.Background(), `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&cfg) - if err != nil { - return "", err - } - return cfg, nil -} - -var configListCmd = &cli.Command{ - Name: "list", - Aliases: []string{"ls"}, - Usage: "List config layers present in the DB.", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) error { - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - var res []string - err = db.Select(context.Background(), &res, `SELECT title FROM harmony_config ORDER BY title`) - if err != nil { - return fmt.Errorf("unable to read from db: %w", err) - } - for _, r := range res { - fmt.Println(r) - } - - return nil - }, -} - -var configRmCmd = &cli.Command{ - Name: "remove", - Aliases: []string{"rm", "del", "delete"}, - Usage: "Remove a named config layer.", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) error { - args := cctx.Args() - if args.Len() != 1 { - return errors.New("must have exactly 1 arg for the layer name") - } - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - ct, err := db.Exec(context.Background(), `DELETE FROM harmony_config WHERE title=$1`, args.First()) - if err != nil { - return fmt.Errorf("unable to read from db: %w", err) - } - if ct == 0 { - return fmt.Errorf("no layer named %s", args.First()) - } - - return nil - }, -} -var configViewCmd = &cli.Command{ - Name: "interpret", - Aliases: []string{"view", "stacked", "stack"}, - Usage: "Interpret stacked 
config layers by this version of curio, with system-generated comments.", - ArgsUsage: "a list of layers to be interpreted as the final config", - Flags: []cli.Flag{ - &cli.StringSliceFlag{ - Name: "layers", - Usage: "comma or space separated list of layers to be interpreted (base is always applied)", - Required: true, - }, - }, - Action: func(cctx *cli.Context) error { - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - layers := cctx.StringSlice("layers") - curioConfig, err := deps.GetConfig(cctx.Context, layers, db) - if err != nil { - return err - } - cb, err := config.ConfigUpdate(curioConfig, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - if err != nil { - return xerrors.Errorf("cannot interpret config: %w", err) - } - fmt.Println(string(cb)) - return nil - }, -} - -var configEditCmd = &cli.Command{ - Name: "edit", - Usage: "edit a config layer", - ArgsUsage: "[layer name]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "editor", - Usage: "editor to use", - Value: "vim", - EnvVars: []string{"EDITOR"}, - }, - &cli.StringFlag{ - Name: "source", - Usage: "source config layer", - DefaultText: "", - }, - &cli.BoolFlag{ - Name: "allow-overwrite", - Usage: "allow overwrite of existing layer if source is a different layer", - }, - &cli.BoolFlag{ - Name: "no-source-diff", - Usage: "save the whole config into the layer, not just the diff", - }, - &cli.BoolFlag{ - Name: "no-interpret-source", - Usage: "do not interpret source layer", - DefaultText: "true if --source is set", - }, - }, - Action: func(cctx *cli.Context) error { - layer := cctx.Args().First() - if layer == "" { - return errors.New("layer name is required") - } - - source := layer - if cctx.IsSet("source") { - source = cctx.String("source") - - if source == layer && !cctx.Bool("allow-owerwrite") { - return errors.New("source and target layers are the same") - } - } - - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - 
- sourceConfig, err := getConfig(db, source) - if err != nil { - return xerrors.Errorf("getting source config: %w", err) - } - - if cctx.IsSet("source") && source != layer && !cctx.Bool("no-interpret-source") { - curioCfg := config.DefaultCurioConfig() - if _, err := toml.Decode(sourceConfig, curioCfg); err != nil { - return xerrors.Errorf("parsing source config: %w", err) - } - - cb, err := config.ConfigUpdate(curioCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - if err != nil { - return xerrors.Errorf("interpreting source config: %w", err) - } - sourceConfig = string(cb) - } - - editor := cctx.String("editor") - newConfig, err := edit(editor, sourceConfig) - if err != nil { - return xerrors.Errorf("editing config: %w", err) - } - - toWrite := newConfig - - if cctx.IsSet("source") && !cctx.Bool("no-source-diff") { - updated, err := diff(sourceConfig, newConfig) - if err != nil { - return xerrors.Errorf("computing diff: %w", err) - } - - { - fmt.Printf("%s will write changes as the layer because %s is not set\n", color.YellowString(">"), color.GreenString("--no-source-diff")) - fmt.Println(updated) - fmt.Printf("%s Confirm [y]: ", color.YellowString(">")) - - for { - var confirmBuf [16]byte - n, err := os.Stdin.Read(confirmBuf[:]) - if err != nil { - return xerrors.Errorf("reading confirmation: %w", err) - } - confirm := strings.TrimSpace(string(confirmBuf[:n])) - - if confirm == "" { - confirm = "y" - } - - if confirm[:1] == "y" { - break - } - if confirm[:1] == "n" { - return nil - } - - fmt.Printf("%s Confirm [y]:\n", color.YellowString(">")) - } - } - - toWrite = updated - } - - fmt.Printf("%s Writing config for layer %s\n", color.YellowString(">"), color.GreenString(layer)) - - return setConfig(db, layer, toWrite) - }, -} - -func diff(sourceConf, newConf string) (string, error) { - fromSrc := config.DefaultCurioConfig() - fromNew := config.DefaultCurioConfig() - - _, err := toml.Decode(sourceConf, 
fromSrc) - if err != nil { - return "", xerrors.Errorf("decoding source config: %w", err) - } - - _, err = toml.Decode(newConf, fromNew) - if err != nil { - return "", xerrors.Errorf("decoding new config: %w", err) - } - - cb, err := config.ConfigUpdate(fromNew, fromSrc, config.Commented(true), config.NoEnv()) - if err != nil { - return "", xerrors.Errorf("interpreting source config: %w", err) - } - - lines := strings.Split(string(cb), "\n") - var outLines []string - var categoryBuf string - - for _, line := range lines { - // drop empty lines - if strings.TrimSpace(line) == "" { - continue - } - // drop lines starting with '#' - if strings.HasPrefix(strings.TrimSpace(line), "#") { - continue - } - // if starting with [, it's a category - if strings.HasPrefix(strings.TrimSpace(line), "[") { - categoryBuf = line - continue - } - - if categoryBuf != "" { - outLines = append(outLines, categoryBuf) - categoryBuf = "" - } - - outLines = append(outLines, line) - } - - return strings.Join(outLines, "\n"), nil -} - -func edit(editor, cfg string) (string, error) { - file, err := os.CreateTemp("", "curio-config-*.toml") - if err != nil { - return "", err - } - - _, err = file.WriteString(cfg) - if err != nil { - return "", err - } - - filePath := file.Name() - - if err := file.Close(); err != nil { - return "", err - } - - defer func() { - _ = os.Remove(filePath) - }() - - cmd := exec.Command(editor, filePath) - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err = cmd.Run() - if err != nil { - return "", err - } - - data, err := os.ReadFile(filePath) - if err != nil { - return "", err - } - - return string(data), err -} diff --git a/cmd/curio/config_new.go b/cmd/curio/config_new.go deleted file mode 100644 index 65549bd6995..00000000000 --- a/cmd/curio/config_new.go +++ /dev/null @@ -1,57 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api" - cliutil 
"github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/node/repo" -) - -var configNewCmd = &cli.Command{ - Name: "new-cluster", - Usage: "Create new configuration for a new cluster", - ArgsUsage: "[SP actor address...]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "repo", - EnvVars: []string{"LOTUS_PATH"}, - Hidden: true, - Value: "~/.lotus", - }, - }, - Action: func(cctx *cli.Context) error { - if cctx.Args().Len() < 1 { - return xerrors.New("must specify at least one SP actor address. Use 'lotus-shed miner create' or use 'curio guided-setup'") - } - - ctx := cctx.Context - - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - - full, closer, err := cliutil.GetFullNodeAPIV1(cctx) - if err != nil { - return xerrors.Errorf("connecting to full node: %w", err) - } - defer closer() - - ainfo, err := cliutil.GetAPIInfo(cctx, repo.FullNode) - if err != nil { - return xerrors.Errorf("could not get API info for FullNode: %w", err) - } - - token, err := full.AuthNew(ctx, api.AllPermissions) - if err != nil { - return err - } - - return deps.CreateMinerConfig(ctx, full, db, cctx.Args().Slice(), fmt.Sprintf("%s:%s", string(token), ainfo.Addr)) - }, -} diff --git a/cmd/curio/config_test.go b/cmd/curio/config_test.go deleted file mode 100644 index 8043017d5ea..00000000000 --- a/cmd/curio/config_test.go +++ /dev/null @@ -1,438 +0,0 @@ -package main - -import ( - "reflect" - "testing" - "time" - - "github.com/invopop/jsonschema" - "github.com/samber/lo" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/node/config" -) - -var baseText = ` -[Subsystems] - # EnableWindowPost enables window post to be executed on this curio instance. Each machine in the cluster - # with WindowPoSt enabled will also participate in the window post scheduler. 
It is possible to have multiple - # machines with WindowPoSt enabled which will provide redundancy, and in case of multiple partitions per deadline, - # will allow for parallel processing of partitions. - # - # It is possible to have instances handling both WindowPoSt and WinningPoSt, which can provide redundancy without - # the need for additional machines. In setups like this it is generally recommended to run - # partitionsPerDeadline+1 machines. - # - # type: bool - #EnableWindowPost = false - - # type: int - #WindowPostMaxTasks = 0 - - # EnableWinningPost enables winning post to be executed on this curio instance. - # Each machine in the cluster with WinningPoSt enabled will also participate in the winning post scheduler. - # It is possible to mix machines with WindowPoSt and WinningPoSt enabled, for details see the EnableWindowPost - # documentation. - # - # type: bool - #EnableWinningPost = false - - # type: int - #WinningPostMaxTasks = 0 - - # EnableParkPiece enables the "piece parking" task to run on this node. This task is responsible for fetching - # pieces from the network and storing them in the storage subsystem until sectors are sealed. This task is - # only applicable when integrating with boost, and should be enabled on nodes which will hold deal data - # from boost until sectors containing the related pieces have the TreeD/TreeR constructed. - # Note that future Curio implementations will have a separate task type for fetching pieces from the internet. - # - # type: bool - #EnableParkPiece = false - - # type: int - #ParkPieceMaxTasks = 0 - - # EnableSealSDR enables SDR tasks to run. SDR is the long sequential computation - # creating 11 layer files in sector cache directory. - # - # SDR is the first task in the sealing pipeline. It's inputs are just the hash of the - # unsealed data (CommD), sector number, miner id, and the seal proof type. - # It's outputs are the 11 layer files in the sector cache directory. 
- # - # In lotus-miner this was run as part of PreCommit1. - # - # type: bool - #EnableSealSDR = false - - # The maximum amount of SDR tasks that can run simultaneously. Note that the maximum number of tasks will - # also be bounded by resources available on the machine. - # - # type: int - #SealSDRMaxTasks = 0 - - # EnableSealSDRTrees enables the SDR pipeline tree-building task to run. - # This task handles encoding of unsealed data into last sdr layer and building - # of TreeR, TreeC and TreeD. - # - # This task runs after SDR - # TreeD is first computed with optional input of unsealed data - # TreeR is computed from replica, which is first computed as field - # addition of the last SDR layer and the bottom layer of TreeD (which is the unsealed data) - # TreeC is computed from the 11 SDR layers - # The 3 trees will later be used to compute the PoRep proof. - # - # In case of SyntheticPoRep challenges for PoRep will be pre-generated at this step, and trees and layers - # will be dropped. SyntheticPoRep works by pre-generating a very large set of challenges (~30GiB on disk) - # then using a small subset of them for the actual PoRep computation. This allows for significant scratch space - # saving between PreCommit and PoRep generation at the expense of more computation (generating challenges in this step) - # - # In lotus-miner this was run as part of PreCommit2 (TreeD was run in PreCommit1). - # Note that nodes with SDRTrees enabled will also answer to Finalize tasks, - # which just remove unneeded tree data after PoRep is computed. - # - # type: bool - #EnableSealSDRTrees = false - - # The maximum amount of SealSDRTrees tasks that can run simultaneously. Note that the maximum number of tasks will - # also be bounded by resources available on the machine. - # - # type: int - #SealSDRTreesMaxTasks = 0 - - # FinalizeMaxTasks is the maximum amount of finalize tasks that can run simultaneously. 
- # The finalize task is enabled on all machines which also handle SDRTrees tasks. Finalize ALWAYS runs on whichever - # machine holds sector cache files, as it removes unneeded tree data after PoRep is computed. - # Finalize will run in parallel with the SubmitCommitMsg task. - # - # type: int - #FinalizeMaxTasks = 0 - - # EnableSendPrecommitMsg enables the sending of precommit messages to the chain - # from this curio instance. - # This runs after SDRTrees and uses the output CommD / CommR (roots of TreeD / TreeR) for the message - # - # type: bool - #EnableSendPrecommitMsg = false - - # EnablePoRepProof enables the computation of the porep proof - # - # This task runs after interactive-porep seed becomes available, which happens 150 epochs (75min) after the - # precommit message lands on chain. This task should run on a machine with a GPU. Vanilla PoRep proofs are - # requested from the machine which holds sector cache files which most likely is the machine which ran the SDRTrees - # task. - # - # In lotus-miner this was Commit1 / Commit2 - # - # type: bool - #EnablePoRepProof = false - - # The maximum amount of PoRepProof tasks that can run simultaneously. Note that the maximum number of tasks will - # also be bounded by resources available on the machine. - # - # type: int - #PoRepProofMaxTasks = 0 - - # EnableSendCommitMsg enables the sending of commit messages to the chain - # from this curio instance. - # - # type: bool - #EnableSendCommitMsg = false - - # EnableMoveStorage enables the move-into-long-term-storage task to run on this curio instance. - # This tasks should only be enabled on nodes with long-term storage. - # - # The MoveStorage task is the last task in the sealing pipeline. It moves the sealed sector data from the - # SDRTrees machine into long-term storage. This task runs after the Finalize task. - # - # type: bool - #EnableMoveStorage = false - - # The maximum amount of MoveStorage tasks that can run simultaneously. 
Note that the maximum number of tasks will - # also be bounded by resources available on the machine. It is recommended that this value is set to a number which - # uses all available network (or disk) bandwidth on the machine without causing bottlenecks. - # - # type: int - #MoveStorageMaxTasks = 0 - - # EnableWebGui enables the web GUI on this curio instance. The UI has minimal local overhead, but it should - # only need to be run on a single machine in the cluster. - # - # type: bool - #EnableWebGui = false - - # The address that should listen for Web GUI requests. - # - # type: string - #GuiAddress = ":4701" - - -[Fees] - # type: types.FIL - #DefaultMaxFee = "0.07 FIL" - - # type: types.FIL - #MaxPreCommitGasFee = "0.025 FIL" - - # type: types.FIL - #MaxCommitGasFee = "0.05 FIL" - - # type: types.FIL - #MaxTerminateGasFee = "0.5 FIL" - - # WindowPoSt is a high-value operation, so the default fee should be high. - # - # type: types.FIL - #MaxWindowPoStGasFee = "5 FIL" - - # type: types.FIL - #MaxPublishDealsFee = "0.05 FIL" - - [Fees.MaxPreCommitBatchGasFee] - # type: types.FIL - #Base = "0 FIL" - - # type: types.FIL - #PerSector = "0.02 FIL" - - [Fees.MaxCommitBatchGasFee] - # type: types.FIL - #Base = "0 FIL" - - # type: types.FIL - #PerSector = "0.03 FIL" - -[[Addresses]] - #PreCommitControl = [] - - #CommitControl = [] - - #TerminateControl = [] - - #DisableOwnerFallback = false - - #DisableWorkerFallback = false - - MinerAddresses = ["t01013"] - - -[[Addresses]] - #PreCommitControl = [] - - #CommitControl = [] - - #TerminateControl = [] - - #DisableOwnerFallback = false - - #DisableWorkerFallback = false - - #MinerAddresses = [] - - -[[Addresses]] - #PreCommitControl = [] - - #CommitControl = [] - - #TerminateControl = [] - - #DisableOwnerFallback = false - - #DisableWorkerFallback = false - - MinerAddresses = ["t01006"] - - -[Proving] - # Maximum number of sector checks to run in parallel. 
(0 = unlimited) - # - # WARNING: Setting this value too high may make the node crash by running out of stack - # WARNING: Setting this value too low may make sector challenge reading much slower, resulting in failed PoSt due - # to late submission. - # - # After changing this option, confirm that the new value works in your setup by invoking - # 'lotus-miner proving compute window-post 0' - # - # type: int - #ParallelCheckLimit = 32 - - # Maximum amount of time a proving pre-check can take for a sector. If the check times out the sector will be skipped - # - # WARNING: Setting this value too low risks in sectors being skipped even though they are accessible, just reading the - # test challenge took longer than this timeout - # WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are - # blocked (e.g. in case of disconnected NFS mount) - # - # type: Duration - #SingleCheckTimeout = "10m0s" - - # Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in - # the partition which didn't get checked on time will be skipped - # - # WARNING: Setting this value too low risks in sectors being skipped even though they are accessible, just reading the - # test challenge took longer than this timeout - # WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are - # blocked or slow - # - # type: Duration - #PartitionCheckTimeout = "20m0s" - - # Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present. - # - # WARNING: If no windowPoSt workers are connected, window PoSt WILL FAIL resulting in faulty sectors which will need - # to be recovered. Before enabling this option, make sure your PoSt workers work correctly. 
- # - # After changing this option, confirm that the new value works in your setup by invoking - # 'lotus-miner proving compute window-post 0' - # - # type: bool - #DisableBuiltinWindowPoSt = false - - # Disable Winning PoSt computation on the lotus-miner process even if no winning PoSt workers are present. - # - # WARNING: If no WinningPoSt workers are connected, Winning PoSt WILL FAIL resulting in lost block rewards. - # Before enabling this option, make sure your PoSt workers work correctly. - # - # type: bool - #DisableBuiltinWinningPoSt = false - - # Disable WindowPoSt provable sector readability checks. - # - # In normal operation, when preparing to compute WindowPoSt, lotus-miner will perform a round of reading challenges - # from all sectors to confirm that those sectors can be proven. Challenges read in this process are discarded, as - # we're only interested in checking that sector data can be read. - # - # When using builtin proof computation (no PoSt workers, and DisableBuiltinWindowPoSt is set to false), this process - # can save a lot of time and compute resources in the case that some sectors are not readable - this is caused by - # the builtin logic not skipping snark computation when some sectors need to be skipped. - # - # When using PoSt workers, this process is mostly redundant, with PoSt workers challenges will be read once, and - # if challenges for some sectors aren't readable, those sectors will just get skipped. - # - # Disabling sector pre-checks will slightly reduce IO load when proving sectors, possibly resulting in shorter - # time to produce window PoSt. In setups with good IO capabilities the effect of this option on proving time should - # be negligible. - # - # NOTE: It likely is a bad idea to disable sector pre-checks in setups with no PoSt workers. 
- # - # NOTE: Even when this option is enabled, recovering sectors will be checked before recovery declaration message is - # sent to the chain - # - # After changing this option, confirm that the new value works in your setup by invoking - # 'lotus-miner proving compute window-post 0' - # - # type: bool - #DisableWDPoStPreChecks = false - - # Maximum number of partitions to prove in a single SubmitWindowPoSt messace. 0 = network limit (3 in nv21) - # - # A single partition may contain up to 2349 32GiB sectors, or 2300 64GiB sectors. - # // - # Note that setting this value lower may result in less efficient gas use - more messages will be sent, - # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit) - # - # Setting this value above the network limit has no effect - # - # type: int - #MaxPartitionsPerPoStMessage = 0 - - # In some cases when submitting DeclareFaultsRecovered messages, - # there may be too many recoveries to fit in a BlockGasLimit. - # In those cases it may be necessary to set this value to something low (eg 1); - # Note that setting this value lower may result in less efficient gas use - more messages will be sent than needed, - # resulting in more total gas use (but each message will have lower gas limit) - # - # type: int - #MaxPartitionsPerRecoveryMessage = 0 - - # Enable single partition per PoSt Message for partitions containing recovery sectors - # - # In cases when submitting PoSt messages which contain recovering sectors, the default network limit may still be - # too high to fit in the block gas limit. 
In those cases, it becomes useful to only house the single partition - # with recovering sectors in the post message - # - # Note that setting this value lower may result in less efficient gas use - more messages will be sent, - # to prove each deadline, resulting in more total gas use (but each message will have lower gas limit) - # - # type: bool - #SingleRecoveringPartitionPerPostMessage = false - - -[Journal] - # Events of the form: "system1:event1,system1:event2[,...]" - # - # type: string - #DisabledEvents = "" - - -[Apis] - # ChainApiInfo is the API endpoint for the Lotus daemon. - # - # type: []string - ChainApiInfo = ["eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.T_jmG4DTs9Zjd7rr78862lT7D2U63uz-zqcUKHwcqaU:/dns/localhost/tcp/1234/http"] - - # RPC Secret for the storage subsystem. - # If integrating with lotus-miner this must match the value from - # cat ~/.lotusminer/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU | jq -r .PrivateKey - # - # type: string - StorageRPCSecret = "HxHe8YLHiY0LjHVw/WT/4XQkPGgRyCEYk+xiFi0Ob0o=" - -` - -func TestConfig(t *testing.T) { - baseCfg := config.DefaultCurioConfig() - - addr1 := config.CurioAddresses{ - PreCommitControl: []string{}, - CommitControl: []string{}, - TerminateControl: []string{"t3qroiebizgkz7pvj26reg5r5mqiftrt5hjdske2jzjmlacqr2qj7ytjncreih2mvujxoypwpfusmwpipvxncq"}, - DisableOwnerFallback: false, - DisableWorkerFallback: false, - MinerAddresses: []string{"t01000"}, - } - - addr2 := config.CurioAddresses{ - MinerAddresses: []string{"t01001"}, - } - - _, err := deps.LoadConfigWithUpgrades(baseText, baseCfg) - require.NoError(t, err) - - baseCfg.Addresses = append(baseCfg.Addresses, addr1) - baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool { - return len(a.MinerAddresses) > 0 - }) - - _, err = config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - 
require.NoError(t, err) - - baseCfg.Addresses = append(baseCfg.Addresses, addr2) - baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool { - return len(a.MinerAddresses) > 0 - }) - - _, err = config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - require.NoError(t, err) - -} - -func TestCustomConfigDurationJson(t *testing.T) { - ref := new(jsonschema.Reflector) - ref.Mapper = func(i reflect.Type) *jsonschema.Schema { - if i == reflect.TypeOf(config.Duration(time.Second)) { - return &jsonschema.Schema{ - Type: "string", - Format: "duration", - } - } - return nil - } - - sch := ref.Reflect(config.CurioConfig{}) - definitions := sch.Definitions["CurioProvingConfig"] - prop, ok := definitions.Properties.Get("SingleCheckTimeout") - require.True(t, ok) - require.Equal(t, prop.Type, "string") -} diff --git a/cmd/curio/deps/apiinfo.go b/cmd/curio/deps/apiinfo.go deleted file mode 100644 index 0dd96d81735..00000000000 --- a/cmd/curio/deps/apiinfo.go +++ /dev/null @@ -1,94 +0,0 @@ -package deps - -import ( - "fmt" - "net/http" - - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-jsonrpc" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/client" - "github.com/filecoin-project/lotus/api/v1api" - cliutil "github.com/filecoin-project/lotus/cli/util" -) - -func getFullNodeAPIV1Curio(ctx *cli.Context, ainfoCfg []string, opts ...cliutil.GetFullNodeOption) (v1api.FullNode, jsonrpc.ClientCloser, error) { - if tn, ok := ctx.App.Metadata["testnode-full"]; ok { - return tn.(v1api.FullNode), func() {}, nil - } - - var options cliutil.GetFullNodeOptions - for _, opt := range opts { - opt(&options) - } - - var rpcOpts []jsonrpc.Option - if options.EthSubHandler != nil { - rpcOpts = append(rpcOpts, jsonrpc.WithClientHandler("Filecoin", options.EthSubHandler), 
jsonrpc.WithClientHandlerAlias("eth_subscription", "Filecoin.EthSubscription")) - } - - var httpHeads []httpHead - version := "v1" - { - if len(ainfoCfg) == 0 { - return nil, nil, xerrors.Errorf("could not get API info: none configured. \nConsider getting base.toml with './curio config get base >/tmp/base.toml' \nthen adding \n[APIs] \n ChainApiInfo = [\" result_from lotus auth api-info --perm=admin \"]\n and updating it with './curio config set /tmp/base.toml'") - } - for _, i := range ainfoCfg { - ainfo := cliutil.ParseApiInfo(i) - addr, err := ainfo.DialArgs(version) - if err != nil { - return nil, nil, xerrors.Errorf("could not get DialArgs: %w", err) - } - httpHeads = append(httpHeads, httpHead{addr: addr, header: ainfo.AuthHeader()}) - } - } - - if cliutil.IsVeryVerbose { - _, _ = fmt.Fprintln(ctx.App.Writer, "using full node API v1 endpoint:", httpHeads[0].addr) - } - - var fullNodes []api.FullNode - var closers []jsonrpc.ClientCloser - - for _, head := range httpHeads { - v1api, closer, err := client.NewFullNodeRPCV1(ctx.Context, head.addr, head.header, rpcOpts...) 
- if err != nil { - log.Warnf("Not able to establish connection to node with addr: %s, Reason: %s", head.addr, err.Error()) - continue - } - fullNodes = append(fullNodes, v1api) - closers = append(closers, closer) - } - - // When running in cluster mode and trying to establish connections to multiple nodes, fail - // if less than 2 lotus nodes are actually running - if len(httpHeads) > 1 && len(fullNodes) < 2 { - return nil, nil, xerrors.Errorf("Not able to establish connection to more than a single node") - } - - finalCloser := func() { - for _, c := range closers { - c() - } - } - - var v1API api.FullNodeStruct - cliutil.FullNodeProxy(fullNodes, &v1API) - - v, err := v1API.Version(ctx.Context) - if err != nil { - return nil, nil, err - } - if !v.APIVersion.EqMajorMinor(api.FullAPIVersion1) { - return nil, nil, xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", api.FullAPIVersion1, v.APIVersion) - } - return &v1API, finalCloser, nil -} - -type httpHead struct { - addr string - header http.Header -} diff --git a/cmd/curio/deps/deps.go b/cmd/curio/deps/deps.go deleted file mode 100644 index c9b7b315fa2..00000000000 --- a/cmd/curio/deps/deps.go +++ /dev/null @@ -1,529 +0,0 @@ -// Package deps provides the dependencies for the curio node. 
-package deps - -import ( - "context" - "crypto/rand" - "database/sql" - "encoding/base64" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - - "github.com/BurntSushi/toml" - "github.com/gbrlsnchs/jwt/v3" - logging "github.com/ipfs/go-log/v2" - "github.com/samber/lo" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/chain/types" - curio "github.com/filecoin-project/lotus/curiosrc" - "github.com/filecoin-project/lotus/curiosrc/multictladdr" - "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/journal/alerting" - "github.com/filecoin-project/lotus/journal/fsjournal" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/modules" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/repo" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer" - "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var log = logging.Logger("curio/deps") - -func MakeDB(cctx *cli.Context) (*harmonydb.DB, error) { - // #1 CLI opts - fromCLI := func() (*harmonydb.DB, error) { - dbConfig := config.HarmonyDB{ - Username: cctx.String("db-user"), - Password: cctx.String("db-password"), - Hosts: strings.Split(cctx.String("db-host"), ","), - Database: cctx.String("db-name"), - Port: cctx.String("db-port"), - } - return harmonydb.NewFromConfig(dbConfig) - } - - readToml := func(path string) (*harmonydb.DB, error) { - cfg, err := config.FromFile(path) - if err != nil { - return nil, 
err - } - if c, ok := cfg.(*config.StorageMiner); ok { - return harmonydb.NewFromConfig(c.HarmonyDB) - } - return nil, errors.New("not a miner config") - } - - // #2 Try local miner config - fromMinerEnv := func() (*harmonydb.DB, error) { - v := os.Getenv("LOTUS_MINER_PATH") - if v == "" { - return nil, errors.New("no miner env") - } - return readToml(filepath.Join(v, "config.toml")) - - } - - fromMiner := func() (*harmonydb.DB, error) { - u, err := os.UserHomeDir() - if err != nil { - return nil, err - } - return readToml(filepath.Join(u, ".lotusminer/config.toml")) - } - fromEnv := func() (*harmonydb.DB, error) { - // #3 Try env - u, err := url.Parse(os.Getenv("CURIO_DB")) - if err != nil { - return nil, errors.New("no db connection string found in CURIO_DB env") - } - cfg := config.DefaultStorageMiner().HarmonyDB - if u.User.Username() != "" { - cfg.Username = u.User.Username() - } - if p, ok := u.User.Password(); ok && p != "" { - cfg.Password = p - } - if u.Hostname() != "" { - cfg.Hosts = []string{u.Hostname()} - } - if u.Port() != "" { - cfg.Port = u.Port() - } - if strings.TrimPrefix(u.Path, "/") != "" { - cfg.Database = strings.TrimPrefix(u.Path, "/") - } - - return harmonydb.NewFromConfig(cfg) - } - - for _, f := range []func() (*harmonydb.DB, error){fromCLI, fromMinerEnv, fromMiner, fromEnv} { - db, err := f() - if err != nil { - continue - } - return db, nil - } - log.Error("No db connection string found. User CLI args or env var: set CURIO_DB=postgres://USER:PASSWORD@HOST:PORT/DATABASE") - return fromCLI() //in-case it's not about bad config. 
-} - -type JwtPayload struct { - Allow []auth.Permission -} - -func StorageAuth(apiKey string) (sealer.StorageAuth, error) { - if apiKey == "" { - return nil, xerrors.Errorf("no api key provided") - } - - rawKey, err := base64.StdEncoding.DecodeString(apiKey) - if err != nil { - return nil, xerrors.Errorf("decoding api key: %w", err) - } - - key := jwt.NewHS256(rawKey) - - p := JwtPayload{ - Allow: []auth.Permission{"admin"}, - } - - token, err := jwt.Sign(&p, key) - if err != nil { - return nil, err - } - - headers := http.Header{} - headers.Add("Authorization", "Bearer "+string(token)) - return sealer.StorageAuth(headers), nil -} - -func GetDeps(ctx context.Context, cctx *cli.Context) (*Deps, error) { - var deps Deps - return &deps, deps.PopulateRemainingDeps(ctx, cctx, true) -} - -type Deps struct { - Layers []string - Cfg *config.CurioConfig // values - DB *harmonydb.DB // has itest capability - Full api.FullNode - Verif storiface.Verifier - As *multictladdr.MultiAddressSelector - Maddrs map[dtypes.MinerAddress]bool - ProofTypes map[abi.RegisteredSealProof]bool - Stor *paths.Remote - Si *paths.DBIndex - LocalStore *paths.Local - LocalPaths *paths.BasicLocalStorage - ListenAddr string -} - -const ( - FlagRepoPath = "repo-path" -) - -func (deps *Deps) PopulateRemainingDeps(ctx context.Context, cctx *cli.Context, makeRepo bool) error { - var err error - if makeRepo { - // Open repo - repoPath := cctx.String(FlagRepoPath) - fmt.Println("repopath", repoPath) - r, err := repo.NewFS(repoPath) - if err != nil { - return err - } - - ok, err := r.Exists() - if err != nil { - return err - } - if !ok { - if err := r.Init(repo.Curio); err != nil { - return err - } - } - } - - if deps.DB == nil { - deps.DB, err = MakeDB(cctx) - if err != nil { - return err - } - } - if deps.Layers == nil { - deps.Layers = append([]string{"base"}, cctx.StringSlice("layers")...) 
// Always stack on top of "base" layer - } - - if deps.Cfg == nil { - // The config feeds into task runners & their helpers - deps.Cfg, err = GetConfig(cctx.Context, cctx.StringSlice("layers"), deps.DB) - if err != nil { - return xerrors.Errorf("populate config: %w", err) - } - } - - log.Debugw("config", "config", deps.Cfg) - - if deps.Verif == nil { - deps.Verif = ffiwrapper.ProofVerifier - } - - if deps.As == nil { - deps.As, err = curio.AddressSelector(deps.Cfg.Addresses)() - if err != nil { - return err - } - } - - if deps.Si == nil { - de, err := journal.ParseDisabledEvents(deps.Cfg.Journal.DisabledEvents) - if err != nil { - return err - } - j, err := fsjournal.OpenFSJournalPath(cctx.String("journal"), de) - if err != nil { - return err - } - go func() { - <-ctx.Done() - _ = j.Close() - }() - - al := alerting.NewAlertingSystem(j) - deps.Si = paths.NewDBIndex(al, deps.DB) - } - - if deps.Full == nil { - var fullCloser func() - cfgApiInfo := deps.Cfg.Apis.ChainApiInfo - if v := os.Getenv("FULLNODE_API_INFO"); v != "" { - cfgApiInfo = []string{v} - } - deps.Full, fullCloser, err = getFullNodeAPIV1Curio(cctx, cfgApiInfo) - if err != nil { - return err - } - - go func() { - <-ctx.Done() - fullCloser() - }() - } - - deps.LocalPaths = &paths.BasicLocalStorage{ - PathToJSON: cctx.String("storage-json"), - } - - if deps.ListenAddr == "" { - listenAddr := cctx.String("listen") - const unspecifiedAddress = "0.0.0.0" - addressSlice := strings.Split(listenAddr, ":") - if ip := net.ParseIP(addressSlice[0]); ip != nil { - if ip.String() == unspecifiedAddress { - rip, err := deps.DB.GetRoutableIP() - if err != nil { - return err - } - deps.ListenAddr = rip + ":" + addressSlice[1] - } - } - } - if cctx.IsSet("gui-listen") { - deps.Cfg.Subsystems.GuiAddress = cctx.String("gui-listen") - } - if deps.LocalStore == nil { - deps.LocalStore, err = paths.NewLocal(ctx, deps.LocalPaths, deps.Si, []string{"http://" + deps.ListenAddr + "/remote"}) - if err != nil { - return err - } - } 
- - sa, err := StorageAuth(deps.Cfg.Apis.StorageRPCSecret) - if err != nil { - return xerrors.Errorf(`'%w' while parsing the config toml's - [Apis] - StorageRPCSecret=%v -Get it with: jq .PrivateKey ~/.lotus-miner/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU`, err, deps.Cfg.Apis.StorageRPCSecret) - } - if deps.Stor == nil { - deps.Stor = paths.NewRemote(deps.LocalStore, deps.Si, http.Header(sa), 10, &paths.DefaultPartialFileHandler{}) - } - - if deps.Maddrs == nil { - deps.Maddrs = map[dtypes.MinerAddress]bool{} - } - if len(deps.Maddrs) == 0 { - for _, s := range deps.Cfg.Addresses { - for _, s := range s.MinerAddresses { - addr, err := address.NewFromString(s) - if err != nil { - return err - } - deps.Maddrs[dtypes.MinerAddress(addr)] = true - } - } - } - - if deps.ProofTypes == nil { - deps.ProofTypes = map[abi.RegisteredSealProof]bool{} - } - if len(deps.ProofTypes) == 0 { - for maddr := range deps.Maddrs { - spt, err := modules.SealProofType(maddr, deps.Full) - if err != nil { - return err - } - deps.ProofTypes[spt] = true - } - } - - return nil -} - -func LoadConfigWithUpgrades(text string, curioConfigWithDefaults *config.CurioConfig) (toml.MetaData, error) { - // allow migration from old config format that was limited to 1 wallet setup. 
- newText := strings.Join(lo.Map(strings.Split(text, "\n"), func(line string, _ int) string { - if strings.EqualFold(line, "[addresses]") { - return "[[addresses]]" - } - return line - }), "\n") - meta, err := toml.Decode(newText, &curioConfigWithDefaults) - for i := range curioConfigWithDefaults.Addresses { - if curioConfigWithDefaults.Addresses[i].PreCommitControl == nil { - curioConfigWithDefaults.Addresses[i].PreCommitControl = []string{} - } - if curioConfigWithDefaults.Addresses[i].CommitControl == nil { - curioConfigWithDefaults.Addresses[i].CommitControl = []string{} - } - if curioConfigWithDefaults.Addresses[i].TerminateControl == nil { - curioConfigWithDefaults.Addresses[i].TerminateControl = []string{} - } - } - return meta, err -} -func GetConfig(ctx context.Context, layers []string, db *harmonydb.DB) (*config.CurioConfig, error) { - curioConfig := config.DefaultCurioConfig() - have := []string{} - layers = append([]string{"base"}, layers...) // Always stack on top of "base" layer - for _, layer := range layers { - text := "" - err := db.QueryRow(ctx, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text) - if err != nil { - if strings.Contains(err.Error(), sql.ErrNoRows.Error()) { - return nil, fmt.Errorf("missing layer '%s' ", layer) - } - if layer == "base" { - return nil, errors.New(`curio defaults to a layer named 'base'. - Either use 'migrate' command or edit a base.toml and upload it with: curio config set base.toml`) - } - return nil, fmt.Errorf("could not read layer '%s': %w", layer, err) - } - - meta, err := LoadConfigWithUpgrades(text, curioConfig) - if err != nil { - return curioConfig, fmt.Errorf("could not read layer, bad toml %s: %w", layer, err) - } - for _, k := range meta.Keys() { - have = append(have, strings.Join(k, " ")) - } - log.Debugw("Using layer", "layer", layer, "config", curioConfig) - } - _ = have // FUTURE: verify that required fields are here. 
- // If config includes 3rd-party config, consider JSONSchema as a way that - // 3rd-parties can dynamically include config requirements and we can - // validate the config. Because of layering, we must validate @ startup. - return curioConfig, nil -} - -func GetDefaultConfig(comment bool) (string, error) { - c := config.DefaultCurioConfig() - cb, err := config.ConfigUpdate(c, nil, config.Commented(comment), config.DefaultKeepUncommented(), config.NoEnv()) - if err != nil { - return "", err - } - return string(cb), nil -} - -func GetDepsCLI(ctx context.Context, cctx *cli.Context) (*Deps, error) { - db, err := MakeDB(cctx) - if err != nil { - return nil, err - } - - layers := cctx.StringSlice("layers") - - cfg, err := GetConfig(cctx.Context, layers, db) - if err != nil { - return nil, err - } - - full, fullCloser, err := getFullNodeAPIV1Curio(cctx, cfg.Apis.ChainApiInfo) - if err != nil { - return nil, err - } - go func() { - select { - case <-ctx.Done(): - fullCloser() - } - }() - - return &Deps{ - Cfg: cfg, - DB: db, - Full: full, - }, nil -} - -func CreateMinerConfig(ctx context.Context, full v1api.FullNode, db *harmonydb.DB, miners []string, info string) error { - var titles []string - err := db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`) - if err != nil { - return fmt.Errorf("cannot reach the db. 
Ensure that Yugabyte flags are set correctly to"+ - " reach Yugabyte: %s", err.Error()) - } - - // setup config - curioConfig := config.DefaultCurioConfig() - - for _, addr := range miners { - maddr, err := address.NewFromString(addr) - if err != nil { - return xerrors.Errorf("Invalid address: %s", addr) - } - - _, err = full.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("Failed to get miner info: %w", err) - } - - curioConfig.Addresses = append(curioConfig.Addresses, config.CurioAddresses{ - PreCommitControl: []string{}, - CommitControl: []string{}, - TerminateControl: []string{}, - DisableOwnerFallback: false, - DisableWorkerFallback: false, - MinerAddresses: []string{addr}, - }) - } - - { - sk, err := io.ReadAll(io.LimitReader(rand.Reader, 32)) - if err != nil { - return err - } - - curioConfig.Apis.StorageRPCSecret = base64.StdEncoding.EncodeToString(sk) - } - - { - curioConfig.Apis.ChainApiInfo = append(curioConfig.Apis.ChainApiInfo, info) - } - - curioConfig.Addresses = lo.Filter(curioConfig.Addresses, func(a config.CurioAddresses, _ int) bool { - return len(a.MinerAddresses) > 0 - }) - - // If no base layer is present - if !lo.Contains(titles, "base") { - cb, err := config.ConfigUpdate(curioConfig, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - if err != nil { - return xerrors.Errorf("Failed to generate default config: %w", err) - } - cfg := string(cb) - _, err = db.Exec(ctx, "INSERT INTO harmony_config (title, config) VALUES ('base', $1)", cfg) - if err != nil { - return xerrors.Errorf("failed to insert the 'base' into the database: %w", err) - } - fmt.Printf("The base layer has been updated with miner[s] %s\n", miners) - return nil - } - - // if base layer is present - baseCfg := config.DefaultCurioConfig() - var baseText string - err = db.QueryRow(ctx, "SELECT config FROM harmony_config WHERE title='base'").Scan(&baseText) - if err != nil { - return 
xerrors.Errorf("Cannot load base config from database: %w", err) - } - _, err = LoadConfigWithUpgrades(baseText, baseCfg) - if err != nil { - return xerrors.Errorf("Cannot parse base config: %w", err) - } - - baseCfg.Addresses = append(baseCfg.Addresses, curioConfig.Addresses...) - baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool { - return len(a.MinerAddresses) > 0 - }) - - cb, err := config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - if err != nil { - return xerrors.Errorf("cannot interpret config: %w", err) - } - _, err = db.Exec(ctx, "UPDATE harmony_config SET config=$1 WHERE title='base'", string(cb)) - if err != nil { - return xerrors.Errorf("cannot update base config: %w", err) - } - fmt.Printf("The base layer has been updated with miner[s] %s\n", miners) - return nil -} diff --git a/cmd/curio/ffi.go b/cmd/curio/ffi.go deleted file mode 100644 index 5c9411063c6..00000000000 --- a/cmd/curio/ffi.go +++ /dev/null @@ -1,71 +0,0 @@ -package main - -import ( - "encoding/gob" - "fmt" - "os" - "reflect" - - "github.com/ipfs/go-cid" - "github.com/samber/lo" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/lib/ffiselect" - ffidirect "github.com/filecoin-project/lotus/lib/ffiselect/ffidirect" - "github.com/filecoin-project/lotus/lib/must" -) - -var ffiCmd = &cli.Command{ - Name: "ffi", - Hidden: true, - Flags: []cli.Flag{ - layersFlag, - }, - Action: func(cctx *cli.Context) (err error) { - output := os.NewFile(uintptr(3), "out") - - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("panic: %v", r) - } - if err != nil { - err = gob.NewEncoder(output).Encode(ffiselect.ValErr{Val: nil, Err: err.Error()}) - if err != nil { - panic(err) - } - } - }() - var callInfo ffiselect.FFICall - if err := gob.NewDecoder(os.Stdin).Decode(&callInfo); err != nil { - return xerrors.Errorf("ffi subprocess can 
not decode: %w", err) - } - - args := lo.Map(callInfo.Args, func(arg any, i int) reflect.Value { - return reflect.ValueOf(arg) - }) - - resAry := reflect.ValueOf(ffidirect.FFI{}).MethodByName(callInfo.Fn).Call(args) - res := lo.Map(resAry, func(res reflect.Value, i int) any { - return res.Interface() - }) - - err = gob.NewEncoder(output).Encode(ffiselect.ValErr{Val: res, Err: ""}) - if err != nil { - return xerrors.Errorf("ffi subprocess can not encode: %w", err) - } - - return output.Close() - }, -} - -func ffiSelfTest() { - val1, val2 := 12345678, must.One(cid.Parse("bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi")) - ret1, ret2, err := ffiselect.FFISelect{}.SelfTest(val1, val2) - if err != nil { - panic("ffi self test failed:" + err.Error()) - } - if ret1 != val1 || !val2.Equals(ret2) { - panic(fmt.Sprint("ffi self test failed: values do not match: ", val1, val2, ret1, ret2)) - } -} diff --git a/cmd/curio/guidedsetup/guidedsetup.go b/cmd/curio/guidedsetup/guidedsetup.go deleted file mode 100644 index 4587beff060..00000000000 --- a/cmd/curio/guidedsetup/guidedsetup.go +++ /dev/null @@ -1,896 +0,0 @@ -// guidedSetup for migration from lotus-miner to Curio -// -// IF STRINGS CHANGED { -// follow instructions at ../internal/translations/translations.go -// } -package guidedsetup - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "math/bits" - "net/http" - "os" - "os/signal" - "path" - "reflect" - "strconv" - "strings" - "syscall" - "time" - - "github.com/BurntSushi/toml" - "github.com/charmbracelet/lipgloss" - "github.com/docker/go-units" - "github.com/manifoldco/promptui" - "github.com/mitchellh/go-homedir" - "github.com/samber/lo" - "github.com/urfave/cli/v2" - "golang.org/x/text/language" - "golang.org/x/text/message" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" 
- "github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/cli/spcli/createminer" - cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/cmd/curio/deps" - _ "github.com/filecoin-project/lotus/cmd/curio/internal/translations" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/repo" -) - -// URL to upload user-selected fields to help direct developer's focus. -const DeveloperFocusRequestURL = "https://curiostorage.org/cgi-bin/savedata.php" - -var GuidedsetupCmd = &cli.Command{ - Name: "guided-setup", - Usage: "Run the guided setup for migrating from lotus-miner to Curio or Creating a new Curio miner", - Flags: []cli.Flag{ - &cli.StringFlag{ // for cliutil.GetFullNodeAPI - Name: "repo", - EnvVars: []string{"LOTUS_PATH"}, - Hidden: true, - Value: "~/.lotus", - }, - }, - Action: func(cctx *cli.Context) (err error) { - T, say := SetupLanguage() - setupCtrlC(say) - - // Run the migration steps - migrationData := MigrationData{ - T: T, - say: say, - selectTemplates: &promptui.SelectTemplates{ - Help: T("Use the arrow keys to navigate: ↓ ↑ → ← "), - }, - cctx: cctx, - ctx: cctx.Context, - } - - newOrMigrate(&migrationData) - if migrationData.init { - say(header, "This interactive tool creates a new miner actor and creates the basic configuration layer for it.") - say(notice, "This process is partially idempotent. Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster < miner ID >' to finish the configuration.") - for _, step := range newMinerSteps { - step(&migrationData) - } - } else { - say(header, "This interactive tool migrates lotus-miner to Curio in 5 minutes.") - say(notice, "Each step needs your confirmation and can be reversed. 
Press Ctrl+C to exit at any time.") - - for _, step := range migrationSteps { - step(&migrationData) - } - } - - for _, closer := range migrationData.closers { - closer() - } - return nil - }, -} - -func setupCtrlC(say func(style lipgloss.Style, key message.Reference, a ...interface{})) { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - go func() { - <-c - say(notice, "Ctrl+C pressed in Terminal") - os.Exit(2) - }() -} - -var ( - header = lipgloss.NewStyle(). - Align(lipgloss.Left). - Foreground(lipgloss.Color("#00FF00")). - Background(lipgloss.Color("#242424")). - BorderStyle(lipgloss.NormalBorder()). - Width(60).Margin(1) - - notice = lipgloss.NewStyle(). - Align(lipgloss.Left). - Bold(true). - Foreground(lipgloss.Color("#CCCCCC")). - Background(lipgloss.Color("#333300")).MarginBottom(1) - - green = lipgloss.NewStyle(). - Align(lipgloss.Left). - Foreground(lipgloss.Color("#00FF00")). - Background(lipgloss.Color("#000000")) - - plain = lipgloss.NewStyle().Align(lipgloss.Left) - - section = lipgloss.NewStyle(). - Align(lipgloss.Left). - Foreground(lipgloss.Color("#000000")). - Background(lipgloss.Color("#FFFFFF")). - Underline(true) - - code = lipgloss.NewStyle(). - Align(lipgloss.Left). - Foreground(lipgloss.Color("#00FF00")). 
- Background(lipgloss.Color("#f8f9fa")) -) - -func SetupLanguage() (func(key message.Reference, a ...interface{}) string, func(style lipgloss.Style, key message.Reference, a ...interface{})) { - langText := "en" - problem := false - if len(os.Getenv("LANG")) > 1 { - langText = os.Getenv("LANG")[:2] - } else { - problem = true - } - - lang, err := language.Parse(langText) - if err != nil { - lang = language.English - problem = true - fmt.Println("Error parsing language") - } - - langs := message.DefaultCatalog.Languages() - have := lo.SliceToMap(langs, func(t language.Tag) (string, bool) { return t.String(), true }) - if _, ok := have[lang.String()]; !ok { - lang = language.English - problem = true - } - if problem { - _ = os.Setenv("LANG", "en-US") // for later users of this function - notice.Copy().AlignHorizontal(lipgloss.Right). - Render("$LANG=" + langText + " unsupported. Available: " + strings.Join(lo.Keys(have), ", ")) - fmt.Println("Defaulting to English. Please reach out to the Curio team if you would like to have additional language support.") - } - return func(key message.Reference, a ...interface{}) string { - return message.NewPrinter(lang).Sprintf(key, a...) - }, func(sty lipgloss.Style, key message.Reference, a ...interface{}) { - msg := message.NewPrinter(lang).Sprintf(key, a...) - fmt.Println(sty.Render(msg)) - } -} - -func newOrMigrate(d *MigrationData) { - i, _, err := (&promptui.Select{ - Label: d.T("I want to:"), - Items: []string{ - d.T("Migrate from existing Lotus-Miner"), - d.T("Create a new miner")}, - Templates: d.selectTemplates, - }).Run() - if err != nil { - d.say(notice, "Aborting remaining steps.", err.Error()) - os.Exit(1) - } - if i == 1 { - d.init = true - } -} - -type migrationStep func(*MigrationData) - -var migrationSteps = []migrationStep{ - readMinerConfig, // Tells them to be on the miner machine - yugabyteConnect, // Miner is updated - configToDB, // work on base configuration migration. 
- verifySectors, // Verify the sectors are in the database - doc, - oneLastThing, - complete, -} - -type newMinerStep func(data *MigrationData) - -var newMinerSteps = []newMinerStep{ - stepPresteps, - stepCreateActor, - stepNewMinerConfig, - doc, - oneLastThing, - completeInit, -} - -type MigrationData struct { - T func(key message.Reference, a ...interface{}) string - say func(style lipgloss.Style, key message.Reference, a ...interface{}) - selectTemplates *promptui.SelectTemplates - MinerConfigPath string - MinerConfig *config.StorageMiner - DB *harmonydb.DB - MinerID address.Address - full v1api.FullNode - cctx *cli.Context - closers []jsonrpc.ClientCloser - ctx context.Context - owner address.Address - worker address.Address - sender address.Address - ssize abi.SectorSize - confidence uint64 - init bool -} - -func complete(d *MigrationData) { - stepCompleted(d, d.T("Lotus-Miner to Curio Migration.")) - d.say(plain, "Try the web interface with %s for further guided improvements.", code.Render("curio run --layers=gui")) - d.say(plain, "You can now migrate your market node (%s), if applicable.", "Boost") -} - -func completeInit(d *MigrationData) { - stepCompleted(d, d.T("New Miner initialization complete.")) - d.say(plain, "Try the web interface with %s for further guided improvements.", code.Render("curio run --layers=gui")) -} - -func configToDB(d *MigrationData) { - d.say(section, "Migrating lotus-miner config.toml to Curio in-database configuration.") - - { - var closer jsonrpc.ClientCloser - var err error - d.full, closer, err = cliutil.GetFullNodeAPIV1(d.cctx) - d.closers = append(d.closers, closer) - if err != nil { - d.say(notice, "Error getting API: %s", err.Error()) - os.Exit(1) - } - } - ainfo, err := cliutil.GetAPIInfo(d.cctx, repo.FullNode) - if err != nil { - d.say(notice, "could not get API info for FullNode: %w", err) - os.Exit(1) - } - token, err := d.full.AuthNew(context.Background(), api.AllPermissions) - if err != nil { - d.say(notice, "Error 
getting token: %s", err.Error()) - os.Exit(1) - } - - chainApiInfo := fmt.Sprintf("%s:%s", string(token), ainfo.Addr) - - shouldErrPrompt := func() bool { - i, _, err := (&promptui.Select{ - Label: d.T("Unmigratable sectors found. Do you want to continue?"), - Items: []string{ - d.T("Yes, continue"), - d.T("No, abort")}, - Templates: d.selectTemplates, - }).Run() - if err != nil { - d.say(notice, "Aborting migration.", err.Error()) - os.Exit(1) - } - return i == 1 - } - - d.MinerID, err = SaveConfigToLayerMigrateSectors(d.MinerConfigPath, chainApiInfo, shouldErrPrompt) - if err != nil { - d.say(notice, "Error saving config to layer: %s. Aborting Migration", err.Error()) - os.Exit(1) - } -} - -// bucket returns the power's 4 highest bits (rounded down). -func bucket(power *api.MinerPower) uint64 { - rawQAP := power.TotalPower.QualityAdjPower.Uint64() - magnitude := lo.Max([]int{bits.Len64(rawQAP), 5}) - - // shifting erases resolution so we cannot distinguish SPs of similar scales. - return rawQAP >> (uint64(magnitude) - 4) << (uint64(magnitude - 4)) -} - -type uploadType int - -const uploadTypeIndividual uploadType = 0 -const uploadTypeAggregate uploadType = 1 - -// const uploadTypeHint uploadType = 2 -const uploadTypeNothing uploadType = 3 - -func oneLastThing(d *MigrationData) { - d.say(section, "The Curio team wants to improve the software you use. Tell the team you're using `%s`.", "curio") - i, _, err := (&promptui.Select{ - Label: d.T("Select what you want to share with the Curio team."), - Items: []string{ - d.T("Individual Data: Miner ID, Curio version, chain (%s or %s). 
Signed.", "mainnet", "calibration"), - d.T("Aggregate-Anonymous: version, chain, and Miner power (bucketed)."), - d.T("Hint: I am someone running Curio on whichever chain."), - d.T("Nothing.")}, - Templates: d.selectTemplates, - }).Run() - preference := uploadType(i) - if err != nil { - d.say(notice, "Aborting remaining steps.", err.Error()) - os.Exit(1) - } - if preference != uploadTypeNothing { - msgMap := map[string]any{ - "domain": "curio-newuser", - "net": build.BuildTypeString(), - } - if preference == uploadTypeIndividual || preference == uploadTypeAggregate { - // articles of incorporation - power, err := d.full.StateMinerPower(context.Background(), d.MinerID, types.EmptyTSK) - if err != nil { - d.say(notice, "Error getting miner power: %s", err.Error()) - os.Exit(1) - } - msgMap["version"] = build.BuildVersion - msgMap["net"] = build.BuildType - msgMap["power"] = map[uploadType]uint64{ - uploadTypeIndividual: power.MinerPower.QualityAdjPower.Uint64(), - uploadTypeAggregate: bucket(power)}[preference] - - if preference == uploadTypeIndividual { // Sign it - msgMap["miner_id"] = d.MinerID - msg, err := json.Marshal(msgMap) - if err != nil { - d.say(notice, "Error marshalling message: %s", err.Error()) - os.Exit(1) - } - mi, err := d.full.StateMinerInfo(context.Background(), d.MinerID, types.EmptyTSK) - if err != nil { - d.say(notice, "Error getting miner info: %s", err.Error()) - os.Exit(1) - } - sig, err := d.full.WalletSign(context.Background(), mi.Worker, msg) - if err != nil { - d.say(notice, "Error signing message: %s", err.Error()) - os.Exit(1) - } - msgMap["signature"] = base64.StdEncoding.EncodeToString(sig.Data) - } - } - msg, err := json.Marshal(msgMap) - if err != nil { - d.say(notice, "Error marshalling message: %s", err.Error()) - os.Exit(1) - } - - resp, err := http.DefaultClient.Post(DeveloperFocusRequestURL, "application/json", bytes.NewReader(msg)) - if err != nil { - d.say(notice, "Error sending message: %s", err.Error()) - } - if resp != 
nil { - defer func() { _ = resp.Body.Close() }() - if resp.StatusCode != 200 { - b, err := io.ReadAll(resp.Body) - if err == nil { - d.say(notice, "Error sending message: Status %s, Message: ", resp.Status, string(b)) - } - } else { - stepCompleted(d, d.T("Message sent.")) - } - } - } -} - -func doc(d *MigrationData) { - d.say(plain, "Documentation: ") - d.say(plain, "The '%s' layer stores common configuration. All curio instances can include it in their %s argument.", "base", "--layers") - d.say(plain, "You can add other layers for per-machine configuration changes.") - - d.say(plain, "Filecoin %s channels: %s and %s", "Slack", "#fil-curio-help", "#fil-curio-dev") - - d.say(plain, "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'") - //d.say(plain, "Point your browser to your web GUI to complete setup with %s and advanced featues.", "Boost") - d.say(plain, "One database can serve multiple miner IDs: Run a migration for each lotus-miner.") -} - -func verifySectors(d *MigrationData) { - var i []int - var lastError string - fmt.Println() - d.say(section, "Please start (or restart) %s now that database credentials are in %s.", "lotus-miner", "config.toml") - d.say(notice, "Waiting for %s to write sectors into Yugabyte.", "lotus-miner") - - mid, err := address.IDFromAddress(d.MinerID) - if err != nil { - d.say(notice, "Error interpreting miner ID: %s: ID: %s", err.Error(), d.MinerID.String()) - os.Exit(1) - } - - for { - err := d.DB.Select(context.Background(), &i, ` - SELECT count(*) FROM sector_location WHERE miner_id=$1`, mid) - if err != nil { - if err.Error() != lastError { - d.say(notice, "Error verifying sectors: %s", err.Error()) - lastError = err.Error() - } - continue - } - if i[0] > 0 { - break - } - fmt.Print(".") - time.Sleep(5 * time.Second) - } - d.say(plain, "The sectors are in the database. 
The database is ready for %s.", "Curio") - d.say(notice, "Now shut down lotus-miner and lotus-worker and use run %s instead.", code.Render("curio run")) - - _, err = (&promptui.Prompt{Label: d.T("Press return to continue")}).Run() - if err != nil { - d.say(notice, "Aborting migration.") - os.Exit(1) - } - stepCompleted(d, d.T("Sectors verified. %d sector locations found.", i)) -} - -func yugabyteConnect(d *MigrationData) { - harmonyCfg := config.DefaultStorageMiner().HarmonyDB //copy the config to a local variable - if d.MinerConfig != nil { - harmonyCfg = d.MinerConfig.HarmonyDB //copy the config to a local variable - } - var err error - d.DB, err = harmonydb.NewFromConfig(harmonyCfg) - if err != nil { - hcfg := getDBDetails(d) - harmonyCfg = *hcfg - } - - d.say(plain, "Connected to Yugabyte. Schema is current.") - if !reflect.DeepEqual(harmonyCfg, d.MinerConfig.HarmonyDB) || !d.MinerConfig.Subsystems.EnableSectorIndexDB { - d.MinerConfig.HarmonyDB = harmonyCfg - d.MinerConfig.Subsystems.EnableSectorIndexDB = true - - d.say(plain, "Enabling Sector Indexing in the database.") - buf, err := config.ConfigUpdate(d.MinerConfig, config.DefaultStorageMiner()) - if err != nil { - d.say(notice, "Error encoding config.toml: %s", err.Error()) - os.Exit(1) - } - _, err = (&promptui.Prompt{ - Label: d.T("Press return to update %s with Yugabyte info. 
A Backup file will be written to that folder before changes are made.", "config.toml")}).Run() - if err != nil { - os.Exit(1) - } - p, err := homedir.Expand(d.MinerConfigPath) - if err != nil { - d.say(notice, "Error expanding path: %s", err.Error()) - os.Exit(1) - } - tomlPath := path.Join(p, "config.toml") - stat, err := os.Stat(tomlPath) - if err != nil { - d.say(notice, "Error reading filemode of config.toml: %s", err.Error()) - os.Exit(1) - } - fBackup, err := os.CreateTemp(p, "config-backup-*.toml") - if err != nil { - d.say(notice, "Error creating backup file: %s", err.Error()) - os.Exit(1) - } - fBackupContents, err := os.ReadFile(tomlPath) - if err != nil { - d.say(notice, "Error reading config.toml: %s", err.Error()) - os.Exit(1) - } - _, err = fBackup.Write(fBackupContents) - if err != nil { - d.say(notice, "Error writing backup file: %s", err.Error()) - os.Exit(1) - } - err = fBackup.Close() - if err != nil { - d.say(notice, "Error closing backup file: %s", err.Error()) - os.Exit(1) - } - - filemode := stat.Mode() - err = os.WriteFile(path.Join(p, "config.toml"), buf, filemode) - if err != nil { - d.say(notice, "Error writing config.toml: %s", err.Error()) - os.Exit(1) - } - d.say(section, "Restart Lotus Miner. 
") - } - stepCompleted(d, d.T("Connected to Yugabyte")) -} - -func readMinerConfig(d *MigrationData) { - d.say(plain, "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.") - - verifyPath := func(dir string) (*config.StorageMiner, error) { - cfg := config.DefaultStorageMiner() - dir, err := homedir.Expand(dir) - if err != nil { - return nil, err - } - _, err = toml.DecodeFile(path.Join(dir, "config.toml"), &cfg) - return cfg, err - } - - dirs := map[string]*config.StorageMiner{"~/.lotusminer": nil, "~/.lotus-miner-local-net": nil} - if v := os.Getenv("LOTUS_MINER_PATH"); v != "" { - dirs[v] = nil - } - for dir := range dirs { - cfg, err := verifyPath(dir) - if err != nil { - delete(dirs, dir) - } - dirs[dir] = cfg - } - - var otherPath bool - if len(dirs) > 0 { - _, str, err := (&promptui.Select{ - Label: d.T("Select the location of your lotus-miner config directory?"), - Items: append(lo.Keys(dirs), d.T("Other")), - Templates: d.selectTemplates, - }).Run() - if err != nil { - if err.Error() == "^C" { - os.Exit(1) - } - otherPath = true - } else { - if str == d.T("Other") { - otherPath = true - } else { - d.MinerConfigPath = str - d.MinerConfig = dirs[str] - } - } - } - if otherPath { - minerPathEntry: - str, err := (&promptui.Prompt{ - Label: d.T("Enter the path to the configuration directory used by %s", "lotus-miner"), - }).Run() - if err != nil { - d.say(notice, "No path provided, abandoning migration ") - os.Exit(1) - } - cfg, err := verifyPath(str) - if err != nil { - d.say(notice, "Cannot read the config.toml file in the provided directory, Error: %s", err.Error()) - goto minerPathEntry - } - d.MinerConfigPath = str - d.MinerConfig = cfg - } - - // Try to lock Miner repo to verify that lotus-miner is not running - { - r, err := repo.NewFS(d.MinerConfigPath) - if err != nil { - d.say(plain, "Could not create repo from directory: %s. 
Aborting migration", err.Error()) - os.Exit(1) - } - lr, err := r.Lock(repo.StorageMiner) - if err != nil { - d.say(plain, "Could not lock miner repo. Your miner must be stopped: %s\n Aborting migration", err.Error()) - os.Exit(1) - } - _ = lr.Close() - } - - stepCompleted(d, d.T("Read Miner Config")) -} -func stepCompleted(d *MigrationData, step string) { - fmt.Print(green.Render("✔ ")) - d.say(plain, "Step Complete: %s\n", step) -} - -func stepCreateActor(d *MigrationData) { - d.say(plain, "Initializing a new miner actor.") - - for { - i, _, err := (&promptui.Select{ - Label: d.T("Enter the info to create a new miner"), - Items: []string{ - d.T("Owner Address: %s", d.owner.String()), - d.T("Worker Address: %s", d.worker.String()), - d.T("Sender Address: %s", d.sender.String()), - d.T("Sector Size: %d", d.ssize), - d.T("Confidence epochs: %d", d.confidence), - d.T("Continue to verify the addresses and create a new miner actor.")}, - Size: 6, - Templates: d.selectTemplates, - }).Run() - if err != nil { - d.say(notice, "Miner creation error occurred: %s ", err.Error()) - os.Exit(1) - } - switch i { - case 0: - owner, err := (&promptui.Prompt{ - Label: d.T("Enter the owner address"), - }).Run() - if err != nil { - d.say(notice, "No address provided") - continue - } - ownerAddr, err := address.NewFromString(owner) - if err != nil { - d.say(notice, "Failed to parse the address: %s", err.Error()) - } - d.owner = ownerAddr - case 1, 2: - val, err := (&promptui.Prompt{ - Label: d.T("Enter %s address", []string{"worker", "sender"}[i-1]), - Default: d.owner.String(), - }).Run() - if err != nil { - d.say(notice, err.Error()) - continue - } - addr, err := address.NewFromString(val) - if err != nil { - d.say(notice, "Failed to parse the address: %s", err.Error()) - } - switch i { - case 1: - d.worker = addr - case 2: - d.sender = addr - } - continue - case 3: - val, err := (&promptui.Prompt{ - Label: d.T("Enter the sector size"), - }).Run() - if err != nil { - d.say(notice, 
"No value provided") - continue - } - sectorSize, err := units.RAMInBytes(val) - if err != nil { - d.say(notice, "Failed to parse sector size: %s", err.Error()) - continue - } - d.ssize = abi.SectorSize(sectorSize) - continue - case 4: - confidenceStr, err := (&promptui.Prompt{ - Label: d.T("Confidence epochs"), - Default: strconv.Itoa(5), - }).Run() - if err != nil { - d.say(notice, err.Error()) - continue - } - confidence, err := strconv.ParseUint(confidenceStr, 10, 64) - if err != nil { - d.say(notice, "Failed to parse confidence: %s", err.Error()) - continue - } - d.confidence = confidence - goto minerInit // break out of the for loop once we have all the values - } - } - -minerInit: - miner, err := createminer.CreateStorageMiner(d.ctx, d.full, d.owner, d.worker, d.sender, d.ssize, d.confidence) - if err != nil { - d.say(notice, "Failed to create the miner actor: %s", err.Error()) - os.Exit(1) - } - - d.MinerID = miner - stepCompleted(d, d.T("Miner %s created successfully", miner.String())) -} - -func stepPresteps(d *MigrationData) { - - // Setup and connect to YugabyteDB - _ = getDBDetails(d) - - // Verify HarmonyDB connection - var titles []string - err := d.DB.Select(d.ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`) - if err != nil { - d.say(notice, "Cannot reach the DB: %s", err.Error()) - os.Exit(1) - } - - // Get full node API - full, closer, err := cliutil.GetFullNodeAPIV1(d.cctx) - if err != nil { - d.say(notice, "Error connecting to full node API: %s", err.Error()) - os.Exit(1) - } - d.full = full - d.closers = append(d.closers, closer) - stepCompleted(d, d.T("Pre-initialization steps complete")) -} - -func stepNewMinerConfig(d *MigrationData) { - curioCfg := config.DefaultCurioConfig() - curioCfg.Addresses = append(curioCfg.Addresses, config.CurioAddresses{ - PreCommitControl: []string{}, - CommitControl: []string{}, - TerminateControl: []string{}, - DisableOwnerFallback: false, - DisableWorkerFallback: false, - 
MinerAddresses: []string{d.MinerID.String()}, - }) - - sk, err := io.ReadAll(io.LimitReader(rand.Reader, 32)) - if err != nil { - d.say(notice, "Failed to generate random bytes for secret: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - - curioCfg.Apis.StorageRPCSecret = base64.StdEncoding.EncodeToString(sk) - - ainfo, err := cliutil.GetAPIInfo(d.cctx, repo.FullNode) - if err != nil { - d.say(notice, "Failed to get API info for FullNode: %w", err) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - - token, err := d.full.AuthNew(d.ctx, api.AllPermissions) - if err != nil { - d.say(notice, "Failed to verify the auth token from daemon node: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - - curioCfg.Apis.ChainApiInfo = append(curioCfg.Apis.ChainApiInfo, fmt.Sprintf("%s:%s", string(token), ainfo.Addr)) - - // write config - var titles []string - err = d.DB.Select(d.ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`) - if err != nil { - d.say(notice, "Cannot reach the DB: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. 
You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - - // If 'base' layer is not present - if !lo.Contains(titles, "base") { - curioCfg.Addresses = lo.Filter(curioCfg.Addresses, func(a config.CurioAddresses, _ int) bool { - return len(a.MinerAddresses) > 0 - }) - cb, err := config.ConfigUpdate(curioCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - if err != nil { - d.say(notice, "Failed to generate default config: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - _, err = d.DB.Exec(d.ctx, "INSERT INTO harmony_config (title, config) VALUES ('base', $1)", string(cb)) - if err != nil { - d.say(notice, "Failed to insert 'base' config layer in database: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - stepCompleted(d, d.T("Configuration 'base' was updated to include this miner's address")) - return - } - - // If base layer is already present - baseCfg := config.DefaultCurioConfig() - var baseText string - - err = d.DB.QueryRow(d.ctx, "SELECT config FROM harmony_config WHERE title='base'").Scan(&baseText) - if err != nil { - d.say(notice, "Failed to load base config from database: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. 
You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - _, err = deps.LoadConfigWithUpgrades(baseText, baseCfg) - if err != nil { - d.say(notice, "Failed to parse base config: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - - baseCfg.Addresses = append(baseCfg.Addresses, curioCfg.Addresses...) - baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool { - return len(a.MinerAddresses) > 0 - }) - - cb, err := config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - if err != nil { - d.say(notice, "Failed to regenerate base config: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - _, err = d.DB.Exec(d.ctx, "UPDATE harmony_config SET config=$1 WHERE title='base'", string(cb)) - if err != nil { - d.say(notice, "Failed to insert 'base' config layer in database: %s", err.Error()) - d.say(notice, "Please do not run guided-setup again as miner creation is not idempotent. 
You need to run 'curio config new-cluster %s' to finish the configuration", d.MinerID.String()) - os.Exit(1) - } - - stepCompleted(d, d.T("Configuration 'base' was updated to include this miner's address")) -} - -func getDBDetails(d *MigrationData) *config.HarmonyDB { - harmonyCfg := config.DefaultStorageMiner().HarmonyDB - for { - i, _, err := (&promptui.Select{ - Label: d.T("Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)"), - Items: []string{ - d.T("Host: %s", strings.Join(harmonyCfg.Hosts, ",")), - d.T("Port: %s", harmonyCfg.Port), - d.T("Username: %s", harmonyCfg.Username), - d.T("Password: %s", harmonyCfg.Password), - d.T("Database: %s", harmonyCfg.Database), - d.T("Continue to connect and update schema.")}, - Size: 6, - Templates: d.selectTemplates, - }).Run() - if err != nil { - d.say(notice, "Database config error occurred, abandoning migration: %s ", err.Error()) - os.Exit(1) - } - switch i { - case 0: - host, err := (&promptui.Prompt{ - Label: d.T("Enter the Yugabyte database host(s)"), - }).Run() - if err != nil { - d.say(notice, "No host provided") - continue - } - harmonyCfg.Hosts = strings.Split(host, ",") - case 1, 2, 3, 4: - val, err := (&promptui.Prompt{ - Label: d.T("Enter the Yugabyte database %s", []string{"port", "username", "password", "database"}[i-1]), - }).Run() - if err != nil { - d.say(notice, "No value provided") - continue - } - switch i { - case 1: - harmonyCfg.Port = val - case 2: - harmonyCfg.Username = val - case 3: - harmonyCfg.Password = val - case 4: - harmonyCfg.Database = val - } - continue - case 5: - db, err := harmonydb.NewFromConfig(harmonyCfg) - if err != nil { - if err.Error() == "^C" { - os.Exit(1) - } - d.say(notice, "Error connecting to Yugabyte database: %s", err.Error()) - continue - } - d.DB = db - return &harmonyCfg - } - } -} diff --git a/cmd/curio/guidedsetup/shared.go b/cmd/curio/guidedsetup/shared.go deleted file mode 100644 index 0636bcfb1be..00000000000 --- 
a/cmd/curio/guidedsetup/shared.go +++ /dev/null @@ -1,430 +0,0 @@ -package guidedsetup - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/json" - "fmt" - "os" - "path" - "strings" - - "github.com/BurntSushi/toml" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - "github.com/samber/lo" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-statestore" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/must" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/modules" - "github.com/filecoin-project/lotus/node/repo" - sealing "github.com/filecoin-project/lotus/storage/pipeline" -) - -const ( - FlagMinerRepo = "miner-repo" -) - -const FlagMinerRepoDeprecation = "storagerepo" - -func SaveConfigToLayerMigrateSectors(minerRepoPath, chainApiInfo string, unmigSectorShouldFail func() bool) (minerAddress address.Address, err error) { - _, say := SetupLanguage() - ctx := context.Background() - - r, err := repo.NewFS(minerRepoPath) - if err != nil { - return minerAddress, err - } - - ok, err := r.Exists() - if err != nil { - return minerAddress, err - } - - if !ok { - return minerAddress, fmt.Errorf("repo not initialized at: %s", minerRepoPath) - } - - lr, err := r.LockRO(repo.StorageMiner) - if err != nil { - return minerAddress, fmt.Errorf("locking repo: %w", err) - } - defer func() { - err = lr.Close() - if err != nil { - fmt.Println("error closing repo: ", err) - } - }() - - cfgNode, err := lr.Config() - if err != nil { - return minerAddress, fmt.Errorf("getting node config: %w", err) - } - smCfg := cfgNode.(*config.StorageMiner) - - db, err := harmonydb.NewFromConfig(smCfg.HarmonyDB) - if err != nil { - return minerAddress, fmt.Errorf("could not reach the database. 
Ensure the Miner config toml's HarmonyDB entry"+ - " is setup to reach Yugabyte correctly: %w", err) - } - - var titles []string - err = db.Select(ctx, &titles, `SELECT title FROM harmony_config WHERE LENGTH(config) > 0`) - if err != nil { - return minerAddress, fmt.Errorf("miner cannot reach the db. Ensure the config toml's HarmonyDB entry"+ - " is setup to reach Yugabyte correctly: %s", err.Error()) - } - - // Copy over identical settings: - - buf, err := os.ReadFile(path.Join(lr.Path(), "config.toml")) - if err != nil { - return minerAddress, fmt.Errorf("could not read config.toml: %w", err) - } - curioCfg := config.DefaultCurioConfig() - - ensureEmptyArrays(curioCfg) - _, err = deps.LoadConfigWithUpgrades(string(buf), curioCfg) - - if err != nil { - return minerAddress, fmt.Errorf("could not decode toml: %w", err) - } - - // Populate Miner Address - mmeta, err := lr.Datastore(ctx, "/metadata") - if err != nil { - return minerAddress, xerrors.Errorf("opening miner metadata datastore: %w", err) - } - - maddrBytes, err := mmeta.Get(ctx, datastore.NewKey("miner-address")) - if err != nil { - return minerAddress, xerrors.Errorf("getting miner address datastore entry: %w", err) - } - - addr, err := address.NewFromBytes(maddrBytes) - if err != nil { - return minerAddress, xerrors.Errorf("parsing miner actor address: %w", err) - } - - if err := MigrateSectors(ctx, addr, mmeta, db, func(nSectors int) { - say(plain, "Migrating metadata for %d sectors.", nSectors) - }, unmigSectorShouldFail); err != nil { - return address.Address{}, xerrors.Errorf("migrating sectors: %w", err) - } - - minerAddress = addr - - curioCfg.Addresses = []config.CurioAddresses{{ - MinerAddresses: []string{addr.String()}, - PreCommitControl: smCfg.Addresses.PreCommitControl, - CommitControl: smCfg.Addresses.CommitControl, - TerminateControl: smCfg.Addresses.TerminateControl, - DisableOwnerFallback: smCfg.Addresses.DisableOwnerFallback, - DisableWorkerFallback: 
smCfg.Addresses.DisableWorkerFallback, - }} - - ks, err := lr.KeyStore() - if err != nil { - return minerAddress, xerrors.Errorf("keystore err: %w", err) - } - js, err := ks.Get(modules.JWTSecretName) - if err != nil { - return minerAddress, xerrors.Errorf("error getting JWTSecretName: %w", err) - } - - curioCfg.Apis.StorageRPCSecret = base64.StdEncoding.EncodeToString(js.PrivateKey) - - curioCfg.Apis.ChainApiInfo = append(curioCfg.Apis.ChainApiInfo, chainApiInfo) - // Express as configTOML - configTOML := &bytes.Buffer{} - if err = toml.NewEncoder(configTOML).Encode(curioCfg); err != nil { - return minerAddress, err - } - - if lo.Contains(titles, "base") { - // append addresses - var baseCfg = config.DefaultCurioConfig() - var baseText string - err = db.QueryRow(ctx, "SELECT config FROM harmony_config WHERE title='base'").Scan(&baseText) - if err != nil { - return minerAddress, xerrors.Errorf("Cannot load base config: %w", err) - } - ensureEmptyArrays(baseCfg) - _, err := deps.LoadConfigWithUpgrades(baseText, baseCfg) - if err != nil { - return minerAddress, xerrors.Errorf("Cannot load base config: %w", err) - } - for _, addr := range baseCfg.Addresses { - if lo.Contains(addr.MinerAddresses, curioCfg.Addresses[0].MinerAddresses[0]) { - goto skipWritingToBase - } - } - // write to base - { - baseCfg.Addresses = append(baseCfg.Addresses, curioCfg.Addresses[0]) - baseCfg.Addresses = lo.Filter(baseCfg.Addresses, func(a config.CurioAddresses, _ int) bool { - return len(a.MinerAddresses) > 0 - }) - if baseCfg.Apis.ChainApiInfo == nil { - baseCfg.Apis.ChainApiInfo = append(baseCfg.Apis.ChainApiInfo, chainApiInfo) - } - if baseCfg.Apis.StorageRPCSecret == "" { - baseCfg.Apis.StorageRPCSecret = curioCfg.Apis.StorageRPCSecret - } - - cb, err := config.ConfigUpdate(baseCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - if err != nil { - return minerAddress, xerrors.Errorf("cannot interpret config: %w", err) - } - _, 
err = db.Exec(ctx, "UPDATE harmony_config SET config=$1 WHERE title='base'", string(cb)) - if err != nil { - return minerAddress, xerrors.Errorf("cannot update base config: %w", err) - } - say(plain, "Configuration 'base' was updated to include this miner's address (%s) and its wallet setup.", minerAddress) - } - say(plain, "Compare the configurations %s to %s. Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", "base", "mig-"+curioCfg.Addresses[0].MinerAddresses[0]) - skipWritingToBase: - } else { - _, err = db.Exec(ctx, `INSERT INTO harmony_config (title, config) VALUES ('base', $1) - ON CONFLICT(title) DO UPDATE SET config=EXCLUDED.config`, configTOML) - - if err != nil { - return minerAddress, xerrors.Errorf("Cannot insert base config: %w", err) - } - say(notice, "Configuration 'base' was created to resemble this lotus-miner's config.toml .") - } - - { // make a layer representing the migration - layerName := fmt.Sprintf("mig-%s", curioCfg.Addresses[0].MinerAddresses[0]) - _, err = db.Exec(ctx, "DELETE FROM harmony_config WHERE title=$1", layerName) - if err != nil { - return minerAddress, xerrors.Errorf("Cannot delete existing layer: %w", err) - } - - _, err = db.Exec(ctx, "INSERT INTO harmony_config (title, config) VALUES ($1, $2)", layerName, configTOML.String()) - if err != nil { - return minerAddress, xerrors.Errorf("Cannot insert layer after layer created message: %w", err) - } - say(plain, "Layer %s created. 
", layerName) - } - - dbSettings := getDBSettings(*smCfg) - say(plain, "To work with the config: ") - fmt.Println(code.Render(`curio ` + dbSettings + ` config edit base`)) - say(plain, `To run Curio: With machine or cgroup isolation, use the command (with example layer selection):`) - fmt.Println(code.Render(`curio ` + dbSettings + ` run --layer=post`)) - return minerAddress, nil -} - -func getDBSettings(smCfg config.StorageMiner) string { - dbSettings := "" - def := config.DefaultStorageMiner().HarmonyDB - if def.Hosts[0] != smCfg.HarmonyDB.Hosts[0] { - dbSettings += ` --db-host="` + strings.Join(smCfg.HarmonyDB.Hosts, ",") + `"` - } - if def.Port != smCfg.HarmonyDB.Port { - dbSettings += " --db-port=" + smCfg.HarmonyDB.Port - } - if def.Username != smCfg.HarmonyDB.Username { - dbSettings += ` --db-user="` + smCfg.HarmonyDB.Username + `"` - } - if def.Password != smCfg.HarmonyDB.Password { - dbSettings += ` --db-password="` + smCfg.HarmonyDB.Password + `"` - } - if def.Database != smCfg.HarmonyDB.Database { - dbSettings += ` --db-name="` + smCfg.HarmonyDB.Database + `"` - } - return dbSettings -} - -func ensureEmptyArrays(cfg *config.CurioConfig) { - if cfg.Addresses == nil { - cfg.Addresses = []config.CurioAddresses{} - } else { - for i := range cfg.Addresses { - if cfg.Addresses[i].PreCommitControl == nil { - cfg.Addresses[i].PreCommitControl = []string{} - } - if cfg.Addresses[i].CommitControl == nil { - cfg.Addresses[i].CommitControl = []string{} - } - if cfg.Addresses[i].TerminateControl == nil { - cfg.Addresses[i].TerminateControl = []string{} - } - } - } - if cfg.Apis.ChainApiInfo == nil { - cfg.Apis.ChainApiInfo = []string{} - } -} - -func cidPtrToStrptr(c *cid.Cid) *string { - if c == nil { - return nil - } - s := c.String() - return &s -} - -func coalescePtrs[A any](a, b *A) *A { - if a != nil { - return a - } - return b -} - -func MigrateSectors(ctx context.Context, maddr address.Address, mmeta datastore.Batching, db *harmonydb.DB, logMig func(int), 
unmigSectorShouldFail func() bool) error { - mid, err := address.IDFromAddress(maddr) - if err != nil { - return xerrors.Errorf("getting miner ID: %w", err) - } - - sts := statestore.New(namespace.Wrap(mmeta, datastore.NewKey(sealing.SectorStorePrefix))) - - var sectors []sealing.SectorInfo - if err := sts.List(§ors); err != nil { - return xerrors.Errorf("getting sector list: %w", err) - } - - logMig(len(sectors)) - - migratableState := func(state sealing.SectorState) bool { - switch state { - case sealing.Proving, sealing.Available, sealing.Removed: - return true - default: - return false - } - } - - unmigratable := map[sealing.SectorState]int{} - - for _, sector := range sectors { - if !migratableState(sector.State) { - unmigratable[sector.State]++ - continue - } - } - - if len(unmigratable) > 0 { - fmt.Println("The following sector states are not migratable:") - for state, count := range unmigratable { - fmt.Printf(" %s: %d\n", state, count) - } - - if unmigSectorShouldFail() { - return xerrors.Errorf("aborting migration because sectors were found that are not migratable.") - } - } - - for _, sector := range sectors { - if !migratableState(sector.State) || sector.State == sealing.Removed { - continue - } - - // Insert sector metadata - _, err := db.Exec(ctx, ` - INSERT INTO sectors_meta (sp_id, sector_num, reg_seal_proof, ticket_epoch, ticket_value, - orig_sealed_cid, orig_unsealed_cid, cur_sealed_cid, cur_unsealed_cid, - msg_cid_precommit, msg_cid_commit, msg_cid_update, seed_epoch, seed_value) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14) - ON CONFLICT (sp_id, sector_num) DO UPDATE - SET reg_seal_proof = excluded.reg_seal_proof, ticket_epoch = excluded.ticket_epoch, ticket_value = excluded.ticket_value, - orig_sealed_cid = excluded.orig_sealed_cid, orig_unsealed_cid = excluded.orig_unsealed_cid, cur_sealed_cid = excluded.cur_sealed_cid, - cur_unsealed_cid = excluded.cur_unsealed_cid, msg_cid_precommit = excluded.msg_cid_precommit, 
msg_cid_commit = excluded.msg_cid_commit, - msg_cid_update = excluded.msg_cid_update, seed_epoch = excluded.seed_epoch, seed_value = excluded.seed_value`, - mid, - sector.SectorNumber, - sector.SectorType, - sector.TicketEpoch, - sector.TicketValue, - cidPtrToStrptr(sector.CommR), - cidPtrToStrptr(sector.CommD), - cidPtrToStrptr(coalescePtrs(sector.UpdateSealed, sector.CommR)), - cidPtrToStrptr(coalescePtrs(sector.UpdateUnsealed, sector.CommD)), - cidPtrToStrptr(sector.PreCommitMessage), - cidPtrToStrptr(sector.CommitMessage), - cidPtrToStrptr(sector.ReplicaUpdateMessage), - sector.SeedEpoch, - sector.SeedValue, - ) - if err != nil { - b, _ := json.MarshalIndent(sector, "", " ") - fmt.Println(string(b)) - - return xerrors.Errorf("inserting/updating sectors_meta for sector %d: %w", sector.SectorNumber, err) - } - - // Process each piece within the sector - for j, piece := range sector.Pieces { - dealID := int64(0) - startEpoch := int64(0) - endEpoch := int64(0) - var pamJSON *string - - if piece.HasDealInfo() { - dealInfo := piece.DealInfo() - if dealInfo.Impl().DealProposal != nil { - dealID = int64(dealInfo.Impl().DealID) - } - - startEpoch = int64(must.One(dealInfo.StartEpoch())) - endEpoch = int64(must.One(dealInfo.EndEpoch())) - if piece.Impl().PieceActivationManifest != nil { - pam, err := json.Marshal(piece.Impl().PieceActivationManifest) - if err != nil { - return xerrors.Errorf("error marshalling JSON for piece %d in sector %d: %w", j, sector.SectorNumber, err) - } - ps := string(pam) - pamJSON = &ps - } - } - - // Splitting the SQL statement for readability and adding new fields - _, err = db.Exec(ctx, ` - INSERT INTO sectors_meta_pieces ( - sp_id, sector_num, piece_num, piece_cid, piece_size, - requested_keep_data, raw_data_size, start_epoch, orig_end_epoch, - f05_deal_id, ddo_pam - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) - ON CONFLICT (sp_id, sector_num, piece_num) DO UPDATE - SET - piece_cid = excluded.piece_cid, - piece_size = 
excluded.piece_size, - requested_keep_data = excluded.requested_keep_data, - raw_data_size = excluded.raw_data_size, - start_epoch = excluded.start_epoch, - orig_end_epoch = excluded.orig_end_epoch, - f05_deal_id = excluded.f05_deal_id, - ddo_pam = excluded.ddo_pam`, - mid, - sector.SectorNumber, - j, - piece.PieceCID(), - piece.Piece().Size, - piece.HasDealInfo(), - nil, // raw_data_size might be calculated based on the piece size, or retrieved if available - startEpoch, - endEpoch, - dealID, - pamJSON, - ) - if err != nil { - b, _ := json.MarshalIndent(sector, "", " ") - fmt.Println(string(b)) - - return xerrors.Errorf("inserting/updating sector_meta_pieces for sector %d, piece %d: %w", sector.SectorNumber, j, err) - } - } - } - - return nil -} diff --git a/cmd/curio/internal/translations/catalog.go b/cmd/curio/internal/translations/catalog.go deleted file mode 100644 index 42efc8fceec..00000000000 --- a/cmd/curio/internal/translations/catalog.go +++ /dev/null @@ -1,486 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
- -package translations - -import ( - "golang.org/x/text/language" - "golang.org/x/text/message" - "golang.org/x/text/message/catalog" -) - -type dictionary struct { - index []uint32 - data string -} - -func (d *dictionary) Lookup(key string) (data string, ok bool) { - p, ok := messageKeyToIndex[key] - if !ok { - return "", false - } - start, end := d.index[p], d.index[p+1] - if start == end { - return "", false - } - return d.data[start:end], true -} - -func init() { - dict := map[string]catalog.Dictionary{ - "en": &dictionary{index: enIndex, data: enData}, - "ko": &dictionary{index: koIndex, data: koData}, - "zh": &dictionary{index: zhIndex, data: zhData}, - } - fallback := language.MustParse("en") - cat, err := catalog.NewFromMap(dict, catalog.Fallback(fallback)) - if err != nil { - panic(err) - } - message.DefaultCatalog = cat -} - -var messageKeyToIndex = map[string]int{ - "Aborting migration.": 21, - "Aborting remaining steps.": 9, - "Aggregate-Anonymous: version, chain, and Miner power (bucketed).": 26, - "Cannot reach the DB: %s": 93, - "Cannot read the config.toml file in the provided directory, Error: %s": 68, - "Compare the configurations %s to %s. Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.": 120, - "Confidence epochs": 89, - "Confidence epochs: %d": 79, - "Configuration 'base' was created to resemble this lotus-miner's config.toml .": 121, - "Configuration 'base' was updated to include this miner's address": 102, - "Configuration 'base' was updated to include this miner's address (%s) and its wallet setup.": 119, - "Connected to Yugabyte": 62, - "Connected to Yugabyte. Schema is current.": 50, - "Continue to connect and update schema.": 112, - "Continue to verify the addresses and create a new miner actor.": 80, - "Could not create repo from directory: %s. Aborting migration": 69, - "Could not lock miner repo. 
Your miner must be stopped: %s\n Aborting migration": 70, - "Create a new miner": 8, - "Ctrl+C pressed in Terminal": 5, - "Database config error occurred, abandoning migration: %s ": 113, - "Database: %s": 111, - "Documentation: ": 36, - "Each step needs your confirmation and can be reversed. Press Ctrl+C to exit at any time.": 4, - "Enabling Sector Indexing in the database.": 51, - "Enter %s address": 85, - "Enter the Yugabyte database %s": 116, - "Enter the Yugabyte database host(s)": 114, - "Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)": 106, - "Enter the info to create a new miner": 74, - "Enter the owner address": 82, - "Enter the path to the configuration directory used by %s": 66, - "Enter the sector size": 86, - "Error closing backup file: %s": 59, - "Error connecting to Yugabyte database: %s": 117, - "Error connecting to full node API: %s": 94, - "Error creating backup file: %s": 56, - "Error encoding config.toml: %s": 52, - "Error expanding path: %s": 54, - "Error getting API: %s": 15, - "Error getting miner info: %s": 31, - "Error getting miner power: %s": 29, - "Error getting token: %s": 17, - "Error interpreting miner ID: %s: ID: %s": 44, - "Error marshalling message: %s": 30, - "Error reading config.toml: %s": 57, - "Error reading filemode of config.toml: %s": 55, - "Error saving config to layer: %s. 
Aborting Migration": 22, - "Error sending message: %s": 33, - "Error sending message: Status %s, Message: ": 34, - "Error signing message: %s": 32, - "Error verifying sectors: %s": 45, - "Error writing backup file: %s": 58, - "Error writing config.toml: %s": 60, - "Failed to create the miner actor: %s": 91, - "Failed to generate default config: %s": 100, - "Failed to generate random bytes for secret: %s": 96, - "Failed to get API info for FullNode: %w": 98, - "Failed to insert 'base' config layer in database: %s": 101, - "Failed to load base config from database: %s": 103, - "Failed to parse base config: %s": 104, - "Failed to parse confidence: %s": 90, - "Failed to parse sector size: %s": 88, - "Failed to parse the address: %s": 84, - "Failed to regenerate base config: %s": 105, - "Failed to verify the auth token from daemon node: %s": 99, - "Filecoin %s channels: %s and %s": 39, - "Hint: I am someone running Curio on whichever chain.": 27, - "Host: %s": 107, - "I want to:": 6, - "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'": 40, - "Individual Data: Miner ID, Curio version, chain (%s or %s). Signed.": 25, - "Initializing a new miner actor.": 73, - "Layer %s created. 
": 122, - "Lotus-Miner to Curio Migration.": 10, - "Message sent.": 35, - "Migrate from existing Lotus-Miner": 7, - "Migrating lotus-miner config.toml to Curio in-database configuration.": 14, - "Migrating metadata for %d sectors.": 118, - "Miner %s created successfully": 92, - "Miner creation error occurred: %s ": 81, - "New Miner initialization complete.": 13, - "No address provided": 83, - "No host provided": 115, - "No path provided, abandoning migration ": 67, - "No value provided": 87, - "No, abort": 20, - "Nothing.": 28, - "Now shut down lotus-miner and lotus-worker and use run %s instead.": 47, - "One database can serve multiple miner IDs: Run a migration for each lotus-miner.": 41, - "Other": 65, - "Owner Address: %s": 75, - "Password: %s": 110, - "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster %s' to finish the configuration": 97, - "Please start (or restart) %s now that database credentials are in %s.": 42, - "Port: %s": 108, - "Pre-initialization steps complete": 95, - "Press return to continue": 48, - "Press return to update %s with Yugabyte info. A Backup file will be written to that folder before changes are made.": 53, - "Read Miner Config": 71, - "Restart Lotus Miner. ": 61, - "Sector Size: %d": 78, - "Sectors verified. %d sector locations found.": 49, - "Select the location of your lotus-miner config directory?": 64, - "Select what you want to share with the Curio team.": 24, - "Sender Address: %s": 77, - "Step Complete: %s\n": 72, - "The '%s' layer stores common configuration. All curio instances can include it in their %s argument.": 37, - "The Curio team wants to improve the software you use. Tell the team you're using `%s`.": 23, - "The sectors are in the database. 
The database is ready for %s.": 46, - "This interactive tool creates a new miner actor and creates the basic configuration layer for it.": 1, - "This interactive tool migrates lotus-miner to Curio in 5 minutes.": 3, - "This process is partially idempotent. Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster < miner ID >' to finish the configuration.": 2, - "To run Curio: With machine or cgroup isolation, use the command (with example layer selection):": 124, - "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.": 63, - "To work with the config: ": 123, - "Try the web interface with %s for further guided improvements.": 11, - "Unmigratable sectors found. Do you want to continue?": 18, - "Use the arrow keys to navigate: ↓ ↑ → ← ": 0, - "Username: %s": 109, - "Waiting for %s to write sectors into Yugabyte.": 43, - "Worker Address: %s": 76, - "Yes, continue": 19, - "You can add other layers for per-machine configuration changes.": 38, - "You can now migrate your market node (%s), if applicable.": 12, - "could not get API info for FullNode: %w": 16, -} - -var enIndex = []uint32{ // 126 elements - // Entry 0 - 1F - 0x00000000, 0x00000035, 0x00000097, 0x0000015a, - 0x0000019c, 0x000001f5, 0x00000210, 0x0000021b, - 0x0000023d, 0x00000250, 0x0000026a, 0x0000028a, - 0x000002cc, 0x00000309, 0x0000032c, 0x00000372, - 0x0000038b, 0x000003b6, 0x000003d1, 0x00000406, - 0x00000414, 0x0000041e, 0x00000432, 0x0000046a, - 0x000004c4, 0x000004f7, 0x00000541, 0x00000582, - 0x000005b7, 0x000005c0, 0x000005e1, 0x00000602, - // Entry 20 - 3F - 0x00000622, 0x0000063f, 0x0000065c, 0x0000068f, - 0x0000069d, 0x000006b1, 0x0000071c, 0x0000075c, - 0x00000785, 0x000007fc, 0x0000084d, 0x00000899, - 0x000008cb, 0x000008f9, 0x00000918, 0x0000095a, - 0x000009a0, 0x000009b9, 0x000009e9, 0x00000a13, - 0x00000a3d, 0x00000a5f, 0x00000ad6, 0x00000af2, - 0x00000b1f, 0x00000b41, 0x00000b62, 0x00000b83, - 0x00000ba4, 
0x00000bc5, 0x00000bdf, 0x00000bf5, - // Entry 40 - 5F - 0x00000c42, 0x00000c7c, 0x00000c82, 0x00000cbe, - 0x00000cea, 0x00000d33, 0x00000d73, 0x00000dc4, - 0x00000dd6, 0x00000df0, 0x00000e10, 0x00000e35, - 0x00000e4a, 0x00000e60, 0x00000e76, 0x00000e89, - 0x00000ea2, 0x00000ee1, 0x00000f0b, 0x00000f23, - 0x00000f37, 0x00000f5a, 0x00000f6e, 0x00000f84, - 0x00000f96, 0x00000fb9, 0x00000fcb, 0x00000fed, - 0x00001015, 0x00001036, 0x00001051, 0x0000107a, - // Entry 60 - 7F - 0x0000109c, 0x000010ce, 0x00001165, 0x00001190, - 0x000011c8, 0x000011f1, 0x00001229, 0x0000126a, - 0x0000129a, 0x000012bd, 0x000012e5, 0x00001347, - 0x00001353, 0x0000135f, 0x0000136f, 0x0000137f, - 0x0000138f, 0x000013b6, 0x000013f7, 0x0000141b, - 0x0000142c, 0x0000144e, 0x0000147b, 0x000014a1, - 0x00001500, 0x0000159d, 0x000015eb, 0x00001605, - 0x00001623, 0x00001683, -} // Size: 528 bytes - -const enData string = "" + // Size: 5763 bytes - "\x04\x00\x01 0\x02Use the arrow keys to navigate: ↓ ↑ → ←\x02This intera" + - "ctive tool creates a new miner actor and creates the basic configuration" + - " layer for it.\x02This process is partially idempotent. Once a new miner" + - " actor has been created and subsequent steps fail, the user need to run " + - "'curio config new-cluster < miner ID >' to finish the configuration.\x02" + - "This interactive tool migrates lotus-miner to Curio in 5 minutes.\x02Eac" + - "h step needs your confirmation and can be reversed. 
Press Ctrl+C to exit" + - " at any time.\x02Ctrl+C pressed in Terminal\x02I want to:\x02Migrate fro" + - "m existing Lotus-Miner\x02Create a new miner\x02Aborting remaining steps" + - ".\x02Lotus-Miner to Curio Migration.\x02Try the web interface with %[1]s" + - " for further guided improvements.\x02You can now migrate your market nod" + - "e (%[1]s), if applicable.\x02New Miner initialization complete.\x02Migra" + - "ting lotus-miner config.toml to Curio in-database configuration.\x02Erro" + - "r getting API: %[1]s\x02could not get API info for FullNode: %[1]w\x02Er" + - "ror getting token: %[1]s\x02Unmigratable sectors found. Do you want to c" + - "ontinue?\x02Yes, continue\x02No, abort\x02Aborting migration.\x02Error s" + - "aving config to layer: %[1]s. Aborting Migration\x02The Curio team wants" + - " to improve the software you use. Tell the team you're using `%[1]s`." + - "\x02Select what you want to share with the Curio team.\x02Individual Dat" + - "a: Miner ID, Curio version, chain (%[1]s or %[2]s). Signed.\x02Aggregate" + - "-Anonymous: version, chain, and Miner power (bucketed).\x02Hint: I am so" + - "meone running Curio on whichever chain.\x02Nothing.\x02Error getting min" + - "er power: %[1]s\x02Error marshalling message: %[1]s\x02Error getting min" + - "er info: %[1]s\x02Error signing message: %[1]s\x02Error sending message:" + - " %[1]s\x04\x00\x01 .\x02Error sending message: Status %[1]s, Message:" + - "\x02Message sent.\x04\x00\x01 \x0f\x02Documentation:\x02The '%[1]s' laye" + - "r stores common configuration. 
All curio instances can include it in the" + - "ir %[2]s argument.\x02You can add other layers for per-machine configura" + - "tion changes.\x02Filecoin %[1]s channels: %[2]s and %[3]s\x02Increase re" + - "liability using redundancy: start multiple machines with at-least the po" + - "st layer: 'curio run --layers=post'\x02One database can serve multiple m" + - "iner IDs: Run a migration for each lotus-miner.\x02Please start (or rest" + - "art) %[1]s now that database credentials are in %[2]s.\x02Waiting for %[" + - "1]s to write sectors into Yugabyte.\x02Error interpreting miner ID: %[1]" + - "s: ID: %[2]s\x02Error verifying sectors: %[1]s\x02The sectors are in the" + - " database. The database is ready for %[1]s.\x02Now shut down lotus-miner" + - " and lotus-worker and use run %[1]s instead.\x02Press return to continue" + - "\x02Sectors verified. %[1]d sector locations found.\x02Connected to Yuga" + - "byte. Schema is current.\x02Enabling Sector Indexing in the database." + - "\x02Error encoding config.toml: %[1]s\x02Press return to update %[1]s wi" + - "th Yugabyte info. A Backup file will be written to that folder before ch" + - "anges are made.\x02Error expanding path: %[1]s\x02Error reading filemode" + - " of config.toml: %[1]s\x02Error creating backup file: %[1]s\x02Error rea" + - "ding config.toml: %[1]s\x02Error writing backup file: %[1]s\x02Error clo" + - "sing backup file: %[1]s\x02Error writing config.toml: %[1]s\x04\x00\x01 " + - "\x15\x02Restart Lotus Miner.\x02Connected to Yugabyte\x02To start, ensur" + - "e your sealing pipeline is drained and shut-down lotus-miner.\x02Select " + - "the location of your lotus-miner config directory?\x02Other\x02Enter the" + - " path to the configuration directory used by %[1]s\x04\x00\x01 '\x02No p" + - "ath provided, abandoning migration\x02Cannot read the config.toml file i" + - "n the provided directory, Error: %[1]s\x02Could not create repo from dir" + - "ectory: %[1]s. 
Aborting migration\x02Could not lock miner repo. Your min" + - "er must be stopped: %[1]s\x0a Aborting migration\x02Read Miner Config" + - "\x04\x00\x01\x0a\x15\x02Step Complete: %[1]s\x02Initializing a new miner" + - " actor.\x02Enter the info to create a new miner\x02Owner Address: %[1]s" + - "\x02Worker Address: %[1]s\x02Sender Address: %[1]s\x02Sector Size: %[1]d" + - "\x02Confidence epochs: %[1]d\x02Continue to verify the addresses and cre" + - "ate a new miner actor.\x04\x00\x01 %\x02Miner creation error occurred: %" + - "[1]s\x02Enter the owner address\x02No address provided\x02Failed to pars" + - "e the address: %[1]s\x02Enter %[1]s address\x02Enter the sector size\x02" + - "No value provided\x02Failed to parse sector size: %[1]s\x02Confidence ep" + - "ochs\x02Failed to parse confidence: %[1]s\x02Failed to create the miner " + - "actor: %[1]s\x02Miner %[1]s created successfully\x02Cannot reach the DB:" + - " %[1]s\x02Error connecting to full node API: %[1]s\x02Pre-initialization" + - " steps complete\x02Failed to generate random bytes for secret: %[1]s\x02" + - "Please do not run guided-setup again as miner creation is not idempotent" + - ". You need to run 'curio config new-cluster %[1]s' to finish the configu" + - "ration\x02Failed to get API info for FullNode: %[1]w\x02Failed to verify" + - " the auth token from daemon node: %[1]s\x02Failed to generate default co" + - "nfig: %[1]s\x02Failed to insert 'base' config layer in database: %[1]s" + - "\x02Configuration 'base' was updated to include this miner's address\x02" + - "Failed to load base config from database: %[1]s\x02Failed to parse base " + - "config: %[1]s\x02Failed to regenerate base config: %[1]s\x02Enter the in" + - "fo to connect to your Yugabyte database installation (https://download.y" + - "ugabyte.com/)\x02Host: %[1]s\x02Port: %[1]s\x02Username: %[1]s\x02Passwo" + - "rd: %[1]s\x02Database: %[1]s\x02Continue to connect and update schema." 
+ - "\x04\x00\x01 <\x02Database config error occurred, abandoning migration: " + - "%[1]s\x02Enter the Yugabyte database host(s)\x02No host provided\x02Ente" + - "r the Yugabyte database %[1]s\x02Error connecting to Yugabyte database: " + - "%[1]s\x02Migrating metadata for %[1]d sectors.\x02Configuration 'base' w" + - "as updated to include this miner's address (%[1]s) and its wallet setup." + - "\x02Compare the configurations %[1]s to %[2]s. Changes between the miner" + - " IDs other than wallet addreses should be a new, minimal layer for runne" + - "rs that need it.\x02Configuration 'base' was created to resemble this lo" + - "tus-miner's config.toml .\x04\x00\x01 \x15\x02Layer %[1]s created.\x04" + - "\x00\x01 \x19\x02To work with the config:\x02To run Curio: With machine " + - "or cgroup isolation, use the command (with example layer selection):" - -var koIndex = []uint32{ // 126 elements - // Entry 0 - 1F - 0x00000000, 0x00000044, 0x000000c1, 0x000001c1, - 0x0000020e, 0x00000289, 0x000002aa, 0x000002bc, - 0x000002e5, 0x00000300, 0x00000325, 0x00000348, - 0x000003b2, 0x00000402, 0x00000428, 0x00000481, - 0x000004a0, 0x000004dc, 0x0000050c, 0x0000055c, - 0x00000568, 0x0000057a, 0x00000595, 0x000005ed, - 0x00000679, 0x000006b2, 0x00000708, 0x00000746, - 0x00000794, 0x000007af, 0x000007e9, 0x0000081c, - // Entry 20 - 3F - 0x00000856, 0x00000880, 0x000008aa, 0x000008ec, - 0x00000910, 0x0000091d, 0x000009a3, 0x000009f5, - 0x00000a1c, 0x00000ab8, 0x00000b4a, 0x00000bc5, - 0x00000c09, 0x00000c47, 0x00000c6e, 0x00000cd9, - 0x00000d26, 0x00000d4d, 0x00000d9c, 0x00000ddd, - 0x00000e1d, 0x00000e64, 0x00000f0a, 0x00000f3a, - 0x00000f89, 0x00000fac, 0x00000fcd, 0x00000ff0, - 0x00001013, 0x00001051, 0x00001075, 0x0000108b, - // Entry 40 - 5F - 0x000010f6, 0x00001145, 0x0000114c, 0x00001194, - 0x000011e6, 0x00001240, 0x000012aa, 0x0000133b, - 0x00001353, 0x0000136d, 0x00001391, 0x000013c4, - 0x000013dc, 0x000013f4, 0x0000140c, 0x00001421, - 0x00001439, 0x00001490, 0x000014bb, 
0x000014d3, - 0x000014fa, 0x0000151d, 0x00001531, 0x00001546, - 0x0000156a, 0x00001594, 0x000015a5, 0x000015cb, - 0x000015f1, 0x0000162a, 0x00001662, 0x0000169a, - // Entry 60 - 7F - 0x000016b9, 0x00001705, 0x000017c3, 0x0000180f, - 0x0000185d, 0x00001880, 0x000018dc, 0x0000192c, - 0x00001981, 0x000019c4, 0x00001a03, 0x00001a71, - 0x00001a82, 0x00001a90, 0x00001aa8, 0x00001abc, - 0x00001ad6, 0x00001b00, 0x00001b63, 0x00001b9f, - 0x00001bc9, 0x00001c01, 0x00001c55, 0x00001c8d, - 0x00001d06, 0x00001dc0, 0x00001e17, 0x00001e46, - 0x00001e6d, 0x00001ef9, -} // Size: 528 bytes - -const koData string = "" + // Size: 7929 bytes - "\x04\x00\x01 ?\x02화살표 키를 사용하여 이동하세요: ↓ ↑ → ←\x02이 대화형 도구는 새로운 채굴자 액터를 생성" + - "하고 그에 대한 기본 구성 레이어를 생성합니다.\x02이 프로세스는 부분적으로 항등원적입니다. 새로운 채굴자 액터가 생성되었고" + - " 후속 단계가 실패하는 경우 사용자는 구성을 완료하기 위해 'curio config new-cluster < 채굴자 ID >'를 " + - "실행해야 합니다.\x02이 대화형 도구는 5분 안에 lotus-miner를 Curio로 이주합니다.\x02각 단계는 확인이 필" + - "요하며 되돌릴 수 있습니다. 언제든지 Ctrl+C를 눌러 종료할 수 있습니다.\x02터미널에서 Ctrl+C가 눌림\x02나는 " + - "원한다:\x02기존의 Lotus-Miner에서 이전하기\x02새로운 채굴자 생성\x02나머지 단계를 중단합니다.\x02Lotu" + - "s-Miner에서 Curio로 이주.\x02%[1]s를 사용하여 웹 인터페이스를 시도하고 더 나은 안내된 개선을 진행하세요." + - "\x02해당하는 경우 이제 시장 노드를 이주할 수 있습니다 (%[1]s).\x02새로운 채굴자 초기화 완료.\x02lotus-mi" + - "ner config.toml을 Curio의 데이터베이스 구성으로 이전 중입니다.\x02API 가져오기 오류: %[1]s\x02Fu" + - "llNode의 API 정보를 가져올 수 없습니다: %[1]w\x02토큰을 가져오는 중 오류 발생: %[1]s\x02이동할 수 없는" + - " 섹터가 발견되었습니다. 계속하시겠습니까?\x02예, 계속\x02아니오, 중단\x02마이그레이션 중단.\x02레이어에 구성을 저장" + - "하는 중 오류 발생: %[1]s. 마이그레이션 중단\x02Curio 팀은 당신이 사용하는 소프트웨어를 개선하고자 합니다. 팀에" + - "게 `%[1]s`를 사용 중이라고 알려주세요.\x02Curio 팀과 공유하고 싶은 것을 선택하세요.\x02개별 데이터: 채굴자" + - " ID, Curio 버전, 체인 (%[1]s 또는 %[2]s). 
서명됨.\x02집계-익명: 버전, 체인, 및 채굴자 파워 (버킷)" + - ".\x02힌트: 나는 어떤 체인에서든 Curio를 실행 중인 사람입니다.\x02아무것도 없습니다.\x02마이너 파워를 가져오는 중" + - " 오류 발생: %[1]s\x02메시지를 마샬하는 중 오류 발생: %[1]s\x02마이너 정보를 가져오는 중 오류 발생: %[1]s" + - "\x02메시지 서명 중 오류 발생: %[1]s\x02메시지 전송 중 오류 발생: %[1]s\x04\x00\x01 =\x02메시지 " + - "전송 중 오류 발생: 상태 %[1]s, 메시지:\x02메시지가 전송되었습니다.\x04\x00\x01 \x08\x02문서:" + - "\x02'%[1]s' 레이어에는 공통 구성이 저장됩니다. 모든 Curio 인스턴스는 %[2]s 인수에 포함시킬 수 있습니다." + - "\x02기계별 구성 변경을 위해 다른 레이어를 추가할 수 있습니다.\x02Filecoin %[1]s 채널: %[2]s 및 %[3]" + - "s\x02신뢰성 향상을 위한 중복성 사용: 적어도 post 레이어를 사용하여 여러 대의 기계를 시작하십시오: 'curio run " + - "--layers=post'\x02한 개의 데이터베이스는 여러 광부 ID를 제공할 수 있습니다: 각 lotus-miner에 대해 마" + - "이그레이션을 실행하세요.\x02데이터베이스 자격 증명이 %[2]s에 입력되었으므로 지금 %[1]s을 시작하거나 다시 시작하세요" + - ".\x02%[1]s가 Yugabyte에 섹터를 기록하도록 대기 중입니다.\x02광부 ID를 해석하는 중 오류 발생: %[1]s: " + - "ID: %[2]s\x02섹터 확인 중 오류 발생: %[1]s\x02섹터가 데이터베이스에 있습니다. 데이터베이스가 %[1]s를 위해" + - " 준비되었습니다.\x02이제 lotus-miner와 lotus-worker를 종료하고 %[1]s을 실행하세요.\x02계속하려면 리" + - "턴을 누르세요\x02섹터가 확인되었습니다. %[1]d개의 섹터 위치를 찾았습니다.\x02Yugabyte에 연결되었습니다. 스키" + - "마가 현재입니다.\x02데이터베이스에서 Sector Indexing을 활성화합니다.\x02config.toml을 인코딩하는 중" + - " 오류가 발생했습니다: %[1]s\x02%[1]s을 Yugabyte 정보로 업데이트하려면 리턴 키를 누르세요. 변경 사항을 적용하" + - "기 전에 해당 폴더에 백업 파일이 작성됩니다.\x02경로를 확장하는 중 오류 발생: %[1]s\x02config.toml의 파" + - "일 모드를 읽는 중 오류가 발생했습니다: %[1]s\x02백업 파일 생성 오류: %[1]s\x02config.toml 읽기 오" + - "류: %[1]s\x02백업 파일 쓰기 오류: %[1]s\x02백업 파일 닫기 오류: %[1]s\x02config.toml을 쓰" + - "는 중 오류가 발생했습니다: %[1]s\x04\x00\x01 \x1f\x02로터스 마이너 재시작.\x02Yugabyte에 연결" + - "됨\x02시작하려면 밀봉 파이프라인이 비어 있고 lotus-miner가 종료되었는지 확인하세요.\x02로터스 마이너 구성 디렉" + - "토리의 위치를 선택하시겠습니까?\x02기타\x02%[1]s에서 사용하는 구성 디렉터리 경로를 입력하세요.\x04\x00\x01" + - " M\x02경로가 제공되지 않았으므로 마이그레이션을 포기합니다\x02제공된 디렉토리에서 config.toml 파일을 읽을 수 없습" + - "니다. 오류: %[1]s\x02디렉토리에서 저장소를 생성할 수 없습니다: %[1]s. 마이그레이션을 중단합니다.\x02광부 저" + - "장소를 잠금 해제할 수 없습니다. 
귀하의 광부를 중지해야 합니다: %[1]s\x0a 마이그레이션을 중단합니다.\x02마이너 구" + - "성 읽기\x04\x00\x01\x0a\x15\x02단계 완료: %[1]s\x02새 채굴자 액터 초기화 중.\x02새 채굴자를 " + - "생성하기 위한 정보 입력\x02소유자 주소: %[1]s\x02작업자 주소: %[1]s\x02송신자 주소: %[1]s\x02섹터" + - " 크기: %[1]d\x02신뢰 에포크: %[1]d\x02주소를 확인하고 새 채굴자 액터를 생성하려면 계속 진행하세요.\x04" + - "\x00\x01 &\x02채굴자 생성 오류 발생: %[1]s\x02소유자 주소 입력\x02주소가 제공되지 않았습니다\x02주소 구" + - "문 분석 실패: %[1]s\x02%[1]s 주소 입력\x02섹터 크기 입력\x02값이 제공되지 않았습니다\x02섹터 크기 구문" + - " 분석 실패: %[1]s\x02신뢰 에포크\x02신뢰도 구문 분석 실패: %[1]s\x02채굴자 액터 생성 실패: %[1]s" + - "\x02%[1]s 채굴자가 성공적으로 생성되었습니다\x02데이터베이스에 연결할 수 없습니다: %[1]s\x02풀 노드 API에 연" + - "결하는 중 오류 발생: %[1]s\x02사전 초기화 단계 완료\x02비밀번호를 위한 랜덤 바이트 생성에 실패했습니다: %[1]" + - "s\x02마이너 생성은 idempotent하지 않으므로 가이드 설정을 다시 실행하지 마십시오. 구성을 완료하려면 'curio co" + - "nfig new-cluster %[1]s'를 실행해야 합니다.\x02FullNode에 대한 API 정보를 가져오는 데 실패했습니다" + - ": %[1]w\x02데몬 노드로부터 인증 토큰을 확인하는 중 오류 발생: %[1]s\x02기본 구성 생성 실패: %[1]s\x02" + - "데이터베이스에 'base' 구성 레이어를 삽입하는 데 실패했습니다: %[1]s\x02이 마이너 주소를 포함한 구성 'base'" + - "가 업데이트되었습니다.\x02데이터베이스에서 기본 구성을 로드하는 데 실패했습니다: %[1]s\x02기본 구성을 구문 분석하는" + - " 데 실패했습니다: %[1]s\x02기본 구성을 재생성하는 데 실패했습니다: %[1]s\x02Yugabyte 데이터베이스 설치에 " + - "연결할 정보를 입력하십시오 (https://download.yugabyte.com/)\x02호스트: %[1]s\x02포트: %" + - "[1]s\x02사용자 이름: %[1]s\x02비밀번호: %[1]s\x02데이터베이스: %[1]s\x02계속 연결 및 스키마 업데이" + - "트.\x04\x00\x01 ^\x02데이터베이스 구성 오류가 발생하여 마이그레이션을 포기합니다: %[1]s\x02Yugabyt" + - "e 데이터베이스 호스트를 입력하십시오\x02호스트가 제공되지 않았습니다\x02Yugabyte 데이터베이스 %[1]s을 입력하십시오" + - "\x02Yugabyte 데이터베이스에 연결하는 중 오류가 발생했습니다: %[1]s\x02%[1]d 섹터의 메타데이터를 이동 중입니" + - "다.\x02기본 설정 'base'가 이 마이너의 주소(%[1]s) 및 지갑 설정을 포함하도록 업데이트되었습니다.\x02구성 %" + - "[1]s를 %[2]s과 비교하세요. 
지갑 주소 이외의 마이너 ID 사이의 변경 사항은 필요한 실행자를 위한 새로운 최소한의 레이어" + - "여야 합니다.\x02'base' 설정이 이 lotus-miner의 config.toml과 유사하게 만들어졌습니다.\x04" + - "\x00\x01 *\x02레이어 %[1]s가 생성되었습니다.\x04\x00\x01 \x22\x02구성 파일을 사용하려면:\x02C" + - "urio를 실행하려면: 기계 또는 cgroup 격리를 사용하여 다음 명령을 사용하세요 (예제 레이어 선택과 함께):" - -var zhIndex = []uint32{ // 126 elements - // Entry 0 - 1F - 0x00000000, 0x00000033, 0x0000008b, 0x00000134, - 0x0000017c, 0x000001cb, 0x000001e4, 0x000001f1, - 0x00000211, 0x0000022a, 0x00000240, 0x0000025d, - 0x000002a5, 0x000002e6, 0x00000302, 0x00000347, - 0x00000364, 0x0000038d, 0x000003ab, 0x000003df, - 0x000003ef, 0x000003fc, 0x0000040c, 0x00000445, - 0x00000499, 0x000004c6, 0x00000515, 0x00000550, - 0x00000585, 0x0000058f, 0x000005b3, 0x000005d1, - // Entry 20 - 3F - 0x000005f5, 0x00000613, 0x00000631, 0x00000666, - 0x00000679, 0x00000688, 0x000006e2, 0x0000071f, - 0x00000747, 0x000007a6, 0x000007f6, 0x00000849, - 0x0000086f, 0x0000089c, 0x000008ba, 0x000008f6, - 0x0000093a, 0x0000094a, 0x0000097d, 0x000009aa, - 0x000009cf, 0x000009f2, 0x00000a6a, 0x00000a88, - 0x00000ab7, 0x00000adb, 0x00000b00, 0x00000b24, - 0x00000b48, 0x00000b6b, 0x00000b8b, 0x00000ba0, - // Entry 40 - 5F - 0x00000beb, 0x00000c1b, 0x00000c22, 0x00000c4c, - 0x00000c70, 0x00000cb4, 0x00000ce6, 0x00000d2f, - 0x00000d42, 0x00000d5c, 0x00000d7b, 0x00000da0, - 0x00000db8, 0x00000dcd, 0x00000de5, 0x00000df9, - 0x00000e10, 0x00000e41, 0x00000e66, 0x00000e7c, - 0x00000e8c, 0x00000ea6, 0x00000eba, 0x00000ecd, - 0x00000eda, 0x00000efa, 0x00000f0a, 0x00000f27, - 0x00000f47, 0x00000f61, 0x00000f7e, 0x00000faf, - // Entry 60 - 7F - 0x00000fc8, 0x00000ff1, 0x0000107e, 0x000010aa, - 0x000010e5, 0x00001105, 0x00001136, 0x00001169, - 0x00001196, 0x000011b7, 0x000011dd, 0x00001237, - 0x00001246, 0x00001255, 0x00001267, 0x00001276, - 0x00001288, 0x000012a7, 0x000012df, 0x00001304, - 0x00001314, 0x00001332, 0x0000135e, 0x00001388, - 0x000013d9, 0x0000145b, 0x000014a2, 0x000014bc, - 0x000014d4, 0x0000152b, -} // Size: 528 bytes - 
-const zhData string = "" + // Size: 5419 bytes - "\x04\x00\x01 .\x02使用箭头键进行导航:↓ ↑ → ←\x02此交互式工具将创建一个新的矿工角色,并为其创建基本配置层。\x02" + - "该过程部分幂等。一旦创建了新的矿工角色,并且随后的步骤失败,用户需要运行 'curio config new-cluster < 矿工 ID" + - " >' 来完成配置。\x02这个交互式工具可以在5分钟内将lotus-miner迁移到Curio。\x02每一步都需要您的确认,并且可以撤销。随" + - "时按Ctrl+C退出。\x02在终端中按下Ctrl+C\x02我想要:\x02从现有的 Lotus-Miner 迁移\x02创建一个新的矿工" + - "\x02中止剩余步骤。\x02Lotus-Miner到Curio迁移。\x02尝试使用%[1]s的网络界面进行更进一步的指导性改进。\x02如果" + - "适用,您现在可以迁移您的市场节点(%[1]s)。\x02新矿工初始化完成。\x02将 lotus-miner config.toml 迁移到" + - " Curio 的数据库配置中。\x02获取 API 时出错:%[1]s\x02无法获取FullNode的API信息:%[1]w\x02获取令牌时" + - "出错:%[1]s\x02发现无法迁移的扇区。您想要继续吗?\x02是的,继续\x02不,中止\x02中止迁移。\x02保存配置到层时出错:%" + - "[1]s。正在中止迁移\x02Curio 团队希望改进您使用的软件。告诉团队您正在使用 `%[1]s`。\x02选择您想与Curio团队分享的内" + - "容。\x02个人数据:矿工 ID,Curio 版本,链(%[1]s 或 %[2]s)。签名。\x02聚合-匿名:版本,链和矿工算力(分桶)。" + - "\x02提示:我是在任何链上运行 Curio 的人。\x02没有。\x02获取矿工功率时出错:%[1]s\x02整理消息时出错:%[1]s" + - "\x02获取矿工信息时出错:%[1]s\x02签署消息时出错:%[1]s\x02发送消息时出错:%[1]s\x04\x00\x01 0\x02发" + - "送消息时出错:状态%[1]s,消息:\x02消息已发送。\x04\x00\x01 \x0a\x02文档:\x02'%[1]s'层存储通用配置" + - "。所有Curio实例都可以在其%[2]s参数中包含它。\x02您可以添加其他层进行每台机器的配置更改。\x02Filecoin %[1]s " + - "频道:%[2]s 和 %[3]s\x02通过冗余增加可靠性:使用至少后层启动多台机器:'curio run --layers=post'" + - "\x02一个数据库可以服务多个矿工ID:为每个lotus-miner运行迁移。\x02请立即启动(或重新启动)%[1]s,因为数据库凭据已在%[" + - "2]s中。\x02等待%[1]s将扇区写入Yugabyte。\x02解释矿工ID时出错:%[1]s:ID:%[2]s\x02验证扇区时出错:%[" + - "1]s\x02扇区在数据库中。数据库已准备好用于%[1]s。\x02现在关闭lotus-miner和lotus-worker,改为使用%[1]s" + - "运行。\x02按回车继续\x02扇区已验证。发现了%[1]d个扇区位置。\x02已连接到Yugabyte。模式是当前的。\x02在数据库中启" + - "用扇区索引。\x02编码config.toml时出错:%[1]s\x02按回车键更新 %[1]s 以包含 Yugabyte 信息。在进行更改" + - "之前,将在该文件夹中写入备份文件。\x02扩展路径时出错:%[1]s\x02读取config.toml文件模式时出错:%[1]s\x02创建" + - "备份文件时出错:%[1]s\x02读取 config.toml 时出错:%[1]s\x02写入备份文件时出错:%[1]s\x02关闭备份文件" + - "时出错:%[1]s\x02写入config.toml时出错:%[1]s\x04\x00\x01 \x1b\x02重新启动Lotus Mine" + - "r。\x02已连接到Yugabyte\x02开始之前,请确保您的密封管道已排空并关闭lotus-miner。\x02选择您的lotus-mine" + - "r配置目录的位置?\x02其他\x02输入%[1]s使用的配置目录的路径\x04\x00\x01 \x1f\x02未提供路径,放弃迁移\x02无" + - 
"法读取提供的目录中的config.toml文件,错误:%[1]s\x02无法从目录创建repo:%[1]s。 中止迁移\x02无法锁定矿工r" + - "epo。 您的矿工必须停止:%[1]s\x0a 中止迁移\x02读取矿工配置\x04\x00\x01\x0a\x15\x02步骤完成:%[1]s" + - "\x02初始化新的矿工角色。\x02输入创建新矿工所需的信息\x02所有者地址:%[1]s\x02工作地址:%[1]s\x02发送者地址:%[1" + - "]s\x02扇区大小: %[1]d\x02置信度时期: %[1]d\x02继续验证地址并创建新的矿工角色。\x04\x00\x01 \x02矿" + - "工创建错误发生: %[1]s\x02输入所有者地址\x02未提供地址\x02解析地址失败: %[1]s\x02输入 %[1]s 地址\x02" + - "输入扇区大小\x02未提供值\x02解析扇区大小失败: %[1]s\x02置信度时期\x02解析置信度失败: %[1]s\x02创建矿工角色" + - "失败: %[1]s\x02矿工 %[1]s 创建成功\x02无法访问数据库: %[1]s\x02连接到完整节点 API 时发生错误: %[1" + - "]s\x02预初始化步骤完成\x02生成密码的随机字节失败: %[1]s\x02请不要再次运行引导设置,因为矿工创建不是幂等的。 您需要运行 '" + - "curio config new-cluster %[1]s' 来完成配置。\x02无法获取 FullNode 的 API 信息: %[1]w" + - "\x02无法验证来自守护进程节点的授权令牌: %[1]s\x02无法生成默认配置: %[1]s\x02无法将 'base' 配置层插入数据库: " + - "%[1]s\x02配置 'base' 已更新以包含此矿工的地址\x02从数据库加载基本配置失败:%[1]s\x02解析基本配置失败:%[1]s" + - "\x02重新生成基本配置失败: %[1]s\x02输入连接到您的Yugabyte数据库安装的信息(https://download.yugaby" + - "te.com/)\x02主机:%[1]s\x02端口:%[1]s\x02用户名:%[1]s\x02密码:%[1]s\x02数据库:%[1]s" + - "\x02继续连接和更新架构。\x04\x00\x01 3\x02发生数据库配置错误,放弃迁移:%[1]s\x02输入Yugabyte数据库主机(" + - "S)\x02未提供主机\x02输入Yugabyte数据库 %[1]s\x02连接到Yugabyte数据库时出错:%[1]s\x02正在迁移%[1" + - "]d个扇区的元数据。\x02'base'配置已更新,包括该矿工的地址(%[1]s)及其钱包设置。\x02比较配置%[1]s和%[2]s。矿工ID" + - "之间除了钱包地址的变化应该是需要的运行者的一个新的、最小的层。\x02'base'配置已创建,以类似于这个lotus-miner的confi" + - "g.toml。\x04\x00\x01 \x15\x02层%[1]s已创建。\x04\x00\x01 \x13\x02要使用配置:\x02运行C" + - "urio:使用机器或cgroup隔离,使用命令(附带示例层选择):" - - // Total table size 20695 bytes (20KiB); checksum: BB5CCE20 diff --git a/cmd/curio/internal/translations/knowns/main.go b/cmd/curio/internal/translations/knowns/main.go deleted file mode 100644 index a30a940657e..00000000000 --- a/cmd/curio/internal/translations/knowns/main.go +++ /dev/null @@ -1,82 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "os" - "path" - - "github.com/samber/lo" -) - -func main() { - for _, arg := range os.Args { - handleKnowns(arg) - } -} - -func handleKnowns(pathStart string) { - outpath := 
path.Join(pathStart, "out.gotext.json") - b, err := os.ReadFile(outpath) - if err != nil { - fmt.Println("cannot open "+outpath+":", err) - return - } - type TMsg struct { - ID string `json:"id"` - Translation string `json:"translation"` - Message string `json:"message"` - Placeholder json.RawMessage `json:"placeholder"` - } - type Dataformat struct { - Language string `json:"language"` - Messages []TMsg `json:"messages"` - } - var outData Dataformat - err = json.NewDecoder(bytes.NewBuffer(b)).Decode(&outData) - if err != nil { - fmt.Println("cannot decode "+outpath+":", err) - return - } - - f, err := os.Open(path.Join(pathStart, "messages.gotext.json")) - if err != nil { - fmt.Println("cannot open "+path.Join(pathStart, "messages.gotext.json")+":", err) - return - } - defer func() { _ = f.Close() }() - - var msgData Dataformat - err = json.NewDecoder(f).Decode(&msgData) - if err != nil { - fmt.Println("cannot decode "+path.Join(pathStart, "messages.gotext.json")+":", err) - return - } - - knowns := map[string]string{} - for _, msg := range msgData.Messages { - knowns[msg.ID] = msg.Translation - } - - toTranslate := lo.Filter(outData.Messages, func(msg TMsg, _ int) bool { - _, ok := knowns[msg.ID] - return !ok - }) - - outData.Messages = toTranslate // drop the "done" messages - var outJSON bytes.Buffer - enc := json.NewEncoder(&outJSON) - enc.SetIndent(" ", " ") - err = enc.Encode(outData) - if err != nil { - fmt.Println("cannot encode "+outpath+":", err) - return - } - err = os.WriteFile(outpath, outJSON.Bytes(), 0644) - if err != nil { - fmt.Println("cannot write "+outpath+":", err) - return - } - fmt.Println("rearranged successfully") -} diff --git a/cmd/curio/internal/translations/locales/en/out.gotext.json b/cmd/curio/internal/translations/locales/en/out.gotext.json deleted file mode 100644 index 86b84e9a7ab..00000000000 --- a/cmd/curio/internal/translations/locales/en/out.gotext.json +++ /dev/null @@ -1,1684 +0,0 @@ -{ - "language": "en", - "messages": [ - 
{ - "id": "Use the arrow keys to navigate: ↓ ↑ → ←", - "message": "Use the arrow keys to navigate: ↓ ↑ → ←", - "translation": "Use the arrow keys to navigate: ↓ ↑ → ←", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "This interactive tool creates a new miner actor and creates the basic configuration layer for it.", - "message": "This interactive tool creates a new miner actor and creates the basic configuration layer for it.", - "translation": "This interactive tool creates a new miner actor and creates the basic configuration layer for it.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "This process is partially idempotent. Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster \u003c miner ID \u003e' to finish the configuration.", - "message": "This process is partially idempotent. Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster \u003c miner ID \u003e' to finish the configuration.", - "translation": "This process is partially idempotent. Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster \u003c miner ID \u003e' to finish the configuration.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "This interactive tool migrates lotus-miner to Curio in 5 minutes.", - "message": "This interactive tool migrates lotus-miner to Curio in 5 minutes.", - "translation": "This interactive tool migrates lotus-miner to Curio in 5 minutes.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Each step needs your confirmation and can be reversed. Press Ctrl+C to exit at any time.", - "message": "Each step needs your confirmation and can be reversed. Press Ctrl+C to exit at any time.", - "translation": "Each step needs your confirmation and can be reversed. 
Press Ctrl+C to exit at any time.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Ctrl+C pressed in Terminal", - "message": "Ctrl+C pressed in Terminal", - "translation": "Ctrl+C pressed in Terminal", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "I want to:", - "message": "I want to:", - "translation": "I want to:", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Migrate from existing Lotus-Miner", - "message": "Migrate from existing Lotus-Miner", - "translation": "Migrate from existing Lotus-Miner", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Create a new miner", - "message": "Create a new miner", - "translation": "Create a new miner", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Aborting remaining steps.", - "message": "Aborting remaining steps.", - "translation": "Aborting remaining steps.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]v", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Lotus-Miner to Curio Migration.", - "message": "Lotus-Miner to Curio Migration.", - "translation": "Lotus-Miner to Curio Migration.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Try the web interface with {Rendercurio_run___layersgui} for further guided improvements.", - "message": "Try the web interface with {Rendercurio_run___layersgui} for further guided improvements.", - "translation": "Try the web interface with {Rendercurio_run___layersgui} for further guided improvements.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Rendercurio_run___layersgui", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "code.Render(\"curio run --layers=gui\")" - } - ], - 
"fuzzy": true - }, - { - "id": "You can now migrate your market node ({Boost}), if applicable.", - "message": "You can now migrate your market node ({Boost}), if applicable.", - "translation": "You can now migrate your market node ({Boost}), if applicable.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Boost", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"Boost\"" - } - ], - "fuzzy": true - }, - { - "id": "New Miner initialization complete.", - "message": "New Miner initialization complete.", - "translation": "New Miner initialization complete.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Migrating lotus-miner config.toml to Curio in-database configuration.", - "message": "Migrating lotus-miner config.toml to Curio in-database configuration.", - "translation": "Migrating lotus-miner config.toml to Curio in-database configuration.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Error getting API: {Error}", - "message": "Error getting API: {Error}", - "translation": "Error getting API: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "could not get API info for FullNode: {Err}", - "message": "could not get API info for FullNode: {Err}", - "translation": "could not get API info for FullNode: {Err}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Err", - "string": "%[1]w", - "type": "error", - "underlyingType": "interface{Error() string}", - "argNum": 1, - "expr": "err" - } - ], - "fuzzy": true - }, - { - "id": "Error getting token: {Error}", - "message": "Error getting token: {Error}", - "translation": "Error getting token: {Error}", - "translatorComment": "Copied from source.", - 
"placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Unmigratable sectors found. Do you want to continue?", - "message": "Unmigratable sectors found. Do you want to continue?", - "translation": "Unmigratable sectors found. Do you want to continue?", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Yes, continue", - "message": "Yes, continue", - "translation": "Yes, continue", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "No, abort", - "message": "No, abort", - "translation": "No, abort", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Aborting migration.", - "message": "Aborting migration.", - "translation": "Aborting migration.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]v", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error saving config to layer: {Error}. Aborting Migration", - "message": "Error saving config to layer: {Error}. Aborting Migration", - "translation": "Error saving config to layer: {Error}. Aborting Migration", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "The Curio team wants to improve the software you use. Tell the team you're using `{Curio}`.", - "message": "The Curio team wants to improve the software you use. Tell the team you're using `{Curio}`.", - "translation": "The Curio team wants to improve the software you use. 
Tell the team you're using `{Curio}`.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Curio", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"curio\"" - } - ], - "fuzzy": true - }, - { - "id": "Select what you want to share with the Curio team.", - "message": "Select what you want to share with the Curio team.", - "translation": "Select what you want to share with the Curio team.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Individual Data: Miner ID, Curio version, chain ({Mainnet} or {Calibration}). Signed.", - "message": "Individual Data: Miner ID, Curio version, chain ({Mainnet} or {Calibration}). Signed.", - "translation": "Individual Data: Miner ID, Curio version, chain ({Mainnet} or {Calibration}). Signed.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Mainnet", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"mainnet\"" - }, - { - "id": "Calibration", - "string": "%[2]s", - "type": "string", - "underlyingType": "string", - "argNum": 2, - "expr": "\"calibration\"" - } - ], - "fuzzy": true - }, - { - "id": "Aggregate-Anonymous: version, chain, and Miner power (bucketed).", - "message": "Aggregate-Anonymous: version, chain, and Miner power (bucketed).", - "translation": "Aggregate-Anonymous: version, chain, and Miner power (bucketed).", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Hint: I am someone running Curio on whichever chain.", - "message": "Hint: I am someone running Curio on whichever chain.", - "translation": "Hint: I am someone running Curio on whichever chain.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Nothing.", - "message": "Nothing.", - "translation": "Nothing.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Error getting miner power: 
{Error}", - "message": "Error getting miner power: {Error}", - "translation": "Error getting miner power: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error marshalling message: {Error}", - "message": "Error marshalling message: {Error}", - "translation": "Error marshalling message: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error getting miner info: {Error}", - "message": "Error getting miner info: {Error}", - "translation": "Error getting miner info: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error signing message: {Error}", - "message": "Error signing message: {Error}", - "translation": "Error signing message: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error sending message: {Error}", - "message": "Error sending message: {Error}", - "translation": "Error sending message: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error sending message: Status {Status}, Message:", - "message": "Error sending message: Status {Status}, Message:", - "translation": 
"Error sending message: Status {Status}, Message:", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Status", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "resp.Status" - }, - { - "id": "Stringb", - "string": "%[2]v", - "type": "string", - "underlyingType": "string", - "argNum": 2, - "expr": "string(b)" - } - ], - "fuzzy": true - }, - { - "id": "Message sent.", - "message": "Message sent.", - "translation": "Message sent.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Documentation:", - "message": "Documentation:", - "translation": "Documentation:", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "The '{Base}' layer stores common configuration. All curio instances can include it in their {__layers} argument.", - "message": "The '{Base}' layer stores common configuration. All curio instances can include it in their {__layers} argument.", - "translation": "The '{Base}' layer stores common configuration. 
All curio instances can include it in their {__layers} argument.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Base", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"base\"" - }, - { - "id": "__layers", - "string": "%[2]s", - "type": "string", - "underlyingType": "string", - "argNum": 2, - "expr": "\"--layers\"" - } - ], - "fuzzy": true - }, - { - "id": "You can add other layers for per-machine configuration changes.", - "message": "You can add other layers for per-machine configuration changes.", - "translation": "You can add other layers for per-machine configuration changes.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Filecoin {Slack} channels: {Fil_curio_help} and {Fil_curio_dev}", - "message": "Filecoin {Slack} channels: {Fil_curio_help} and {Fil_curio_dev}", - "translation": "Filecoin {Slack} channels: {Fil_curio_help} and {Fil_curio_dev}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Slack", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"Slack\"" - }, - { - "id": "Fil_curio_help", - "string": "%[2]s", - "type": "string", - "underlyingType": "string", - "argNum": 2, - "expr": "\"#fil-curio-help\"" - }, - { - "id": "Fil_curio_dev", - "string": "%[3]s", - "type": "string", - "underlyingType": "string", - "argNum": 3, - "expr": "\"#fil-curio-dev\"" - } - ], - "fuzzy": true - }, - { - "id": "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'", - "message": "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'", - "translation": "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": 
"One database can serve multiple miner IDs: Run a migration for each lotus-miner.", - "message": "One database can serve multiple miner IDs: Run a migration for each lotus-miner.", - "translation": "One database can serve multiple miner IDs: Run a migration for each lotus-miner.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Please start (or restart) {Lotus_miner} now that database credentials are in {Toml}.", - "message": "Please start (or restart) {Lotus_miner} now that database credentials are in {Toml}.", - "translation": "Please start (or restart) {Lotus_miner} now that database credentials are in {Toml}.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Lotus_miner", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"lotus-miner\"" - }, - { - "id": "Toml", - "string": "%[2]s", - "type": "string", - "underlyingType": "string", - "argNum": 2, - "expr": "\"config.toml\"" - } - ], - "fuzzy": true - }, - { - "id": "Waiting for {Lotus_miner} to write sectors into Yugabyte.", - "message": "Waiting for {Lotus_miner} to write sectors into Yugabyte.", - "translation": "Waiting for {Lotus_miner} to write sectors into Yugabyte.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Lotus_miner", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"lotus-miner\"" - } - ], - "fuzzy": true - }, - { - "id": "Error interpreting miner ID: {Error}: ID: {String}", - "message": "Error interpreting miner ID: {Error}: ID: {String}", - "translation": "Error interpreting miner ID: {Error}: ID: {String}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - }, - { - "id": "String", - "string": "%[2]s", - "type": "string", - "underlyingType": "string", - 
"argNum": 2, - "expr": "d.MinerID.String()" - } - ], - "fuzzy": true - }, - { - "id": "Error verifying sectors: {Error}", - "message": "Error verifying sectors: {Error}", - "translation": "Error verifying sectors: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "The sectors are in the database. The database is ready for {Curio}.", - "message": "The sectors are in the database. The database is ready for {Curio}.", - "translation": "The sectors are in the database. The database is ready for {Curio}.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Curio", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"Curio\"" - } - ], - "fuzzy": true - }, - { - "id": "Now shut down lotus-miner and lotus-worker and use run {Rendercurio_run} instead.", - "message": "Now shut down lotus-miner and lotus-worker and use run {Rendercurio_run} instead.", - "translation": "Now shut down lotus-miner and lotus-worker and use run {Rendercurio_run} instead.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Rendercurio_run", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "code.Render(\"curio run\")" - } - ], - "fuzzy": true - }, - { - "id": "Press return to continue", - "message": "Press return to continue", - "translation": "Press return to continue", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Sectors verified. {I} sector locations found.", - "message": "Sectors verified. {I} sector locations found.", - "translation": "Sectors verified. 
{I} sector locations found.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "I", - "string": "%[1]d", - "type": "[]int", - "underlyingType": "[]int", - "argNum": 1, - "expr": "i" - } - ], - "fuzzy": true - }, - { - "id": "Connected to Yugabyte. Schema is current.", - "message": "Connected to Yugabyte. Schema is current.", - "translation": "Connected to Yugabyte. Schema is current.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Enabling Sector Indexing in the database.", - "message": "Enabling Sector Indexing in the database.", - "translation": "Enabling Sector Indexing in the database.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Error encoding config.toml: {Error}", - "message": "Error encoding config.toml: {Error}", - "translation": "Error encoding config.toml: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Press return to update {Toml} with Yugabyte info. A Backup file will be written to that folder before changes are made.", - "message": "Press return to update {Toml} with Yugabyte info. A Backup file will be written to that folder before changes are made.", - "translation": "Press return to update {Toml} with Yugabyte info. 
A Backup file will be written to that folder before changes are made.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Toml", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"config.toml\"" - } - ], - "fuzzy": true - }, - { - "id": "Error expanding path: {Error}", - "message": "Error expanding path: {Error}", - "translation": "Error expanding path: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error reading filemode of config.toml: {Error}", - "message": "Error reading filemode of config.toml: {Error}", - "translation": "Error reading filemode of config.toml: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error creating backup file: {Error}", - "message": "Error creating backup file: {Error}", - "translation": "Error creating backup file: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error reading config.toml: {Error}", - "message": "Error reading config.toml: {Error}", - "translation": "Error reading config.toml: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error writing backup file: {Error}", - "message": "Error writing backup file: {Error}", - "translation": "Error writing backup 
file: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error closing backup file: {Error}", - "message": "Error closing backup file: {Error}", - "translation": "Error closing backup file: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error writing config.toml: {Error}", - "message": "Error writing config.toml: {Error}", - "translation": "Error writing config.toml: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Restart Lotus Miner.", - "message": "Restart Lotus Miner.", - "translation": "Restart Lotus Miner.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Connected to Yugabyte", - "message": "Connected to Yugabyte", - "translation": "Connected to Yugabyte", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.", - "message": "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.", - "translation": "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Select the location of your lotus-miner config directory?", - "message": "Select the location of your lotus-miner config directory?", - "translation": "Select the location of your lotus-miner config directory?", - "translatorComment": "Copied from source.", - 
"fuzzy": true - }, - { - "id": "Other", - "message": "Other", - "translation": "Other", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Enter the path to the configuration directory used by {Lotus_miner}", - "message": "Enter the path to the configuration directory used by {Lotus_miner}", - "translation": "Enter the path to the configuration directory used by {Lotus_miner}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Lotus_miner", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"lotus-miner\"" - } - ], - "fuzzy": true - }, - { - "id": "No path provided, abandoning migration", - "message": "No path provided, abandoning migration", - "translation": "No path provided, abandoning migration", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Cannot read the config.toml file in the provided directory, Error: {Error}", - "message": "Cannot read the config.toml file in the provided directory, Error: {Error}", - "translation": "Cannot read the config.toml file in the provided directory, Error: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Could not create repo from directory: {Error}. Aborting migration", - "message": "Could not create repo from directory: {Error}. Aborting migration", - "translation": "Could not create repo from directory: {Error}. Aborting migration", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Could not lock miner repo. 
Your miner must be stopped: {Error}\n Aborting migration", - "message": "Could not lock miner repo. Your miner must be stopped: {Error}\n Aborting migration", - "translation": "Could not lock miner repo. Your miner must be stopped: {Error}\n Aborting migration", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Read Miner Config", - "message": "Read Miner Config", - "translation": "Read Miner Config", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Step Complete: {Step}", - "message": "Step Complete: {Step}", - "translation": "Step Complete: {Step}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Step", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "step" - } - ], - "fuzzy": true - }, - { - "id": "Initializing a new miner actor.", - "message": "Initializing a new miner actor.", - "translation": "Initializing a new miner actor.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Enter the info to create a new miner", - "message": "Enter the info to create a new miner", - "translation": "Enter the info to create a new miner", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Owner Address: {String}", - "message": "Owner Address: {String}", - "translation": "Owner Address: {String}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "String", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "d.owner.String()" - } - ], - "fuzzy": true - }, - { - "id": "Worker Address: {String}", - "message": "Worker Address: {String}", - "translation": "Worker Address: {String}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - 
"id": "String", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "d.worker.String()" - } - ], - "fuzzy": true - }, - { - "id": "Sender Address: {String}", - "message": "Sender Address: {String}", - "translation": "Sender Address: {String}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "String", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "d.sender.String()" - } - ], - "fuzzy": true - }, - { - "id": "Sector Size: {Ssize}", - "message": "Sector Size: {Ssize}", - "translation": "Sector Size: {Ssize}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Ssize", - "string": "%[1]d", - "type": "github.com/filecoin-project/go-state-types/abi.SectorSize", - "underlyingType": "uint64", - "argNum": 1, - "expr": "d.ssize" - } - ], - "fuzzy": true - }, - { - "id": "Confidence epochs: {Confidence}", - "message": "Confidence epochs: {Confidence}", - "translation": "Confidence epochs: {Confidence}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Confidence", - "string": "%[1]d", - "type": "uint64", - "underlyingType": "uint64", - "argNum": 1, - "expr": "d.confidence" - } - ], - "fuzzy": true - }, - { - "id": "Continue to verify the addresses and create a new miner actor.", - "message": "Continue to verify the addresses and create a new miner actor.", - "translation": "Continue to verify the addresses and create a new miner actor.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Miner creation error occurred: {Error}", - "message": "Miner creation error occurred: {Error}", - "translation": "Miner creation error occurred: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - 
"id": "Enter the owner address", - "message": "Enter the owner address", - "translation": "Enter the owner address", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "No address provided", - "message": "No address provided", - "translation": "No address provided", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Failed to parse the address: {Error}", - "message": "Failed to parse the address: {Error}", - "translation": "Failed to parse the address: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Enter {Stringworker_senderi_1} address", - "message": "Enter {Stringworker_senderi_1} address", - "translation": "Enter {Stringworker_senderi_1} address", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Stringworker_senderi_1", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "[]string{\"worker\", \"sender\"}[i-1]" - } - ], - "fuzzy": true - }, - { - "id": "Enter the sector size", - "message": "Enter the sector size", - "translation": "Enter the sector size", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "No value provided", - "message": "No value provided", - "translation": "No value provided", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Failed to parse sector size: {Error}", - "message": "Failed to parse sector size: {Error}", - "translation": "Failed to parse sector size: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Confidence epochs", - "message": "Confidence 
epochs", - "translation": "Confidence epochs", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Failed to parse confidence: {Error}", - "message": "Failed to parse confidence: {Error}", - "translation": "Failed to parse confidence: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Failed to create the miner actor: {Error}", - "message": "Failed to create the miner actor: {Error}", - "translation": "Failed to create the miner actor: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Miner {String} created successfully", - "message": "Miner {String} created successfully", - "translation": "Miner {String} created successfully", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "String", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "miner.String()" - } - ], - "fuzzy": true - }, - { - "id": "Cannot reach the DB: {Error}", - "message": "Cannot reach the DB: {Error}", - "translation": "Cannot reach the DB: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Error connecting to full node API: {Error}", - "message": "Error connecting to full node API: {Error}", - "translation": "Error connecting to full node API: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - 
"argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Pre-initialization steps complete", - "message": "Pre-initialization steps complete", - "translation": "Pre-initialization steps complete", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Failed to generate random bytes for secret: {Error}", - "message": "Failed to generate random bytes for secret: {Error}", - "translation": "Failed to generate random bytes for secret: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster {String}' to finish the configuration", - "message": "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster {String}' to finish the configuration", - "translation": "Please do not run guided-setup again as miner creation is not idempotent. 
You need to run 'curio config new-cluster {String}' to finish the configuration", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "String", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "d.MinerID.String()" - } - ], - "fuzzy": true - }, - { - "id": "Failed to get API info for FullNode: {Err}", - "message": "Failed to get API info for FullNode: {Err}", - "translation": "Failed to get API info for FullNode: {Err}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Err", - "string": "%[1]w", - "type": "error", - "underlyingType": "interface{Error() string}", - "argNum": 1, - "expr": "err" - } - ], - "fuzzy": true - }, - { - "id": "Failed to verify the auth token from daemon node: {Error}", - "message": "Failed to verify the auth token from daemon node: {Error}", - "translation": "Failed to verify the auth token from daemon node: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Failed to generate default config: {Error}", - "message": "Failed to generate default config: {Error}", - "translation": "Failed to generate default config: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Failed to insert 'base' config layer in database: {Error}", - "message": "Failed to insert 'base' config layer in database: {Error}", - "translation": "Failed to insert 'base' config layer in database: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": 
"err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Configuration 'base' was updated to include this miner's address", - "message": "Configuration 'base' was updated to include this miner's address", - "translation": "Configuration 'base' was updated to include this miner's address", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Failed to load base config from database: {Error}", - "message": "Failed to load base config from database: {Error}", - "translation": "Failed to load base config from database: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Failed to parse base config: {Error}", - "message": "Failed to parse base config: {Error}", - "translation": "Failed to parse base config: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Failed to regenerate base config: {Error}", - "message": "Failed to regenerate base config: {Error}", - "translation": "Failed to regenerate base config: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)", - "message": "Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)", - "translation": "Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - 
"id": "Host: {Hosts_}", - "message": "Host: {Hosts_}", - "translation": "Host: {Hosts_}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Hosts_", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "strings.Join(harmonyCfg.Hosts, \",\")" - } - ], - "fuzzy": true - }, - { - "id": "Port: {Port}", - "message": "Port: {Port}", - "translation": "Port: {Port}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Port", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonyCfg.Port" - } - ], - "fuzzy": true - }, - { - "id": "Username: {Username}", - "message": "Username: {Username}", - "translation": "Username: {Username}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Username", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonyCfg.Username" - } - ], - "fuzzy": true - }, - { - "id": "Password: {Password}", - "message": "Password: {Password}", - "translation": "Password: {Password}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Password", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonyCfg.Password" - } - ], - "fuzzy": true - }, - { - "id": "Database: {Database}", - "message": "Database: {Database}", - "translation": "Database: {Database}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Database", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonyCfg.Database" - } - ], - "fuzzy": true - }, - { - "id": "Continue to connect and update schema.", - "message": "Continue to connect and update schema.", - "translation": "Continue to connect and update schema.", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Database config error occurred, 
abandoning migration: {Error}", - "message": "Database config error occurred, abandoning migration: {Error}", - "translation": "Database config error occurred, abandoning migration: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Enter the Yugabyte database host(s)", - "message": "Enter the Yugabyte database host(s)", - "translation": "Enter the Yugabyte database host(s)", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "No host provided", - "message": "No host provided", - "translation": "No host provided", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Enter the Yugabyte database {Stringport_username_password_databasei_1}", - "message": "Enter the Yugabyte database {Stringport_username_password_databasei_1}", - "translation": "Enter the Yugabyte database {Stringport_username_password_databasei_1}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Stringport_username_password_databasei_1", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "[]string{\"port\", \"username\", \"password\", \"database\"}[i-1]" - } - ], - "fuzzy": true - }, - { - "id": "Error connecting to Yugabyte database: {Error}", - "message": "Error connecting to Yugabyte database: {Error}", - "translation": "Error connecting to Yugabyte database: {Error}", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ], - "fuzzy": true - }, - { - "id": "Migrating metadata for {NSectors} sectors.", - "message": "Migrating metadata for {NSectors} sectors.", - "translation": "Migrating metadata for {NSectors} sectors.", - 
"translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "NSectors", - "string": "%[1]d", - "type": "int", - "underlyingType": "int", - "argNum": 1, - "expr": "nSectors" - } - ], - "fuzzy": true - }, - { - "id": "Configuration 'base' was updated to include this miner's address ({MinerAddress}) and its wallet setup.", - "message": "Configuration 'base' was updated to include this miner's address ({MinerAddress}) and its wallet setup.", - "translation": "Configuration 'base' was updated to include this miner's address ({MinerAddress}) and its wallet setup.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "MinerAddress", - "string": "%[1]s", - "type": "github.com/filecoin-project/go-address.Address", - "underlyingType": "struct{str string}", - "argNum": 1, - "expr": "minerAddress" - } - ], - "fuzzy": true - }, - { - "id": "Compare the configurations {Base} to {MinerAddresses0}. Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", - "message": "Compare the configurations {Base} to {MinerAddresses0}. Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", - "translation": "Compare the configurations {Base} to {MinerAddresses0}. 
Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "Base", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "\"base\"" - }, - { - "id": "MinerAddresses0", - "string": "%[2]s", - "type": "string", - "underlyingType": "string", - "argNum": 2, - "expr": "\"mig-\" + curioCfg.Addresses[0].MinerAddresses[0]" - } - ], - "fuzzy": true - }, - { - "id": "Configuration 'base' was created to resemble this lotus-miner's config.toml .", - "message": "Configuration 'base' was created to resemble this lotus-miner's config.toml .", - "translation": "Configuration 'base' was created to resemble this lotus-miner's config.toml .", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "Layer {LayerName} created.", - "message": "Layer {LayerName} created.", - "translation": "Layer {LayerName} created.", - "translatorComment": "Copied from source.", - "placeholders": [ - { - "id": "LayerName", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "layerName" - } - ], - "fuzzy": true - }, - { - "id": "To work with the config:", - "message": "To work with the config:", - "translation": "To work with the config:", - "translatorComment": "Copied from source.", - "fuzzy": true - }, - { - "id": "To run Curio: With machine or cgroup isolation, use the command (with example layer selection):", - "message": "To run Curio: With machine or cgroup isolation, use the command (with example layer selection):", - "translation": "To run Curio: With machine or cgroup isolation, use the command (with example layer selection):", - "translatorComment": "Copied from source.", - "fuzzy": true - } - ] -} \ No newline at end of file diff --git a/cmd/curio/internal/translations/locales/ko/messages.gotext.json 
b/cmd/curio/internal/translations/locales/ko/messages.gotext.json deleted file mode 100644 index 15732f0c291..00000000000 --- a/cmd/curio/internal/translations/locales/ko/messages.gotext.json +++ /dev/null @@ -1,1130 +0,0 @@ -{ - "language": "ko", - "messages": [ - { - "id": "This interactive tool will walk you through migration of Curio.\nPress Ctrl+C to exit at any time.", - "message": "This interactive tool will walk you through migration of Curio.\nPress Ctrl+C to exit at any time.", - "translation": "이 대화형 도구는 Curio 마이그레이션 과정을 안내합니다.\n언제든지 종료하려면 Ctrl+C를 누르십시오." - }, - { - "id": "This tool confirms each action it does.", - "message": "This tool confirms each action it does.", - "translation": "이 도구는 수행하는 각 작업을 확인합니다." - }, - { - "id": "Ctrl+C pressed in Terminal", - "message": "Ctrl+C pressed in Terminal", - "translation": "터미널에서 Ctrl+C가 눌림" - }, - { - "id": "Verifying Sectors exist in Yugabyte.", - "message": "Verifying Sectors exist in Yugabyte.", - "translation": "Yugabyte에 섹터가 존재하는지 확인 중." - }, - { - "id": "Error verifying sectors: {Error}", - "message": "Error verifying sectors: {Error}", - "translation": "섹터 확인 중 오류 발생: {Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Sectors verified. {I} sectors found.", - "message": "Sectors verified. {I} sectors found.", - "translation": "섹터가 확인되었습니다. {I}개의 섹터가 발견되었습니다.", - "placeholders": [ - { - "id": "I", - "string": "%[1]d", - "type": "[]int", - "underlyingType": "[]int", - "argNum": 1, - "expr": "i" - } - ] - }, - { - "id": "Never remove the database info from the config.toml for lotus-miner as it avoids double PoSt.", - "message": "Never remove the database info from the config.toml for lotus-miner as it avoids double PoSt.", - "translation": "로터스 마이너의 config.toml에서 데이터베이스 정보를 제거하지 마십시오. 두 번의 PoSt를 피하기 위함입니다." 
- }, - { - "id": "Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)", - "message": "Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)", - "translation": "Yugabyte 데이터베이스 설치에 연결할 정보를 입력하십시오 (https://download.yugabyte.com/)" - }, - { - "id": "Host: {Hosts_}", - "message": "Host: {Hosts_}", - "translation": "호스트: {Hosts_}", - "placeholders": [ - { - "id": "Hosts_", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "strings.Join(harmonycfg.Hosts, \",\")" - } - ] - }, - { - "id": "Port: {Port}", - "message": "Port: {Port}", - "translation": "포트: {Port}", - "placeholders": [ - { - "id": "Port", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonycfg.Port" - } - ] - }, - { - "id": "Username: {Username}", - "message": "Username: {Username}", - "translation": "사용자 이름: {Username}", - "placeholders": [ - { - "id": "Username", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonycfg.Username" - } - ] - }, - { - "id": "Password: {Password}", - "message": "Password: {Password}", - "translation": "비밀번호: {Password}", - "placeholders": [ - { - "id": "Password", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonycfg.Password" - } - ] - }, - { - "id": "Database: {Database}", - "message": "Database: {Database}", - "translation": "데이터베이스: {Database}", - "placeholders": [ - { - "id": "Database", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonycfg.Database" - } - ] - }, - { - "id": "Continue to connect and update schema.", - "message": "Continue to connect and update schema.", - "translation": "계속 연결 및 스키마 업데이트." 
- }, - { - "id": "Database config error occurred, abandoning migration: {Error}", - "message": "Database config error occurred, abandoning migration: {Error}", - "translation": "데이터베이스 구성 오류가 발생하여 마이그레이션을 포기합니다: {Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Enter the Yugabyte database host(s)", - "message": "Enter the Yugabyte database host(s)", - "translation": "Yugabyte 데이터베이스 호스트를 입력하십시오" - }, - { - "id": "No host provided", - "message": "No host provided", - "translation": "호스트가 제공되지 않았습니다" - }, - { - "id": "Enter the Yugabyte database {Stringport_username_password_databasei_1}", - "message": "Enter the Yugabyte database {Stringport_username_password_databasei_1}", - "translation": "Yugabyte 데이터베이스 {Stringport_username_password_databasei_1}을 입력하십시오", - "placeholders": [ - { - "id": "Stringport_username_password_databasei_1", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "[]string{\"port\", \"username\", \"password\", \"database\"}[i-1]" - } - ] - }, - { - "id": "No value provided", - "message": "No value provided", - "translation": "값이 제공되지 않았습니다" - }, - { - "id": "Error connecting to Yugabyte database: {Error}", - "message": "Error connecting to Yugabyte database: {Error}", - "translation": "Yugabyte 데이터베이스에 연결하는 중 오류가 발생했습니다: {Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Connected to Yugabyte. Schema is current.", - "message": "Connected to Yugabyte. Schema is current.", - "translation": "Yugabyte에 연결되었습니다. 스키마가 현재입니다." 
- }, - { - "id": "Error encoding config.toml: {Error}", - "message": "Error encoding config.toml: {Error}", - "translation": "config.toml을 인코딩하는 중 오류가 발생했습니다: {Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Error reading filemode of config.toml: {Error}", - "message": "Error reading filemode of config.toml: {Error}", - "translation": "config.toml의 파일 모드를 읽는 중 오류가 발생했습니다: {Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Error writing config.toml: {Error}", - "message": "Error writing config.toml: {Error}", - "translation": "config.toml을 쓰는 중 오류가 발생했습니다: {Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Restart Lotus Miner.", - "message": "Restart Lotus Miner.", - "translation": "로터스 마이너 재시작." - }, - { - "id": "Connected to Yugabyte", - "message": "Connected to Yugabyte", - "translation": "Yugabyte에 연결됨" - }, - { - "id": "Select the location of your lotus-miner config directory?", - "message": "Select the location of your lotus-miner config directory?", - "translation": "로터스 마이너 구성 디렉토리의 위치를 선택하시겠습니까?" 
- }, - { - "id": "Other", - "message": "Other", - "translation": "기타" - }, - { - "id": "Enter the path to the configuration directory used by lotus-miner", - "message": "Enter the path to the configuration directory used by lotus-miner", - "translation": "로터스 마이너에서 사용하는 구성 디렉토리의 경로를 입력하십시오" - }, - { - "id": "No path provided, abandoning migration", - "message": "No path provided, abandoning migration", - "translation": "경로가 제공되지 않았으므로 마이그레이션을 포기합니다" - }, - { - "id": "Cannot read the config.toml file in the provided directory, Error: {Error}", - "message": "Cannot read the config.toml file in the provided directory, Error: {Error}", - "translation": "제공된 디렉토리에서 config.toml 파일을 읽을 수 없습니다. 오류: {Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Read Miner Config", - "message": "Read Miner Config", - "translation": "마이너 구성 읽기" - }, - { - "id": "Completed Step: {Step}", - "message": "Completed Step: {Step}", - "translation": "단계 완료: {Step}", - "placeholders": [ - { - "id": "Step", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "step" - } - ] - }, - { - "id": "This interactive tool migrates lotus-miner to Curio in 5 minutes.", - "translation": "이 대화형 도구는 5분 안에 lotus-miner를 Curio로 이주합니다.", - "message": "This interactive tool migrates lotus-miner to Curio in 5 minutes.", - "placeholder": null - }, - { - "id": "Each step needs your confirmation and can be reversed. Press Ctrl+C to exit at any time.", - "translation": "각 단계는 확인이 필요하며 되돌릴 수 있습니다. 언제든지 Ctrl+C를 눌러 종료할 수 있습니다.", - "message": "Each step needs your confirmation and can be reversed. 
Press Ctrl+C to exit at any time.", - "placeholder": null - }, - { - "id": "Use the arrow keys to navigate: ↓ ↑ → ←", - "translation": "화살표 키를 사용하여 이동하세요: ↓ ↑ → ←", - "message": "Use the arrow keys to navigate: ↓ ↑ → ←", - "placeholder": null - }, - { - "id": "Lotus-Miner to Curio Migration.", - "translation": "Lotus-Miner에서 Curio로 이주.", - "message": "Lotus-Miner to Curio Migration.", - "placeholder": null - }, - { - "id": "Try the web interface with for further guided improvements.", - "translation": "더 나은 안내를 위해 웹 인터페이스를 사용해보세요.", - "message": "Try the web interface with for further guided improvements.", - "placeholder": null - }, - { - "id": "You can now migrate your market node ({Boost}), if applicable.", - "translation": "해당하는 경우 이제 시장 노드를 이주할 수 있습니다 ({Boost}).", - "message": "You can now migrate your market node ({Boost}), if applicable.", - "placeholder": null - }, - { - "id": "Migrating config.toml to database.", - "translation": "config.toml을 데이터베이스로 이주 중입니다.", - "message": "Migrating config.toml to database.", - "placeholder": null - }, - { - "id": "Error reading from database: {Error}. Aborting Migration.", - "translation": "데이터베이스에서 읽는 중 오류 발생: {Error}. 마이그레이션 중단.", - "message": "Error reading from database: {Error}. Aborting Migration.", - "placeholder": null - }, - { - "id": "cannot read API: {Error}. Aborting Migration", - "translation": "API를 읽을 수 없습니다: {Error}. 마이그레이션 중단", - "message": "cannot read API: {Error}. Aborting Migration", - "placeholder": null - }, - { - "id": "Error saving config to layer: {Error}. Aborting Migration", - "translation": "레이어에 구성을 저장하는 중 오류 발생: {Error}. 마이그레이션 중단", - "message": "Error saving config to layer: {Error}. Aborting Migration", - "placeholder": null - }, - { - "id": "Protocol Labs wants to improve the software you use. Tell the team you're using Curio.", - "translation": "Protocol Labs는 당신이 사용하는 소프트웨어를 개선하고 싶어합니다. Curio를 사용 중이라고 팀에 알려주세요.", - "message": "Protocol Labs wants to improve the software you use. 
Tell the team you're using Curio.", - "placeholder": null - }, - { - "id": "Select what you want to share with the Curio team.", - "translation": "Curio 팀과 공유하고 싶은 것을 선택하세요.", - "message": "Select what you want to share with the Curio team.", - "placeholder": null - }, - { - "id": "Individual Data: Miner ID, Curio version, net ({Mainnet} or {Testnet}). Signed.", - "translation": "개별 데이터: 마이너 ID, Curio 버전, 네트워크 ({Mainnet} 또는 {Testnet}). 서명됨.", - "message": "Individual Data: Miner ID, Curio version, net ({Mainnet} or {Testnet}). Signed.", - "placeholder": null - }, - { - "id": "Aggregate-Anonymous: version, net, and Miner power (bucketed).", - "translation": "집계-익명: 버전, 네트워크, 그리고 마이너 파워 (버킷).", - "message": "Aggregate-Anonymous: version, net, and Miner power (bucketed).", - "placeholder": null - }, - { - "id": "Hint: I am someone running Curio on net.", - "translation": "힌트: 네트워크에서 Curio를 실행 중인 사람입니다.", - "message": "Hint: I am someone running Curio on net.", - "placeholder": null - }, - { - "id": "Nothing.", - "translation": "아무것도 없습니다.", - "message": "Nothing.", - "placeholder": null - }, - { - "id": "Aborting remaining steps.", - "translation": "나머지 단계를 중단합니다.", - "message": "Aborting remaining steps.", - "placeholder": null - }, - { - "id": "Error connecting to lotus node: {Error}", - "translation": "로터스 노드에 연결하는 중 오류 발생: {Error}", - "message": "Error connecting to lotus node: {Error}", - "placeholder": null - }, - { - "id": "Error getting miner power: {Error}", - "translation": "마이너 파워를 가져오는 중 오류 발생: {Error}", - "message": "Error getting miner power: {Error}", - "placeholder": null - }, - { - "id": "Error marshalling message: {Error}", - "translation": "메시지를 마샬하는 중 오류 발생: {Error}", - "message": "Error marshalling message: {Error}", - "placeholder": null - }, - { - "id": "Error getting miner info: {Error}", - "translation": "마이너 정보를 가져오는 중 오류 발생: {Error}", - "message": "Error getting miner info: {Error}", - "placeholder": null - }, - { - "id": "Error signing 
message: {Error}", - "translation": "메시지 서명 중 오류 발생: {Error}", - "message": "Error signing message: {Error}", - "placeholder": null - }, - { - "id": "Error sending message: {Error}", - "translation": "메시지 전송 중 오류 발생: {Error}", - "message": "Error sending message: {Error}", - "placeholder": null - }, - { - "id": "Error sending message: Status {Status}, Message:", - "translation": "메시지 전송 중 오류 발생: 상태 {Status}, 메시지:", - "message": "Error sending message: Status {Status}, Message:", - "placeholder": null - }, - { - "id": "Message sent.", - "translation": "메시지가 전송되었습니다.", - "message": "Message sent.", - "placeholder": null - }, - { - "id": "Documentation:", - "translation": "문서:", - "message": "Documentation:", - "placeholder": null - }, - { - "id": "The '{Base}' layer stores common configuration. All curio instances can include it in their {__layers} argument.", - "translation": "'{Base}' 레이어에는 공통 구성이 저장됩니다. 모든 Curio 인스턴스는 {__layers} 인수에 포함시킬 수 있습니다.", - "message": "The '{Base}' layer stores common configuration. All curio instances can include it in their {__layers} argument.", - "placeholder": null - }, - { - "id": "You can add other layers for per-machine configuration changes.", - "translation": "기계별 구성 변경을 위해 다른 레이어를 추가할 수 있습니다.", - "message": "You can add other layers for per-machine configuration changes.", - "placeholder": null - }, - { - "id": "Join {Fil_curio_help} in Filecoin {Slack} for help.", - "translation": "도움을 위해 Filecoin {Slack}의 {Fil_curio_help}에 가입하세요.", - "message": "Join {Fil_curio_help} in Filecoin {Slack} for help.", - "placeholder": null - }, - { - "id": "Join {Fil_curio_dev} in Filecoin {Slack} to follow development and feedback!", - "translation": "개발과 피드백을 따르려면 Filecoin {Slack}의 {Fil_curio_dev}에 가입하세요!", - "message": "Join {Fil_curio_dev} in Filecoin {Slack} to follow development and feedback!", - "placeholder": null - }, - { - "id": "Want PoST redundancy? 
Run many Curio instances with the '{Post}' layer.", - "translation": "PoST 중복성이 필요하신가요? '{Post}' 레이어와 함께 여러 Curio 인스턴스를 실행하세요.", - "message": "Want PoST redundancy? Run many Curio instances with the '{Post}' layer.", - "placeholder": null - }, - { - "id": "Point your browser to your web GUI to complete setup with {Boost} and advanced featues.", - "translation": "브라우저를 웹 GUI로 이동하여 {Boost} 및 고급 기능으로 설정을 완료하세요.", - "message": "Point your browser to your web GUI to complete setup with {Boost} and advanced featues.", - "placeholder": null - }, - { - "id": "For SPs with multiple Miner IDs, run 1 migration per lotus-miner all to the same 1 database. The cluster will serve all Miner IDs.", - "translation": "여러 마이너 ID가 있는 SP의 경우 각 lotus-miner당 1회 마이그레이션을 동일한 1개의 데이터베이스로 모두 실행하세요. 클러스터는 모든 마이너 ID를 제공합니다.", - "message": "For SPs with multiple Miner IDs, run 1 migration per lotus-miner all to the same 1 database. The cluster will serve all Miner IDs.", - "placeholder": null - }, - { - "id": "Please start {Lotus_miner} now that database credentials are in {Toml}.", - "translation": "데이터베이스 자격 증명이 {Toml}에 있으므로 이제 {Lotus_miner}를 시작하세요.", - "message": "Please start {Lotus_miner} now that database credentials are in {Toml}.", - "placeholder": null - }, - { - "id": "Waiting for {Lotus_miner} to write sectors into Yugabyte.", - "translation": "{Lotus_miner}가 Yugabyte에 섹터를 기록하도록 대기 중입니다.", - "message": "Waiting for {Lotus_miner} to write sectors into Yugabyte.", - "placeholder": null - }, - { - "id": "The sectors are in the database. The database is ready for {Curio}.", - "translation": "섹터가 데이터베이스에 있습니다. 데이터베이스가 {Curio}를 위해 준비되었습니다.", - "message": "The sectors are in the database. 
The database is ready for {Curio}.", - "placeholder": null - }, - { - "id": "Now shut down lotus-miner and move the systems to {Curio}.", - "translation": "이제 lotus-miner를 종료하고 시스템을 {Curio}로 이동하세요.", - "message": "Now shut down lotus-miner and move the systems to {Curio}.", - "placeholder": null - }, - { - "id": "Press return to continue", - "translation": "계속하려면 리턴을 누르세요", - "message": "Press return to continue", - "placeholder": null - }, - { - "id": "Aborting migration.", - "translation": "마이그레이션 중단.", - "message": "Aborting migration.", - "placeholder": null - }, - { - "id": "Sectors verified. {I} sector locations found.", - "translation": "섹터가 확인되었습니다. {I}개의 섹터 위치를 찾았습니다.", - "message": "Sectors verified. {I} sector locations found.", - "placeholder": null - }, - { - "id": "Press return to update {Toml} with Yugabyte info. Backup the file now.", - "translation": "{Toml}을 Yugabyte 정보로 업데이트하려면 리턴을 누르세요. 지금 파일을 백업하세요.", - "message": "Press return to update {Toml} with Yugabyte info. Backup the file now.", - "placeholder": null - }, - { - "id": "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.", - "translation": "시작하려면 밀봉 파이프라인이 비어 있고 lotus-miner가 종료되었는지 확인하세요.", - "message": "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.", - "placeholder": null - }, - { - "id": "Enter the path to the configuration directory used by {Lotus_miner}", - "translation": "{Lotus_miner}에서 사용하는 구성 디렉터리 경로를 입력하세요.", - "message": "Enter the path to the configuration directory used by {Lotus_miner}", - "placeholder": null - }, - { - "id": "Step Complete: {Step}", - "translation": "단계 완료: {Step}", - "message": "Step Complete: {Step}", - "placeholder": null - }, - { - "id": "Configuration 'base' was updated to include this miner's address and its wallet setup.", - "translation": "이 마이너의 주소와 지갑 설정을 포함하도록 구성 'base'가 업데이트되었습니다.", - "message": "Configuration 'base' was updated to include this miner's address and its wallet setup.", - 
"placeholder": null - }, - { - "id": "Compare the configurations {Base} to {MinerAddresses0}. Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", - "translation": "구성 {Base}를 {MinerAddresses0}과 비교하세요. 지갑 주소 이외의 마이너 ID 사이의 변경 사항은 필요한 실행자를 위한 새로운 최소한의 레이어여야 합니다.", - "message": "Compare the configurations {Base} to {MinerAddresses0}. Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", - "placeholder": null - }, - { - "id": "Configuration 'base' was created to include this miner's address and its wallet setup.", - "translation": "이 마이너의 주소와 지갑 설정을 포함하도록 구성 'base'가 생성되었습니다.", - "message": "Configuration 'base' was created to include this miner's address and its wallet setup.", - "placeholder": null - }, - { - "id": "Layer {LayerName} created.", - "translation": "레이어 {LayerName}가 생성되었습니다.", - "message": "Layer {LayerName} created.", - "placeholder": null - }, - { - "id": "To work with the config: \\n", - "translation": "구성을 사용하려면: \\n", - "message": "To work with the config: \\n", - "placeholder": null - }, - { - "id": "To run Curio: With machine or cgroup isolation, use the command (with example layer selection):", - "translation": "Curio를 실행하려면: 기계 또는 cgroup 격리를 사용하여 다음 명령을 사용하세요 (예제 레이어 선택과 함께):", - "message": "To run Curio: With machine or cgroup isolation, use the command (with example layer selection):", - "placeholder": null - }, - { - "id": "Try the web interface with {__layersgui} for further guided improvements.", - "translation": "더 많은 안내를 위해 {__layersgui}를 사용하여 웹 인터페이스를 시도하세요.", - "message": "Try the web interface with {__layersgui} for further guided improvements.", - "placeholder": null - }, - { - "id": "Error connecting to lotus node: {Error} {Error_1}", - "translation": "lotus 노드에 연결하는 중 오류 발생: {Error} {Error_1}", - "message": "Error connecting to lotus node: {Error} {Error_1}", - "placeholder": null - }, - { - "id": "could not 
get API info for FullNode: {Err}", - "translation": "FullNode의 API 정보를 가져올 수 없습니다: {Err}", - "message": "could not get API info for FullNode: {Err}", - "placeholder": null - }, - { - "id": "Error getting token: {Error}", - "translation": "토큰을 가져오는 중 오류 발생: {Error}", - "message": "Error getting token: {Error}", - "placeholder": null - }, - { - "id": "Filecoin {Slack} channels: {Fil_curio_help} and {Fil_curio_dev}", - "translation": "Filecoin {Slack} 채널: {Fil_curio_help} 및 {Fil_curio_dev}", - "message": "Filecoin {Slack} channels: {Fil_curio_help} and {Fil_curio_dev}", - "placeholder": null - }, - { - "id": "Start multiple Curio instances with the '{Post}' layer to redundancy.", - "translation": "'{Post}' 레이어로 여러 Curio 인스턴스를 시작하여 중복성을 확보하세요.", - "message": "Start multiple Curio instances with the '{Post}' layer to redundancy.", - "placeholder": null - }, - { - "id": "One database can serve multiple miner IDs: Run a migration for each lotus-miner.", - "translation": "한 개의 데이터베이스는 여러 광부 ID를 제공할 수 있습니다: 각 lotus-miner에 대해 마이그레이션을 실행하세요.", - "message": "One database can serve multiple miner IDs: Run a migration for each lotus-miner.", - "placeholder": null - }, - { - "id": "Please start (or restart) {Lotus_miner} now that database credentials are in {Toml}.", - "translation": "데이터베이스 자격 증명이 {Toml}에 입력되었으므로 지금 {Lotus_miner}을 시작하거나 다시 시작하세요.", - "message": "Please start (or restart) {Lotus_miner} now that database credentials are in {Toml}.", - "placeholder": null - }, - { - "id": "Error interpreting miner ID: {Error}: ID: {String}", - "translation": "광부 ID를 해석하는 중 오류 발생: {Error}: ID: {String}", - "message": "Error interpreting miner ID: {Error}: ID: {String}", - "placeholder": null - }, - { - "id": "Enabling Sector Indexing in the database.", - "translation": "데이터베이스에서 Sector Indexing을 활성화합니다.", - "message": "Enabling Sector Indexing in the database.", - "placeholder": null - }, - { - "id": "Error expanding path: {Error}", - "translation": "경로를 확장하는 중 오류 발생: {Error}", - 
"message": "Error expanding path: {Error}", - "placeholder": null - }, - { - "id": "Could not create repo from directory: {Error}. Aborting migration", - "translation": "디렉토리에서 저장소를 생성할 수 없습니다: {Error}. 마이그레이션을 중단합니다.", - "message": "Could not create repo from directory: {Error}. Aborting migration", - "placeholder": null - }, - { - "id": "Could not lock miner repo. Your miner must be stopped: {Error}\n Aborting migration", - "translation": "광부 저장소를 잠금 해제할 수 없습니다. 귀하의 광부를 중지해야 합니다: {Error}\n 마이그레이션을 중단합니다.", - "message": "Could not lock miner repo. Your miner must be stopped: {Error}\n Aborting migration", - "placeholder": null - }, - { - "id": "To work with the config:", - "translation": "구성 파일을 사용하려면:", - "message": "To work with the config:", - "placeholder": null - }, - { - "id": "This interactive tool creates a new miner actor and creates the basic configuration layer for it.", - "translation": "이 대화형 도구는 새로운 채굴자 액터를 생성하고 그에 대한 기본 구성 레이어를 생성합니다.", - "message": "This interactive tool creates a new miner actor and creates the basic configuration layer for it.", - "placeholder": null - }, - { - "id": "This process is partially idempotent. Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster {Arg_1}' to finish the configuration.", - "translation": "이 프로세스는 부분적으로 idempotent합니다. 새로운 채굴자 액터가 생성되었고 후속 단계가 실패하면 사용자는 구성을 완료하기 위해 'curio config new-cluster {Arg_1}'를 실행해야 합니다.", - "message": "This process is partially idempotent. 
Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster {Arg_1}' to finish the configuration.", - "placeholder": null - }, - { - "id": "Choose if you with to create a new miner or migrate from existing Lotus-Miner", - "translation": "새 채굴자를 생성할지 기존의 Lotus-Miner에서 이전할지 선택하세요.", - "message": "Choose if you with to create a new miner or migrate from existing Lotus-Miner", - "placeholder": null - }, - { - "id": "Migrate from existing Lotus-Miner", - "translation": "기존의 Lotus-Miner에서 이전하기", - "message": "Migrate from existing Lotus-Miner", - "placeholder": null - }, - { - "id": "Create a new miner", - "translation": "새로운 채굴자 생성", - "message": "Create a new miner", - "placeholder": null - }, - { - "id": "New Miner initialization complete.", - "translation": "새로운 채굴자 초기화 완료.", - "message": "New Miner initialization complete.", - "placeholder": null - }, - { - "id": "Migrating lotus-miner config.toml to Curio in-database configuration.", - "translation": "lotus-miner config.toml을 Curio의 데이터베이스 구성으로 이전 중입니다.", - "message": "Migrating lotus-miner config.toml to Curio in-database configuration.", - "placeholder": null - }, - { - "id": "Error getting API: {Error}", - "translation": "API 가져오기 오류: {Error}", - "message": "Error getting API: {Error}", - "placeholder": null - }, - { - "id": "The Curio team wants to improve the software you use. Tell the team you're using {Curio}.", - "translation": "Curio 팀은 당신이 사용하는 소프트웨어를 개선하고자 합니다. 팀에게 {Curio}를 사용 중이라고 알려주세요.", - "message": "The Curio team wants to improve the software you use. Tell the team you're using {Curio}.", - "placeholder": null - }, - { - "id": "Individual Data: Miner ID, Curio version, chain ({Mainnet} or {Calibration}). Signed.", - "translation": "개별 데이터: 채굴자 ID, Curio 버전, 체인 ({Mainnet} 또는 {Calibration}). 서명됨.", - "message": "Individual Data: Miner ID, Curio version, chain ({Mainnet} or {Calibration}). 
Signed.", - "placeholder": null - }, - { - "id": "Aggregate-Anonymous: version, chain, and Miner power (bucketed).", - "translation": "집계-익명: 버전, 체인, 및 채굴자 파워 (버킷).", - "message": "Aggregate-Anonymous: version, chain, and Miner power (bucketed).", - "placeholder": null - }, - { - "id": "Hint: I am someone running Curio on whichever chain.", - "translation": "힌트: 나는 어떤 체인에서든 Curio를 실행 중인 사람입니다.", - "message": "Hint: I am someone running Curio on whichever chain.", - "placeholder": null - }, - { - "id": "Press return to update {Toml} with Yugabyte info. A Backup file will be written to that folder before changes are made.", - "translation": "{Toml}을 Yugabyte 정보로 업데이트하려면 리턴 키를 누르세요. 변경 사항을 적용하기 전에 해당 폴더에 백업 파일이 작성됩니다.", - "message": "Press return to update {Toml} with Yugabyte info. A Backup file will be written to that folder before changes are made.", - "placeholder": null - }, - { - "id": "Error creating backup file: {Error}", - "translation": "백업 파일 생성 오류: {Error}", - "message": "Error creating backup file: {Error}", - "placeholder": null - }, - { - "id": "Error reading config.toml: {Error}", - "translation": "config.toml 읽기 오류: {Error}", - "message": "Error reading config.toml: {Error}", - "placeholder": null - }, - { - "id": "Error writing backup file: {Error}", - "translation": "백업 파일 쓰기 오류: {Error}", - "message": "Error writing backup file: {Error}", - "placeholder": null - }, - { - "id": "Error closing backup file: {Error}", - "translation": "백업 파일 닫기 오류: {Error}", - "message": "Error closing backup file: {Error}", - "placeholder": null - }, - { - "id": "Initializing a new miner actor.", - "translation": "새 채굴자 액터 초기화 중.", - "message": "Initializing a new miner actor.", - "placeholder": null - }, - { - "id": "Enter the info to create a new miner", - "translation": "새 채굴자를 생성하기 위한 정보 입력", - "message": "Enter the info to create a new miner", - "placeholder": null - }, - { - "id": "Owner Address: {String}", - "translation": "소유자 주소: {String}", - "message": 
"Owner Address: {String}", - "placeholder": null - }, - { - "id": "Worker Address: {String}", - "translation": "작업자 주소: {String}", - "message": "Worker Address: {String}", - "placeholder": null - }, - { - "id": "Sender Address: {String}", - "translation": "송신자 주소: {String}", - "message": "Sender Address: {String}", - "placeholder": null - }, - { - "id": "Sector Size: {Ssize}", - "translation": "섹터 크기: {Ssize}", - "message": "Sector Size: {Ssize}", - "placeholder": null - }, - { - "id": "Confidence: {Confidence}", - "translation": "신뢰도: {Confidence}", - "message": "Confidence: {Confidence}", - "placeholder": null - }, - { - "id": "Continue to verify the addresses and create a new miner actor.", - "translation": "주소를 확인하고 새 채굴자 액터를 생성하려면 계속 진행하세요.", - "message": "Continue to verify the addresses and create a new miner actor.", - "placeholder": null - }, - { - "id": "Miner creation error occurred: {Error}", - "translation": "채굴자 생성 오류 발생: {Error}", - "message": "Miner creation error occurred: {Error}", - "placeholder": null - }, - { - "id": "Enter the owner address", - "translation": "소유자 주소 입력", - "message": "Enter the owner address", - "placeholder": null - }, - { - "id": "No address provided", - "translation": "주소가 제공되지 않았습니다", - "message": "No address provided", - "placeholder": null - }, - { - "id": "Failed to parse the address: {Error}", - "translation": "주소 구문 분석 실패: {Error}", - "message": "Failed to parse the address: {Error}", - "placeholder": null - }, - { - "id": "Enter {Stringworker_senderi_1} address", - "translation": "{Stringworker_senderi_1} 주소 입력", - "message": "Enter {Stringworker_senderi_1} address", - "placeholder": null - }, - { - "id": "Enter the sector size", - "translation": "섹터 크기 입력", - "message": "Enter the sector size", - "placeholder": null - }, - { - "id": "Failed to parse sector size: {Error}", - "translation": "섹터 크기 구문 분석 실패: {Error}", - "message": "Failed to parse sector size: {Error}", - "placeholder": null - }, - { - "id": "Enter 
the confidence", - "translation": "신뢰도 입력", - "message": "Enter the confidence", - "placeholder": null - }, - { - "id": "Failed to parse confidence: {Error}", - "translation": "신뢰도 구문 분석 실패: {Error}", - "message": "Failed to parse confidence: {Error}", - "placeholder": null - }, - { - "id": "Failed to create the miner actor: {Error}", - "translation": "채굴자 액터 생성 실패: {Error}", - "message": "Failed to create the miner actor: {Error}", - "placeholder": null - }, - { - "id": "Miner {String} created successfully", - "translation": "{String} 채굴자가 성공적으로 생성되었습니다", - "message": "Miner {String} created successfully", - "placeholder": null - }, - { - "id": "Cannot reach the DB: {Error}", - "translation": "데이터베이스에 연결할 수 없습니다: {Error}", - "message": "Cannot reach the DB: {Error}", - "placeholder": null - }, - { - "id": "Error connecting to full node API: {Error}", - "translation": "풀 노드 API에 연결하는 중 오류 발생: {Error}", - "message": "Error connecting to full node API: {Error}", - "placeholder": null - }, - { - "id": "Pre-initialization steps complete", - "translation": "사전 초기화 단계 완료", - "message": "Pre-initialization steps complete", - "placeholder": null - }, - { - "id": "Failed to random bytes for secret: {Error}", - "translation": "비밀을 위한 랜덤 바이트 생성 실패: {Error}", - "message": "Failed to random bytes for secret: {Error}", - "placeholder": null - }, - { - "id": "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster {String}' to finish the configuration", - "translation": "마이너 생성은 idempotent하지 않으므로 가이드 설정을 다시 실행하지 마십시오. 구성을 완료하려면 'curio config new-cluster {String}'를 실행해야 합니다.", - "message": "Please do not run guided-setup again as miner creation is not idempotent. 
You need to run 'curio config new-cluster {String}' to finish the configuration", - "placeholder": null - }, - { - "id": "Failed to verify the auth token from daemon node: {Error}", - "translation": "데몬 노드로부터 인증 토큰을 확인하는 중 오류 발생: {Error}", - "message": "Failed to verify the auth token from daemon node: {Error}", - "placeholder": null - }, - { - "id": "Failed to encode the config: {Error}", - "translation": "구성을 인코딩하는 중 오류 발생: {Error}", - "message": "Failed to encode the config: {Error}", - "placeholder": null - }, - { - "id": "Failed to generate default config: {Error}", - "translation": "기본 구성 생성 실패: {Error}", - "message": "Failed to generate default config: {Error}", - "placeholder": null - }, - { - "id": "Failed to inset 'base' config layer in database: {Error}", - "translation": "데이터베이스에 'base' 구성 레이어 삽입 실패: {Error}", - "message": "Failed to inset 'base' config layer in database: {Error}", - "placeholder": null - }, - { - "id": "Failed to inset '{String}' config layer in database: {Error}", - "translation": "데이터베이스에 '{String}' 구성 레이어 삽입 실패: {Error}", - "message": "Failed to inset '{String}' config layer in database: {Error}", - "placeholder": null - }, - { - "id": "New Curio configuration layer '{String}' created", - "translation": "새로운 Curio 구성 레이어 '{String}'가 생성되었습니다", - "message": "New Curio configuration layer '{String}' created", - "placeholder": null - }, - { - "id": "The Curio team wants to improve the software you use. Tell the team you're using `{Curio}`.", - "translation": "Curio 팀은 당신이 사용하는 소프트웨어를 개선하고자 합니다. 팀에게 `{Curio}`를 사용 중이라고 알려주세요.", - "message": "The Curio team wants to improve the software you use. 
Tell the team you're using `{Curio}`.", - "placeholder": null - }, - { - "id": "Confidence epochs: {Confidence}", - "translation": "신뢰 에포크: {Confidence}", - "message": "Confidence epochs: {Confidence}", - "placeholder": null - }, - { - "id": "Failed to generate random bytes for secret: {Error}", - "translation": "비밀번호를 위한 랜덤 바이트 생성에 실패했습니다: {Error}", - "message": "Failed to generate random bytes for secret: {Error}", - "placeholder": null - }, - { - "id": "Failed to get API info for FullNode: {Err}", - "translation": "FullNode에 대한 API 정보를 가져오는 데 실패했습니다: {Err}", - "message": "Failed to get API info for FullNode: {Err}", - "placeholder": null - }, - { - "id": "Failed to insert 'base' config layer in database: {Error}", - "translation": "데이터베이스에 'base' 구성 레이어를 삽입하는 데 실패했습니다: {Error}", - "message": "Failed to insert 'base' config layer in database: {Error}", - "placeholder": null - }, - { - "id": "Failed to insert '{String}' config layer in database: {Error}", - "translation": "데이터베이스에 '{String}' 구성 레이어를 삽입하는 데 실패했습니다: {Error}", - "message": "Failed to insert '{String}' config layer in database: {Error}", - "placeholder": null - }, - { - "id": "This process is partially idempotent. Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster \u003c miner ID \u003e' to finish the configuration.", - "translation": "이 프로세스는 부분적으로 항등원적입니다. 새로운 채굴자 액터가 생성되었고 후속 단계가 실패하는 경우 사용자는 구성을 완료하기 위해 'curio config new-cluster \u003c 채굴자 ID \u003e'를 실행해야 합니다.", - "message": "This process is partially idempotent. 
Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster \u003c miner ID \u003e' to finish the configuration.", - "placeholder": null - }, - { - "id": "Confidence epochs", - "translation": "신뢰 에포크", - "message": "Confidence epochs", - "placeholder": null - }, - { - "id": "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'", - "translation": "신뢰성 향상을 위한 중복성 사용: 적어도 post 레이어를 사용하여 여러 대의 기계를 시작하십시오: 'curio run --layers=post'", - "message": "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'", - "placeholder": null - }, - { - "id": "I want to:", - "translation": "나는 원한다:", - "message": "I want to:", - "placeholder": null - }, - { - "id": "Configuration 'base' was updated to include this miner's address", - "translation": "이 마이너 주소를 포함한 구성 'base'가 업데이트되었습니다.", - "message": "Configuration 'base' was updated to include this miner's address", - "placeholder": null - }, - { - "id": "Cannot load base config: {Error}", - "translation": "기본 구성을 불러올 수 없습니다: {Error}", - "message": "Cannot load base config: {Error}", - "placeholder": null - }, - { - "id": "Failed to load base config: {Error}", - "translation": "기본 구성을 로드하는 데 실패했습니다: {Error}", - "message": "Failed to load base config: {Error}", - "placeholder": null - }, - { - "id": "Failed to regenerate base config: {Error}", - "translation": "기본 구성을 재생성하는 데 실패했습니다: {Error}", - "message": "Failed to regenerate base config: {Error}", - "placeholder": null - }, - { - "id": "Failed to load base config from database: {Error}", - "translation": "데이터베이스에서 기본 구성을 로드하는 데 실패했습니다: {Error}", - "message": "Failed to load base config from database: {Error}", - "placeholder": null - }, - { - "id": "Failed to parse base config: {Error}", - "translation": "기본 구성을 구문 분석하는 데 실패했습니다: {Error}", - "message": "Failed to parse base config: {Error}", - 
"placeholder": null - }, - { - "id": "Try the web interface with {Rendercurio_run___layersgui} for further guided improvements.", - "translation": "{Rendercurio_run___layersgui}를 사용하여 웹 인터페이스를 시도하고 더 나은 안내된 개선을 진행하세요.", - "message": "Try the web interface with {Rendercurio_run___layersgui} for further guided improvements.", - "placeholder": null - }, - { - "id": "Now shut down lotus-miner and lotus-worker and use run {Rendercurio_run} instead.", - "translation": "이제 lotus-miner와 lotus-worker를 종료하고 {Rendercurio_run}을 실행하세요.", - "message": "Now shut down lotus-miner and lotus-worker and use run {Rendercurio_run} instead.", - "placeholder": null - }, - { - "id": "Configuration 'base' was updated to include this miner's address ({MinerAddress}) and its wallet setup.", - "translation": "기본 설정 'base'가 이 마이너의 주소({MinerAddress}) 및 지갑 설정을 포함하도록 업데이트되었습니다.", - "message": "Configuration 'base' was updated to include this miner's address ({MinerAddress}) and its wallet setup.", - "placeholder": null - }, - { - "id": "Configuration 'base' was created to resemble this lotus-miner's config.toml .", - "translation": "'base' 설정이 이 lotus-miner의 config.toml과 유사하게 만들어졌습니다.", - "message": "Configuration 'base' was created to resemble this lotus-miner's config.toml .", - "placeholder": null - }, - { - "id": "Unmigratable sectors found. Do you want to continue?", - "translation": "이동할 수 없는 섹터가 발견되었습니다. 계속하시겠습니까?", - "message": "Unmigratable sectors found. 
Do you want to continue?", - "placeholder": null - }, - { - "id": "Yes, continue", - "translation": "예, 계속", - "message": "Yes, continue", - "placeholder": null - }, - { - "id": "No, abort", - "translation": "아니오, 중단", - "message": "No, abort", - "placeholder": null - }, - { - "id": "Migrating metadata for {NSectors} sectors.", - "translation": "{NSectors} 섹터의 메타데이터를 이동 중입니다.", - "message": "Migrating metadata for {NSectors} sectors.", - "placeholder": null - } - ] -} \ No newline at end of file diff --git a/cmd/curio/internal/translations/locales/ko/out.gotext.json b/cmd/curio/internal/translations/locales/ko/out.gotext.json deleted file mode 100644 index 8e0014cd46d..00000000000 --- a/cmd/curio/internal/translations/locales/ko/out.gotext.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "language": "ko", - "messages": [] - } diff --git a/cmd/curio/internal/translations/locales/zh/messages.gotext.json b/cmd/curio/internal/translations/locales/zh/messages.gotext.json deleted file mode 100644 index f2121448425..00000000000 --- a/cmd/curio/internal/translations/locales/zh/messages.gotext.json +++ /dev/null @@ -1,1100 +0,0 @@ -{ - "language": "zh", - "messages": [ - { - "id": "This interactive tool will walk you through migration of Curio.\nPress Ctrl+C to exit at any time.", - "message": "This interactive tool will walk you through migration of Curio.\nPress Ctrl+C to exit at any time.", - "translation": "此互动工具将引导您完成Curio的迁移。\n随时按Ctrl+C退出。" - }, - { - "id": "This tool confirms each action it does.", - "message": "This tool confirms each action it does.", - "translation": "此工具确认其执行的每个操作。" - }, - { - "id": "Ctrl+C pressed in Terminal", - "message": "Ctrl+C pressed in Terminal", - "translation": "在终端中按下Ctrl+C" - }, - { - "id": "Verifying Sectors exist in Yugabyte.", - "message": "Verifying Sectors exist in Yugabyte.", - "translation": "正在验证Yugabyte中的扇区是否存在。" - }, - { - "id": "Error verifying sectors: {Error}", - "message": "Error verifying sectors: {Error}", - "translation": 
"验证扇区时出错:{Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Sectors verified. {I} sectors found.", - "message": "Sectors verified. {I} sectors found.", - "translation": "已验证扇区。找到了{I}个扇区。", - "placeholders": [ - { - "id": "I", - "string": "%[1]d", - "type": "[]int", - "underlyingType": "[]int", - "argNum": 1, - "expr": "i" - } - ] - }, - { - "id": "Never remove the database info from the config.toml for lotus-miner as it avoids double PoSt.", - "message": "Never remove the database info from the config.toml for lotus-miner as it avoids double PoSt.", - "translation": "从config.toml中永远不要删除lotus-miner的数据库信息,因为它避免了双PoSt。" - }, - { - "id": "Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)", - "message": "Enter the info to connect to your Yugabyte database installation (https://download.yugabyte.com/)", - "translation": "输入连接到您的Yugabyte数据库安装的信息(https://download.yugabyte.com/)" - }, - { - "id": "Host: {Hosts_}", - "message": "Host: {Hosts_}", - "translation": "主机:{Hosts_}", - "placeholders": [ - { - "id": "Hosts_", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "strings.Join(harmonycfg.Hosts, \",\")" - } - ] - }, - { - "id": "Port: {Port}", - "message": "Port: {Port}", - "translation": "端口:{Port}", - "placeholders": [ - { - "id": "Port", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonycfg.Port" - } - ] - }, - { - "id": "Username: {Username}", - "message": "Username: {Username}", - "translation": "用户名:{Username}", - "placeholders": [ - { - "id": "Username", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonycfg.Username" - } - ] - }, - { - "id": "Password: {Password}", - "message": "Password: {Password}", - "translation": 
"密码:{Password}", - "placeholders": [ - { - "id": "Password", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonycfg.Password" - } - ] - }, - { - "id": "Database: {Database}", - "message": "Database: {Database}", - "translation": "数据库:{Database}", - "placeholders": [ - { - "id": "Database", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "harmonycfg.Database" - } - ] - }, - { - "id": "Continue to connect and update schema.", - "message": "Continue to connect and update schema.", - "translation": "继续连接和更新架构。" - }, - { - "id": "Database config error occurred, abandoning migration: {Error}", - "message": "Database config error occurred, abandoning migration: {Error}", - "translation": "发生数据库配置错误,放弃迁移:{Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Enter the Yugabyte database host(s)", - "message": "Enter the Yugabyte database host(s)", - "translation": "输入Yugabyte数据库主机(S)" - }, - { - "id": "No host provided", - "message": "No host provided", - "translation": "未提供主机" - }, - { - "id": "Enter the Yugabyte database {Stringport_username_password_databasei_1}", - "message": "Enter the Yugabyte database {Stringport_username_password_databasei_1}", - "translation": "输入Yugabyte数据库 {Stringport_username_password_databasei_1}", - "placeholders": [ - { - "id": "Stringport_username_password_databasei_1", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "[]string{\"port\", \"username\", \"password\", \"database\"}[i-1]" - } - ] - }, - { - "id": "No value provided", - "message": "No value provided", - "translation": "未提供值" - }, - { - "id": "Error connecting to Yugabyte database: {Error}", - "message": "Error connecting to Yugabyte database: {Error}", - "translation": "连接到Yugabyte数据库时出错:{Error}", - 
"placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Connected to Yugabyte. Schema is current.", - "message": "Connected to Yugabyte. Schema is current.", - "translation": "已连接到Yugabyte。模式是当前的。" - }, - { - "id": "Error encoding config.toml: {Error}", - "message": "Error encoding config.toml: {Error}", - "translation": "编码config.toml时出错:{Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Error reading filemode of config.toml: {Error}", - "message": "Error reading filemode of config.toml: {Error}", - "translation": "读取config.toml文件模式时出错:{Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Error writing config.toml: {Error}", - "message": "Error writing config.toml: {Error}", - "translation": "写入config.toml时出错:{Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Restart Lotus Miner.", - "message": "Restart Lotus Miner.", - "translation": "重新启动Lotus Miner。" - }, - { - "id": "Connected to Yugabyte", - "message": "Connected to Yugabyte", - "translation": "已连接到Yugabyte" - }, - { - "id": "Select the location of your lotus-miner config directory?", - "message": "Select the location of your lotus-miner config directory?", - "translation": "选择您的lotus-miner配置目录的位置?" 
- }, - { - "id": "Other", - "message": "Other", - "translation": "其他" - }, - { - "id": "Enter the path to the configuration directory used by lotus-miner", - "message": "Enter the path to the configuration directory used by lotus-miner", - "translation": "输入lotus-miner使用的配置目录的路径" - }, - { - "id": "No path provided, abandoning migration", - "message": "No path provided, abandoning migration", - "translation": "未提供路径,放弃迁移" - }, - { - "id": "Cannot read the config.toml file in the provided directory, Error: {Error}", - "message": "Cannot read the config.toml file in the provided directory, Error: {Error}", - "translation": "无法读取提供的目录中的config.toml文件,错误:{Error}", - "placeholders": [ - { - "id": "Error", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "err.Error()" - } - ] - }, - { - "id": "Read Miner Config", - "message": "Read Miner Config", - "translation": "读取矿工配置" - }, - { - "id": "Completed Step: {Step}", - "message": "Completed Step: {Step}", - "translation": "完成步骤:{Step}", - "placeholders": [ - { - "id": "Step", - "string": "%[1]s", - "type": "string", - "underlyingType": "string", - "argNum": 1, - "expr": "step" - } - ] - }, - { - "id": "This interactive tool migrates lotus-miner to Curio in 5 minutes.", - "translation": "这个交互式工具可以在5分钟内将lotus-miner迁移到Curio。", - "message": "This interactive tool migrates lotus-miner to Curio in 5 minutes.", - "placeholder": null - }, - { - "id": "Each step needs your confirmation and can be reversed. Press Ctrl+C to exit at any time.", - "translation": "每一步都需要您的确认,并且可以撤销。随时按Ctrl+C退出。", - "message": "Each step needs your confirmation and can be reversed. 
Press Ctrl+C to exit at any time.", - "placeholder": null - }, - { - "id": "Use the arrow keys to navigate: ↓ ↑ → ←", - "translation": "使用箭头键进行导航:↓ ↑ → ←", - "message": "Use the arrow keys to navigate: ↓ ↑ → ←", - "placeholder": null - }, - { - "id": "Lotus-Miner to Curio Migration.", - "translation": "Lotus-Miner到Curio迁移。", - "message": "Lotus-Miner to Curio Migration.", - "placeholder": null - }, - { - "id": "Try the web interface with for further guided improvements.", - "translation": "尝试使用网页界面进行进一步的指导改进。", - "message": "Try the web interface with for further guided improvements.", - "placeholder": null - }, - { - "id": "You can now migrate your market node ({Boost}), if applicable.", - "translation": "如果适用,您现在可以迁移您的市场节点({Boost})。", - "message": "You can now migrate your market node ({Boost}), if applicable.", - "placeholder": null - }, - { - "id": "Migrating config.toml to database.", - "translation": "正在将config.toml迁移到数据库。", - "message": "Migrating config.toml to database.", - "placeholder": null - }, - { - "id": "Error reading from database: {Error}. Aborting Migration.", - "translation": "读取数据库时出错:{Error}。正在中止迁移。", - "message": "Error reading from database: {Error}. Aborting Migration.", - "placeholder": null - }, - { - "id": "cannot read API: {Error}. Aborting Migration", - "translation": "无法读取API:{Error}。正在中止迁移", - "message": "cannot read API: {Error}. Aborting Migration", - "placeholder": null - }, - { - "id": "Error saving config to layer: {Error}. Aborting Migration", - "translation": "保存配置到层时出错:{Error}。正在中止迁移", - "message": "Error saving config to layer: {Error}. Aborting Migration", - "placeholder": null - }, - { - "id": "Protocol Labs wants to improve the software you use. Tell the team you're using Curio.", - "translation": "Protocol Labs希望改进您使用的软件。告诉团队您正在使用Curio。", - "message": "Protocol Labs wants to improve the software you use. 
Tell the team you're using Curio.", - "placeholder": null - }, - { - "id": "Select what you want to share with the Curio team.", - "translation": "选择您想与Curio团队分享的内容。", - "message": "Select what you want to share with the Curio team.", - "placeholder": null - }, - { - "id": "Individual Data: Miner ID, Curio version, net ({Mainnet} or {Testnet}). Signed.", - "translation": "个人数据:矿工ID、Curio版本、网络({Mainnet}或{Testnet})。已签名。", - "message": "Individual Data: Miner ID, Curio version, net ({Mainnet} or {Testnet}). Signed.", - "placeholder": null - }, - { - "id": "Aggregate-Anonymous: version, net, and Miner power (bucketed).", - "translation": "聚合-匿名:版本、网络和矿工功率(分桶)。", - "message": "Aggregate-Anonymous: version, net, and Miner power (bucketed).", - "placeholder": null - }, - { - "id": "Hint: I am someone running Curio on net.", - "translation": "提示:我是在网络上运行Curio的人。", - "message": "Hint: I am someone running Curio on net.", - "placeholder": null - }, - { - "id": "Nothing.", - "translation": "没有。", - "message": "Nothing.", - "placeholder": null - }, - { - "id": "Aborting remaining steps.", - "translation": "中止剩余步骤。", - "message": "Aborting remaining steps.", - "placeholder": null - }, - { - "id": "Error connecting to lotus node: {Error}", - "translation": "连接到莲花节点时出错:{Error}", - "message": "Error connecting to lotus node: {Error}", - "placeholder": null - }, - { - "id": "Error getting miner power: {Error}", - "translation": "获取矿工功率时出错:{Error}", - "message": "Error getting miner power: {Error}", - "placeholder": null - }, - { - "id": "Error marshalling message: {Error}", - "translation": "整理消息时出错:{Error}", - "message": "Error marshalling message: {Error}", - "placeholder": null - }, - { - "id": "Error getting miner info: {Error}", - "translation": "获取矿工信息时出错:{Error}", - "message": "Error getting miner info: {Error}", - "placeholder": null - }, - { - "id": "Error signing message: {Error}", - "translation": "签署消息时出错:{Error}", - "message": "Error signing message: {Error}", - 
"placeholder": null - }, - { - "id": "Error sending message: {Error}", - "translation": "发送消息时出错:{Error}", - "message": "Error sending message: {Error}", - "placeholder": null - }, - { - "id": "Error sending message: Status {Status}, Message:", - "translation": "发送消息时出错:状态{Status},消息:", - "message": "Error sending message: Status {Status}, Message:", - "placeholder": null - }, - { - "id": "Message sent.", - "translation": "消息已发送。", - "message": "Message sent.", - "placeholder": null - }, - { - "id": "Documentation:", - "translation": "文档:", - "message": "Documentation:", - "placeholder": null - }, - { - "id": "The '{Base}' layer stores common configuration. All curio instances can include it in their {__layers} argument.", - "translation": "'{Base}'层存储通用配置。所有Curio实例都可以在其{__layers}参数中包含它。", - "message": "The '{Base}' layer stores common configuration. All curio instances can include it in their {__layers} argument.", - "placeholder": null - }, - { - "id": "You can add other layers for per-machine configuration changes.", - "translation": "您可以添加其他层进行每台机器的配置更改。", - "message": "You can add other layers for per-machine configuration changes.", - "placeholder": null - }, - { - "id": "Join {Fil_curio_help} in Filecoin {Slack} for help.", - "translation": "加入Filecoin {Slack}中的{Fil_curio_help}寻求帮助。", - "message": "Join {Fil_curio_help} in Filecoin {Slack} for help.", - "placeholder": null - }, - { - "id": "Join {Fil_curio_dev} in Filecoin {Slack} to follow development and feedback!", - "translation": "加入Filecoin {Slack}中的{Fil_curio_dev}来跟踪开发和反馈!", - "message": "Join {Fil_curio_dev} in Filecoin {Slack} to follow development and feedback!", - "placeholder": null - }, - { - "id": "Want PoST redundancy? Run many Curio instances with the '{Post}' layer.", - "translation": "需要PoST冗余?使用'{Post}'层运行多个Curio实例。", - "message": "Want PoST redundancy? 
Run many Curio instances with the '{Post}' layer.", - "placeholder": null - }, - { - "id": "Point your browser to your web GUI to complete setup with {Boost} and advanced featues.", - "translation": "将您的浏览器指向您的网络GUI,以使用{Boost}和高级功能完成设置。", - "message": "Point your browser to your web GUI to complete setup with {Boost} and advanced featues.", - "placeholder": null - }, - { - "id": "For SPs with multiple Miner IDs, run 1 migration per lotus-miner all to the same 1 database. The cluster will serve all Miner IDs.", - "translation": "对于具有多个矿工ID的SP,针对所有lotus-miner运行1次迁移到同一个数据库。集群将服务所有矿工ID。", - "message": "For SPs with multiple Miner IDs, run 1 migration per lotus-miner all to the same 1 database. The cluster will serve all Miner IDs.", - "placeholder": null - }, - { - "id": "Please start {Lotus_miner} now that database credentials are in {Toml}.", - "translation": "现在数据库凭证在{Toml}中,请启动{Lotus_miner}。", - "message": "Please start {Lotus_miner} now that database credentials are in {Toml}.", - "placeholder": null - }, - { - "id": "Waiting for {Lotus_miner} to write sectors into Yugabyte.", - "translation": "等待{Lotus_miner}将扇区写入Yugabyte。", - "message": "Waiting for {Lotus_miner} to write sectors into Yugabyte.", - "placeholder": null - }, - { - "id": "The sectors are in the database. The database is ready for {Curio}.", - "translation": "扇区在数据库中。数据库已准备好用于{Curio}。", - "message": "The sectors are in the database. 
The database is ready for {Curio}.", - "placeholder": null - }, - { - "id": "Now shut down lotus-miner and move the systems to {Curio}.", - "translation": "现在关闭lotus-miner并将系统移至{Curio}。", - "message": "Now shut down lotus-miner and move the systems to {Curio}.", - "placeholder": null - }, - { - "id": "Press return to continue", - "translation": "按回车继续", - "message": "Press return to continue", - "placeholder": null - }, - { - "id": "Aborting migration.", - "translation": "中止迁移。", - "message": "Aborting migration.", - "placeholder": null - }, - { - "id": "Sectors verified. {I} sector locations found.", - "translation": "扇区已验证。发现了{I}个扇区位置。", - "message": "Sectors verified. {I} sector locations found.", - "placeholder": null - }, - { - "id": "Press return to update {Toml} with Yugabyte info. Backup the file now.", - "translation": "按回车更新{Toml}以获取Yugabyte信息。现在备份文件。", - "message": "Press return to update {Toml} with Yugabyte info. Backup the file now.", - "placeholder": null - }, - { - "id": "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.", - "translation": "开始之前,请确保您的密封管道已排空并关闭lotus-miner。", - "message": "To start, ensure your sealing pipeline is drained and shut-down lotus-miner.", - "placeholder": null - }, - { - "id": "Enter the path to the configuration directory used by {Lotus_miner}", - "translation": "输入{Lotus_miner}使用的配置目录的路径", - "message": "Enter the path to the configuration directory used by {Lotus_miner}", - "placeholder": null - }, - { - "id": "Step Complete: {Step}", - "translation": "步骤完成:{Step}", - "message": "Step Complete: {Step}", - "placeholder": null - }, - { - "id": "Configuration 'base' was updated to include this miner's address and its wallet setup.", - "translation": "配置'base'已更新,包含了这个矿工的地址和其钱包设置。", - "message": "Configuration 'base' was updated to include this miner's address and its wallet setup.", - "placeholder": null - }, - { - "id": "Compare the configurations {Base} to {MinerAddresses0}. 
Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", - "translation": "比较配置{Base}和{MinerAddresses0}。矿工ID之间除了钱包地址的变化应该是需要的运行者的一个新的、最小的层。", - "message": "Compare the configurations {Base} to {MinerAddresses0}. Changes between the miner IDs other than wallet addreses should be a new, minimal layer for runners that need it.", - "placeholder": null - }, - { - "id": "Configuration 'base' was created to include this miner's address and its wallet setup.", - "translation": "配置'base'已创建,包括了这个矿工的地址和其钱包设置。", - "message": "Configuration 'base' was created to include this miner's address and its wallet setup.", - "placeholder": null - }, - { - "id": "Layer {LayerName} created.", - "translation": "层{LayerName}已创建。", - "message": "Layer {LayerName} created.", - "placeholder": null - }, - { - "id": "To work with the config: \\n", - "translation": "要使用配置:\\n", - "message": "To work with the config: \\n", - "placeholder": null - }, - { - "id": "To run Curio: With machine or cgroup isolation, use the command (with example layer selection):", - "translation": "运行Curio:使用机器或cgroup隔离,使用命令(附带示例层选择):", - "message": "To run Curio: With machine or cgroup isolation, use the command (with example layer selection):", - "placeholder": null - }, - { - "id": "Try the web interface with {__layersgui} for further guided improvements.", - "translation": "尝试使用{__layersgui}的Web界面进行进一步引导式改进。", - "message": "Try the web interface with {__layersgui} for further guided improvements.", - "placeholder": null - }, - { - "id": "Error connecting to lotus node: {Error} {Error_1}", - "translation": "连接到lotus节点时出错:{Error} {Error_1}", - "message": "Error connecting to lotus node: {Error} {Error_1}", - "placeholder": null - }, - { - "id": "could not get API info for FullNode: {Err}", - "translation": "无法获取FullNode的API信息:{Err}", - "message": "could not get API info for FullNode: {Err}", - "placeholder": null - }, - { - "id": "Error getting token: 
{Error}", - "translation": "获取令牌时出错:{Error}", - "message": "Error getting token: {Error}", - "placeholder": null - }, - { - "id": "Filecoin {Slack} channels: {Fil_curio_help} and {Fil_curio_dev}", - "translation": "Filecoin {Slack} 频道:{Fil_curio_help} 和 {Fil_curio_dev}", - "message": "Filecoin {Slack} channels: {Fil_curio_help} and {Fil_curio_dev}", - "placeholder": null - }, - { - "id": "Start multiple Curio instances with the '{Post}' layer to redundancy.", - "translation": "使用'{Post}'层启动多个Curio实例以实现冗余。", - "message": "Start multiple Curio instances with the '{Post}' layer to redundancy.", - "placeholder": null - }, - { - "id": "One database can serve multiple miner IDs: Run a migration for each lotus-miner.", - "translation": "一个数据库可以服务多个矿工ID:为每个lotus-miner运行迁移。", - "message": "One database can serve multiple miner IDs: Run a migration for each lotus-miner.", - "placeholder": null - }, - { - "id": "Please start (or restart) {Lotus_miner} now that database credentials are in {Toml}.", - "translation": "请立即启动(或重新启动){Lotus_miner},因为数据库凭据已在{Toml}中。", - "message": "Please start (or restart) {Lotus_miner} now that database credentials are in {Toml}.", - "placeholder": null - }, - { - "id": "Error interpreting miner ID: {Error}: ID: {String}", - "translation": "解释矿工ID时出错:{Error}:ID:{String}", - "message": "Error interpreting miner ID: {Error}: ID: {String}", - "placeholder": null - }, - { - "id": "Enabling Sector Indexing in the database.", - "translation": "在数据库中启用扇区索引。", - "message": "Enabling Sector Indexing in the database.", - "placeholder": null - }, - { - "id": "Error expanding path: {Error}", - "translation": "扩展路径时出错:{Error}", - "message": "Error expanding path: {Error}", - "placeholder": null - }, - { - "id": "Could not create repo from directory: {Error}. Aborting migration", - "translation": "无法从目录创建repo:{Error}。 中止迁移", - "message": "Could not create repo from directory: {Error}. 
Aborting migration", - "placeholder": null - }, - { - "id": "Could not lock miner repo. Your miner must be stopped: {Error}\n Aborting migration", - "translation": "无法锁定矿工repo。 您的矿工必须停止:{Error}\n 中止迁移", - "message": "Could not lock miner repo. Your miner must be stopped: {Error}\n Aborting migration", - "placeholder": null - }, - { - "id": "To work with the config:", - "translation": "要使用配置:", - "message": "To work with the config:", - "placeholder": null - }, - { - "id": "This interactive tool creates a new miner actor and creates the basic configuration layer for it.", - "translation": "此交互式工具将创建一个新的矿工角色,并为其创建基本配置层。", - "message": "This interactive tool creates a new miner actor and creates the basic configuration layer for it.", - "placeholder": null - }, - { - "id": "This process is partially idempotent. Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster {Arg_1}' to finish the configuration.", - "translation": "此过程在某种程度上是幂等的。一旦创建了新的矿工角色,并且后续步骤失败,用户需要运行'curio config new-cluster {Arg_1}'来完成配置。", - "message": "This process is partially idempotent. 
Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster {Arg_1}' to finish the configuration.", - "placeholder": null - }, - { - "id": "Choose if you with to create a new miner or migrate from existing Lotus-Miner", - "translation": "选择您是否要创建新矿工或从现有的 Lotus-Miner 迁移", - "message": "Choose if you with to create a new miner or migrate from existing Lotus-Miner", - "placeholder": null - }, - { - "id": "Migrate from existing Lotus-Miner", - "translation": "从现有的 Lotus-Miner 迁移", - "message": "Migrate from existing Lotus-Miner", - "placeholder": null - }, - { - "id": "Create a new miner", - "translation": "创建一个新的矿工", - "message": "Create a new miner", - "placeholder": null - }, - { - "id": "New Miner initialization complete.", - "translation": "新矿工初始化完成。", - "message": "New Miner initialization complete.", - "placeholder": null - }, - { - "id": "Migrating lotus-miner config.toml to Curio in-database configuration.", - "translation": "将 lotus-miner config.toml 迁移到 Curio 的数据库配置中。", - "message": "Migrating lotus-miner config.toml to Curio in-database configuration.", - "placeholder": null - }, - { - "id": "Error getting API: {Error}", - "translation": "获取 API 时出错:{Error}", - "message": "Error getting API: {Error}", - "placeholder": null - }, - { - "id": "The Curio team wants to improve the software you use. Tell the team you're using `{Curio}`.", - "translation": "Curio 团队希望改进您使用的软件。告诉团队您正在使用 `{Curio}`。", - "message": "The Curio team wants to improve the software you use. Tell the team you're using `{Curio}`.", - "placeholder": null - }, - { - "id": "Individual Data: Miner ID, Curio version, chain ({Mainnet} or {Calibration}). Signed.", - "translation": "个人数据:矿工 ID,Curio 版本,链({Mainnet} 或 {Calibration})。签名。", - "message": "Individual Data: Miner ID, Curio version, chain ({Mainnet} or {Calibration}). 
Signed.", - "placeholder": null - }, - { - "id": "Aggregate-Anonymous: version, chain, and Miner power (bucketed).", - "translation": "聚合-匿名:版本,链和矿工算力(分桶)。", - "message": "Aggregate-Anonymous: version, chain, and Miner power (bucketed).", - "placeholder": null - }, - { - "id": "Hint: I am someone running Curio on whichever chain.", - "translation": "提示:我是在任何链上运行 Curio 的人。", - "message": "Hint: I am someone running Curio on whichever chain.", - "placeholder": null - }, - { - "id": "Press return to update {Toml} with Yugabyte info. A Backup file will be written to that folder before changes are made.", - "translation": "按回车键更新 {Toml} 以包含 Yugabyte 信息。在进行更改之前,将在该文件夹中写入备份文件。", - "message": "Press return to update {Toml} with Yugabyte info. A Backup file will be written to that folder before changes are made.", - "placeholder": null - }, - { - "id": "Error creating backup file: {Error}", - "translation": "创建备份文件时出错:{Error}", - "message": "Error creating backup file: {Error}", - "placeholder": null - }, - { - "id": "Error reading config.toml: {Error}", - "translation": "读取 config.toml 时出错:{Error}", - "message": "Error reading config.toml: {Error}", - "placeholder": null - }, - { - "id": "Error writing backup file: {Error}", - "translation": "写入备份文件时出错:{Error}", - "message": "Error writing backup file: {Error}", - "placeholder": null - }, - { - "id": "Error closing backup file: {Error}", - "translation": "关闭备份文件时出错:{Error}", - "message": "Error closing backup file: {Error}", - "placeholder": null - }, - { - "id": "Initializing a new miner actor.", - "translation": "初始化新的矿工角色。", - "message": "Initializing a new miner actor.", - "placeholder": null - }, - { - "id": "Enter the info to create a new miner", - "translation": "输入创建新矿工所需的信息", - "message": "Enter the info to create a new miner", - "placeholder": null - }, - { - "id": "Owner Address: {String}", - "translation": "所有者地址:{String}", - "message": "Owner Address: {String}", - "placeholder": null - }, - { - "id": "Worker 
Address: {String}", - "translation": "工作地址:{String}", - "message": "Worker Address: {String}", - "placeholder": null - }, - { - "id": "Sender Address: {String}", - "translation": "发送者地址:{String}", - "message": "Sender Address: {String}", - "placeholder": null - }, - { - "id": "Sector Size: {Ssize}", - "translation": "扇区大小: {Ssize}", - "message": "Sector Size: {Ssize}", - "placeholder": null - }, - { - "id": "Confidence epochs: {Confidence}", - "translation": "置信度时期: {Confidence}", - "message": "Confidence epochs: {Confidence}", - "placeholder": null - }, - { - "id": "Continue to verify the addresses and create a new miner actor.", - "translation": "继续验证地址并创建新的矿工角色。", - "message": "Continue to verify the addresses and create a new miner actor.", - "placeholder": null - }, - { - "id": "Miner creation error occurred: {Error}", - "translation": "矿工创建错误发生: {Error}", - "message": "Miner creation error occurred: {Error}", - "placeholder": null - }, - { - "id": "Enter the owner address", - "translation": "输入所有者地址", - "message": "Enter the owner address", - "placeholder": null - }, - { - "id": "No address provided", - "translation": "未提供地址", - "message": "No address provided", - "placeholder": null - }, - { - "id": "Failed to parse the address: {Error}", - "translation": "解析地址失败: {Error}", - "message": "Failed to parse the address: {Error}", - "placeholder": null - }, - { - "id": "Enter {Stringworker_senderi_1} address", - "translation": "输入 {Stringworker_senderi_1} 地址", - "message": "Enter {Stringworker_senderi_1} address", - "placeholder": null - }, - { - "id": "Enter the sector size", - "translation": "输入扇区大小", - "message": "Enter the sector size", - "placeholder": null - }, - { - "id": "Failed to parse sector size: {Error}", - "translation": "解析扇区大小失败: {Error}", - "message": "Failed to parse sector size: {Error}", - "placeholder": null - }, - { - "id": "Enter the confidence", - "translation": "输入置信度", - "message": "Enter the confidence", - "placeholder": null - }, - { - 
"id": "Failed to parse confidence: {Error}", - "translation": "解析置信度失败: {Error}", - "message": "Failed to parse confidence: {Error}", - "placeholder": null - }, - { - "id": "Failed to create the miner actor: {Error}", - "translation": "创建矿工角色失败: {Error}", - "message": "Failed to create the miner actor: {Error}", - "placeholder": null - }, - { - "id": "Miner {String} created successfully", - "translation": "矿工 {String} 创建成功", - "message": "Miner {String} created successfully", - "placeholder": null - }, - { - "id": "Cannot reach the DB: {Error}", - "translation": "无法访问数据库: {Error}", - "message": "Cannot reach the DB: {Error}", - "placeholder": null - }, - { - "id": "Error connecting to full node API: {Error}", - "translation": "连接到完整节点 API 时发生错误: {Error}", - "message": "Error connecting to full node API: {Error}", - "placeholder": null - }, - { - "id": "Pre-initialization steps complete", - "translation": "预初始化步骤完成", - "message": "Pre-initialization steps complete", - "placeholder": null - }, - { - "id": "Failed to generate random bytes for secret: {Error}", - "translation": "生成密码的随机字节失败: {Error}", - "message": "Failed to generate random bytes for secret: {Error}", - "placeholder": null - }, - { - "id": "Please do not run guided-setup again as miner creation is not idempotent. You need to run 'curio config new-cluster {String}' to finish the configuration", - "translation": "请不要再次运行引导设置,因为矿工创建不是幂等的。 您需要运行 'curio config new-cluster {String}' 来完成配置。", - "message": "Please do not run guided-setup again as miner creation is not idempotent. 
You need to run 'curio config new-cluster {String}' to finish the configuration", - "placeholder": null - }, - { - "id": "Failed to get API info for FullNode: {Err}", - "translation": "无法获取 FullNode 的 API 信息: {Err}", - "message": "Failed to get API info for FullNode: {Err}", - "placeholder": null - }, - { - "id": "Failed to verify the auth token from daemon node: {Error}", - "translation": "无法验证来自守护进程节点的授权令牌: {Error}", - "message": "Failed to verify the auth token from daemon node: {Error}", - "placeholder": null - }, - { - "id": "Failed to encode the config: {Error}", - "translation": "无法编码配置: {Error}", - "message": "Failed to encode the config: {Error}", - "placeholder": null - }, - { - "id": "Failed to generate default config: {Error}", - "translation": "无法生成默认配置: {Error}", - "message": "Failed to generate default config: {Error}", - "placeholder": null - }, - { - "id": "Failed to insert 'base' config layer in database: {Error}", - "translation": "无法将 'base' 配置层插入数据库: {Error}", - "message": "Failed to insert 'base' config layer in database: {Error}", - "placeholder": null - }, - { - "id": "Failed to insert '{String}' config layer in database: {Error}", - "translation": "无法将 '{String}' 配置层插入数据库: {Error}", - "message": "Failed to insert '{String}' config layer in database: {Error}", - "placeholder": null - }, - { - "id": "New Curio configuration layer '{String}' created", - "translation": "新的 Curio 配置层 '{String}' 已创建", - "message": "New Curio configuration layer '{String}' created", - "placeholder": null - }, - { - "id": "This process is partially idempotent. Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster \u003c miner ID \u003e' to finish the configuration.", - "translation": "该过程部分幂等。一旦创建了新的矿工角色,并且随后的步骤失败,用户需要运行 'curio config new-cluster \u003c 矿工 ID \u003e' 来完成配置。", - "message": "This process is partially idempotent. 
Once a new miner actor has been created and subsequent steps fail, the user need to run 'curio config new-cluster \u003c miner ID \u003e' to finish the configuration.", - "placeholder": null - }, - { - "id": "Confidence epochs", - "translation": "置信度时期", - "message": "Confidence epochs", - "placeholder": null - }, - { - "id": "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'", - "translation": "通过冗余增加可靠性:使用至少后层启动多台机器:'curio run --layers=post'", - "message": "Increase reliability using redundancy: start multiple machines with at-least the post layer: 'curio run --layers=post'", - "placeholder": null - }, - { - "id": "I want to:", - "translation": "我想要:", - "message": "I want to:", - "placeholder": null - }, - { - "id": "Configuration 'base' was updated to include this miner's address", - "translation": "配置 'base' 已更新以包含此矿工的地址", - "message": "Configuration 'base' was updated to include this miner's address", - "placeholder": null - }, - { - "id": "Cannot load base config: {Error}", - "translation": "无法加载基本配置: {Error}", - "message": "Cannot load base config: {Error}", - "placeholder": null - }, - { - "id": "Failed to load base config: {Error}", - "translation": "加载基本配置失败: {Error}", - "message": "Failed to load base config: {Error}", - "placeholder": null - }, - { - "id": "Failed to regenerate base config: {Error}", - "translation": "重新生成基本配置失败: {Error}", - "message": "Failed to regenerate base config: {Error}", - "placeholder": null - }, - { - "id": "Failed to load base config from database: {Error}", - "translation": "从数据库加载基本配置失败:{Error}", - "message": "Failed to load base config from database: {Error}", - "placeholder": null - }, - { - "id": "Failed to parse base config: {Error}", - "translation": "解析基本配置失败:{Error}", - "message": "Failed to parse base config: {Error}", - "placeholder": null - }, - { - "id": "Try the web interface with {Rendercurio_run___layersgui} for further guided 
improvements.", - "translation": "尝试使用{Rendercurio_run___layersgui}的网络界面进行更进一步的指导性改进。", - "message": "Try the web interface with {Rendercurio_run___layersgui} for further guided improvements.", - "placeholder": null - }, - { - "id": "Now shut down lotus-miner and lotus-worker and use run {Rendercurio_run} instead.", - "translation": "现在关闭lotus-miner和lotus-worker,改为使用{Rendercurio_run}运行。", - "message": "Now shut down lotus-miner and lotus-worker and use run {Rendercurio_run} instead.", - "placeholder": null - }, - { - "id": "Configuration 'base' was updated to include this miner's address ({MinerAddress}) and its wallet setup.", - "translation": "'base'配置已更新,包括该矿工的地址({MinerAddress})及其钱包设置。", - "message": "Configuration 'base' was updated to include this miner's address ({MinerAddress}) and its wallet setup.", - "placeholder": null - }, - { - "id": "Configuration 'base' was created to resemble this lotus-miner's config.toml .", - "translation": "'base'配置已创建,以类似于这个lotus-miner的config.toml。", - "message": "Configuration 'base' was created to resemble this lotus-miner's config.toml .", - "placeholder": null - }, - { - "id": "Unmigratable sectors found. Do you want to continue?", - "translation": "发现无法迁移的扇区。您想要继续吗?", - "message": "Unmigratable sectors found. 
Do you want to continue?", - "placeholder": null - }, - { - "id": "Yes, continue", - "translation": "是的,继续", - "message": "Yes, continue", - "placeholder": null - }, - { - "id": "No, abort", - "translation": "不,中止", - "message": "No, abort", - "placeholder": null - }, - { - "id": "Migrating metadata for {NSectors} sectors.", - "translation": "正在迁移{NSectors}个扇区的元数据。", - "message": "Migrating metadata for {NSectors} sectors.", - "placeholder": null - } - ] -} \ No newline at end of file diff --git a/cmd/curio/internal/translations/locales/zh/out.gotext.json b/cmd/curio/internal/translations/locales/zh/out.gotext.json deleted file mode 100644 index bb9d25e4cad..00000000000 --- a/cmd/curio/internal/translations/locales/zh/out.gotext.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "language": "zh", - "messages": [] - } diff --git a/cmd/curio/internal/translations/translations.go b/cmd/curio/internal/translations/translations.go deleted file mode 100644 index 361e8e89401..00000000000 --- a/cmd/curio/internal/translations/translations.go +++ /dev/null @@ -1,27 +0,0 @@ -// Usage: -// To UPDATE translations: -// -// 1. add/change strings in guidedsetup folder that use d.T() or d.say(). -// -// 2. run `go generate` in the cmd/curio/internal/translations/ folder. -// -// 3. ChatGPT 3.5 can translate the ./locales/??/out.gotext.json files' -// which ONLY include the un-translated messages. -// APPEND to the messages.gotext.json files's array. -// -// ChatGPT fuss: -// - on a good day, you may need to hit "continue generating". -// - > 60? you'll need to give it sections of the file. -// -// 4. Re-import with `go generate` again. -// -// To ADD a language: -// 1. Add it to the list in updateLang.sh -// 2. Run `go generate` in the cmd/curio/internal/translations/ folder. -// 3. Follow the "Update translations" steps here. -// 4. Code will auto-detect the new language and use it. -// -// FUTURE Reliability: OpenAPI automation. 
-package translations - -//go:generate ./updateLang.sh diff --git a/cmd/curio/internal/translations/updateLang.sh b/cmd/curio/internal/translations/updateLang.sh deleted file mode 100755 index 984f63fd5d8..00000000000 --- a/cmd/curio/internal/translations/updateLang.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -#OP: Only run if some file in ../guidedsetup* is newer than catalog.go -# Change this condition if using translations more widely. -if [ "$(find ../../guidedsetup/* -newer catalog.go)" ] || [ "$(find locales/* -newer catalog.go)" ]; then - gotext -srclang=en update -out=catalog.go -lang=en,zh,ko github.com/filecoin-project/lotus/cmd/curio/guidedsetup - go run knowns/main.go locales/zh locales/ko -fi diff --git a/cmd/curio/log.go b/cmd/curio/log.go deleted file mode 100644 index 0af41a679dd..00000000000 --- a/cmd/curio/log.go +++ /dev/null @@ -1,105 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/cmd/curio/rpc" -) - -var logCmd = &cli.Command{ - Name: "log", - Usage: "Manage logging", - Subcommands: []*cli.Command{ - LogList, - LogSetLevel, - }, -} - -var LogList = &cli.Command{ - Name: "list", - Usage: "List log systems", - Action: func(cctx *cli.Context) error { - minerApi, closer, err := rpc.GetCurioAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - systems, err := minerApi.LogList(ctx) - if err != nil { - return err - } - - for _, system := range systems { - fmt.Println(system) - } - - return nil - }, -} - -var LogSetLevel = &cli.Command{ - Name: "set-level", - Usage: "Set log level", - ArgsUsage: "[level]", - Description: `Set the log level for logging systems: - - The system flag can be specified multiple times. 
- - eg) log set-level --system chain --system chainxchg debug - - Available Levels: - debug - info - warn - error - - Environment Variables: - GOLOG_LOG_LEVEL - Default log level for all log systems - GOLOG_LOG_FMT - Change output log format (json, nocolor) - GOLOG_FILE - Write logs to file - GOLOG_OUTPUT - Specify whether to output to file, stderr, stdout or a combination, i.e. file+stderr -`, - Flags: []cli.Flag{ - &cli.StringSliceFlag{ - Name: "system", - Usage: "limit to log system", - Value: &cli.StringSlice{}, - }, - }, - Action: func(cctx *cli.Context) error { - minerApi, closer, err := rpc.GetCurioAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - if !cctx.Args().Present() { - return fmt.Errorf("level is required") - } - - systems := cctx.StringSlice("system") - if len(systems) == 0 { - var err error - systems, err = minerApi.LogList(ctx) - if err != nil { - return err - } - } - - for _, system := range systems { - if err := minerApi.LogSetLevel(ctx, system, cctx.Args().First()); err != nil { - return xerrors.Errorf("setting log level on %s: %v", system, err) - } - } - - return nil - }, -} diff --git a/cmd/curio/main.go b/cmd/curio/main.go deleted file mode 100644 index f6730138bc7..00000000000 --- a/cmd/curio/main.go +++ /dev/null @@ -1,190 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - "os/signal" - "runtime/pprof" - "syscall" - - "github.com/docker/go-units" - "github.com/fatih/color" - logging "github.com/ipfs/go-log/v2" - "github.com/mitchellh/go-homedir" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-paramfetch" - - "github.com/filecoin-project/lotus/build" - lcli "github.com/filecoin-project/lotus/cli" - cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/cmd/curio/guidedsetup" - "github.com/filecoin-project/lotus/lib/lotuslog" - 
"github.com/filecoin-project/lotus/lib/tracing" - "github.com/filecoin-project/lotus/node/repo" -) - -var log = logging.Logger("main") - -const ( - FlagMinerRepo = "miner-repo" -) - -func setupCloseHandler() { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - go func() { - <-c - fmt.Println("\r- Ctrl+C pressed in Terminal") - _ = pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) - panic(1) - }() -} - -func main() { - - lotuslog.SetupLogLevels() - - local := []*cli.Command{ - cliCmd, - runCmd, - stopCmd, - configCmd, - testCmd, - webCmd, - guidedsetup.GuidedsetupCmd, - sealCmd, - marketCmd, - fetchParamCmd, - ffiCmd, - } - - jaeger := tracing.SetupJaegerTracing("curio") - defer func() { - if jaeger != nil { - _ = jaeger.ForceFlush(context.Background()) - } - }() - - for _, cmd := range local { - cmd := cmd - originBefore := cmd.Before - cmd.Before = func(cctx *cli.Context) error { - if jaeger != nil { - _ = jaeger.Shutdown(cctx.Context) - } - jaeger = tracing.SetupJaegerTracing("curio/" + cmd.Name) - - if cctx.IsSet("color") { - color.NoColor = !cctx.Bool("color") - } - - if originBefore != nil { - return originBefore(cctx) - } - - return nil - } - } - - app := &cli.App{ - Name: "curio", - Usage: "Filecoin decentralized storage network provider", - Version: build.UserVersion(), - EnableBashCompletion: true, - Before: func(c *cli.Context) error { - setupCloseHandler() - return nil - }, - Flags: []cli.Flag{ - &cli.BoolFlag{ - // examined in the Before above - Name: "color", - Usage: "use color in display output", - DefaultText: "depends on output being a TTY", - }, - &cli.StringFlag{ - Name: "panic-reports", - EnvVars: []string{"CURIO_PANIC_REPORT_PATH"}, - Hidden: true, - Value: "~/.curio", // should follow --repo default - }, - &cli.StringFlag{ - Name: "db-host", - EnvVars: []string{"CURIO_DB_HOST", "CURIO_HARMONYDB_HOSTS"}, - Usage: "Command separated list of hostnames for yugabyte cluster", - Value: "127.0.0.1", - }, - 
&cli.StringFlag{ - Name: "db-name", - EnvVars: []string{"CURIO_DB_NAME", "CURIO_HARMONYDB_NAME"}, - Value: "yugabyte", - }, - &cli.StringFlag{ - Name: "db-user", - EnvVars: []string{"CURIO_DB_USER", "CURIO_HARMONYDB_USERNAME"}, - Value: "yugabyte", - }, - &cli.StringFlag{ - Name: "db-password", - EnvVars: []string{"CURIO_DB_PASSWORD", "CURIO_HARMONYDB_PASSWORD"}, - Value: "yugabyte", - }, - &cli.StringFlag{ - Name: "db-port", - EnvVars: []string{"CURIO_DB_PORT", "CURIO_HARMONYDB_PORT"}, - Value: "5433", - }, - &cli.StringFlag{ - Name: deps.FlagRepoPath, - EnvVars: []string{"CURIO_REPO_PATH"}, - Value: "~/.curio", - }, - cliutil.FlagVeryVerbose, - }, - Commands: local, - After: func(c *cli.Context) error { - if r := recover(); r != nil { - p, err := homedir.Expand(c.String(FlagMinerRepo)) - if err != nil { - log.Errorw("could not expand repo path for panic report", "error", err) - panic(r) - } - - // Generate report in CURIO_PATH and re-raise panic - build.GeneratePanicReport(c.String("panic-reports"), p, c.App.Name) - panic(r) - } - return nil - }, - } - app.Setup() - app.Metadata["repoType"] = repo.Curio - lcli.RunApp(app) -} - -var fetchParamCmd = &cli.Command{ - Name: "fetch-params", - Usage: "Fetch proving parameters", - ArgsUsage: "[sectorSize]", - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { - return xerrors.Errorf("incorrect number of arguments") - } - sectorSizeInt, err := units.RAMInBytes(cctx.Args().First()) - if err != nil { - return xerrors.Errorf("error parsing sector size (specify as \"32GiB\", for instance): %w", err) - } - sectorSize := uint64(sectorSizeInt) - - err = paramfetch.GetParams(lcli.ReqContext(cctx), build.ParametersJSON(), build.SrsJSON(), sectorSize) - if err != nil { - return xerrors.Errorf("fetching proof parameters: %w", err) - } - - return nil - }, -} diff --git a/cmd/curio/market.go b/cmd/curio/market.go deleted file mode 100644 index 4f546292894..00000000000 --- a/cmd/curio/market.go +++ /dev/null @@ -1,201 +0,0 
@@ -package main - -import ( - "fmt" - "sort" - "strconv" - - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/curiosrc/market/lmrpc" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" -) - -var marketCmd = &cli.Command{ - Name: "market", - Subcommands: []*cli.Command{ - marketRPCInfoCmd, - marketSealCmd, - }, -} - -var marketRPCInfoCmd = &cli.Command{ - Flags: []cli.Flag{ - &cli.StringSliceFlag{ - Name: "layers", - Usage: "list of layers to be interpreted (atop defaults). Default: base", - }, - }, - Action: func(cctx *cli.Context) error { - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - - layers := cctx.StringSlice("layers") - - cfg, err := deps.GetConfig(cctx.Context, layers, db) - if err != nil { - return xerrors.Errorf("get config: %w", err) - } - - ts, err := lmrpc.MakeTokens(cfg) - if err != nil { - return xerrors.Errorf("make tokens: %w", err) - } - - var addrTokens []struct { - Address string - Token string - } - - for address, s := range ts { - addrTokens = append(addrTokens, struct { - Address string - Token string - }{ - Address: address.String(), - Token: s, - }) - } - - sort.Slice(addrTokens, func(i, j int) bool { - return addrTokens[i].Address < addrTokens[j].Address - }) - - for _, at := range addrTokens { - fmt.Printf("[lotus-miner/boost compatible] %s %s\n", at.Address, at.Token) - } - - return nil - }, - Name: "rpc-info", -} - -var marketSealCmd = &cli.Command{ - Name: "seal", - Usage: "start sealing a deal sector early", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "actor", - Usage: "Specify actor address to start sealing sectors for", - Required: true, - }, - 
&cli.StringSliceFlag{ - Name: "layers", - Usage: "list of layers to be interpreted (atop defaults). Default: base", - }, - &cli.BoolFlag{ - Name: "synthetic", - Usage: "Use synthetic PoRep", - Value: false, // todo implement synthetic - }, - }, - Action: func(cctx *cli.Context) error { - act, err := address.NewFromString(cctx.String("actor")) - if err != nil { - return xerrors.Errorf("parsing --actor: %w", err) - } - - if cctx.Args().Len() > 1 { - return xerrors.Errorf("specify only one sector") - } - - sec := cctx.Args().First() - - sector, err := strconv.ParseUint(sec, 10, 64) - if err != nil { - return xerrors.Errorf("failed to parse the sector number: %w", err) - } - - ctx := lcli.ReqContext(cctx) - dep, err := deps.GetDepsCLI(ctx, cctx) - if err != nil { - return err - } - - mid, err := address.IDFromAddress(act) - if err != nil { - return xerrors.Errorf("getting miner id: %w", err) - } - - mi, err := dep.Full.StateMinerInfo(ctx, act, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - nv, err := dep.Full.StateNetworkVersion(ctx, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting network version: %w", err) - } - - wpt := mi.WindowPoStProofType - spt, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, wpt, cctx.Bool("synthetic")) - if err != nil { - return xerrors.Errorf("getting seal proof type: %w", err) - } - - comm, err := dep.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - // Get current open sector pieces from DB - var pieces []struct { - Sector abi.SectorNumber `db:"sector_number"` - Size abi.PaddedPieceSize `db:"piece_size"` - Index uint64 `db:"piece_index"` - } - err = tx.Select(&pieces, ` - SELECT - sector_number, - piece_size, - piece_index, - FROM - open_sector_pieces - WHERE - sp_id = $1 AND sector_number = $2 - ORDER BY - piece_index DESC;`, mid, sector) - if err != nil { - return false, xerrors.Errorf("getting open sectors from DB") - } - - if 
len(pieces) < 1 { - return false, xerrors.Errorf("sector %d is not waiting to be sealed", sector) - } - - cn, err := tx.Exec(`INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3);`, mid, sector, spt) - - if err != nil { - return false, xerrors.Errorf("adding sector to pipeline: %w", err) - } - - if cn != 1 { - return false, xerrors.Errorf("incorrect number of rows returned") - } - - _, err = tx.Exec("SELECT transfer_and_delete_open_piece($1, $2)", mid, sector) - if err != nil { - return false, xerrors.Errorf("adding sector to pipeline: %w", err) - } - - return true, nil - - }, harmonydb.OptionRetry()) - - if err != nil { - return xerrors.Errorf("start sealing sector: %w", err) - } - - if !comm { - return xerrors.Errorf("start sealing sector: commit failed") - } - - return nil - }, -} diff --git a/cmd/curio/pipeline.go b/cmd/curio/pipeline.go deleted file mode 100644 index 8f57b5694d4..00000000000 --- a/cmd/curio/pipeline.go +++ /dev/null @@ -1,216 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/ipfs/go-datastore" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/cmd/curio/guidedsetup" - "github.com/filecoin-project/lotus/curiosrc/seal" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/node/repo" -) - -var sealCmd = &cli.Command{ - Name: "seal", - Usage: "Manage the sealing pipeline", - Subcommands: []*cli.Command{ - sealStartCmd, - sealMigrateLMSectorsCmd, - }, -} - -var sealStartCmd = &cli.Command{ - Name: "start", - Usage: "Start new sealing operations manually", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "actor", - 
Usage: "Specify actor address to start sealing sectors for", - Required: true, - }, - &cli.BoolFlag{ - Name: "now", - Usage: "Start sealing sectors for all actors now (not on schedule)", - }, - &cli.BoolFlag{ - Name: "cc", - Usage: "Start sealing new CC sectors", - }, - &cli.IntFlag{ - Name: "count", - Usage: "Number of sectors to start", - Value: 1, - }, - &cli.BoolFlag{ - Name: "synthetic", - Usage: "Use synthetic PoRep", - Value: false, // todo implement synthetic - }, - &cli.StringSliceFlag{ - Name: "layers", - Usage: "list of layers to be interpreted (atop defaults). Default: base", - }, - }, - Action: func(cctx *cli.Context) error { - if !cctx.Bool("now") { - return xerrors.Errorf("schedule not implemented, use --now") - } - if !cctx.IsSet("actor") { - return cli.ShowCommandHelp(cctx, "start") - } - if !cctx.Bool("cc") { - return xerrors.Errorf("only CC sectors supported for now") - } - - act, err := address.NewFromString(cctx.String("actor")) - if err != nil { - return xerrors.Errorf("parsing --actor: %w", err) - } - - ctx := lcli.ReqContext(cctx) - dep, err := deps.GetDepsCLI(ctx, cctx) - if err != nil { - return err - } - - /* - create table sectors_sdr_pipeline ( - sp_id bigint not null, - sector_number bigint not null, - - -- at request time - create_time timestamp not null, - reg_seal_proof int not null, - comm_d_cid text not null, - - [... 
other not relevant fields] - */ - - mid, err := address.IDFromAddress(act) - if err != nil { - return xerrors.Errorf("getting miner id: %w", err) - } - - mi, err := dep.Full.StateMinerInfo(ctx, act, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - nv, err := dep.Full.StateNetworkVersion(ctx, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting network version: %w", err) - } - - wpt := mi.WindowPoStProofType - spt, err := miner.PreferredSealProofTypeFromWindowPoStType(nv, wpt, cctx.Bool("synthetic")) - if err != nil { - return xerrors.Errorf("getting seal proof type: %w", err) - } - - num, err := seal.AllocateSectorNumbers(ctx, dep.Full, dep.DB, act, cctx.Int("count"), func(tx *harmonydb.Tx, numbers []abi.SectorNumber) (bool, error) { - for _, n := range numbers { - _, err := tx.Exec("insert into sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) values ($1, $2, $3)", mid, n, spt) - if err != nil { - return false, xerrors.Errorf("inserting into sectors_sdr_pipeline: %w", err) - } - } - return true, nil - }) - if err != nil { - return xerrors.Errorf("allocating sector numbers: %w", err) - } - - for _, number := range num { - fmt.Println(number) - } - - return nil - }, -} - -var sealMigrateLMSectorsCmd = &cli.Command{ - Name: "migrate-lm-sectors", - Usage: "(debug tool) Copy LM sector metadata into Curio DB", - Hidden: true, // only needed in advanced cases where manual repair is needed - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "miner-repo", - Usage: "Path to miner repo", - Value: "~/.lotusminer", - }, - &cli.BoolFlag{ - Name: "seal-ignore", - Usage: "Ignore sectors that cannot be migrated", - Value: false, - EnvVars: []string{"CURUO_MIGRATE_SEAL_IGNORE"}, - }, - }, - Action: func(cctx *cli.Context) error { - ctx := lcli.ReqContext(cctx) - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - - r, err := repo.NewFS(cctx.String("miner-repo")) - if err != nil { - return err - } - - 
ok, err := r.Exists() - if err != nil { - return err - } - - if !ok { - return fmt.Errorf("repo not initialized at: %s", cctx.String("miner-repo")) - } - - lr, err := r.LockRO(repo.StorageMiner) - if err != nil { - return fmt.Errorf("locking repo: %w", err) - } - defer func() { - err = lr.Close() - if err != nil { - fmt.Println("error closing repo: ", err) - } - }() - - mmeta, err := lr.Datastore(ctx, "/metadata") - if err != nil { - return xerrors.Errorf("opening miner metadata datastore: %w", err) - } - - maddrBytes, err := mmeta.Get(ctx, datastore.NewKey("miner-address")) - if err != nil { - return xerrors.Errorf("getting miner address datastore entry: %w", err) - } - - addr, err := address.NewFromBytes(maddrBytes) - if err != nil { - return xerrors.Errorf("parsing miner actor address: %w", err) - } - - unmigSectorShouldFail := func() bool { return !cctx.Bool("seal-ignore") } - - err = guidedsetup.MigrateSectors(ctx, addr, mmeta, db, func(n int) { - fmt.Printf("Migrating %d sectors\n", n) - }, unmigSectorShouldFail) - if err != nil { - return xerrors.Errorf("migrating sectors: %w", err) - } - - return nil - }, -} diff --git a/cmd/curio/proving.go b/cmd/curio/proving.go deleted file mode 100644 index c5fb7863996..00000000000 --- a/cmd/curio/proving.go +++ /dev/null @@ -1,204 +0,0 @@ -package main - -import ( - "context" - "database/sql" - "encoding/json" - "errors" - "fmt" - "os" - "time" - - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/dline" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - curio "github.com/filecoin-project/lotus/curiosrc" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" -) - -var testCmd = &cli.Command{ - Name: "test", - Usage: "Utility functions for testing", - Subcommands: []*cli.Command{ - //provingInfoCmd, - wdPostCmd, - }, - Before: func(cctx *cli.Context) error { - return nil - }, -} - -var wdPostCmd = &cli.Command{ - 
Name: "window-post", - Aliases: []string{"wd", "windowpost", "wdpost"}, - Usage: "Compute a proof-of-spacetime for a sector (requires the sector to be pre-sealed). These will not send to the chain.", - Subcommands: []*cli.Command{ - wdPostHereCmd, - wdPostTaskCmd, - }, -} - -// wdPostTaskCmd writes to harmony_task and wdpost_partition_tasks, then waits for the result. -// It is intended to be used to test the windowpost scheduler. -// The end of the compute task puts the task_id onto wdpost_proofs, which is read by the submit task. -// The submit task will not send test tasks to the chain, and instead will write the result to harmony_test. -// The result is read by this command, and printed to stdout. -var wdPostTaskCmd = &cli.Command{ - Name: "task", - Aliases: []string{"scheduled", "schedule", "async", "asynchronous"}, - Usage: "Test the windowpost scheduler by running it on the next available curio. ", - Flags: []cli.Flag{ - &cli.Uint64Flag{ - Name: "deadline", - Usage: "deadline to compute WindowPoSt for ", - Value: 0, - }, - &cli.StringSliceFlag{ - Name: "layers", - Usage: "list of layers to be interpreted (atop defaults). Default: base", - }, - }, - Action: func(cctx *cli.Context) error { - ctx := context.Background() - - deps, err := deps.GetDeps(ctx, cctx) - if err != nil { - return xerrors.Errorf("get config: %w", err) - } - - ts, err := deps.Full.ChainHead(ctx) - if err != nil { - return xerrors.Errorf("cannot get chainhead %w", err) - } - ht := ts.Height() - - // It's not important to be super-accurate as it's only for basic testing. 
- addr, err := address.NewFromString(deps.Cfg.Addresses[0].MinerAddresses[0]) - if err != nil { - return xerrors.Errorf("cannot get miner address %w", err) - } - maddr, err := address.IDFromAddress(addr) - if err != nil { - return xerrors.Errorf("cannot get miner id %w", err) - } - var taskId int64 - - _, err = deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - err = tx.QueryRow(`INSERT INTO harmony_task (name, posted_time, added_by) VALUES ('WdPost', CURRENT_TIMESTAMP, 123) RETURNING id`).Scan(&taskId) - if err != nil { - log.Error("inserting harmony_task: ", err) - return false, xerrors.Errorf("inserting harmony_task: %w", err) - } - _, err = tx.Exec(`INSERT INTO wdpost_partition_tasks - (task_id, sp_id, proving_period_start, deadline_index, partition_index) VALUES ($1, $2, $3, $4, $5)`, - taskId, maddr, ht, cctx.Uint64("deadline"), 0) - if err != nil { - log.Error("inserting wdpost_partition_tasks: ", err) - return false, xerrors.Errorf("inserting wdpost_partition_tasks: %w", err) - } - _, err = tx.Exec("INSERT INTO harmony_test (task_id) VALUES ($1)", taskId) - if err != nil { - return false, xerrors.Errorf("inserting into harmony_tests: %w", err) - } - return true, nil - }, harmonydb.OptionRetry()) - if err != nil { - return xerrors.Errorf("writing SQL transaction: %w", err) - } - fmt.Printf("Inserted task %v. Waiting for success ", taskId) - var result sql.NullString - for { - time.Sleep(time.Second) - err = deps.DB.QueryRow(ctx, `SELECT result FROM harmony_test WHERE task_id=$1`, taskId).Scan(&result) - if err != nil { - return xerrors.Errorf("reading result from harmony_test: %w", err) - } - if result.Valid { - break - } - fmt.Print(".") - } - fmt.Println() - log.Infof("Result: %s", result.String) - return nil - }, -} - -// This command is intended to be used to verify PoSt compute performance. -// It will not send any messages to the chain. Since it can compute any deadline, output may be incorrectly timed for the chain. 
-// The entire processing happens in this process while you wait. It does not use the scheduler. -var wdPostHereCmd = &cli.Command{ - Name: "here", - Aliases: []string{"cli"}, - Usage: "Compute WindowPoSt for performance and configuration testing.", - Description: `Note: This command is intended to be used to verify PoSt compute performance. -It will not send any messages to the chain. Since it can compute any deadline, output may be incorrectly timed for the chain.`, - ArgsUsage: "[deadline index]", - Flags: []cli.Flag{ - &cli.Uint64Flag{ - Name: "deadline", - Usage: "deadline to compute WindowPoSt for ", - Value: 0, - }, - &cli.StringSliceFlag{ - Name: "layers", - Usage: "list of layers to be interpreted (atop defaults). Default: base", - }, - &cli.StringFlag{ - Name: "storage-json", - Usage: "path to json file containing storage config", - Value: "~/.curio/storage.json", - }, - &cli.Uint64Flag{ - Name: "partition", - Usage: "partition to compute WindowPoSt for", - Value: 0, - }, - }, - Action: func(cctx *cli.Context) error { - - ctx := context.Background() - deps, err := deps.GetDeps(ctx, cctx) - if err != nil { - return err - } - - wdPostTask, wdPoStSubmitTask, derlareRecoverTask, err := curio.WindowPostScheduler( - ctx, deps.Cfg.Fees, deps.Cfg.Proving, deps.Full, deps.Verif, nil, nil, - deps.As, deps.Maddrs, deps.DB, deps.Stor, deps.Si, deps.Cfg.Subsystems.WindowPostMaxTasks) - if err != nil { - return err - } - _, _ = wdPoStSubmitTask, derlareRecoverTask - - if len(deps.Maddrs) == 0 { - return errors.New("no miners to compute WindowPoSt for") - } - head, err := deps.Full.ChainHead(ctx) - if err != nil { - return xerrors.Errorf("failed to get chain head: %w", err) - } - - di := dline.NewInfo(head.Height(), cctx.Uint64("deadline"), 0, 0, 0, 10 /*challenge window*/, 0, 0) - - for maddr := range deps.Maddrs { - out, err := wdPostTask.DoPartition(ctx, head, address.Address(maddr), di, cctx.Uint64("partition")) - if err != nil { - fmt.Println("Error computing 
WindowPoSt for miner", maddr, err) - continue - } - fmt.Println("Computed WindowPoSt for miner", maddr, ":") - err = json.NewEncoder(os.Stdout).Encode(out) - if err != nil { - fmt.Println("Could not encode WindowPoSt output for miner", maddr, err) - continue - } - } - - return nil - }, -} diff --git a/cmd/curio/rpc/rpc.go b/cmd/curio/rpc/rpc.go deleted file mode 100644 index 62a400f241f..00000000000 --- a/cmd/curio/rpc/rpc.go +++ /dev/null @@ -1,339 +0,0 @@ -// Package rpc provides all direct access to this node. -package rpc - -import ( - "context" - "encoding/base64" - "encoding/json" - "net" - "net/http" - "net/url" - "os" - "path/filepath" - "time" - - "github.com/gbrlsnchs/jwt/v3" - "github.com/google/uuid" - "github.com/gorilla/mux" - logging "github.com/ipfs/go-log/v2" - "github.com/mitchellh/go-homedir" - "github.com/urfave/cli/v2" - "go.opencensus.io/tag" - "golang.org/x/sync/errgroup" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-jsonrpc/auth" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/client" - cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/curiosrc/market" - "github.com/filecoin-project/lotus/curiosrc/web" - "github.com/filecoin-project/lotus/lib/rpcenc" - "github.com/filecoin-project/lotus/metrics" - "github.com/filecoin-project/lotus/metrics/proxy" - "github.com/filecoin-project/lotus/node/repo" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/pipeline/piece" - "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -const metaFile = "sectorstore.json" - -var log = logging.Logger("curio/rpc") -var permissioned = os.Getenv("LOTUS_DISABLE_AUTH_PERMISSIONED") != "1" - -func CurioHandler( - authv func(ctx 
context.Context, token string) ([]auth.Permission, error), - remote http.HandlerFunc, - a api.Curio, - permissioned bool) http.Handler { - mux := mux.NewRouter() - readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder() - rpcServer := jsonrpc.NewServer(jsonrpc.WithServerErrors(api.RPCErrors), readerServerOpt) - - wapi := proxy.MetricedAPI[api.Curio, api.CurioStruct](a) - if permissioned { - wapi = api.PermissionedAPI[api.Curio, api.CurioStruct](wapi) - } - - rpcServer.Register("Filecoin", wapi) - rpcServer.AliasMethod("rpc.discover", "Filecoin.Discover") - - mux.Handle("/rpc/v0", rpcServer) - mux.Handle("/rpc/streams/v0/push/{uuid}", readerHandler) - mux.PathPrefix("/remote").HandlerFunc(remote) - mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof - - if !permissioned { - return mux - } - - ah := &auth.Handler{ - Verify: authv, - Next: mux.ServeHTTP, - } - return ah -} - -type CurioAPI struct { - *deps.Deps - paths.SectorIndex - ShutdownChan chan struct{} -} - -func (p *CurioAPI) Version(context.Context) (api.Version, error) { - return api.CurioAPIVersion0, nil -} -func (p *CurioAPI) StorageDetachLocal(ctx context.Context, path string) error { - path, err := homedir.Expand(path) - if err != nil { - return xerrors.Errorf("expanding local path: %w", err) - } - - // check that we have the path opened - lps, err := p.LocalStore.Local(ctx) - if err != nil { - return xerrors.Errorf("getting local path list: %w", err) - } - - var localPath *storiface.StoragePath - for _, lp := range lps { - if lp.LocalPath == path { - lp := lp // copy to make the linter happy - localPath = &lp - break - } - } - if localPath == nil { - return xerrors.Errorf("no local paths match '%s'", path) - } - - // drop from the persisted storage.json - var found bool - if err := p.LocalPaths.SetStorage(func(sc *storiface.StorageConfig) { - out := make([]storiface.LocalPath, 0, len(sc.StoragePaths)) - for _, storagePath := range sc.StoragePaths { - if storagePath.Path != path { - out = 
append(out, storagePath) - continue - } - found = true - } - sc.StoragePaths = out - }); err != nil { - return xerrors.Errorf("set storage config: %w", err) - } - if !found { - // maybe this is fine? - return xerrors.Errorf("path not found in storage.json") - } - - // unregister locally, drop from sector index - return p.LocalStore.ClosePath(ctx, localPath.ID) -} - -func (p *CurioAPI) StorageLocal(ctx context.Context) (map[storiface.ID]string, error) { - ps, err := p.LocalStore.Local(ctx) - if err != nil { - return nil, err - } - - var out = make(map[storiface.ID]string) - for _, path := range ps { - out[path.ID] = path.LocalPath - } - - return out, nil -} - -func (p *CurioAPI) StorageStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) { - return p.Stor.FsStat(ctx, id) -} - -func (p *CurioAPI) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece piece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) { - di, err := market.NewPieceIngester(ctx, p.Deps.DB, p.Deps.Full, maddr, true, time.Minute) - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("failed to create a piece ingestor") - } - - sector, err := di.AllocatePieceToSector(ctx, maddr, piece, rawSize, source, header) - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("failed to add piece to a sector") - } - - err = di.Seal() - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("failed to start sealing the sector %d for actor %s", sector.Sector, maddr) - } - - return sector, nil -} - -// Trigger shutdown -func (p *CurioAPI) Shutdown(context.Context) error { - close(p.ShutdownChan) - return nil -} - -func (p *CurioAPI) StorageInit(ctx context.Context, path string, opts storiface.LocalStorageMeta) error { - path, err := homedir.Expand(path) - if err != nil { - return xerrors.Errorf("expanding local path: %w", err) - } - - if err := os.MkdirAll(path, 0755); err != nil { - if !os.IsExist(err) { - return err - } - 
} - _, err = os.Stat(filepath.Join(path, metaFile)) - if !os.IsNotExist(err) { - if err == nil { - return xerrors.Errorf("path is already initialized") - } - return err - } - if opts.ID == "" { - opts.ID = storiface.ID(uuid.New().String()) - } - if !(opts.CanStore || opts.CanSeal) { - return xerrors.Errorf("must specify at least one of --store or --seal") - } - b, err := json.MarshalIndent(opts, "", " ") - if err != nil { - return xerrors.Errorf("marshaling storage config: %w", err) - } - if err := os.WriteFile(filepath.Join(path, metaFile), b, 0644); err != nil { - return xerrors.Errorf("persisting storage metadata (%s): %w", filepath.Join(path, metaFile), err) - } - return nil -} - -func (p *CurioAPI) StorageAddLocal(ctx context.Context, path string) error { - path, err := homedir.Expand(path) - if err != nil { - return xerrors.Errorf("expanding local path: %w", err) - } - - if err := p.LocalStore.OpenPath(ctx, path); err != nil { - return xerrors.Errorf("opening local path: %w", err) - } - - if err := p.LocalPaths.SetStorage(func(sc *storiface.StorageConfig) { - sc.StoragePaths = append(sc.StoragePaths, storiface.LocalPath{Path: path}) - }); err != nil { - return xerrors.Errorf("get storage config: %w", err) - } - - return nil -} - -func (p *CurioAPI) LogList(ctx context.Context) ([]string, error) { - return logging.GetSubsystems(), nil -} - -func (p *CurioAPI) LogSetLevel(ctx context.Context, subsystem, level string) error { - return logging.SetLogLevel(subsystem, level) -} - -func ListenAndServe(ctx context.Context, dependencies *deps.Deps, shutdownChan chan struct{}) error { - fh := &paths.FetchHandler{Local: dependencies.LocalStore, PfHandler: &paths.DefaultPartialFileHandler{}} - remoteHandler := func(w http.ResponseWriter, r *http.Request) { - if !auth.HasPerm(r.Context(), nil, api.PermAdmin) { - w.WriteHeader(401) - _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing admin permission"}) - return - } - - fh.ServeHTTP(w, r) - } - - 
var authVerify func(context.Context, string) ([]auth.Permission, error) - { - privateKey, err := base64.StdEncoding.DecodeString(dependencies.Cfg.Apis.StorageRPCSecret) - if err != nil { - return xerrors.Errorf("decoding storage rpc secret: %w", err) - } - authVerify = func(ctx context.Context, token string) ([]auth.Permission, error) { - var payload deps.JwtPayload - if _, err := jwt.Verify([]byte(token), jwt.NewHS256(privateKey), &payload); err != nil { - return nil, xerrors.Errorf("JWT Verification failed: %w", err) - } - - return payload.Allow, nil - } - } - // Serve the RPC. - srv := &http.Server{ - Handler: CurioHandler( - authVerify, - remoteHandler, - &CurioAPI{dependencies, dependencies.Si, shutdownChan}, - permissioned), - ReadHeaderTimeout: time.Minute * 3, - BaseContext: func(listener net.Listener) context.Context { - ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-worker")) - return ctx - }, - Addr: dependencies.ListenAddr, - } - - log.Infof("Setting up RPC server at %s", dependencies.ListenAddr) - eg := errgroup.Group{} - eg.Go(srv.ListenAndServe) - - if dependencies.Cfg.Subsystems.EnableWebGui { - web, err := web.GetSrv(ctx, dependencies) - if err != nil { - return err - } - - go func() { - <-ctx.Done() - log.Warn("Shutting down...") - if err := srv.Shutdown(context.TODO()); err != nil { - log.Errorf("shutting down RPC server failed: %s", err) - } - if err := web.Shutdown(context.Background()); err != nil { - log.Errorf("shutting down web server failed: %s", err) - } - log.Warn("Graceful shutdown successful") - }() - - uiAddress := dependencies.Cfg.Subsystems.GuiAddress - if uiAddress == "" || uiAddress[0] == ':' { - uiAddress = "localhost" + uiAddress - } - log.Infof("GUI: http://%s", uiAddress) - eg.Go(web.ListenAndServe) - } - return eg.Wait() -} - -func GetCurioAPI(ctx *cli.Context) (api.Curio, jsonrpc.ClientCloser, error) { - addr, headers, err := cliutil.GetRawAPI(ctx, repo.Curio, "v0") - if err != nil { - return 
nil, nil, err - } - - u, err := url.Parse(addr) - if err != nil { - return nil, nil, xerrors.Errorf("parsing miner api URL: %w", err) - } - - switch u.Scheme { - case "ws": - u.Scheme = "http" - case "wss": - u.Scheme = "https" - } - - addr = u.String() - - return client.NewCurioRpc(ctx.Context, addr, headers) -} diff --git a/cmd/curio/run.go b/cmd/curio/run.go deleted file mode 100644 index c2c76328734..00000000000 --- a/cmd/curio/run.go +++ /dev/null @@ -1,200 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - "strings" - - "github.com/pkg/errors" - "github.com/urfave/cli/v2" - "go.opencensus.io/stats" - "golang.org/x/xerrors" - - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/cmd/curio/rpc" - "github.com/filecoin-project/lotus/cmd/curio/tasks" - "github.com/filecoin-project/lotus/curiosrc/market/lmrpc" - "github.com/filecoin-project/lotus/lib/ulimit" - "github.com/filecoin-project/lotus/metrics" - "github.com/filecoin-project/lotus/node" -) - -type stackTracer interface { - StackTrace() errors.StackTrace -} - -var runCmd = &cli.Command{ - Name: "run", - Usage: "Start a Curio process", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "listen", - Usage: "host address and port the worker api will listen on", - Value: "0.0.0.0:12300", - EnvVars: []string{"CURIO_LISTEN"}, - }, - &cli.StringFlag{ - Name: "gui-listen", - Usage: "host address and port the gui will listen on", - Hidden: true, - }, - &cli.BoolFlag{ - Name: "nosync", - Usage: "don't check full-node sync status", - }, - &cli.BoolFlag{ - Name: "halt-after-init", - Usage: "only run init, then return", - Hidden: true, - }, - &cli.BoolFlag{ - Name: "manage-fdlimit", - Usage: "manage open file limit", - Value: true, - }, - &cli.StringFlag{ - Name: "storage-json", - Usage: "path to json file containing storage config", - Value: "~/.curio/storage.json", - }, - &cli.StringFlag{ - Name: "journal", - Usage: "path to 
journal files", - Value: "~/.curio/", - }, - &cli.StringSliceFlag{ - Name: "layers", - Usage: "list of layers to be interpreted (atop defaults). Default: base", - EnvVars: []string{"CURIO_LAYERS"}, - Aliases: []string{"l", "layer"}, - }, - }, - Action: func(cctx *cli.Context) (err error) { - defer func() { - if err != nil { - if err, ok := err.(stackTracer); ok { - for _, f := range err.StackTrace() { - fmt.Printf("%+s:%d\n", f, f) - } - } - } - }() - if !cctx.Bool("enable-gpu-proving") { - err := os.Setenv("BELLMAN_NO_GPU", "true") - if err != nil { - return err - } - } - - if err := os.MkdirAll(os.TempDir(), 0755); err != nil { - log.Errorf("ensuring tempdir exists: %s", err) - } - - ctx := lcli.DaemonContext(cctx) - shutdownChan := make(chan struct{}) - { - var ctxclose func() - ctx, ctxclose = context.WithCancel(ctx) - go func() { - <-shutdownChan - ctxclose() - }() - } - // Register all metric views - /* - if err := view.Register( - metrics.MinerNodeViews..., - ); err != nil { - log.Fatalf("Cannot register the view: %v", err) - } - */ - // Set the metric to one so it is published to the exporter - stats.Record(ctx, metrics.LotusInfo.M(1)) - - if cctx.Bool("manage-fdlimit") { - if _, _, err := ulimit.ManageFdLimit(); err != nil { - log.Errorf("setting file descriptor limit: %s", err) - } - } - - dependencies := &deps.Deps{} - err = dependencies.PopulateRemainingDeps(ctx, cctx, true) - if err != nil { - return err - } - - go ffiSelfTest() // Panics on failure - - taskEngine, err := tasks.StartTasks(ctx, dependencies) - - if err != nil { - return nil - } - defer taskEngine.GracefullyTerminate() - - if err := lmrpc.ServeCurioMarketRPCFromConfig(dependencies.DB, dependencies.Full, dependencies.Cfg); err != nil { - return xerrors.Errorf("starting market RPCs: %w", err) - } - - err = rpc.ListenAndServe(ctx, dependencies, shutdownChan) // Monitor for shutdown. 
- if err != nil { - return err - } - - finishCh := node.MonitorShutdown(shutdownChan) //node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper}, - //node.ShutdownHandler{Component: "curio", StopFunc: stop}, - - <-finishCh - return nil - }, -} - -var layersFlag = &cli.StringSliceFlag{ - Name: "layers", - Usage: "list of layers to be interpreted (atop defaults). Default: base", -} - -var webCmd = &cli.Command{ - Name: "web", - Usage: "Start Curio web interface", - Description: `Start an instance of Curio web interface. - This creates the 'web' layer if it does not exist, then calls run with that layer.`, - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "gui-listen", - Usage: "Address to listen for the GUI on", - Value: "0.0.0.0:4701", - }, - &cli.BoolFlag{ - Name: "nosync", - Usage: "don't check full-node sync status", - }, - layersFlag, - }, - Action: func(cctx *cli.Context) error { - - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - - webtxt, err := getConfig(db, "web") - if err != nil || webtxt == "" { - - s := `[Susbystems] - EnableWebGui = true - ` - if err = setConfig(db, "web", s); err != nil { - return err - } - } - layers := append([]string{"web"}, cctx.StringSlice("layers")...) 
- err = cctx.Set("layers", strings.Join(layers, ",")) - if err != nil { - return err - } - return runCmd.Action(cctx) - }, -} diff --git a/cmd/curio/stop.go b/cmd/curio/stop.go deleted file mode 100644 index eb61a34fa4e..00000000000 --- a/cmd/curio/stop.go +++ /dev/null @@ -1,30 +0,0 @@ -package main - -import ( - _ "net/http/pprof" - - "github.com/urfave/cli/v2" - - lcli "github.com/filecoin-project/lotus/cli" -) - -var stopCmd = &cli.Command{ - Name: "stop", - Usage: "Stop a running Curio process", - Flags: []cli.Flag{}, - Action: func(cctx *cli.Context) error { - - api, closer, err := lcli.GetAPI(cctx) - if err != nil { - return err - } - defer closer() - - err = api.Shutdown(lcli.ReqContext(cctx)) - if err != nil { - return err - } - - return nil - }, -} diff --git a/cmd/curio/storage.go b/cmd/curio/storage.go deleted file mode 100644 index 2fa6d2d5291..00000000000 --- a/cmd/curio/storage.go +++ /dev/null @@ -1,499 +0,0 @@ -package main - -import ( - "fmt" - "math/bits" - "sort" - "strconv" - "strings" - "time" - - "github.com/docker/go-units" - "github.com/fatih/color" - "github.com/google/uuid" - "github.com/mitchellh/go-homedir" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/types" - lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/cmd/curio/rpc" - "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var storageCmd = &cli.Command{ - Name: "storage", - Usage: "manage sector storage", - Description: `Sectors can be stored across many filesystem paths. 
These -commands provide ways to manage the storage the miner will used to store sectors -long term for proving (references as 'store') as well as how sectors will be -stored while moving through the sealing pipeline (references as 'seal').`, - Subcommands: []*cli.Command{ - storageAttachCmd, - storageDetachCmd, - storageListCmd, - storageFindCmd, - /*storageDetachCmd, - storageRedeclareCmd, - storageCleanupCmd, - storageLocks,*/ - }, -} - -var storageAttachCmd = &cli.Command{ - Name: "attach", - Usage: "attach local storage path", - ArgsUsage: "[path]", - Description: `Storage can be attached to the miner using this command. The storage volume -list is stored local to the miner in storage.json set in curio run. We do not -recommend manually modifying this value without further understanding of the -storage system. - -Each storage volume contains a configuration file which describes the -capabilities of the volume. When the '--init' flag is provided, this file will -be created using the additional flags. 
- -Weight -A high weight value means data will be more likely to be stored in this path - -Seal -Data for the sealing process will be stored here - -Store -Finalized sectors that will be moved here for long term storage and be proven -over time - `, - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "init", - Usage: "initialize the path first", - }, - &cli.Uint64Flag{ - Name: "weight", - Usage: "(for init) path weight", - Value: 10, - }, - &cli.BoolFlag{ - Name: "seal", - Usage: "(for init) use path for sealing", - }, - &cli.BoolFlag{ - Name: "store", - Usage: "(for init) use path for long-term storage", - }, - &cli.StringFlag{ - Name: "max-storage", - Usage: "(for init) limit storage space for sectors (expensive for very large paths!)", - }, - &cli.StringSliceFlag{ - Name: "groups", - Usage: "path group names", - }, - &cli.StringSliceFlag{ - Name: "allow-to", - Usage: "path groups allowed to pull data from this path (allow all if not specified)", - }, - }, - Action: func(cctx *cli.Context) error { - minerApi, closer, err := rpc.GetCurioAPI(cctx) - if err != nil { - return err - } - - defer closer() - ctx := lcli.ReqContext(cctx) - - if cctx.NArg() != 1 { - return lcli.IncorrectNumArgs(cctx) - } - - p, err := homedir.Expand(cctx.Args().First()) - if err != nil { - return xerrors.Errorf("expanding path: %w", err) - } - - if cctx.Bool("init") { - var maxStor int64 - if cctx.IsSet("max-storage") { - maxStor, err = units.RAMInBytes(cctx.String("max-storage")) - if err != nil { - return xerrors.Errorf("parsing max-storage: %w", err) - } - } - - cfg := storiface.LocalStorageMeta{ - ID: storiface.ID(uuid.New().String()), - Weight: cctx.Uint64("weight"), - CanSeal: cctx.Bool("seal"), - CanStore: cctx.Bool("store"), - MaxStorage: uint64(maxStor), - Groups: cctx.StringSlice("groups"), - AllowTo: cctx.StringSlice("allow-to"), - } - - if !(cfg.CanStore || cfg.CanSeal) { - return xerrors.Errorf("must specify at least one of --store or --seal") - } - - if err := 
minerApi.StorageInit(ctx, p, cfg); err != nil { - return xerrors.Errorf("init storage: %w", err) - } - } - - return minerApi.StorageAddLocal(ctx, p) - }, -} - -var storageDetachCmd = &cli.Command{ - Name: "detach", - Usage: "detach local storage path", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "really-do-it", - }, - }, - ArgsUsage: "[path]", - Action: func(cctx *cli.Context) error { - minerApi, closer, err := rpc.GetCurioAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - if cctx.NArg() != 1 { - return lcli.IncorrectNumArgs(cctx) - } - - p, err := homedir.Expand(cctx.Args().First()) - if err != nil { - return xerrors.Errorf("expanding path: %w", err) - } - - if !cctx.Bool("really-do-it") { - return xerrors.Errorf("pass --really-do-it to execute the action") - } - - return minerApi.StorageDetachLocal(ctx, p) - }, -} - -var storageListCmd = &cli.Command{ - Name: "list", - Usage: "list local storage paths", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "local", - Usage: "only list local storage paths", - }, - }, - Subcommands: []*cli.Command{ - //storageListSectorsCmd, - }, - Action: func(cctx *cli.Context) error { - minerApi, closer, err := rpc.GetCurioAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - st, err := minerApi.StorageList(ctx) - if err != nil { - return err - } - - local, err := minerApi.StorageLocal(ctx) - if err != nil { - return err - } - - type fsInfo struct { - storiface.ID - sectors []storiface.Decl - stat fsutil.FsStat - } - - sorted := make([]fsInfo, 0, len(st)) - for id, decls := range st { - if cctx.Bool("local") { - if _, ok := local[id]; !ok { - continue - } - } - - st, err := minerApi.StorageStat(ctx, id) - if err != nil { - sorted = append(sorted, fsInfo{ID: id, sectors: decls}) - continue - } - - sorted = append(sorted, fsInfo{id, decls, st}) - } - - sort.Slice(sorted, func(i, j int) bool { - if sorted[i].stat.Capacity != sorted[j].stat.Capacity { 
- return sorted[i].stat.Capacity > sorted[j].stat.Capacity - } - return sorted[i].ID < sorted[j].ID - }) - - for _, s := range sorted { - - var cnt [5]int - for _, decl := range s.sectors { - for i := range cnt { - if decl.SectorFileType&(1< 98: - percCol = color.FgRed - case usedPercent > 90: - percCol = color.FgYellow - } - - set := (st.Capacity - st.FSAvailable) * barCols / st.Capacity - used := (st.Capacity - (st.FSAvailable + st.Reserved)) * barCols / st.Capacity - reserved := set - used - bar := safeRepeat("#", int(used)) + safeRepeat("*", int(reserved)) + safeRepeat(" ", int(barCols-set)) - - desc := "" - if st.Max > 0 { - desc = " (filesystem)" - } - - fmt.Printf("\t[%s] %s/%s %s%s\n", color.New(percCol).Sprint(bar), - types.SizeStr(types.NewInt(uint64(st.Capacity-st.FSAvailable))), - types.SizeStr(types.NewInt(uint64(st.Capacity))), - color.New(percCol).Sprintf("%d%%", usedPercent), desc) - } - - // optional configured limit bar - if st.Max > 0 { - usedPercent := st.Used * 100 / st.Max - - percCol := color.FgGreen - switch { - case usedPercent > 98: - percCol = color.FgRed - case usedPercent > 90: - percCol = color.FgYellow - } - - set := st.Used * barCols / st.Max - used := (st.Used + st.Reserved) * barCols / st.Max - reserved := set - used - bar := safeRepeat("#", int(used)) + safeRepeat("*", int(reserved)) + safeRepeat(" ", int(barCols-set)) - - fmt.Printf("\t[%s] %s/%s %s (limit)\n", color.New(percCol).Sprint(bar), - types.SizeStr(types.NewInt(uint64(st.Used))), - types.SizeStr(types.NewInt(uint64(st.Max))), - color.New(percCol).Sprintf("%d%%", usedPercent)) - } - - fmt.Printf("\t%s; %s; %s; %s; %s; Reserved: %s\n", - color.YellowString("Unsealed: %d", cnt[0]), - color.GreenString("Sealed: %d", cnt[1]), - color.BlueString("Caches: %d", cnt[2]), - color.GreenString("Updated: %d", cnt[3]), - color.BlueString("Update-caches: %d", cnt[4]), - types.SizeStr(types.NewInt(uint64(st.Reserved)))) - - si, err := minerApi.StorageInfo(ctx, s.ID) - if err != nil { - 
return err - } - - fmt.Print("\t") - if si.CanSeal || si.CanStore { - fmt.Printf("Weight: %d; Use: ", si.Weight) - if si.CanSeal { - fmt.Print(color.MagentaString("Seal ")) - } - if si.CanStore { - fmt.Print(color.CyanString("Store")) - } - } else { - fmt.Print(color.HiYellowString("Use: ReadOnly")) - } - fmt.Println() - - if len(si.Groups) > 0 { - fmt.Printf("\tGroups: %s\n", strings.Join(si.Groups, ", ")) - } - if len(si.AllowTo) > 0 { - fmt.Printf("\tAllowTo: %s\n", strings.Join(si.AllowTo, ", ")) - } - - if len(si.AllowTypes) > 0 || len(si.DenyTypes) > 0 { - denied := storiface.FTAll.SubAllowed(si.AllowTypes, si.DenyTypes) - allowed := storiface.FTAll ^ denied - - switch { - case bits.OnesCount64(uint64(allowed)) == 0: - fmt.Printf("\tAllow Types: %s\n", color.RedString("None")) - case bits.OnesCount64(uint64(allowed)) < bits.OnesCount64(uint64(denied)): - fmt.Printf("\tAllow Types: %s\n", color.GreenString(strings.Join(allowed.Strings(), " "))) - default: - fmt.Printf("\tDeny Types: %s\n", color.RedString(strings.Join(denied.Strings(), " "))) - } - } - - if localPath, ok := local[s.ID]; ok { - fmt.Printf("\tLocal: %s\n", color.GreenString(localPath)) - } - for i, l := range si.URLs { - var rtt string - if _, ok := local[s.ID]; !ok && i == 0 { - rtt = " (latency: " + ping.Truncate(time.Microsecond*100).String() + ")" - } - - fmt.Printf("\tURL: %s%s\n", l, rtt) // TODO; try pinging maybe?? print latency? 
- } - fmt.Println() - } - - return nil - }, -} - -type storedSector struct { - id storiface.ID - store storiface.SectorStorageInfo - types map[storiface.SectorFileType]bool -} - -var storageFindCmd = &cli.Command{ - Name: "find", - Usage: "find sector in the storage system", - ArgsUsage: "[miner address] [sector number]", - Action: func(cctx *cli.Context) error { - minerApi, closer, err := rpc.GetCurioAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := lcli.ReqContext(cctx) - - if cctx.NArg() != 2 { - return lcli.IncorrectNumArgs(cctx) - } - - maddr := cctx.Args().First() - ma, err := address.NewFromString(maddr) - if err != nil { - return xerrors.Errorf("parsing miner address: %w", err) - } - - mid, err := address.IDFromAddress(ma) - if err != nil { - return err - } - - if !cctx.Args().Present() { - return xerrors.New("Usage: lotus-miner storage find [sector number]") - } - - snum, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64) - if err != nil { - return err - } - - sid := abi.SectorID{ - Miner: abi.ActorID(mid), - Number: abi.SectorNumber(snum), - } - - sectorTypes := []storiface.SectorFileType{ - storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache, storiface.FTUpdate, storiface.FTUpdateCache, - } - - byId := make(map[storiface.ID]*storedSector) - for _, sectorType := range sectorTypes { - infos, err := minerApi.StorageFindSector(ctx, sid, sectorType, 0, false) - if err != nil { - return xerrors.Errorf("finding sector type %d: %w", sectorType, err) - } - - for _, info := range infos { - sts, ok := byId[info.ID] - if !ok { - sts = &storedSector{ - id: info.ID, - store: info, - types: make(map[storiface.SectorFileType]bool), - } - byId[info.ID] = sts - } - sts.types[sectorType] = true - } - } - - local, err := minerApi.StorageLocal(ctx) - if err != nil { - return err - } - - var out []*storedSector - for _, sector := range byId { - out = append(out, sector) - } - sort.Slice(out, func(i, j int) bool { - return out[i].id < out[j].id - 
}) - - for _, info := range out { - var types []string - for sectorType, present := range info.types { - if present { - types = append(types, sectorType.String()) - } - } - sort.Strings(types) // Optional: Sort types for consistent output - fmt.Printf("In %s (%s)\n", info.id, strings.Join(types, ", ")) - fmt.Printf("\tSealing: %t; Storage: %t\n", info.store.CanSeal, info.store.CanStore) - if localPath, ok := local[info.id]; ok { - fmt.Printf("\tLocal (%s)\n", localPath) - } else { - fmt.Printf("\tRemote\n") - } - for _, l := range info.store.URLs { - fmt.Printf("\tURL: %s\n", l) - } - } - - return nil - }, -} diff --git a/cmd/curio/tasks/tasks.go b/cmd/curio/tasks/tasks.go deleted file mode 100644 index 0c2745cf159..00000000000 --- a/cmd/curio/tasks/tasks.go +++ /dev/null @@ -1,248 +0,0 @@ -// Package tasks contains tasks that can be run by the curio command. -package tasks - -import ( - "context" - "sort" - "strings" - "time" - - logging "github.com/ipfs/go-log/v2" - "github.com/samber/lo" - "golang.org/x/exp/maps" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - curio "github.com/filecoin-project/lotus/curiosrc" - "github.com/filecoin-project/lotus/curiosrc/alertmanager" - "github.com/filecoin-project/lotus/curiosrc/chainsched" - "github.com/filecoin-project/lotus/curiosrc/ffi" - "github.com/filecoin-project/lotus/curiosrc/gc" - "github.com/filecoin-project/lotus/curiosrc/message" - "github.com/filecoin-project/lotus/curiosrc/piece" - "github.com/filecoin-project/lotus/curiosrc/seal" - "github.com/filecoin-project/lotus/curiosrc/winning" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/lazy" - "github.com/filecoin-project/lotus/lib/must" - "github.com/filecoin-project/lotus/node/modules" - "github.com/filecoin-project/lotus/node/modules/dtypes" -) - -var log = logging.Logger("curio/deps") - -func StartTasks(ctx context.Context, 
dependencies *deps.Deps) (*harmonytask.TaskEngine, error) { - cfg := dependencies.Cfg - db := dependencies.DB - full := dependencies.Full - verif := dependencies.Verif - as := dependencies.As - maddrs := dependencies.Maddrs - stor := dependencies.Stor - lstor := dependencies.LocalStore - si := dependencies.Si - var activeTasks []harmonytask.TaskInterface - - sender, sendTask := message.NewSender(full, full, db) - activeTasks = append(activeTasks, sendTask) - - chainSched := chainsched.New(full) - - var needProofParams bool - - /////////////////////////////////////////////////////////////////////// - ///// Task Selection - /////////////////////////////////////////////////////////////////////// - { - // PoSt - - if cfg.Subsystems.EnableWindowPost { - wdPostTask, wdPoStSubmitTask, derlareRecoverTask, err := curio.WindowPostScheduler( - ctx, cfg.Fees, cfg.Proving, full, verif, sender, chainSched, - as, maddrs, db, stor, si, cfg.Subsystems.WindowPostMaxTasks) - - if err != nil { - return nil, err - } - activeTasks = append(activeTasks, wdPostTask, wdPoStSubmitTask, derlareRecoverTask) - needProofParams = true - } - - if cfg.Subsystems.EnableWinningPost { - pl := dependencies.LocalStore - winPoStTask := winning.NewWinPostTask(cfg.Subsystems.WinningPostMaxTasks, db, pl, verif, full, maddrs) - activeTasks = append(activeTasks, winPoStTask) - needProofParams = true - } - } - - slrLazy := lazy.MakeLazy(func() (*ffi.SealCalls, error) { - return ffi.NewSealCalls(stor, lstor, si), nil - }) - - { - // Piece handling - if cfg.Subsystems.EnableParkPiece { - parkPieceTask, err := piece.NewParkPieceTask(db, must.One(slrLazy.Val()), cfg.Subsystems.ParkPieceMaxTasks) - if err != nil { - return nil, err - } - cleanupPieceTask := piece.NewCleanupPieceTask(db, must.One(slrLazy.Val()), 0) - activeTasks = append(activeTasks, parkPieceTask, cleanupPieceTask) - } - } - - hasAnySealingTask := cfg.Subsystems.EnableSealSDR || - cfg.Subsystems.EnableSealSDRTrees || - 
cfg.Subsystems.EnableSendPrecommitMsg || - cfg.Subsystems.EnablePoRepProof || - cfg.Subsystems.EnableMoveStorage || - cfg.Subsystems.EnableSendCommitMsg - { - // Sealing - - var sp *seal.SealPoller - var slr *ffi.SealCalls - if hasAnySealingTask { - sp = seal.NewPoller(db, full) - go sp.RunPoller(ctx) - - slr = must.One(slrLazy.Val()) - } - - // NOTE: Tasks with the LEAST priority are at the top - if cfg.Subsystems.EnableSealSDR { - sdrTask := seal.NewSDRTask(full, db, sp, slr, cfg.Subsystems.SealSDRMaxTasks) - activeTasks = append(activeTasks, sdrTask) - } - if cfg.Subsystems.EnableSealSDRTrees { - treeDTask := seal.NewTreeDTask(sp, db, slr, cfg.Subsystems.SealSDRTreesMaxTasks) - treeRCTask := seal.NewTreeRCTask(sp, db, slr, cfg.Subsystems.SealSDRTreesMaxTasks) - finalizeTask := seal.NewFinalizeTask(cfg.Subsystems.FinalizeMaxTasks, sp, slr, db) - activeTasks = append(activeTasks, treeDTask, treeRCTask, finalizeTask) - } - if cfg.Subsystems.EnableSendPrecommitMsg { - precommitTask := seal.NewSubmitPrecommitTask(sp, db, full, sender, as, cfg.Fees.MaxPreCommitGasFee) - activeTasks = append(activeTasks, precommitTask) - } - if cfg.Subsystems.EnablePoRepProof { - porepTask := seal.NewPoRepTask(db, full, sp, slr, cfg.Subsystems.PoRepProofMaxTasks) - activeTasks = append(activeTasks, porepTask) - needProofParams = true - } - if cfg.Subsystems.EnableMoveStorage { - moveStorageTask := seal.NewMoveStorageTask(sp, slr, db, cfg.Subsystems.MoveStorageMaxTasks) - activeTasks = append(activeTasks, moveStorageTask) - } - if cfg.Subsystems.EnableSendCommitMsg { - commitTask := seal.NewSubmitCommitTask(sp, db, full, sender, as, cfg) - activeTasks = append(activeTasks, commitTask) - } - } - - if hasAnySealingTask { - // Sealing nodes maintain storage index when bored - storageEndpointGcTask := gc.NewStorageEndpointGC(si, stor, db) - activeTasks = append(activeTasks, storageEndpointGcTask) - } - - amTask := alertmanager.NewAlertTask(full, db, cfg.Alerting) - activeTasks = 
append(activeTasks, amTask) - - if needProofParams { - for spt := range dependencies.ProofTypes { - if err := modules.GetParams(true)(spt); err != nil { - return nil, xerrors.Errorf("getting params: %w", err) - } - } - } - - minerAddresses := make([]string, 0, len(maddrs)) - for k := range maddrs { - minerAddresses = append(minerAddresses, address.Address(k).String()) - } - - log.Infow("This Curio instance handles", - "miner_addresses", minerAddresses, - "tasks", lo.Map(activeTasks, func(t harmonytask.TaskInterface, _ int) string { return t.TypeDetails().Name })) - - // harmony treats the first task as highest priority, so reverse the order - // (we could have just appended to this list in the reverse order, but defining - // tasks in pipeline order is more intuitive) - activeTasks = lo.Reverse(activeTasks) - - ht, err := harmonytask.New(db, activeTasks, dependencies.ListenAddr) - if err != nil { - return nil, err - } - go machineDetails(dependencies, activeTasks, ht.ResourcesAvailable().MachineID) - - if hasAnySealingTask { - watcher, err := message.NewMessageWatcher(db, ht, chainSched, full) - if err != nil { - return nil, err - } - _ = watcher - } - - if cfg.Subsystems.EnableWindowPost || hasAnySealingTask { - go chainSched.Run(ctx) - } - - return ht, nil -} - -func machineDetails(deps *deps.Deps, activeTasks []harmonytask.TaskInterface, machineID int) { - taskNames := lo.Map(activeTasks, func(item harmonytask.TaskInterface, _ int) string { - return item.TypeDetails().Name - }) - - miners := lo.Map(maps.Keys(deps.Maddrs), func(item dtypes.MinerAddress, _ int) string { - return address.Address(item).String() - }) - sort.Strings(miners) - - _, err := deps.DB.Exec(context.Background(), `INSERT INTO harmony_machine_details - (tasks, layers, startup_time, miners, machine_id) VALUES ($1, $2, $3, $4, $5) - ON CONFLICT (machine_id) DO UPDATE SET tasks=$1, layers=$2, startup_time=$3, miners=$4`, - strings.Join(taskNames, ","), strings.Join(deps.Layers, ","), - 
time.Now(), strings.Join(miners, ","), machineID) - - if err != nil { - log.Errorf("failed to update machine details: %s", err) - return - } - - // maybePostWarning - if !lo.Contains(taskNames, "WdPost") && !lo.Contains(taskNames, "WinPost") { - // Maybe we aren't running a PoSt for these miners? - var allMachines []struct { - MachineID int `db:"machine_id"` - Miners string `db:"miners"` - Tasks string `db:"tasks"` - } - err := deps.DB.Select(context.Background(), &allMachines, `SELECT machine_id, miners, tasks FROM harmony_machine_details`) - if err != nil { - log.Errorf("failed to get machine details: %s", err) - return - } - - for _, miner := range miners { - var myPostIsHandled bool - for _, m := range allMachines { - if !lo.Contains(strings.Split(m.Miners, ","), miner) { - continue - } - if lo.Contains(strings.Split(m.Tasks, ","), "WdPost") && lo.Contains(strings.Split(m.Tasks, ","), "WinPost") { - myPostIsHandled = true - break - } - } - if !myPostIsHandled { - log.Errorf("No PoSt tasks are running for miner %s. 
Start handling PoSts immediately with:\n\tcurio run --layers=\"post\" ", miner) - } - } - } -} diff --git a/curiosrc/address.go b/curiosrc/address.go deleted file mode 100644 index 6d1738f2dc3..00000000000 --- a/curiosrc/address.go +++ /dev/null @@ -1,64 +0,0 @@ -package curio - -import ( - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/curiosrc/multictladdr" - "github.com/filecoin-project/lotus/node/config" -) - -func AddressSelector(addrConf []config.CurioAddresses) func() (*multictladdr.MultiAddressSelector, error) { - return func() (*multictladdr.MultiAddressSelector, error) { - as := &multictladdr.MultiAddressSelector{ - MinerMap: make(map[address.Address]api.AddressConfig), - } - if addrConf == nil { - return as, nil - } - - for _, addrConf := range addrConf { - for _, minerID := range addrConf.MinerAddresses { - tmp := api.AddressConfig{ - DisableOwnerFallback: addrConf.DisableOwnerFallback, - DisableWorkerFallback: addrConf.DisableWorkerFallback, - } - - for _, s := range addrConf.PreCommitControl { - addr, err := address.NewFromString(s) - if err != nil { - return nil, xerrors.Errorf("parsing precommit control address: %w", err) - } - - tmp.PreCommitControl = append(tmp.PreCommitControl, addr) - } - - for _, s := range addrConf.CommitControl { - addr, err := address.NewFromString(s) - if err != nil { - return nil, xerrors.Errorf("parsing commit control address: %w", err) - } - - tmp.CommitControl = append(tmp.CommitControl, addr) - } - - for _, s := range addrConf.TerminateControl { - addr, err := address.NewFromString(s) - if err != nil { - return nil, xerrors.Errorf("parsing terminate control address: %w", err) - } - - tmp.TerminateControl = append(tmp.TerminateControl, addr) - } - a, err := address.NewFromString(minerID) - if err != nil { - return nil, xerrors.Errorf("parsing miner address %s: %w", minerID, err) - } - as.MinerMap[a] = tmp - } - } - 
return as, nil - } -} diff --git a/curiosrc/alertmanager/alerts.go b/curiosrc/alertmanager/alerts.go deleted file mode 100644 index f885ca219fa..00000000000 --- a/curiosrc/alertmanager/alerts.go +++ /dev/null @@ -1,573 +0,0 @@ -package alertmanager - -import ( - "bytes" - "database/sql" - "fmt" - "math" - "strings" - "time" - - "github.com/BurntSushi/toml" - "github.com/dustin/go-humanize" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/node/config" -) - -// balanceCheck retrieves the machine details from the database and performs balance checks on unique addresses. -// It populates the alert map with any errors encountered during the process and with any alerts related to low wallet balance and missing wallets. -// The alert map key is "Balance Check". -// It queries the database for the configuration of each layer and decodes it using the toml.Decode function. -// It then iterates over the addresses in the configuration and curates a list of unique addresses. -// If an address is not found in the chain node, it adds an alert to the alert map. -// If the balance of an address is below MinimumWalletBalance, it adds an alert to the alert map. -// If there are any errors encountered during the process, the err field of the alert map is populated. 
-func balanceCheck(al *alerts) { - Name := "Balance Check" - al.alertMap[Name] = &alertOut{} - - var ret string - - uniqueAddrs, _, err := al.getAddresses() - if err != nil { - al.alertMap[Name].err = err - return - } - - for _, addrStr := range uniqueAddrs { - addr, err := address.NewFromString(addrStr) - if err != nil { - al.alertMap[Name].err = xerrors.Errorf("failed to parse address: %w", err) - return - } - - has, err := al.api.WalletHas(al.ctx, addr) - if err != nil { - al.alertMap[Name].err = err - return - } - - if !has { - ret += fmt.Sprintf("Wallet %s was not found in chain node. ", addrStr) - } - - balance, err := al.api.WalletBalance(al.ctx, addr) - if err != nil { - al.alertMap[Name].err = err - } - - if abi.TokenAmount(al.cfg.MinimumWalletBalance).GreaterThanEqual(balance) { - ret += fmt.Sprintf("Balance for wallet %s is below 5 Fil. ", addrStr) - } - } - if ret != "" { - al.alertMap[Name].alertString = ret - } - return -} - -// taskFailureCheck retrieves the task failure counts from the database for a specific time period. -// It then checks for specific sealing tasks and tasks with more than 5 failures to generate alerts. 
-func taskFailureCheck(al *alerts) { - Name := "TaskFailures" - al.alertMap[Name] = &alertOut{} - - type taskFailure struct { - Machine string `db:"completed_by_host_and_port"` - Name string `db:"name"` - Failures int `db:"failed_count"` - } - - var taskFailures []taskFailure - - err := al.db.Select(al.ctx, &taskFailures, ` - SELECT completed_by_host_and_port, name, COUNT(*) AS failed_count - FROM harmony_task_history - WHERE result = FALSE - AND work_end >= NOW() - $1::interval - GROUP BY completed_by_host_and_port, name - ORDER BY completed_by_host_and_port, name;`, fmt.Sprintf("%f Minutes", AlertMangerInterval.Minutes())) - if err != nil { - al.alertMap[Name].err = xerrors.Errorf("getting failed task count: %w", err) - return - } - - mmap := make(map[string]int) - tmap := make(map[string]int) - - if len(taskFailures) > 0 { - for _, tf := range taskFailures { - _, ok := tmap[tf.Name] - if !ok { - tmap[tf.Name] = tf.Failures - } else { - tmap[tf.Name] += tf.Failures - } - _, ok = mmap[tf.Machine] - if !ok { - mmap[tf.Machine] = tf.Failures - } else { - mmap[tf.Machine] += tf.Failures - } - } - } - - sealingTasks := []string{"SDR", "TreeD", "TreeRC", "PreCommitSubmit", "PoRep", "Finalize", "MoveStorage", "CommitSubmit", "WdPost", "ParkPiece"} - contains := func(s []string, e string) bool { - for _, a := range s { - if a == e { - return true - } - } - return false - } - - // Alerts for any sealing pipeline failures. Other tasks should have at least 5 failures for an alert - for name, count := range tmap { - if contains(sealingTasks, name) { - al.alertMap[Name].alertString += fmt.Sprintf("Task: %s, Failures: %d. ", name, count) - } - if count > 5 { - al.alertMap[Name].alertString += fmt.Sprintf("Task: %s, Failures: %d. ", name, count) - } - } - - // Alert if a machine failed more than 5 tasks - for name, count := range tmap { - if count > 5 { - al.alertMap[Name].alertString += fmt.Sprintf("Machine: %s, Failures: %d. 
", name, count) - } - } - - return -} - -// permanentStorageCheck retrieves the storage details from the database and checks if there is sufficient space for sealing sectors. -// It queries the database for the available storage for all storage paths that can store data. -// It queries the database for sectors being sealed that have not been finalized yet. -// For each sector, it calculates the required space for sealing based on the sector size. -// It checks if there is enough available storage for each sector and updates the sectorMap accordingly. -// If any sectors are unaccounted for, it calculates the total missing space and adds an alert to the alert map. -func permanentStorageCheck(al *alerts) { - Name := "PermanentStorageSpace" - al.alertMap[Name] = &alertOut{} - // Get all storage path for permanent storages - type storage struct { - ID string `db:"storage_id"` - Available int64 `db:"available"` - } - - var storages []storage - - err := al.db.Select(al.ctx, &storages, ` - SELECT storage_id, available - FROM storage_path - WHERE can_store = TRUE;`) - if err != nil { - al.alertMap[Name].err = xerrors.Errorf("getting storage details: %w", err) - return - } - - type sector struct { - Miner abi.ActorID `db:"sp_id"` - Number abi.SectorNumber `db:"sector_number"` - Proof abi.RegisteredSealProof `db:"reg_seal_proof"` - } - - var sectors []sector - - err = al.db.Select(al.ctx, §ors, ` - SELECT sp_id, sector_number, reg_seal_proof - FROM sectors_sdr_pipeline - WHERE after_move_storage = FALSE;`) - if err != nil { - al.alertMap[Name].err = xerrors.Errorf("getting sectors being sealed: %w", err) - return - } - - type sm struct { - s sector - size int64 - } - - sectorMap := make(map[sm]bool) - - for _, sec := range sectors { - space := int64(0) - sec := sec - sectorSize, err := sec.Proof.SectorSize() - if err != nil { - space = int64(64<<30)*2 + int64(200<<20) // Assume 64 GiB sector - } else { - space = int64(sectorSize)*2 + int64(200<<20) // sealed + unsealed + 
cache - } - - key := sm{s: sec, size: space} - - sectorMap[key] = false - - for _, strg := range storages { - if space > strg.Available { - strg.Available -= space - sectorMap[key] = true - } - } - } - - missingSpace := big.NewInt(0) - for sec, accounted := range sectorMap { - if !accounted { - big.Add(missingSpace, big.NewInt(sec.size)) - } - } - - if missingSpace.GreaterThan(big.NewInt(0)) { - al.alertMap[Name].alertString = fmt.Sprintf("Insufficient storage space for sealing sectors. Additional %s required.", humanize.Bytes(missingSpace.Uint64())) - } -} - -// getAddresses retrieves machine details from the database, stores them in an array and compares layers for uniqueness. -// It employs addrMap to handle unique addresses, and generated slices for configuration fields and MinerAddresses. -// The function iterates over layers, storing decoded configuration and verifying address existence in addrMap. -// It ends by returning unique addresses and miner slices. -func (al *alerts) getAddresses() ([]string, []string, error) { - // MachineDetails represents the structure of data received from the SQL query. - type machineDetail struct { - ID int - HostAndPort string - Layers string - } - var machineDetails []machineDetail - - // Get all layers in use - err := al.db.Select(al.ctx, &machineDetails, ` - SELECT m.id, m.host_and_port, d.layers - FROM harmony_machines m - LEFT JOIN harmony_machine_details d ON m.id = d.machine_id;`) - if err != nil { - return nil, nil, xerrors.Errorf("getting config layers for all machines: %w", err) - } - - // UniqueLayers takes an array of MachineDetails and returns a slice of unique layers. 
- - layerMap := make(map[string]bool) - var uniqueLayers []string - - // Get unique layers in use - for _, machine := range machineDetails { - machine := machine - // Split the Layers field into individual layers - layers := strings.Split(machine.Layers, ",") - for _, layer := range layers { - layer = strings.TrimSpace(layer) - if _, exists := layerMap[layer]; !exists && layer != "" { - layerMap[layer] = true - uniqueLayers = append(uniqueLayers, layer) - } - } - } - - addrMap := make(map[string]bool) - var uniqueAddrs []string - var miners []string - - // Get all unique addresses - for _, layer := range uniqueLayers { - text := "" - cfg := config.DefaultCurioConfig() - err := al.db.QueryRow(al.ctx, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text) - if err != nil { - if strings.Contains(err.Error(), sql.ErrNoRows.Error()) { - return nil, nil, xerrors.Errorf("missing layer '%s' ", layer) - } - return nil, nil, fmt.Errorf("could not read layer '%s': %w", layer, err) - } - - _, err = toml.Decode(text, cfg) - if err != nil { - return nil, nil, fmt.Errorf("could not read layer, bad toml %s: %w", layer, err) - } - - for i := range cfg.Addresses { - prec := cfg.Addresses[i].PreCommitControl - com := cfg.Addresses[i].CommitControl - term := cfg.Addresses[i].TerminateControl - miner := cfg.Addresses[i].MinerAddresses - if prec != nil { - for j := range prec { - if _, ok := addrMap[prec[j]]; !ok && prec[j] != "" { - addrMap[prec[j]] = true - uniqueAddrs = append(uniqueAddrs, prec[j]) - } - } - } - if com != nil { - for j := range com { - if _, ok := addrMap[com[j]]; !ok && com[j] != "" { - addrMap[com[j]] = true - uniqueAddrs = append(uniqueAddrs, com[j]) - } - } - } - if term != nil { - for j := range term { - if _, ok := addrMap[term[j]]; !ok && term[j] != "" { - addrMap[term[j]] = true - uniqueAddrs = append(uniqueAddrs, term[j]) - } - } - } - if miner != nil { - for j := range miner { - if _, ok := addrMap[miner[j]]; !ok && miner[j] != "" { - 
addrMap[miner[j]] = true - miners = append(miners, miner[j]) - } - } - } - } - } - return uniqueAddrs, miners, nil -} - -func wdPostCheck(al *alerts) { - Name := "WindowPost" - al.alertMap[Name] = &alertOut{} - head, err := al.api.ChainHead(al.ctx) - if err != nil { - al.alertMap[Name].err = err - return - } - - from := head.Height() - abi.ChainEpoch(math.Ceil(AlertMangerInterval.Seconds()/float64(build.BlockDelaySecs))) - 1 - if from < 0 { - from = 0 - } - - log.Infof("ALERTMANAGER: FROM: %d", from) - - _, miners, err := al.getAddresses() - if err != nil { - al.alertMap[Name].err = err - return - } - - h := head - - type partSent struct { - sent bool - parts int - } - - msgCheck := make(map[address.Address]map[uint64]*partSent) - - for h.Height() >= from { - for _, minerStr := range miners { - maddr, err := address.NewFromString(minerStr) - if err != nil { - al.alertMap[Name].err = err - return - } - deadlineInfo, err := al.api.StateMinerProvingDeadline(al.ctx, maddr, h.Key()) - if err != nil { - al.alertMap[Name].err = xerrors.Errorf("getting miner deadline: %w", err) - return - } - partitions, err := al.api.StateMinerPartitions(al.ctx, maddr, deadlineInfo.Index, h.Key()) - if err != nil { - al.alertMap[Name].err = xerrors.Errorf("getting miner partitions: %w", err) - return - } - if _, ok := msgCheck[maddr]; !ok { - msgCheck[maddr] = make(map[uint64]*partSent) - } - if _, ok := msgCheck[maddr][deadlineInfo.Index]; !ok { - msgCheck[maddr][deadlineInfo.Index] = &partSent{ - sent: false, - parts: len(partitions), - } - } - } - h, err = al.api.ChainGetTipSet(al.ctx, h.Parents()) - if err != nil { - al.alertMap[Name].err = err - return - } - } - - for maddr, deadlines := range msgCheck { - for deadlineIndex, ps := range deadlines { - log.Infof("ALERTMANAGER: Address: %s, DEADLINE: %d, Partitions: %d", maddr.String(), deadlineIndex, ps.parts) - } - } - - var wdDetails []struct { - Miner int64 `db:"sp_id"` - Deadline int64 `db:"deadline"` - Partition int64 
`db:"partition"` - Epoch abi.ChainEpoch `db:"submit_at_epoch"` - Proof []byte `db:"proof_params"` - } - - err = al.db.Select(al.ctx, &wdDetails, ` - SELECT sp_id, submit_at_epoch, proof_params, partition, deadline - FROM wdpost_proofs - WHERE submit_at_epoch > $1;`, from) - if err != nil { - al.alertMap[Name].err = xerrors.Errorf("getting windowPost details from database: %w", err) - return - } - - if len(wdDetails) < 1 { - return - } - - for _, detail := range wdDetails { - addr, err := address.NewIDAddress(uint64(detail.Miner)) - if err != nil { - al.alertMap[Name].err = xerrors.Errorf("getting miner address: %w", err) - return - } - if _, ok := msgCheck[addr][uint64(detail.Deadline)]; !ok { - al.alertMap[Name].alertString += fmt.Sprintf("unknown WindowPost jobs for miner %s deadline %d partition %d found. ", addr.String(), detail.Deadline, detail.Partition) - continue - } - msgCheck[addr][uint64(detail.Deadline)].sent = true - - var postOut miner.SubmitWindowedPoStParams - err = postOut.UnmarshalCBOR(bytes.NewReader(detail.Proof)) - if err != nil { - al.alertMap[Name].err = xerrors.Errorf("unmarshaling windowPost proof params: %w", err) - return - } - - for i := range postOut.Partitions { - c, err := postOut.Partitions[i].Skipped.Count() - if err != nil { - al.alertMap[Name].err = xerrors.Errorf("getting skipped sector count: %w", err) - return - } - if c > 0 { - al.alertMap[Name].alertString += fmt.Sprintf("Skipped %d sectors in deadline %d partition %d. ", c, postOut.Deadline, postOut.Partitions[i].Index) - } - } - } - - for maddr, deadlines := range msgCheck { - for deadlineIndex, ps := range deadlines { - if !ps.sent { - al.alertMap[Name].alertString += fmt.Sprintf("No WindowPost jobs found for miner %s deadline %d. 
", maddr.String(), deadlineIndex) - } - } - } -} - -func wnPostCheck(al *alerts) { - Name := "WinningPost" - al.alertMap[Name] = &alertOut{} - head, err := al.api.ChainHead(al.ctx) - if err != nil { - al.alertMap[Name].err = err - return - } - - from := head.Height() - abi.ChainEpoch(math.Ceil(AlertMangerInterval.Seconds()/float64(build.BlockDelaySecs))) - 1 - if from < 0 { - from = 0 - } - - var wnDetails []struct { - Miner int64 `db:"sp_id"` - Block string `db:"mined_cid"` - Epoch abi.ChainEpoch `db:"epoch"` - } - - err = al.db.Select(al.ctx, &wnDetails, ` - SELECT sp_id, mined_cid, epoch - FROM mining_tasks - WHERE epoch > $1 AND won = TRUE - ORDER BY epoch;`, from) - if err != nil { - al.alertMap[Name].err = xerrors.Errorf("getting winningPost details from database: %w", err) - return - } - - var count []int64 - err = al.db.Select(al.ctx, &count, ` - SELECT COUNT(*) - FROM mining_tasks - WHERE epoch > $1;`, from) - if err != nil { - al.alertMap[Name].err = xerrors.Errorf("getting winningPost count details from database: %w", err) - return - } - - if count[0] == 0 { - al.alertMap[Name].alertString += "No winningPost tasks found in the last " + humanize.Time(time.Now().Add(-AlertMangerInterval)) - return - } - - epochs := int64(math.Ceil(AlertMangerInterval.Seconds() / float64(build.BlockDelaySecs))) - if (head.Height() - abi.ChainEpoch(epochs)) < 0 { - epochs = int64(head.Height()) - } - - if epochs != count[0]+1 && epochs != count[0]-1 && epochs != count[0] { - al.alertMap[Name].alertString += fmt.Sprintf("Expected %d WinningPost task and found %d in DB ", epochs, count[0]) - } - - if len(wnDetails) < 1 { - return - } - - to := wnDetails[len(wnDetails)-1].Epoch - - epochMap := make(map[abi.ChainEpoch]string) - - for head.Height() >= to { - epochMap[head.Height()] = head.String() - head, err = al.api.ChainGetTipSet(al.ctx, head.Parents()) - if err != nil { - al.alertMap[Name].err = xerrors.Errorf("getting tipset: %w", err) - } - if head == nil { - 
al.alertMap[Name].err = xerrors.Errorf("tipset is nil") - return - } - if head.Height() == 0 { - break - } - } - - winMap := make(map[abi.ChainEpoch]struct { - won bool - cid string - }) - - for _, wn := range wnDetails { - if strings.Contains(epochMap[wn.Epoch], wn.Block) { - winMap[wn.Epoch] = struct { - won bool - cid string - }{won: true, cid: wn.Block} - continue - } - winMap[wn.Epoch] = struct { - won bool - cid string - }{won: false, cid: wn.Block} - } - - for epoch, st := range winMap { - if !st.won { - al.alertMap[Name].alertString += fmt.Sprintf("Epoch %d: does not contain our block %s", epoch, st.cid) - } - } -} diff --git a/curiosrc/alertmanager/task_alert.go b/curiosrc/alertmanager/task_alert.go deleted file mode 100644 index 6cf9c053d93..00000000000 --- a/curiosrc/alertmanager/task_alert.go +++ /dev/null @@ -1,234 +0,0 @@ -// Nobody associated with this software's development has any business relationship to pagerduty. -// This is provided as a convenient trampoline to SP's alert system of choice. 
- -package alertmanager - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "time" - - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/dline" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/storage/ctladdr" -) - -const AlertMangerInterval = time.Hour - -var log = logging.Logger("curio/alertmanager") - -type AlertAPI interface { - ctladdr.NodeApi - ChainHead(context.Context) (*types.TipSet, error) - ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) - StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (api.MinerInfo, error) - StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) - StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]api.Partition, error) -} - -type AlertTask struct { - api AlertAPI - cfg config.CurioAlerting - db *harmonydb.DB -} - -type alertOut struct { - err error - alertString string -} - -type alerts struct { - ctx context.Context - api AlertAPI - db *harmonydb.DB - cfg config.CurioAlerting - alertMap map[string]*alertOut -} - -type pdPayload struct { - Summary string `json:"summary"` - Severity string `json:"severity"` - Source string `json:"source"` - Component string `json:"component,omitempty"` - Group string `json:"group,omitempty"` - Class string `json:"class,omitempty"` - CustomDetails interface{} `json:"custom_details,omitempty"` -} - -type alertFunc func(al *alerts) - -var alertFuncs = []alertFunc{ - balanceCheck, - taskFailureCheck, - permanentStorageCheck, - wdPostCheck, 
- wnPostCheck, -} - -func NewAlertTask(api AlertAPI, db *harmonydb.DB, alertingCfg config.CurioAlerting) *AlertTask { - return &AlertTask{ - api: api, - db: db, - cfg: alertingCfg, - } -} - -func (a *AlertTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - if a.cfg.PageDutyIntegrationKey == "" { - log.Warnf("PageDutyIntegrationKey is empty, not sending an alert") - return true, nil - } - - ctx := context.Background() - - alMap := make(map[string]*alertOut) - - altrs := &alerts{ - ctx: ctx, - api: a.api, - db: a.db, - cfg: a.cfg, - alertMap: alMap, - } - - for _, al := range alertFuncs { - al(altrs) - } - - details := make(map[string]interface{}) - - for k, v := range altrs.alertMap { - if v != nil { - if v.err != nil { - details[k] = v.err.Error() - continue - } - if v.alertString != "" { - details[k] = v.alertString - } - } - } - - // Alert only if required - if len(details) > 0 { - payloadData := &pdPayload{ - Summary: "Curio Alert", - Severity: "critical", - CustomDetails: details, - Source: "Curio Cluster", - } - - err = a.sendAlert(payloadData) - - if err != nil { - return false, err - } - } - - return true, nil - -} - -func (a *AlertTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - id := ids[0] - return &id, nil -} - -func (a *AlertTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: 1, - Name: "AlertManager", - Cost: resources.Resources{ - Cpu: 1, - Ram: 64 << 20, - Gpu: 0, - }, - IAmBored: harmonytask.SingletonTaskAdder(AlertMangerInterval, a), - } -} - -func (a *AlertTask) Adder(taskFunc harmonytask.AddTaskFunc) { - return -} - -var _ harmonytask.TaskInterface = &AlertTask{} - -// sendAlert sends an alert to PagerDuty with the provided payload data. -// It creates a PDData struct with the provided routing key, event action and payload. 
-// It creates an HTTP POST request with the PagerDuty event URL as the endpoint and the marshaled JSON data as the request body. -// It sends the request using an HTTP client with a maximum of 5 retries for network errors with exponential backoff before each retry. -// It handles different HTTP response status codes and returns an error based on the status code(). -// If all retries fail, it returns an error indicating the last network error encountered. -func (a *AlertTask) sendAlert(data *pdPayload) error { - - type pdData struct { - RoutingKey string `json:"routing_key"` - EventAction string `json:"event_action"` - Payload *pdPayload `json:"payload"` - } - - payload := &pdData{ - RoutingKey: a.cfg.PageDutyIntegrationKey, - EventAction: "trigger", - Payload: data, - } - - jsonData, err := json.Marshal(payload) - if err != nil { - return fmt.Errorf("error marshaling JSON: %w", err) - } - - req, err := http.NewRequest("POST", a.cfg.PagerDutyEventURL, bytes.NewBuffer(jsonData)) - if err != nil { - return fmt.Errorf("error creating request: %w", err) - } - req.Header.Set("Content-Type", "application/json") - - client := &http.Client{} - var resp *http.Response - - for i := 0; i < 5; i++ { // Maximum of 5 retries - resp, err = client.Do(req) - if err != nil { - time.Sleep(time.Duration(2*i) * time.Second) // Exponential backoff - continue - } - defer func() { _ = resp.Body.Close() }() - - switch resp.StatusCode { - case 202: - log.Debug("Accepted: The event has been accepted by PagerDuty.") - return nil - case 400: - bd, rerr := io.ReadAll(resp.Body) - if rerr != nil { - return xerrors.Errorf("Bad request: payload JSON is invalid. 
Failed to read the body: %w", err) - } - return xerrors.Errorf("Bad request: payload JSON is invalid %s", string(bd)) - case 429: - log.Debug("Too many API calls, retrying after backoff...") - time.Sleep(time.Duration(5*i) * time.Second) // Exponential backoff - case 500, 501, 502, 503, 504: - log.Debug("Server error, retrying after backoff...") - time.Sleep(time.Duration(5*i) * time.Second) // Exponential backoff - default: - log.Errorw("Response status:", resp.Status) - return xerrors.Errorf("Unexpected HTTP response: %s", resp.Status) - } - } - return fmt.Errorf("after retries, last error: %w", err) -} diff --git a/curiosrc/build/build.go b/curiosrc/build/build.go deleted file mode 100644 index 0a69f55c183..00000000000 --- a/curiosrc/build/build.go +++ /dev/null @@ -1,9 +0,0 @@ -package build - -// IsOpencl is set to the value of FFI_USE_OPENCL -var IsOpencl string - -// Format: 8 HEX then underscore then ISO8701 date -// Ex: 4c5e98f28_2024-05-17T18:42:27-04:00 -// NOTE: git date for repeatabile builds. 
-var Commit string diff --git a/curiosrc/builder.go b/curiosrc/builder.go deleted file mode 100644 index 8219749353c..00000000000 --- a/curiosrc/builder.go +++ /dev/null @@ -1,45 +0,0 @@ -package curio - -import ( - "context" - "time" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/curiosrc/chainsched" - "github.com/filecoin-project/lotus/curiosrc/message" - "github.com/filecoin-project/lotus/curiosrc/multictladdr" - "github.com/filecoin-project/lotus/curiosrc/window" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/node/config" - dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -//var log = logging.Logger("provider") - -func WindowPostScheduler(ctx context.Context, fc config.CurioFees, pc config.CurioProvingConfig, - api api.FullNode, verif storiface.Verifier, sender *message.Sender, chainSched *chainsched.CurioChainSched, - as *multictladdr.MultiAddressSelector, addresses map[dtypes.MinerAddress]bool, db *harmonydb.DB, - stor paths.Store, idx paths.SectorIndex, max int) (*window.WdPostTask, *window.WdPostSubmitTask, *window.WdPostRecoverDeclareTask, error) { - - // todo config - ft := window.NewSimpleFaultTracker(stor, idx, pc.ParallelCheckLimit, time.Duration(pc.SingleCheckTimeout), time.Duration(pc.PartitionCheckTimeout)) - - computeTask, err := window.NewWdPostTask(db, api, ft, stor, verif, chainSched, addresses, max, pc.ParallelCheckLimit, time.Duration(pc.SingleCheckTimeout)) - if err != nil { - return nil, nil, nil, err - } - - submitTask, err := window.NewWdPostSubmitTask(chainSched, sender, db, api, fc.MaxWindowPoStGasFee, as) - if err != nil { - return nil, nil, nil, err - } - - recoverTask, err := window.NewWdPostRecoverDeclareTask(sender, db, api, ft, as, chainSched, fc.MaxWindowPoStGasFee, addresses) - if err != nil { - return nil, nil, 
nil, err - } - - return computeTask, submitTask, recoverTask, nil -} diff --git a/curiosrc/chainsched/chain_sched.go b/curiosrc/chainsched/chain_sched.go deleted file mode 100644 index 42a387fbc2a..00000000000 --- a/curiosrc/chainsched/chain_sched.go +++ /dev/null @@ -1,136 +0,0 @@ -package chainsched - -import ( - "context" - "time" - - logging "github.com/ipfs/go-log/v2" - "go.opencensus.io/trace" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" -) - -var log = logging.Logger("curio/chainsched") - -type NodeAPI interface { - ChainHead(context.Context) (*types.TipSet, error) - ChainNotify(context.Context) (<-chan []*api.HeadChange, error) -} - -type CurioChainSched struct { - api NodeAPI - - callbacks []UpdateFunc - started bool -} - -func New(api NodeAPI) *CurioChainSched { - return &CurioChainSched{ - api: api, - } -} - -type UpdateFunc func(ctx context.Context, revert, apply *types.TipSet) error - -func (s *CurioChainSched) AddHandler(ch UpdateFunc) error { - if s.started { - return xerrors.Errorf("cannot add handler after start") - } - - s.callbacks = append(s.callbacks, ch) - return nil -} - -func (s *CurioChainSched) Run(ctx context.Context) { - s.started = true - - var ( - notifs <-chan []*api.HeadChange - err error - gotCur bool - ) - - // not fine to panic after this point - for { - if notifs == nil { - notifs, err = s.api.ChainNotify(ctx) - if err != nil { - log.Errorf("ChainNotify error: %+v", err) - - build.Clock.Sleep(10 * time.Second) - continue - } - - gotCur = false - log.Info("restarting chain scheduler") - } - - select { - case changes, ok := <-notifs: - if !ok { - log.Warn("chain notifs channel closed") - notifs = nil - continue - } - - if !gotCur { - if len(changes) != 1 { - log.Errorf("expected first notif to have len = 1") - continue - } - chg := changes[0] - if chg.Type != 
store.HCCurrent { - log.Errorf("expected first notif to tell current ts") - continue - } - - ctx, span := trace.StartSpan(ctx, "CurioChainSched.headChange") - - s.update(ctx, nil, chg.Val) - - span.End() - gotCur = true - continue - } - - ctx, span := trace.StartSpan(ctx, "CurioChainSched.headChange") - - var lowest, highest *types.TipSet = nil, nil - - for _, change := range changes { - if change.Val == nil { - log.Errorf("change.Val was nil") - } - switch change.Type { - case store.HCRevert: - lowest = change.Val - case store.HCApply: - highest = change.Val - } - } - - s.update(ctx, lowest, highest) - - span.End() - case <-ctx.Done(): - return - } - } -} - -func (s *CurioChainSched) update(ctx context.Context, revert, apply *types.TipSet) { - if apply == nil { - log.Error("no new tipset in CurioChainSched.update") - return - } - - for _, ch := range s.callbacks { - if err := ch(ctx, revert, apply); err != nil { - log.Errorf("handling head updates in curio chain sched: %+v", err) - } - } -} diff --git a/curiosrc/docker/.env b/curiosrc/docker/.env deleted file mode 100644 index b8cc5e80beb..00000000000 --- a/curiosrc/docker/.env +++ /dev/null @@ -1,5 +0,0 @@ -DOCKER_USER=curio -LOTUS_IMAGE=${DOCKER_USER}/lotus-dev:dev -LOTUS_MINER_IMAGE=${DOCKER_USER}/lotus-miner-dev:dev -CURIO_IMAGE=${DOCKER_USER}/curio-dev:dev -FIL_PROOFS_PARAMETER_CACHE=${HOME}/.cache/filecoin-proof-parameters \ No newline at end of file diff --git a/curiosrc/docker/.gitignore b/curiosrc/docker/.gitignore deleted file mode 100644 index 1269488f7fb..00000000000 --- a/curiosrc/docker/.gitignore +++ /dev/null @@ -1 +0,0 @@ -data diff --git a/curiosrc/docker/curio/Dockerfile b/curiosrc/docker/curio/Dockerfile deleted file mode 100644 index ad969f5bc48..00000000000 --- a/curiosrc/docker/curio/Dockerfile +++ /dev/null @@ -1,30 +0,0 @@ -ARG CURIO_TEST_IMAGE=curio/curio-all-in-one:latest -############################################################################# -FROM ${CURIO_TEST_IMAGE} - -ARG 
BUILD_VERSION=0.1 - -LABEL org.opencontainers.image.version=$BUILD_VERSION \ - org.opencontainers.image.authors="Curio Dev Team" \ - name="lotus-dev" \ - maintainer="Curio Dev Team" \ - vendor="Curio Dev Team" \ - version=$BUILD_VERSION \ - release=$BUILD_VERSION \ - summary="This image is used to host the curio dev service" \ - description="This image is used to host the curio dev service" - -EXPOSE 12300 4701 32100 - -VOLUME /var/tmp/filecoin-proof-parameters -VOLUME /var/lib/genesis -VOLUME /var/lib/builtin-actors - -WORKDIR /app -RUN mkdir -p /app - -COPY entrypoint.sh /app - -USER root - -ENTRYPOINT ["./entrypoint.sh"] diff --git a/curiosrc/docker/curio/entrypoint.sh b/curiosrc/docker/curio/entrypoint.sh deleted file mode 100755 index cbdde9fdb32..00000000000 --- a/curiosrc/docker/curio/entrypoint.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env bash -set -e -echo CURIO_REPO_PATH=$CURIO_REPO_PATH -echo Wait for lotus is ready ... -lotus wait-api -echo Wait for lotus-miner is ready ... -lotus-miner wait-api -head=0 -# Loop until the head is greater than 9 -while [[ $head -le 9 ]]; do - head=$(lotus chain list | awk '{print $1}' | awk -F':' '{print $1}' | tail -1) - if [[ $head -le 9 ]]; then - echo "Current head: $head, which is not greater than 9. Waiting..." - sleep 1 # Wait for 4 seconds before checking again - else - echo "The head is now greater than 9: $head" - fi -done - -echo All ready. Lets go -myip=`nslookup curio | grep -v "#" | grep Address | awk '{print $2}'` - -if [ ! -f $CURIO_REPO_PATH/.init.curio ]; then - - if [ ! -f $CURIO_REPO_PATH/.init.setup ]; then - export DEFAULT_WALLET=`lotus wallet default` - echo Create a new miner actor ... - lotus-shed miner create $DEFAULT_WALLET $DEFAULT_WALLET $DEFAULT_WALLET 8MiB - touch $CURIO_REPO_PATH/.init.setup - fi - - if [ ! -f $CURIO_REPO_PATH/.init.config ]; then - - newminer=`lotus state list-miners | grep -v t01000` - echo "New Miner is $newminer" - echo Initiating a new Curio cluster ... 
- curio config new-cluster $newminer - echo Enabling market ... - curio config get seal | sed -e $'$a\\\n BoostAdapters = ["'"$newminer"':'"$myip"':32100"]\n EnableParkPiece = true' | curio config set --title seal - touch $CURIO_REPO_PATH/.init.config - fi - - echo Starting Curio node to attach storage ... - curio run --nosync --layers seal,post,gui & - CURIO_PID=`echo $!` - until curio cli --machine $myip:12300 wait-api; do - echo "Waiting for the curio CLI to become ready..." - sleep 5 - done - curio cli --machine $myip:12300 storage attach --init --seal --store $CURIO_REPO_PATH - touch $CURIO_REPO_PATH/.init.curio - echo Stopping Curio node ... - echo Try to stop boost... - kill -15 $CURIO_PID || kill -9 $CURIO_PID - echo Done -fi - -echo Starting curio node ... -exec curio run --nosync --layers seal,post,gui - diff --git a/curiosrc/docker/docker-compose.yaml b/curiosrc/docker/docker-compose.yaml deleted file mode 100644 index 2cc32bc65a6..00000000000 --- a/curiosrc/docker/docker-compose.yaml +++ /dev/null @@ -1,101 +0,0 @@ -version: '3.8' -name: curio-devnet - -x-logging: - &default-logging - options: - max-size: '20m' - max-file: '3' - driver: json-file - -networks: - curio-net: - driver: bridge - ipam: - config: - - subnet: 172.20.0.0/16 - -services: - lotus: - container_name: lotus - image: ${LOTUS_IMAGE} - init: true - ports: - - "1234:1234" - - "9090:9090" - environment: - - LOTUS_FEVM_ENABLEETHRPC=true - - LOTUS_API_LISTENADDRESS=/dns/lotus/tcp/1234/http - - LOTUS_LIBP2P_LISTENADDRESSES=/ip4/0.0.0.0/tcp/9090 - restart: unless-stopped - logging: *default-logging - volumes: - - ./data/lotus:/var/lib/lotus:rw - - ./data/genesis:/var/lib/genesis:rw - - ${FIL_PROOFS_PARAMETER_CACHE}:/var/tmp/filecoin-proof-parameters:rw - networks: - curio-net: - ipv4_address: 172.20.0.2 - - lotus-miner: - container_name: lotus-miner - image: ${LOTUS_MINER_IMAGE} - init: true - ports: - - "2345:2345" - environment: - - LOTUS_API_LISTENADDRESS=/dns/lotus-miner/tcp/2345/http - - 
LOTUS_API_REMOTELISTENADDRESS=lotus-miner:2345 - - LOTUS_SEALING_BATCHPRECOMMITS=false - - LOTUS_SEALING_AGGREGATECOMMITS=false - - LOTUS_SUBSYSTEMS_ENABLEMARKETS=false - - LOTUS_SEALING_WAITDEALSDELAY=20s - restart: unless-stopped - logging: *default-logging - volumes: - - ./data/lotus-miner:/var/lib/lotus-miner:rw - - ./data/lotus:/var/lib/lotus:ro - - ./data/genesis:/var/lib/genesis:ro - - ${FIL_PROOFS_PARAMETER_CACHE}:/var/tmp/filecoin-proof-parameters:rw - networks: - curio-net: - ipv4_address: 172.20.0.3 - - curio: - container_name: curio - image: ${CURIO_IMAGE} - init: true - ports: - - "12300:12300" # API - - "4701:4701" # UI - - "32100:32100" # Market - environment: - - CURIO_REPO_PATH=/var/lib/curio - - CURIO_HARMONYDB_HOSTS=yugabyte - restart: unless-stopped - logging: *default-logging - volumes: - - ./data/curio:/var/lib/curio:rw - - ./data/lotus:/var/lib/lotus:ro - - ./data/lotus-miner:/var/lib/lotus-miner:ro - - ${FIL_PROOFS_PARAMETER_CACHE}:/var/tmp/filecoin-proof-parameters:rw - networks: - curio-net: - ipv4_address: 172.20.0.4 - - yugabyte: - container_name: yugabyte - image: curio/yugabyte-dev:dev - init: true - ports: - - "5433:5433" - - "9000:9000" - - "9042:9042" - restart: unless-stopped - logging: *default-logging - volumes: - - ./data/yugabyte-data:/root/var/data - - ./data/yugabyte-logs:/root/var/logs - networks: - curio-net: - ipv4_address: 172.20.0.5 diff --git a/curiosrc/docker/lotus-miner/Dockerfile b/curiosrc/docker/lotus-miner/Dockerfile deleted file mode 100644 index 43a3e3fa4b2..00000000000 --- a/curiosrc/docker/lotus-miner/Dockerfile +++ /dev/null @@ -1,33 +0,0 @@ -ARG CURIO_TEST_IMAGE=curio/curio-all-in-one:latest -############################################################################# -FROM ${CURIO_TEST_IMAGE} - -ARG BUILD_VERSION=0.1 - -LABEL org.opencontainers.image.version=$BUILD_VERSION \ - org.opencontainers.image.authors="Curio Dev Team" \ - name="lotus-miner-dev" \ - maintainer="Curio Dev Team" \ - vendor="Curio Dev 
Team" \ - version=$BUILD_VERSION \ - release=$BUILD_VERSION \ - summary="This image is used to host the lotus-miner dev service" \ - description="This image is used to host the lotus-miner dev service" - -EXPOSE 2345 -ENV LOTUS_SKIP_GENESIS_CHECK=_yes_ -ENV GENESIS_PATH=/var/lib/genesis -ENV SECTOR_SIZE=8388608 - -VOLUME /var/tmp/filecoin-proof-parameters -VOLUME /var/lib/genesis -VOLUME /var/lib/builtin-actors - -WORKDIR /app -RUN mkdir -p /app - -COPY entrypoint.sh /app - -USER root - -ENTRYPOINT ["./entrypoint.sh"] diff --git a/curiosrc/docker/lotus-miner/entrypoint.sh b/curiosrc/docker/lotus-miner/entrypoint.sh deleted file mode 100755 index e1041ebaa2c..00000000000 --- a/curiosrc/docker/lotus-miner/entrypoint.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash -set -e -echo Wait for lotus is ready ... -lotus wait-api -echo Lotus ready. Lets go -if [ ! -f $LOTUS_MINER_PATH/.init.miner ]; then - echo Import the genesis miner key ... - lotus wallet import --as-default $GENESIS_PATH/pre-seal-t01000.key - echo Set up the genesis miner ... - lotus-miner init --genesis-miner --actor=t01000 --sector-size=$SECTOR_SIZE --pre-sealed-sectors=$GENESIS_PATH --pre-sealed-metadata=$GENESIS_PATH/pre-seal-t01000.json --nosync - touch $LOTUS_MINER_PATH/.init.miner - echo Done -fi - -echo Starting lotus miner ... 
-exec lotus-miner run --nosync diff --git a/curiosrc/docker/lotus/Dockerfile b/curiosrc/docker/lotus/Dockerfile deleted file mode 100644 index f73a8982b9b..00000000000 --- a/curiosrc/docker/lotus/Dockerfile +++ /dev/null @@ -1,35 +0,0 @@ -ARG CURIO_TEST_IMAGE=curio/curio-all-in-one:latest -############################################################################# -FROM ${CURIO_TEST_IMAGE} - -ARG BUILD_VERSION=0.1 - -LABEL org.opencontainers.image.version=$BUILD_VERSION \ - org.opencontainers.image.authors="Curio Dev Team" \ - name="lotus-dev" \ - maintainer="Curio Dev Team" \ - vendor="Curio Dev Team" \ - version=$BUILD_VERSION \ - release=$BUILD_VERSION \ - summary="This image is used to host the lotus dev service" \ - description="This image is used to host the lotus dev service" - -EXPOSE 1234 -EXPOSE 9090 -ENV LOTUS_SKIP_GENESIS_CHECK=_yes_ -ENV GENESIS_PATH=/var/lib/genesis -ENV SECTOR_SIZE=8388608 -ENV LOTUS_FEVM_ENABLEETHRPC=true - -VOLUME /var/tmp/filecoin-proof-parameters -VOLUME /var/lib/genesis -VOLUME /var/lib/builtin-actors - -WORKDIR /app -RUN mkdir -p /app - -COPY entrypoint.sh /app - -USER root - -ENTRYPOINT ["./entrypoint.sh"] diff --git a/curiosrc/docker/lotus/entrypoint.sh b/curiosrc/docker/lotus/entrypoint.sh deleted file mode 100755 index d748c0ef953..00000000000 --- a/curiosrc/docker/lotus/entrypoint.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash -set -e -if [ ! -f $LOTUS_PATH/.init.params ]; then - echo Initializing fetch params ... - lotus fetch-params $SECTOR_SIZE - touch $LOTUS_PATH/.init.params - echo Done -fi - -if [ ! -f $LOTUS_PATH/.init.genesis ]; then - pushd $LOTUS_PATH - echo Generate root-key-1 for FIL plus - ROOT_KEY_1=`lotus-shed keyinfo new bls` - echo $ROOT_KEY_1 > rootkey-1 - echo Generate root-key-2 for FIL plus - ROOT_KEY_2=`lotus-shed keyinfo new bls` - echo $ROOT_KEY_2 > rootkey-2 - popd - - echo Initializing pre seal ... 
- lotus-seed --sector-dir $GENESIS_PATH pre-seal --sector-size $SECTOR_SIZE --num-sectors 1 - echo Initializing genesis ... - lotus-seed --sector-dir $GENESIS_PATH genesis new $LOTUS_PATH/localnet.json - echo Setting signers ... - lotus-seed --sector-dir $GENESIS_PATH genesis set-signers --threshold=2 --signers $ROOT_KEY_1 --signers $ROOT_KEY_2 $LOTUS_PATH/localnet.json - echo Initializing address ... - lotus-seed --sector-dir $GENESIS_PATH genesis add-miner $LOTUS_PATH/localnet.json $GENESIS_PATH/pre-seal-t01000.json - touch $LOTUS_PATH/.init.genesis - echo Done -fi - -echo Starting lotus deamon ... -exec lotus daemon --lotus-make-genesis=$LOTUS_PATH/devgen.car --genesis-template=$LOTUS_PATH/localnet.json --bootstrap=false diff --git a/curiosrc/docker/yugabyte/Dockerfile b/curiosrc/docker/yugabyte/Dockerfile deleted file mode 100644 index ad830fe9eff..00000000000 --- a/curiosrc/docker/yugabyte/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM centos:centos8 -RUN cd /etc/yum.repos.d/ -RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* -RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-* -RUN yum upgrade -y -RUN yum install procps-ng wget libatomic python39 -y -RUN alternatives --set python /usr/bin/python3 -RUN arch=$(arch | sed s/aarch64/el8-aarch64/ | sed s/x86_64/linux-x86_64/) && wget "https://downloads.yugabyte.com/releases/2.20.2.0/yugabyte-2.20.2.0-b145-${arch}.tar.gz" -O /tmp/yugabyte.tar.gz -RUN tar xvfz /tmp/yugabyte.tar.gz -RUN ln -s /yugabyte-2.20.2.0 /yugabyte -RUN /yugabyte/bin/post_install.sh -CMD /yugabyte/bin/yugabyted start --daemon=false --ui=false diff --git a/curiosrc/ffi/piece_funcs.go b/curiosrc/ffi/piece_funcs.go deleted file mode 100644 index a548f5cc2df..00000000000 --- a/curiosrc/ffi/piece_funcs.go +++ /dev/null @@ -1,76 +0,0 @@ -package ffi - -import ( - "context" - "io" - "os" - "time" - - "golang.org/x/xerrors" - - 
"github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -func (sb *SealCalls) WritePiece(ctx context.Context, taskID *harmonytask.TaskID, pieceID storiface.PieceNumber, size int64, data io.Reader) error { - // todo: config(?): allow setting PathStorage for this - // todo storage reservations - paths, _, done, err := sb.sectors.AcquireSector(ctx, taskID, pieceID.Ref(), storiface.FTNone, storiface.FTPiece, storiface.PathSealing) - if err != nil { - return err - } - defer done() - - dest := paths.Piece - tempDest := dest + ".tmp" - - destFile, err := os.OpenFile(tempDest, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return xerrors.Errorf("creating temp piece file '%s': %w", tempDest, err) - } - - removeTemp := true - defer func() { - if removeTemp { - rerr := os.Remove(tempDest) - if rerr != nil { - log.Errorf("removing temp file: %+v", rerr) - } - } - }() - - copyStart := time.Now() - - n, err := io.CopyBuffer(destFile, io.LimitReader(data, size), make([]byte, 8<<20)) - if err != nil { - _ = destFile.Close() - return xerrors.Errorf("copying piece data: %w", err) - } - - if err := destFile.Close(); err != nil { - return xerrors.Errorf("closing temp piece file: %w", err) - } - - if n != size { - return xerrors.Errorf("short write: %d", n) - } - - copyEnd := time.Now() - - log.Infow("wrote parked piece", "piece", pieceID, "size", size, "duration", copyEnd.Sub(copyStart), "dest", dest, "MiB/s", float64(size)/(1<<20)/copyEnd.Sub(copyStart).Seconds()) - - if err := os.Rename(tempDest, dest); err != nil { - return xerrors.Errorf("rename temp piece to dest %s -> %s: %w", tempDest, dest, err) - } - - removeTemp = false - return nil -} - -func (sb *SealCalls) PieceReader(ctx context.Context, id storiface.PieceNumber) (io.ReadCloser, error) { - return sb.sectors.storage.ReaderSeq(ctx, id.Ref(), storiface.FTPiece) -} - -func (sb *SealCalls) RemovePiece(ctx context.Context, id 
storiface.PieceNumber) error { - return sb.sectors.storage.Remove(ctx, id.Ref().ID, storiface.FTPiece, true, nil) -} diff --git a/curiosrc/ffi/sdr_funcs.go b/curiosrc/ffi/sdr_funcs.go deleted file mode 100644 index 9cd8763b44c..00000000000 --- a/curiosrc/ffi/sdr_funcs.go +++ /dev/null @@ -1,661 +0,0 @@ -package ffi - -import ( - "context" - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - - "github.com/KarpelesLab/reflink" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "github.com/puzpuzpuz/xsync/v2" - "golang.org/x/xerrors" - - // TODO everywhere here that we call this we should call our proxy instead. - ffi "github.com/filecoin-project/filecoin-ffi" - commcid "github.com/filecoin-project/go-fil-commcid" - "github.com/filecoin-project/go-state-types/abi" - proof2 "github.com/filecoin-project/go-state-types/proof" - - "github.com/filecoin-project/lotus/curiosrc/proof" - "github.com/filecoin-project/lotus/lib/ffiselect" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/proofpaths" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var log = logging.Logger("cu/ffi") - -/* -type ExternPrecommit2 func(ctx context.Context, sector storiface.SectorRef, cache, sealed string, pc1out storiface.PreCommit1Out) (sealedCID cid.Cid, unsealedCID cid.Cid, err error) - - type ExternalSealer struct { - PreCommit2 ExternPrecommit2 - } -*/ -type SealCalls struct { - sectors *storageProvider - - /*// externCalls cointain overrides for calling alternative sealing logic - externCalls ExternalSealer*/ -} - -func NewSealCalls(st *paths.Remote, ls *paths.Local, si paths.SectorIndex) *SealCalls { - return &SealCalls{ - sectors: &storageProvider{ - storage: st, - localStore: ls, - sindex: si, - storageReservations: xsync.NewIntegerMapOf[harmonytask.TaskID, *StorageReservation](), - }, - } -} - -type storageProvider struct { - storage 
*paths.Remote - localStore *paths.Local - sindex paths.SectorIndex - storageReservations *xsync.MapOf[harmonytask.TaskID, *StorageReservation] -} - -func (l *storageProvider) AcquireSector(ctx context.Context, taskID *harmonytask.TaskID, sector storiface.SectorRef, existing, allocate storiface.SectorFileType, sealing storiface.PathType) (fspaths, ids storiface.SectorPaths, release func(), err error) { - var sectorPaths, storageIDs storiface.SectorPaths - var releaseStorage func() - - var ok bool - var resv *StorageReservation - if taskID != nil { - resv, ok = l.storageReservations.Load(*taskID) - } - if ok && resv != nil { - if resv.Alloc != allocate || resv.Existing != existing { - // this should never happen, only when task definition is wrong - return storiface.SectorPaths{}, storiface.SectorPaths{}, nil, xerrors.Errorf("storage reservation type mismatch") - } - - log.Debugw("using existing storage reservation", "task", taskID, "sector", sector, "existing", existing, "allocate", allocate) - - sectorPaths = resv.Paths - storageIDs = resv.PathIDs - releaseStorage = resv.Release - - if len(existing.AllSet()) > 0 { - // there are some "existing" files in the reservation. Some of them may need fetching, so call l.storage.AcquireSector - // (which unlike in the reservation code will be called on the paths.Remote instance) to ensure that the files are - // present locally. Note that we do not care about 'allocate' reqeuests, those files don't exist, and are just - // proposed paths with a reservation of space. 
- - _, checkPathIDs, err := l.storage.AcquireSector(ctx, sector, existing, storiface.FTNone, sealing, storiface.AcquireMove, storiface.AcquireInto(storiface.PathsWithIDs{Paths: sectorPaths, IDs: storageIDs})) - if err != nil { - return storiface.SectorPaths{}, storiface.SectorPaths{}, nil, xerrors.Errorf("acquire reserved existing files: %w", err) - } - - // assert that checkPathIDs is the same as storageIDs - if storageIDs.Subset(existing) != checkPathIDs.Subset(existing) { - return storiface.SectorPaths{}, storiface.SectorPaths{}, nil, xerrors.Errorf("acquire reserved existing files: pathIDs mismatch %#v != %#v", storageIDs, checkPathIDs) - } - } - } else { - // No related reservation, acquire storage as usual - - var err error - sectorPaths, storageIDs, err = l.storage.AcquireSector(ctx, sector, existing, allocate, sealing, storiface.AcquireMove) - if err != nil { - return storiface.SectorPaths{}, storiface.SectorPaths{}, nil, err - } - - releaseStorage, err = l.localStore.Reserve(ctx, sector, allocate, storageIDs, storiface.FSOverheadSeal, paths.MinFreeStoragePercentage) - if err != nil { - return storiface.SectorPaths{}, storiface.SectorPaths{}, nil, xerrors.Errorf("reserving storage space: %w", err) - } - } - - log.Debugf("acquired sector %d (e:%d; a:%d): %v", sector, existing, allocate, sectorPaths) - - return sectorPaths, storageIDs, func() { - releaseStorage() - - for _, fileType := range storiface.PathTypes { - if fileType&allocate == 0 { - continue - } - - sid := storiface.PathByType(storageIDs, fileType) - if err := l.sindex.StorageDeclareSector(ctx, storiface.ID(sid), sector.ID, fileType, true); err != nil { - log.Errorf("declare sector error: %+v", err) - } - } - }, nil -} - -func (sb *SealCalls) GenerateSDR(ctx context.Context, taskID harmonytask.TaskID, sector storiface.SectorRef, ticket abi.SealRandomness, commKcid cid.Cid) error { - paths, pathIDs, releaseSector, err := sb.sectors.AcquireSector(ctx, &taskID, sector, storiface.FTNone, 
storiface.FTCache, storiface.PathSealing) - if err != nil { - return xerrors.Errorf("acquiring sector paths: %w", err) - } - defer releaseSector() - - // prepare SDR params - commp, err := commcid.CIDToDataCommitmentV1(commKcid) - if err != nil { - return xerrors.Errorf("computing commK: %w", err) - } - - replicaID, err := sector.ProofType.ReplicaId(sector.ID.Miner, sector.ID.Number, ticket, commp) - if err != nil { - return xerrors.Errorf("computing replica id: %w", err) - } - - // make sure the cache dir is empty - if err := os.RemoveAll(paths.Cache); err != nil { - return xerrors.Errorf("removing cache dir: %w", err) - } - if err := os.MkdirAll(paths.Cache, 0755); err != nil { - return xerrors.Errorf("mkdir cache dir: %w", err) - } - - // generate new sector key - err = ffi.GenerateSDR( - sector.ProofType, - paths.Cache, - replicaID, - ) - if err != nil { - return xerrors.Errorf("generating SDR %d (%s): %w", sector.ID.Number, paths.Unsealed, err) - } - - if err := sb.ensureOneCopy(ctx, sector.ID, pathIDs, storiface.FTCache); err != nil { - return xerrors.Errorf("ensure one copy: %w", err) - } - - return nil -} - -// ensureOneCopy makes sure that there is only one version of sector data. -// Usually called after a successful operation was done successfully on sector data. 
-func (sb *SealCalls) ensureOneCopy(ctx context.Context, sid abi.SectorID, pathIDs storiface.SectorPaths, fts storiface.SectorFileType) error { - if !pathIDs.HasAllSet(fts) { - return xerrors.Errorf("ensure one copy: not all paths are set") - } - - for _, fileType := range fts.AllSet() { - pid := storiface.PathByType(pathIDs, fileType) - keepIn := []storiface.ID{storiface.ID(pid)} - - log.Debugw("ensureOneCopy", "sector", sid, "type", fileType, "keep", keepIn) - - if err := sb.sectors.storage.Remove(ctx, sid, fileType, true, keepIn); err != nil { - return err - } - } - - return nil -} - -func (sb *SealCalls) TreeRC(ctx context.Context, task *harmonytask.TaskID, sector storiface.SectorRef, unsealed cid.Cid) (scid cid.Cid, ucid cid.Cid, err error) { - p1o, err := sb.makePhase1Out(unsealed, sector.ProofType) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("make phase1 output: %w", err) - } - - fspaths, pathIDs, releaseSector, err := sb.sectors.AcquireSector(ctx, task, sector, storiface.FTCache, storiface.FTSealed, storiface.PathSealing) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("acquiring sector paths: %w", err) - } - defer releaseSector() - - defer func() { - if err != nil { - clerr := removeDRCTrees(fspaths.Cache, false) - if clerr != nil { - log.Errorw("removing tree files after TreeDRC error", "error", clerr, "exec-error", err, "sector", sector, "cache", fspaths.Cache) - } - } - }() - - // create sector-sized file at paths.Sealed; PC2 transforms it into a sealed sector in-place - ssize, err := sector.ProofType.SectorSize() - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("getting sector size: %w", err) - } - - { - // copy TreeD prefix to sealed sector, SealPreCommitPhase2 will mutate it in place into the sealed sector - - // first try reflink + truncate, that should be way faster - err := reflink.Always(filepath.Join(fspaths.Cache, proofpaths.TreeDName), fspaths.Sealed) - if err == nil { - err = 
os.Truncate(fspaths.Sealed, int64(ssize)) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("truncating reflinked sealed file: %w", err) - } - } else { - log.Errorw("reflink treed -> sealed failed, falling back to slow copy, use single scratch btrfs or xfs filesystem", "error", err, "sector", sector, "cache", fspaths.Cache, "sealed", fspaths.Sealed) - - // fallback to slow copy, copy ssize bytes from treed to sealed - dst, err := os.OpenFile(fspaths.Sealed, os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("opening sealed sector file: %w", err) - } - src, err := os.Open(filepath.Join(fspaths.Cache, proofpaths.TreeDName)) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("opening treed sector file: %w", err) - } - - _, err = io.CopyN(dst, src, int64(ssize)) - derr := dst.Close() - _ = src.Close() - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("copying treed -> sealed: %w", err) - } - if derr != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("closing sealed file: %w", derr) - } - } - } - - sl, uns, err := ffiselect.FFISelect{}.SealPreCommitPhase2(sector.ID, p1o, fspaths.Cache, fspaths.Sealed) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("computing seal proof: %w", err) - } - - if uns != unsealed { - return cid.Undef, cid.Undef, xerrors.Errorf("unsealed cid changed after sealing") - } - - if err := sb.ensureOneCopy(ctx, sector.ID, pathIDs, storiface.FTCache|storiface.FTSealed); err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("ensure one copy: %w", err) - } - - return sl, uns, nil -} - -func removeDRCTrees(cache string, isDTree bool) error { - files, err := os.ReadDir(cache) - if err != nil { - return xerrors.Errorf("listing cache: %w", err) - } - - var testFunc func(string) bool - - if isDTree { - testFunc = proofpaths.IsTreeDFile - } else { - testFunc = proofpaths.IsTreeRCFile - } - - for _, file := range files { - if testFunc(file.Name()) { - 
err := os.Remove(filepath.Join(cache, file.Name())) - if err != nil { - return xerrors.Errorf("removing tree file: %w", err) - } - } - } - return nil -} - -func (sb *SealCalls) GenerateSynthPoRep() { - panic("todo") -} - -func (sb *SealCalls) PoRepSnark(ctx context.Context, sn storiface.SectorRef, sealed, unsealed cid.Cid, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness) ([]byte, error) { - vproof, err := sb.sectors.storage.GeneratePoRepVanillaProof(ctx, sn, sealed, unsealed, ticket, seed) - if err != nil { - return nil, xerrors.Errorf("failed to generate vanilla proof: %w", err) - } - - proof, err := ffiselect.FFISelect{}.SealCommitPhase2(vproof, sn.ID.Number, sn.ID.Miner) - if err != nil { - return nil, xerrors.Errorf("computing seal proof failed: %w", err) - } - - ok, err := ffi.VerifySeal(proof2.SealVerifyInfo{ - SealProof: sn.ProofType, - SectorID: sn.ID, - DealIDs: nil, - Randomness: ticket, - InteractiveRandomness: seed, - Proof: proof, - SealedCID: sealed, - UnsealedCID: unsealed, - }) - if err != nil { - return nil, xerrors.Errorf("failed to verify proof: %w", err) - } - if !ok { - return nil, xerrors.Errorf("porep failed to validate") - } - - return proof, nil -} - -func (sb *SealCalls) makePhase1Out(unsCid cid.Cid, spt abi.RegisteredSealProof) ([]byte, error) { - commd, err := commcid.CIDToDataCommitmentV1(unsCid) - if err != nil { - return nil, xerrors.Errorf("make uns cid: %w", err) - } - - type Config struct { - ID string `json:"id"` - Path string `json:"path"` - RowsToDiscard int `json:"rows_to_discard"` - Size int `json:"size"` - } - - type Labels struct { - H *string `json:"_h"` // proofs want this.. 
- Labels []Config `json:"labels"` - } - - var phase1Output struct { - CommD [32]byte `json:"comm_d"` - Config Config `json:"config"` // TreeD - Labels map[string]*Labels `json:"labels"` - RegisteredProof string `json:"registered_proof"` - } - - copy(phase1Output.CommD[:], commd) - - phase1Output.Config.ID = "tree-d" - phase1Output.Config.Path = "/placeholder" - phase1Output.Labels = map[string]*Labels{} - - switch spt { - case abi.RegisteredSealProof_StackedDrg2KiBV1_1, abi.RegisteredSealProof_StackedDrg2KiBV1_1_Feat_SyntheticPoRep: - phase1Output.Config.RowsToDiscard = 0 - phase1Output.Config.Size = 127 - phase1Output.Labels["StackedDrg2KiBV1"] = &Labels{} - phase1Output.RegisteredProof = "StackedDrg2KiBV1_1" - - for i := 0; i < 2; i++ { - phase1Output.Labels["StackedDrg2KiBV1"].Labels = append(phase1Output.Labels["StackedDrg2KiBV1"].Labels, Config{ - ID: fmt.Sprintf("layer-%d", i+1), - Path: "/placeholder", - RowsToDiscard: 0, - Size: 64, - }) - } - - case abi.RegisteredSealProof_StackedDrg8MiBV1_1, abi.RegisteredSealProof_StackedDrg8MiBV1_1_Feat_SyntheticPoRep: - phase1Output.Config.RowsToDiscard = 0 - phase1Output.Config.Size = 524287 - phase1Output.Labels["StackedDrg8MiBV1"] = &Labels{} - phase1Output.RegisteredProof = "StackedDrg8MiBV1_1" - - for i := 0; i < 2; i++ { - phase1Output.Labels["StackedDrg8MiBV1"].Labels = append(phase1Output.Labels["StackedDrg8MiBV1"].Labels, Config{ - ID: fmt.Sprintf("layer-%d", i+1), - Path: "/placeholder", - RowsToDiscard: 0, - Size: 262144, - }) - } - - case abi.RegisteredSealProof_StackedDrg512MiBV1_1: - phase1Output.Config.RowsToDiscard = 0 - phase1Output.Config.Size = 33554431 - phase1Output.Labels["StackedDrg512MiBV1"] = &Labels{} - phase1Output.RegisteredProof = "StackedDrg512MiBV1_1" - - for i := 0; i < 2; i++ { - phase1Output.Labels["StackedDrg512MiBV1"].Labels = append(phase1Output.Labels["StackedDrg512MiBV1"].Labels, Config{ - ID: fmt.Sprintf("layer-%d", i+1), - Path: "placeholder", - RowsToDiscard: 0, - Size: 
16777216, - }) - } - - case abi.RegisteredSealProof_StackedDrg32GiBV1_1: - phase1Output.Config.RowsToDiscard = 0 - phase1Output.Config.Size = 2147483647 - phase1Output.Labels["StackedDrg32GiBV1"] = &Labels{} - phase1Output.RegisteredProof = "StackedDrg32GiBV1_1" - - for i := 0; i < 11; i++ { - phase1Output.Labels["StackedDrg32GiBV1"].Labels = append(phase1Output.Labels["StackedDrg32GiBV1"].Labels, Config{ - ID: fmt.Sprintf("layer-%d", i+1), - Path: "/placeholder", - RowsToDiscard: 0, - Size: 1073741824, - }) - } - - case abi.RegisteredSealProof_StackedDrg64GiBV1_1: - phase1Output.Config.RowsToDiscard = 0 - phase1Output.Config.Size = 4294967295 - phase1Output.Labels["StackedDrg64GiBV1"] = &Labels{} - phase1Output.RegisteredProof = "StackedDrg64GiBV1_1" - - for i := 0; i < 11; i++ { - phase1Output.Labels["StackedDrg64GiBV1"].Labels = append(phase1Output.Labels["StackedDrg64GiBV1"].Labels, Config{ - ID: fmt.Sprintf("layer-%d", i+1), - Path: "/placeholder", - RowsToDiscard: 0, - Size: 2147483648, - }) - } - - default: - panic("proof type not handled") - } - - return json.Marshal(phase1Output) -} - -func (sb *SealCalls) LocalStorage(ctx context.Context) ([]storiface.StoragePath, error) { - return sb.sectors.localStore.Local(ctx) -} - -func (sb *SealCalls) FinalizeSector(ctx context.Context, sector storiface.SectorRef, keepUnsealed bool) error { - alloc := storiface.FTNone - if keepUnsealed { - // note: In Curio we don't write the unsealed file in any of the previous stages, it's only written here from tree-d - alloc = storiface.FTUnsealed - } - - sectorPaths, pathIDs, releaseSector, err := sb.sectors.AcquireSector(ctx, nil, sector, storiface.FTCache, alloc, storiface.PathSealing) - if err != nil { - return xerrors.Errorf("acquiring sector paths: %w", err) - } - defer releaseSector() - - ssize, err := sector.ProofType.SectorSize() - if err != nil { - return xerrors.Errorf("getting sector size: %w", err) - } - - if keepUnsealed { - // tree-d contains exactly unsealed data 
in the prefix, so - // * we move it to a temp file - // * we truncate the temp file to the sector size - // * we move the temp file to the unsealed location - - // temp path in cache where we'll move tree-d before truncating - // it is in the cache directory so that we can use os.Rename to move it - // to unsealed (which may be on a different filesystem) - tempUnsealed := filepath.Join(sectorPaths.Cache, storiface.SectorName(sector.ID)) - - _, terr := os.Stat(tempUnsealed) - tempUnsealedExists := terr == nil - - // First handle an edge case where we have already gone through this step, - // but ClearCache or later steps failed. In that case we'll see tree-d missing and unsealed present - - if _, err := os.Stat(filepath.Join(sectorPaths.Cache, proofpaths.TreeDName)); err != nil { - if os.IsNotExist(err) { - // check that unsealed exists and is the right size - st, err := os.Stat(sectorPaths.Unsealed) - if err != nil { - if os.IsNotExist(err) { - if tempUnsealedExists { - // unsealed file does not exist, but temp unsealed file does - // so we can just resume where the previous attempt left off - goto retryUnsealedMove - } - return xerrors.Errorf("neither unsealed file nor temp-unsealed file exists") - } - return xerrors.Errorf("stat unsealed file: %w", err) - } - if st.Size() != int64(ssize) { - if tempUnsealedExists { - // unsealed file exists but is the wrong size, and temp unsealed file exists - // so we can just resume where the previous attempt left off with some cleanup - - if err := os.Remove(sectorPaths.Unsealed); err != nil { - return xerrors.Errorf("removing unsealed file from last attempt: %w", err) - } - - goto retryUnsealedMove - } - return xerrors.Errorf("unsealed file is not the right size: %d != %d and temp unsealed is missing", st.Size(), ssize) - } - - // all good, just log that this edge case happened - log.Warnw("unsealed file exists but tree-d is missing, skipping move", "sector", sector.ID, "unsealed", sectorPaths.Unsealed, "cache", 
sectorPaths.Cache) - goto afterUnsealedMove - } - return xerrors.Errorf("stat tree-d file: %w", err) - } - - // If the state in clean do the move - - // move tree-d to temp file - if err := os.Rename(filepath.Join(sectorPaths.Cache, proofpaths.TreeDName), tempUnsealed); err != nil { - return xerrors.Errorf("moving tree-d to temp file: %w", err) - } - - retryUnsealedMove: - - // truncate sealed file to sector size - if err := os.Truncate(tempUnsealed, int64(ssize)); err != nil { - return xerrors.Errorf("truncating unsealed file to sector size: %w", err) - } - - // move temp file to unsealed location - if err := paths.Move(tempUnsealed, sectorPaths.Unsealed); err != nil { - return xerrors.Errorf("move temp unsealed sector to final location (%s -> %s): %w", tempUnsealed, sectorPaths.Unsealed, err) - } - } - -afterUnsealedMove: - if err := ffi.ClearCache(uint64(ssize), sectorPaths.Cache); err != nil { - return xerrors.Errorf("clearing cache: %w", err) - } - - if err := sb.ensureOneCopy(ctx, sector.ID, pathIDs, storiface.FTCache|alloc); err != nil { - return xerrors.Errorf("ensure one copy: %w", err) - } - - return nil -} - -func (sb *SealCalls) MoveStorage(ctx context.Context, sector storiface.SectorRef, taskID *harmonytask.TaskID) error { - // only move the unsealed file if it still exists and needs moving - moveUnsealed := storiface.FTUnsealed - { - found, unsealedPathType, err := sb.sectorStorageType(ctx, sector, storiface.FTUnsealed) - if err != nil { - return xerrors.Errorf("checking cache storage type: %w", err) - } - - if !found || unsealedPathType == storiface.PathStorage { - moveUnsealed = storiface.FTNone - } - } - - toMove := storiface.FTCache | storiface.FTSealed | moveUnsealed - - var opts []storiface.AcquireOption - if taskID != nil { - resv, ok := sb.sectors.storageReservations.Load(*taskID) - // if the reservation is missing MoveStorage will simply create one internally. 
This is fine as the reservation - // will only be missing when the node is restarting, which means that the missing reservations will get recreated - // anyways, and before we start claiming other tasks. - if ok { - defer resv.Release() - - if resv.Alloc != storiface.FTNone { - return xerrors.Errorf("task %d has storage reservation with alloc", taskID) - } - if resv.Existing != toMove|storiface.FTUnsealed { - return xerrors.Errorf("task %d has storage reservation with different existing", taskID) - } - - opts = append(opts, storiface.AcquireInto(storiface.PathsWithIDs{Paths: resv.Paths, IDs: resv.PathIDs})) - } - } - - err := sb.sectors.storage.MoveStorage(ctx, sector, toMove, opts...) - if err != nil { - return xerrors.Errorf("moving storage: %w", err) - } - - for _, fileType := range toMove.AllSet() { - if err := sb.sectors.storage.RemoveCopies(ctx, sector.ID, fileType); err != nil { - return xerrors.Errorf("rm copies (t:%s, s:%v): %w", fileType, sector, err) - } - } - - return nil -} - -func (sb *SealCalls) sectorStorageType(ctx context.Context, sector storiface.SectorRef, ft storiface.SectorFileType) (sectorFound bool, ptype storiface.PathType, err error) { - stores, err := sb.sectors.sindex.StorageFindSector(ctx, sector.ID, ft, 0, false) - if err != nil { - return false, "", xerrors.Errorf("finding sector: %w", err) - } - if len(stores) == 0 { - return false, "", nil - } - - for _, store := range stores { - if store.CanSeal { - return true, storiface.PathSealing, nil - } - } - - return true, storiface.PathStorage, nil -} - -// PreFetch fetches the sector file to local storage before SDR and TreeRC Tasks -func (sb *SealCalls) PreFetch(ctx context.Context, sector storiface.SectorRef, task *harmonytask.TaskID) (fsPath, pathID storiface.SectorPaths, releaseSector func(), err error) { - fsPath, pathID, releaseSector, err = sb.sectors.AcquireSector(ctx, task, sector, storiface.FTCache, storiface.FTNone, storiface.PathSealing) - if err != nil { - return 
storiface.SectorPaths{}, storiface.SectorPaths{}, nil, xerrors.Errorf("acquiring sector paths: %w", err) - } - // Don't release the storage locks. They will be released in TreeD func() - return -} - -func (sb *SealCalls) TreeD(ctx context.Context, sector storiface.SectorRef, unsealed cid.Cid, size abi.PaddedPieceSize, data io.Reader, unpaddedData bool, fspaths, pathIDs storiface.SectorPaths) error { - var err error - defer func() { - if err != nil { - clerr := removeDRCTrees(fspaths.Cache, true) - if clerr != nil { - log.Errorw("removing tree files after TreeDRC error", "error", clerr, "exec-error", err, "sector", sector, "cache", fspaths.Cache) - } - } - }() - - treeDUnsealed, err := proof.BuildTreeD(data, unpaddedData, filepath.Join(fspaths.Cache, proofpaths.TreeDName), size) - if err != nil { - return xerrors.Errorf("building tree-d: %w", err) - } - - if treeDUnsealed != unsealed { - return xerrors.Errorf("tree-d cid %s mismatch with supplied unsealed cid %s", treeDUnsealed, unsealed) - } - - if err := sb.ensureOneCopy(ctx, sector.ID, pathIDs, storiface.FTCache); err != nil { - return xerrors.Errorf("ensure one copy: %w", err) - } - - return nil -} diff --git a/curiosrc/ffi/task_storage.go b/curiosrc/ffi/task_storage.go deleted file mode 100644 index 4cd9adffe03..00000000000 --- a/curiosrc/ffi/task_storage.go +++ /dev/null @@ -1,232 +0,0 @@ -package ffi - -import ( - "context" - "sync" - "time" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - storagePaths "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type SectorRef struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"` -} - -func (sr SectorRef) ID() abi.SectorID { - return abi.SectorID{ - Miner: 
abi.ActorID(sr.SpID), - Number: abi.SectorNumber(sr.SectorNumber), - } -} - -func (sr SectorRef) Ref() storiface.SectorRef { - return storiface.SectorRef{ - ID: sr.ID(), - ProofType: sr.RegSealProof, - } -} - -type TaskStorage struct { - sc *SealCalls - - alloc, existing storiface.SectorFileType - ssize abi.SectorSize - pathType storiface.PathType - - taskToSectorRef func(taskID harmonytask.TaskID) (SectorRef, error) - - // Minimum free storage percentage cutoff for reservation rejection - MinFreeStoragePercentage float64 -} - -type ReleaseStorageFunc func() // free storage reservation - -type StorageReservation struct { - SectorRef SectorRef - Release ReleaseStorageFunc - Paths storiface.SectorPaths - PathIDs storiface.SectorPaths - - Alloc, Existing storiface.SectorFileType -} - -func (sb *SealCalls) Storage(taskToSectorRef func(taskID harmonytask.TaskID) (SectorRef, error), alloc, existing storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType, MinFreeStoragePercentage float64) *TaskStorage { - return &TaskStorage{ - sc: sb, - alloc: alloc, - existing: existing, - ssize: ssize, - pathType: pathType, - taskToSectorRef: taskToSectorRef, - MinFreeStoragePercentage: MinFreeStoragePercentage, - } -} - -func (t *TaskStorage) HasCapacity() bool { - ctx := context.Background() - - paths, err := t.sc.sectors.sindex.StorageBestAlloc(ctx, t.alloc, t.ssize, t.pathType, storagePaths.NoMinerFilter) - if err != nil { - log.Errorf("finding best alloc in HasCapacity: %+v", err) - return false - } - - local, err := t.sc.sectors.localStore.Local(ctx) - if err != nil { - log.Errorf("getting local storage: %+v", err) - return false - } - - for _, path := range paths { - if t.pathType == storiface.PathStorage && !path.CanStore { - continue // we want to store, and this isn't a store path - } - if t.pathType == storiface.PathSealing && !path.CanSeal { - continue // we want to seal, and this isn't a seal path - } - - // check if this path is on this node - var 
found bool - for _, storagePath := range local { - if storagePath.ID == path.ID { - found = true - break - } - } - if !found { - // this path isn't on this node - continue - } - - // StorageBestAlloc already checks that there is enough space; Not atomic like reserving space, but it's - // good enough for HasCapacity - return true - } - - return false // no path found -} - -func (t *TaskStorage) Claim(taskID int) (func() error, error) { - // TaskStorage Claim Attempts to reserve storage for the task - // A: Create a reservation for files to be allocated - // B: Create a reservation for existing files to be fetched into local storage - // C: Create a reservation for existing files in local storage which may be extended (e.g. sector cache when computing Trees) - - ctx := context.Background() - - sectorRef, err := t.taskToSectorRef(harmonytask.TaskID(taskID)) - if err != nil { - return nil, xerrors.Errorf("getting sector ref: %w", err) - } - - // storage writelock sector - lkctx, cancel := context.WithCancel(ctx) - - requestedTypes := t.alloc | t.existing - - lockAcquireTimuout := time.Second * 10 - lockAcquireTimer := time.NewTimer(lockAcquireTimuout) - - go func() { - defer cancel() - - select { - case <-lockAcquireTimer.C: - case <-ctx.Done(): - } - }() - - if err := t.sc.sectors.sindex.StorageLock(lkctx, sectorRef.ID(), storiface.FTNone, requestedTypes); err != nil { - // timer will expire - return nil, xerrors.Errorf("claim StorageLock: %w", err) - } - - if !lockAcquireTimer.Stop() { - // timer expired, so lkctx is done, and that means the lock was acquired and dropped.. - return nil, xerrors.Errorf("failed to acquire lock") - } - defer func() { - // make sure we release the sector lock - lockAcquireTimer.Reset(0) - }() - - // First see what we have locally. 
We are putting allocate and existing together because local acquire will look - // for existing files for allocate requests, separately existing files which aren't found locally will be need to - // be fetched, so we will need to create reservations for that too. - // NOTE localStore.AcquireSector does not open or create any files, nor does it reserve space. It only proposes - // paths to be used. - pathsFs, pathIDs, err := t.sc.sectors.localStore.AcquireSector(ctx, sectorRef.Ref(), storiface.FTNone, requestedTypes, t.pathType, storiface.AcquireMove) - if err != nil { - return nil, err - } - - // reserve the space - release, err := t.sc.sectors.localStore.Reserve(ctx, sectorRef.Ref(), requestedTypes, pathIDs, storiface.FSOverheadSeal, t.MinFreeStoragePercentage) - if err != nil { - return nil, err - } - - var releaseOnce sync.Once - releaseFunc := func() { - releaseOnce.Do(release) - } - - sres := &StorageReservation{ - SectorRef: sectorRef, - Release: releaseFunc, - Paths: pathsFs, - PathIDs: pathIDs, - - Alloc: t.alloc, - Existing: t.existing, - } - - t.sc.sectors.storageReservations.Store(harmonytask.TaskID(taskID), sres) - - log.Debugw("claimed storage", "task_id", taskID, "sector", sectorRef.ID(), "paths", pathsFs) - - // note: we drop the sector writelock on return; THAT IS INTENTIONAL, this code runs in CanAccept, which doesn't - // guarantee that the work for this sector will happen on this node; SDR CanAccept just ensures that the node can - // run the job, harmonytask is what ensures that only one SDR runs at a time - return func() error { - return t.markComplete(taskID, sectorRef) - }, nil -} - -func (t *TaskStorage) markComplete(taskID int, sectorRef SectorRef) error { - // MarkComplete is ALWAYS called after the task is done or not scheduled - // If Claim is called and returns without errors, MarkComplete with the same - // taskID is guaranteed to eventually be called - - sres, ok := t.sc.sectors.storageReservations.Load(harmonytask.TaskID(taskID)) - 
if !ok { - return xerrors.Errorf("no reservation found for task %d", taskID) - } - - if sectorRef != sres.SectorRef { - return xerrors.Errorf("reservation sector ref doesn't match task sector ref: %+v != %+v", sectorRef, sres.SectorRef) - } - - log.Debugw("marking storage complete", "task_id", taskID, "sector", sectorRef.ID(), "paths", sres.Paths) - - // remove the reservation - t.sc.sectors.storageReservations.Delete(harmonytask.TaskID(taskID)) - - // release the reservation - sres.Release() - - // note: this only frees the reservation, allocated sectors are declared in AcquireSector which is aware of - // the reservation - return nil -} - -var _ resources.Storage = &TaskStorage{} diff --git a/curiosrc/gc/storage_endpoint_gc.go b/curiosrc/gc/storage_endpoint_gc.go deleted file mode 100644 index d49c51a1bb3..00000000000 --- a/curiosrc/gc/storage_endpoint_gc.go +++ /dev/null @@ -1,288 +0,0 @@ -package gc - -import ( - "context" - "strings" - "sync" - "time" - - logging "github.com/ipfs/go-log/v2" - "github.com/samber/lo" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/result" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/fsutil" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var log = logging.Logger("curiogc") - -const StorageEndpointGCInterval = 21 * time.Minute -const StorageEndpointDeadTime = StorageEndpointGCInterval * 6 // ~2h -const MaxParallelEndpointChecks = 32 - -type StorageEndpointGC struct { - si *paths.DBIndex - remote *paths.Remote - db *harmonydb.DB -} - -func NewStorageEndpointGC(si *paths.DBIndex, remote *paths.Remote, db *harmonydb.DB) *StorageEndpointGC { - return &StorageEndpointGC{ - si: si, - remote: remote, - db: db, - } -} - -func (s *StorageEndpointGC) Do(taskID 
harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - /* - 1. Get all storage paths + urls (endpoints) - 2. Ping each url, record results - 3. Update sector_path_url_liveness with success/failure - 4.1 If a URL was consistently down for StorageEndpointDeadTime, remove it from the storage_path table - 4.2 Remove storage paths with no URLs remaining - 4.2.1 in the same transaction remove sector refs to the dead path - */ - - ctx := context.Background() - - var pathRefs []struct { - StorageID storiface.ID `db:"storage_id"` - Urls string `db:"urls"` - LastHeartbeat *time.Time `db:"last_heartbeat"` - } - - err = s.db.Select(ctx, &pathRefs, `SELECT storage_id, urls, last_heartbeat FROM storage_path`) - if err != nil { - return false, xerrors.Errorf("getting path metadata: %w", err) - } - - type pingResult struct { - storageID storiface.ID - url string - - res result.Result[fsutil.FsStat] - } - - var pingResults []pingResult - var resultLk sync.Mutex - var resultThrottle = make(chan struct{}, MaxParallelEndpointChecks) - - for _, pathRef := range pathRefs { - pathRef := pathRef - urls := strings.Split(pathRef.Urls, paths.URLSeparator) - - for _, url := range urls { - url := url - - select { - case resultThrottle <- struct{}{}: - case <-ctx.Done(): - return false, ctx.Err() - } - - go func() { - defer func() { - <-resultThrottle - }() - - st, err := s.remote.StatUrl(ctx, url, pathRef.StorageID) - - res := pingResult{ - storageID: pathRef.StorageID, - url: url, - res: result.Wrap(st, err), - } - - resultLk.Lock() - pingResults = append(pingResults, res) - resultLk.Unlock() - }() - } - } - - // Wait for all pings to finish - for i := 0; i < MaxParallelEndpointChecks; i++ { - select { - case resultThrottle <- struct{}{}: - case <-ctx.Done(): - return false, ctx.Err() - } - } - - // Update the liveness table - - /* - create table sector_path_url_liveness ( - storage_id text, - url text, - - last_checked timestamp not null, - last_live timestamp, - last_dead 
timestamp, - last_dead_reason text, - - primary key (storage_id, url), - - foreign key (storage_id) references storage_path (storage_id) on delete cascade - ) - */ - - currentTime := time.Now().UTC() - - committed, err := s.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { - for _, pingResult := range pingResults { - var lastLive, lastDead, lastDeadReason interface{} - if pingResult.res.Error == nil { - lastLive = currentTime.UTC() - lastDead = nil - lastDeadReason = nil - } else { - lastLive = nil - lastDead = currentTime.UTC() - lastDeadReason = pingResult.res.Error.Error() - } - - // This function updates the liveness data for a URL in the `sector_path_url_liveness` table. - // - // On conflict, where the same `storage_id` and `url` are found: - // - last_checked is always updated to the current timestamp. - // - last_live is updated to the new `last_live` if it is not null; otherwise, it retains the existing value. - // - last_dead is conditionally updated based on two criteria: - // 1. It is set to the new `last_dead` if the existing `last_dead` is null (indicating this is the first recorded failure). - // 2. It is updated to the new `last_dead` if there has been a live instance recorded after the most recent dead timestamp, indicating the resource was alive again before this new failure. - // 3. It retains the existing value if none of the above conditions are met. - // - last_dead_reason is updated similarly to `last_live`, using COALESCE to prefer the new reason if it's provided. 
- _, err := tx.Exec(` - INSERT INTO sector_path_url_liveness (storage_id, url, last_checked, last_live, last_dead, last_dead_reason) - VALUES ($1, $2, $3, $4, $5, $6) - ON CONFLICT (storage_id, url) DO UPDATE - SET last_checked = EXCLUDED.last_checked, - last_live = COALESCE(EXCLUDED.last_live, sector_path_url_liveness.last_live), - last_dead = CASE - WHEN sector_path_url_liveness.last_dead IS NULL THEN EXCLUDED.last_dead - WHEN sector_path_url_liveness.last_dead IS NOT NULL AND sector_path_url_liveness.last_live > sector_path_url_liveness.last_dead THEN EXCLUDED.last_dead - ELSE sector_path_url_liveness.last_dead - END, - last_dead_reason = COALESCE(EXCLUDED.last_dead_reason, sector_path_url_liveness.last_dead_reason) - `, pingResult.storageID, pingResult.url, currentTime, lastLive, lastDead, lastDeadReason) - if err != nil { - return false, xerrors.Errorf("updating liveness data: %w", err) - } - } - - return true, nil - }, harmonydb.OptionRetry()) - if err != nil { - return false, xerrors.Errorf("sector_path_url_liveness update: %w", err) - } - if !committed { - return false, xerrors.Errorf("sector_path_url_liveness update: transaction didn't commit") - } - - /////// - // Now we do the actual database cleanup - if !stillOwned() { - return false, xerrors.Errorf("task no longer owned") - } - - committed, err = s.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (bool, error) { - // Identify URLs that are consistently down - var deadURLs []struct { - StorageID storiface.ID - URL string - } - err = tx.Select(&deadURLs, ` - SELECT storage_id, url FROM sector_path_url_liveness - WHERE last_dead > COALESCE(last_live, '1970-01-01') AND last_dead < $1 - `, currentTime.Add(-StorageEndpointDeadTime).UTC()) - if err != nil { - return false, xerrors.Errorf("selecting dead URLs: %w", err) - } - - log.Debugw("dead urls", "dead_urls", deadURLs) - - // Remove dead URLs from storage_path entries and handle path cleanup - for _, du := range deadURLs { - du := du - // Fetch the 
current URLs for the storage path - var URLs string - err = tx.QueryRow("SELECT urls FROM storage_path WHERE storage_id = $1", du.StorageID).Scan(&URLs) - if err != nil { - return false, xerrors.Errorf("fetching storage paths: %w", err) - } - - // Filter out the dead URL using lo.Reject and prepare the updated list - urls := strings.Split(URLs, paths.URLSeparator) - urls = lo.Reject(urls, func(u string, _ int) bool { - return u == du.URL - }) - - log.Debugw("filtered urls", "urls", urls, "dead_url", du.URL, "storage_id", du.StorageID) - - if len(urls) == 0 { - // If no URLs left, remove the storage path entirely - _, err = tx.Exec("DELETE FROM storage_path WHERE storage_id = $1", du.StorageID) - if err != nil { - return false, xerrors.Errorf("deleting storage path: %w", err) - } - _, err = tx.Exec("DELETE FROM sector_location WHERE storage_id = $1", du.StorageID) - if err != nil { - return false, xerrors.Errorf("deleting sector locations: %w", err) - } - } else { - // Update the storage path with the filtered URLs - newURLs := strings.Join(urls, paths.URLSeparator) - _, err = tx.Exec("UPDATE storage_path SET urls = $1 WHERE storage_id = $2", newURLs, du.StorageID) - if err != nil { - return false, xerrors.Errorf("updating storage path urls: %w", err) - } - // Remove sector_path_url_liveness entry - _, err = tx.Exec("DELETE FROM sector_path_url_liveness WHERE storage_id = $1 AND url = $2", du.StorageID, du.URL) - if err != nil { - return false, xerrors.Errorf("deleting sector_path_url_liveness entry: %w", err) - } - } - } - - return true, nil - }, harmonydb.OptionRetry()) - if err != nil { - return false, xerrors.Errorf("removing dead URLs and cleaning storage paths: %w", err) - } - if !committed { - return false, xerrors.Errorf("transaction for removing dead URLs and cleaning paths did not commit") - } - - return true, nil -} - -func (s *StorageEndpointGC) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - id := 
ids[0] - return &id, nil -} - -func (s *StorageEndpointGC) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: 1, - Name: "StorageMetaGC", - Cost: resources.Resources{ - Cpu: 1, - Ram: 64 << 20, - Gpu: 0, - }, - IAmBored: harmonytask.SingletonTaskAdder(StorageEndpointGCInterval, s), - } -} - -func (s *StorageEndpointGC) Adder(taskFunc harmonytask.AddTaskFunc) { - // lazy endpoint, added when bored - return -} - -var _ harmonytask.TaskInterface = &StorageEndpointGC{} diff --git a/curiosrc/market/deal_ingest.go b/curiosrc/market/deal_ingest.go deleted file mode 100644 index 8aa41811b36..00000000000 --- a/curiosrc/market/deal_ingest.go +++ /dev/null @@ -1,547 +0,0 @@ -package market - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "time" - - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-padreader" - "github.com/filecoin-project/go-state-types/abi" - verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" - "github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/seal" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece" -) - -var log = logging.Logger("piece-ingestor") - -const loopFrequency = 10 * time.Second - -type Ingester interface { - AllocatePieceToSector(ctx context.Context, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) -} - -type PieceIngesterApi interface { - ChainHead(context.Context) (*types.TipSet, error) - StateMinerInfo(context.Context, address.Address, types.TipSetKey) 
(api.MinerInfo, error) - StateMinerAllocated(ctx context.Context, a address.Address, key types.TipSetKey) (*bitfield.BitField, error) - StateNetworkVersion(ctx context.Context, key types.TipSetKey) (network.Version, error) - StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) - StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) -} - -type openSector struct { - number abi.SectorNumber - currentSize abi.PaddedPieceSize - earliestStartEpoch abi.ChainEpoch - index uint64 - openedAt *time.Time - latestEndEpoch abi.ChainEpoch -} - -type PieceIngester struct { - ctx context.Context - db *harmonydb.DB - api PieceIngesterApi - miner address.Address - mid uint64 // miner ID - windowPoStProofType abi.RegisteredPoStProof - synth bool - sectorSize abi.SectorSize - sealRightNow bool // Should be true only for CurioAPI AllocatePieceToSector method - maxWaitTime time.Duration -} - -type pieceDetails struct { - Sector abi.SectorNumber `db:"sector_number"` - Size abi.PaddedPieceSize `db:"piece_size"` - StartEpoch abi.ChainEpoch `db:"deal_start_epoch"` - EndEpoch abi.ChainEpoch `db:"deal_end_epoch"` - Index uint64 `db:"piece_index"` - CreatedAt *time.Time `db:"created_at"` -} - -type verifiedDeal struct { - isVerified bool - tmin abi.ChainEpoch - tmax abi.ChainEpoch -} - -func NewPieceIngester(ctx context.Context, db *harmonydb.DB, api PieceIngesterApi, maddr address.Address, sealRightNow bool, maxWaitTime time.Duration) (*PieceIngester, error) { - mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return nil, err - } - - mid, err := address.IDFromAddress(maddr) - if err != nil { - return nil, xerrors.Errorf("getting miner ID: %w", err) - } - - pi := &PieceIngester{ - ctx: ctx, - db: db, - api: api, - sealRightNow: sealRightNow, - miner: maddr, - maxWaitTime: 
maxWaitTime, - sectorSize: mi.SectorSize, - windowPoStProofType: mi.WindowPoStProofType, - mid: mid, - synth: false, // TODO: synthetic porep config - } - - go pi.start() - - return pi, nil -} - -func (p *PieceIngester) start() { - ticker := time.NewTicker(loopFrequency) - defer ticker.Stop() - - for { - select { - case <-p.ctx.Done(): - return - case <-ticker.C: - err := p.Seal() - if err != nil { - log.Error(err) - } - } - } -} - -func (p *PieceIngester) Seal() error { - head, err := p.api.ChainHead(p.ctx) - if err != nil { - return xerrors.Errorf("getting chain head: %w", err) - } - - spt, err := p.getSealProofType() - if err != nil { - return xerrors.Errorf("getting seal proof type: %w", err) - } - - shouldSeal := func(sector *openSector) bool { - // Start sealing a sector if - // 1. If sector is full - // 2. We have been waiting for MaxWaitDuration - // 3. StartEpoch is less than 8 hours // todo: make this config? - if sector.currentSize == abi.PaddedPieceSize(p.sectorSize) { - log.Debugf("start sealing sector %d of miner %d: %s", sector.number, p.miner.String(), "sector full") - return true - } - if time.Since(*sector.openedAt) > p.maxWaitTime { - log.Debugf("start sealing sector %d of miner %d: %s", sector.number, p.miner.String(), "MaxWaitTime reached") - return true - } - if sector.earliestStartEpoch < head.Height()+abi.ChainEpoch(960) { - log.Debugf("start sealing sector %d of miner %d: %s", sector.number, p.miner.String(), "earliest start epoch") - return true - } - return false - } - - comm, err := p.db.BeginTransaction(p.ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - - openSectors, err := p.getOpenSectors(tx) - if err != nil { - return false, err - } - - for _, sector := range openSectors { - sector := sector - if shouldSeal(sector) { - // Start sealing the sector - cn, err := tx.Exec(`INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3);`, p.mid, sector.number, spt) - - if err != nil { - return false, 
xerrors.Errorf("adding sector to pipeline: %w", err) - } - - if cn != 1 { - return false, xerrors.Errorf("adding sector to pipeline: incorrect number of rows returned") - } - - _, err = tx.Exec("SELECT transfer_and_delete_open_piece($1, $2)", p.mid, sector.number) - if err != nil { - return false, xerrors.Errorf("adding sector to pipeline: %w", err) - } - } - - } - return true, nil - }, harmonydb.OptionRetry()) - - if err != nil { - return xerrors.Errorf("start sealing sector: %w", err) - } - - if !comm { - return xerrors.Errorf("start sealing sector: commit failed") - } - - return nil -} - -func (p *PieceIngester) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) { - if maddr != p.miner { - return api.SectorOffset{}, xerrors.Errorf("miner address doesn't match") - } - - // check raw size - if piece.Size() != padreader.PaddedSize(uint64(rawSize)).Padded() { - return api.SectorOffset{}, xerrors.Errorf("raw size doesn't match padded piece size") - } - - var propJson []byte - - dataHdrJson, err := json.Marshal(header) - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("json.Marshal(header): %w", err) - } - - vd := verifiedDeal{ - isVerified: false, - } - - if piece.DealProposal != nil { - vd.isVerified = piece.DealProposal.VerifiedDeal - if vd.isVerified { - alloc, err := p.api.StateGetAllocationForPendingDeal(ctx, piece.DealID, types.EmptyTSK) - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("getting pending allocation for deal %d: %w", piece.DealID, err) - } - if alloc == nil { - return api.SectorOffset{}, xerrors.Errorf("no allocation found for deal %d: %w", piece.DealID, err) - } - vd.tmin = alloc.TermMin - vd.tmax = alloc.TermMax - } - propJson, err = json.Marshal(piece.DealProposal) - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("json.Marshal(piece.DealProposal): %w", err) - } - } else { - vd.isVerified = 
piece.PieceActivationManifest.VerifiedAllocationKey != nil - if vd.isVerified { - client, err := address.NewIDAddress(uint64(piece.PieceActivationManifest.VerifiedAllocationKey.Client)) - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("getting client address from actor ID: %w", err) - } - alloc, err := p.api.StateGetAllocation(ctx, client, verifregtypes.AllocationId(piece.PieceActivationManifest.VerifiedAllocationKey.ID), types.EmptyTSK) - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("getting allocation details for %d: %w", piece.PieceActivationManifest.VerifiedAllocationKey.ID, err) - } - if alloc == nil { - return api.SectorOffset{}, xerrors.Errorf("no allocation found for ID %d: %w", piece.PieceActivationManifest.VerifiedAllocationKey.ID, err) - } - vd.tmin = alloc.TermMin - vd.tmax = alloc.TermMax - } - propJson, err = json.Marshal(piece.PieceActivationManifest) - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("json.Marshal(piece.PieceActivationManifest): %w", err) - } - } - - if !p.sealRightNow { - // Try to allocate the piece to an open sector - allocated, ret, err := p.allocateToExisting(ctx, piece, rawSize, source, dataHdrJson, propJson, vd) - if err != nil { - return api.SectorOffset{}, err - } - if allocated { - return ret, nil - } - } - - // Allocation to open sector failed, create a new sector and add the piece to it - num, err := seal.AllocateSectorNumbers(ctx, p.api, p.db, maddr, 1, func(tx *harmonydb.Tx, numbers []abi.SectorNumber) (bool, error) { - if len(numbers) != 1 { - return false, xerrors.Errorf("expected one sector number") - } - n := numbers[0] - - if piece.DealProposal != nil { - _, err = tx.Exec(`SELECT insert_sector_market_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`, - p.mid, n, 0, - piece.DealProposal.PieceCID, piece.DealProposal.PieceSize, - source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, - piece.PublishCid, piece.DealID, propJson, 
piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch) - if err != nil { - return false, xerrors.Errorf("adding deal to sector: %w", err) - } - } else { - _, err = tx.Exec(`SELECT insert_sector_ddo_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`, - p.mid, n, 0, - piece.PieceActivationManifest.CID, piece.PieceActivationManifest.Size, - source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, - piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch, propJson) - if err != nil { - return false, xerrors.Errorf("adding deal to sector: %w", err) - } - } - return true, nil - }) - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("allocating sector numbers: %w", err) - } - - if len(num) != 1 { - return api.SectorOffset{}, xerrors.Errorf("expected one sector number") - } - - if p.sealRightNow { - err = p.SectorStartSealing(ctx, num[0]) - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("SectorStartSealing: %w", err) - } - } - - return api.SectorOffset{ - Sector: num[0], - Offset: 0, - }, nil -} - -func (p *PieceIngester) allocateToExisting(ctx context.Context, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, dataHdrJson, propJson []byte, vd verifiedDeal) (bool, api.SectorOffset, error) { - - var ret api.SectorOffset - var allocated bool - var rerr error - - comm, err := p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - openSectors, err := p.getOpenSectors(tx) - if err != nil { - return false, err - } - - pieceSize := piece.Size() - for _, sec := range openSectors { - sec := sec - if sec.currentSize+pieceSize <= abi.PaddedPieceSize(p.sectorSize) { - if vd.isVerified { - sectorLifeTime := sec.latestEndEpoch - sec.earliestStartEpoch - // Allocation's TMin must fit in sector and TMax should be at least sector lifetime or more - // Based on https://github.com/filecoin-project/builtin-actors/blob/a0e34d22665ac8c84f02fea8a099216f29ffaeeb/actors/verifreg/src/lib.rs#L1071-L1086 - if sectorLifeTime <= 
vd.tmin && sectorLifeTime >= vd.tmax { - continue - } - } - - ret.Sector = sec.number - ret.Offset = sec.currentSize - - // Insert market deal to DB for the sector - if piece.DealProposal != nil { - cn, err := tx.Exec(`SELECT insert_sector_market_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`, - p.mid, sec.number, sec.index+1, - piece.DealProposal.PieceCID, piece.DealProposal.PieceSize, - source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, - piece.PublishCid, piece.DealID, propJson, piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch) - - if err != nil { - return false, fmt.Errorf("adding deal to sector: %v", err) - } - - if cn != 1 { - return false, xerrors.Errorf("expected one piece") - } - - } else { // Insert DDO deal to DB for the sector - cn, err := tx.Exec(`SELECT insert_sector_ddo_piece($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)`, - p.mid, sec.number, sec.index+1, - piece.PieceActivationManifest.CID, piece.PieceActivationManifest.Size, - source.String(), dataHdrJson, rawSize, !piece.KeepUnsealed, - piece.DealSchedule.StartEpoch, piece.DealSchedule.EndEpoch, propJson) - - if err != nil { - return false, fmt.Errorf("adding deal to sector: %v", err) - } - - if cn != 1 { - return false, xerrors.Errorf("expected one piece") - } - - } - allocated = true - break - } - } - return true, nil - }, harmonydb.OptionRetry()) - - if !comm { - rerr = xerrors.Errorf("allocating piece to a sector: commit failed") - } - - if err != nil { - rerr = xerrors.Errorf("allocating piece to a sector: %w", err) - } - - return allocated, ret, rerr - -} - -func (p *PieceIngester) SectorStartSealing(ctx context.Context, sector abi.SectorNumber) error { - - spt, err := p.getSealProofType() - if err != nil { - return xerrors.Errorf("getting seal proof type: %w", err) - } - - comm, err := p.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - // Get current open sector pieces from DB - var pieces []pieceDetails - err = 
tx.Select(&pieces, ` - SELECT - sector_number, - piece_size, - piece_index, - COALESCE(direct_start_epoch, f05_deal_start_epoch, 0) AS deal_start_epoch, - COALESCE(direct_end_epoch, f05_deal_end_epoch, 0) AS deal_end_epoch, - created_at - FROM - open_sector_pieces - WHERE - sp_id = $1 AND sector_number = $2 - ORDER BY - piece_index DESC;`, p.mid, sector) - if err != nil { - return false, xerrors.Errorf("getting open sectors from DB") - } - - if len(pieces) < 1 { - return false, xerrors.Errorf("sector %d is not waiting to be sealed", sector) - } - - cn, err := tx.Exec(`INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof) VALUES ($1, $2, $3);`, p.mid, sector, spt) - - if err != nil { - return false, xerrors.Errorf("adding sector to pipeline: %w", err) - } - - if cn != 1 { - return false, xerrors.Errorf("incorrect number of rows returned") - } - - _, err = tx.Exec("SELECT transfer_and_delete_open_piece($1, $2)", p.mid, sector) - if err != nil { - return false, xerrors.Errorf("adding sector to pipeline: %w", err) - } - - return true, nil - - }, harmonydb.OptionRetry()) - - if err != nil { - return xerrors.Errorf("start sealing sector: %w", err) - } - - if !comm { - return xerrors.Errorf("start sealing sector: commit failed") - } - - return nil -} - -func (p *PieceIngester) getOpenSectors(tx *harmonydb.Tx) ([]*openSector, error) { - // Get current open sector pieces from DB - var pieces []pieceDetails - err := tx.Select(&pieces, ` - SELECT - sector_number, - piece_size, - piece_index, - COALESCE(direct_start_epoch, f05_deal_start_epoch, 0) AS deal_start_epoch, - COALESCE(direct_end_epoch, f05_deal_end_epoch, 0) AS deal_end_epoch, - created_at - FROM - open_sector_pieces - WHERE - sp_id = $1 - ORDER BY - piece_index DESC;`, p.mid) - if err != nil { - return nil, xerrors.Errorf("getting open sectors from DB") - } - - getStartEpoch := func(new abi.ChainEpoch, cur abi.ChainEpoch) abi.ChainEpoch { - if cur > 0 && cur < new { - return cur - } - return new - 
} - - getEndEpoch := func(new abi.ChainEpoch, cur abi.ChainEpoch) abi.ChainEpoch { - if cur > 0 && cur > new { - return cur - } - return new - } - - getOpenedAt := func(piece pieceDetails, cur *time.Time) *time.Time { - if piece.CreatedAt.Before(*cur) { - return piece.CreatedAt - } - return cur - } - - sectorMap := map[abi.SectorNumber]*openSector{} - for _, pi := range pieces { - pi := pi - sector, ok := sectorMap[pi.Sector] - if !ok { - sectorMap[pi.Sector] = &openSector{ - number: pi.Sector, - currentSize: pi.Size, - earliestStartEpoch: getStartEpoch(pi.StartEpoch, 0), - index: pi.Index, - openedAt: pi.CreatedAt, - latestEndEpoch: getEndEpoch(pi.EndEpoch, 0), - } - continue - } - sector.currentSize += pi.Size - sector.earliestStartEpoch = getStartEpoch(pi.StartEpoch, sector.earliestStartEpoch) - sector.latestEndEpoch = getEndEpoch(pi.EndEpoch, sector.earliestStartEpoch) - if sector.index < pi.Index { - sector.index = pi.Index - } - sector.openedAt = getOpenedAt(pi, sector.openedAt) - } - - var os []*openSector - - for _, v := range sectorMap { - v := v - os = append(os, v) - } - - return os, nil -} - -func (p *PieceIngester) getSealProofType() (abi.RegisteredSealProof, error) { - nv, err := p.api.StateNetworkVersion(p.ctx, types.EmptyTSK) - if err != nil { - return 0, xerrors.Errorf("getting network version: %w", err) - } - - return miner.PreferredSealProofTypeFromWindowPoStType(nv, p.windowPoStProofType, p.synth) -} diff --git a/curiosrc/market/fakelm/iface.go b/curiosrc/market/fakelm/iface.go deleted file mode 100644 index 1bc91b35e75..00000000000 --- a/curiosrc/market/fakelm/iface.go +++ /dev/null @@ -1,33 +0,0 @@ -package fakelm - -import ( - "context" - - "github.com/google/uuid" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -// MinimalLMApi is a subset of the LotusMiner API that is exposed by 
Curio -// for consumption by boost -type MinimalLMApi interface { - ActorAddress(context.Context) (address.Address, error) - - WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) - - SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) - - SectorsList(context.Context) ([]abi.SectorNumber, error) - SectorsSummary(ctx context.Context) (map[api.SectorState]int, error) - - SectorsListInStates(context.Context, []api.SectorState) ([]abi.SectorNumber, error) - - StorageRedeclareLocal(context.Context, *storiface.ID, bool) error - - ComputeDataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (abi.PieceInfo, error) - SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d api.PieceDealInfo) (api.SectorOffset, error) -} diff --git a/curiosrc/market/fakelm/lmimpl.go b/curiosrc/market/fakelm/lmimpl.go deleted file mode 100644 index 04444939027..00000000000 --- a/curiosrc/market/fakelm/lmimpl.go +++ /dev/null @@ -1,381 +0,0 @@ -package fakelm - -import ( - "context" - "encoding/base64" - "net/http" - "net/url" - - "github.com/gbrlsnchs/jwt/v3" - "github.com/google/uuid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/curiosrc/market" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/storage/paths" - sealing "github.com/filecoin-project/lotus/storage/pipeline" - lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece" - 
"github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type LMRPCProvider struct { - si paths.SectorIndex - full api.FullNode - - maddr address.Address // lotus-miner RPC is single-actor - minerID abi.ActorID - - ssize abi.SectorSize - - pi market.Ingester - db *harmonydb.DB - conf *config.CurioConfig -} - -func NewLMRPCProvider(si paths.SectorIndex, full api.FullNode, maddr address.Address, minerID abi.ActorID, ssize abi.SectorSize, pi market.Ingester, db *harmonydb.DB, conf *config.CurioConfig) *LMRPCProvider { - return &LMRPCProvider{ - si: si, - full: full, - maddr: maddr, - minerID: minerID, - ssize: ssize, - pi: pi, - db: db, - conf: conf, - } -} - -func (l *LMRPCProvider) ActorAddress(ctx context.Context) (address.Address, error) { - return l.maddr, nil -} - -func (l *LMRPCProvider) WorkerJobs(ctx context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) { - // correct enough - return map[uuid.UUID][]storiface.WorkerJob{}, nil -} - -func (l *LMRPCProvider) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) { - var ssip []struct { - PieceCID *string `db:"piece_cid"` - DealID *int64 `db:"f05_deal_id"` - Complete bool `db:"after_commit_msg_success"` - Failed bool `db:"failed"` - SDR bool `db:"after_sdr"` - PoRep bool `db:"after_porep"` - } - - err := l.db.Select(ctx, &ssip, ` - WITH CheckCommit AS ( - SELECT - sp_id, - sector_number, - after_commit_msg, - failed, - after_sdr, - after_porep, - after_commit_msg_success - FROM - sectors_sdr_pipeline - WHERE - sp_id = $1 AND sector_number = $2 - ), - MetaPieces AS ( - SELECT - mp.piece_cid, - mp.f05_deal_id, - cc.after_commit_msg_success, - cc.failed, - cc.after_sdr, - cc.after_porep - FROM - sectors_meta_pieces mp - INNER JOIN - CheckCommit cc ON mp.sp_id = cc.sp_id AND mp.sector_num = cc.sector_number - WHERE - cc.after_commit_msg IS TRUE - ), - InitialPieces AS ( - SELECT - ip.piece_cid, - ip.f05_deal_id, - cc.after_commit_msg_success, - 
cc.failed, - cc.after_sdr, - cc.after_porep - FROM - sectors_sdr_initial_pieces ip - INNER JOIN - CheckCommit cc ON ip.sp_id = cc.sp_id AND ip.sector_number = cc.sector_number - WHERE - cc.after_commit_msg IS FALSE - ), - FallbackPieces AS ( - SELECT - op.piece_cid, - op.f05_deal_id, - FALSE as after_commit_msg_success, - FALSE as failed, - FALSE as after_sdr, - FALSE as after_porep - FROM - open_sector_pieces op - WHERE - op.sp_id = $1 AND op.sector_number = $2 - AND NOT EXISTS (SELECT 1 FROM sectors_sdr_pipeline sp WHERE sp.sp_id = op.sp_id AND sp.sector_number = op.sector_number) - ) - SELECT * FROM MetaPieces - UNION ALL - SELECT * FROM InitialPieces - UNION ALL - SELECT * FROM FallbackPieces;`, l.minerID, sid) - if err != nil { - return api.SectorInfo{}, err - } - - var deals []abi.DealID - if len(ssip) > 0 { - for _, d := range ssip { - if d.DealID != nil { - deals = append(deals, abi.DealID(*d.DealID)) - } - } - } - - spt, err := miner.SealProofTypeFromSectorSize(l.ssize, network.Version20, false) // good enough, just need this for ssize anyways - if err != nil { - return api.SectorInfo{}, err - } - - ret := api.SectorInfo{ - SectorID: sid, - CommD: nil, - CommR: nil, - Proof: nil, - Deals: deals, - Pieces: nil, - Ticket: api.SealTicket{}, - Seed: api.SealSeed{}, - PreCommitMsg: nil, - CommitMsg: nil, - Retries: 0, - ToUpgrade: false, - ReplicaUpdateMessage: nil, - LastErr: "", - Log: nil, - SealProof: spt, - Activation: 0, - Expiration: 0, - DealWeight: big.Zero(), - VerifiedDealWeight: big.Zero(), - InitialPledge: big.Zero(), - OnTime: 0, - Early: 0, - } - - // If no rows found i.e. sector doesn't exist in DB - //assign ssip[0] to a local variable for easier reading. 
- currentSSIP := ssip[0] - - switch { - case len(ssip) == 0: - ret.State = api.SectorState(sealing.UndefinedSectorState) - case currentSSIP.Failed: - ret.State = api.SectorState(sealing.FailedUnrecoverable) - case !currentSSIP.SDR: - ret.State = api.SectorState(sealing.WaitDeals) - case currentSSIP.SDR && !currentSSIP.PoRep: - ret.State = api.SectorState(sealing.PreCommit1) - case currentSSIP.SDR && currentSSIP.PoRep && !currentSSIP.Complete: - ret.State = api.SectorState(sealing.PreCommit2) - case currentSSIP.Complete: - ret.State = api.SectorState(sealing.Proving) - default: - return api.SectorInfo{}, nil - } - return ret, nil -} - -func (l *LMRPCProvider) SectorsList(ctx context.Context) ([]abi.SectorNumber, error) { - decls, err := l.si.StorageList(ctx) - if err != nil { - return nil, err - } - - var out []abi.SectorNumber - for _, decl := range decls { - for _, s := range decl { - if s.Miner != l.minerID { - continue - } - - out = append(out, s.SectorID.Number) - } - } - - return out, nil -} - -type sectorParts struct { - sealed, unsealed, cache bool - inStorage bool -} - -func (l *LMRPCProvider) SectorsSummary(ctx context.Context) (map[api.SectorState]int, error) { - decls, err := l.si.StorageList(ctx) - if err != nil { - return nil, err - } - - states := map[abi.SectorID]sectorParts{} - for si, decll := range decls { - sinfo, err := l.si.StorageInfo(ctx, si) - if err != nil { - return nil, err - } - - for _, decl := range decll { - if decl.Miner != l.minerID { - continue - } - - state := states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}] - state.sealed = state.sealed || decl.Has(storiface.FTSealed) - state.unsealed = state.unsealed || decl.Has(storiface.FTUnsealed) - state.cache = state.cache || decl.Has(storiface.FTCache) - state.inStorage = state.inStorage || sinfo.CanStore - states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}] = state - } - } - - out := map[api.SectorState]int{} - for _, state := range states { - switch 
{ - case state.sealed && state.inStorage: - out[api.SectorState(sealing.Proving)]++ - default: - // not even close to correct, but good enough for now - out[api.SectorState(sealing.PreCommit1)]++ - } - } - - return out, nil -} - -func (l *LMRPCProvider) SectorsListInStates(ctx context.Context, want []api.SectorState) ([]abi.SectorNumber, error) { - decls, err := l.si.StorageList(ctx) - if err != nil { - return nil, err - } - - wantProving, wantPrecommit1 := false, false - for _, s := range want { - switch s { - case api.SectorState(sealing.Proving): - wantProving = true - case api.SectorState(sealing.PreCommit1): - wantPrecommit1 = true - } - } - - states := map[abi.SectorID]sectorParts{} - - for si, decll := range decls { - sinfo, err := l.si.StorageInfo(ctx, si) - if err != nil { - return nil, err - } - - for _, decl := range decll { - if decl.Miner != l.minerID { - continue - } - - state := states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}] - state.sealed = state.sealed || decl.Has(storiface.FTSealed) - state.unsealed = state.unsealed || decl.Has(storiface.FTUnsealed) - state.cache = state.cache || decl.Has(storiface.FTCache) - state.inStorage = state.inStorage || sinfo.CanStore - states[abi.SectorID{Miner: decl.Miner, Number: decl.SectorID.Number}] = state - } - } - var out []abi.SectorNumber - - for id, state := range states { - switch { - case state.sealed && state.inStorage: - if wantProving { - out = append(out, id.Number) - } - default: - // not even close to correct, but good enough for now - if wantPrecommit1 { - out = append(out, id.Number) - } - } - } - - return out, nil -} - -func (l *LMRPCProvider) StorageRedeclareLocal(ctx context.Context, id *storiface.ID, b bool) error { - // so this rescans and redeclares sectors on lotus-miner; whyyy is boost even calling this? 
- - return nil -} - -func (l *LMRPCProvider) IsUnsealed(ctx context.Context, sectorNum abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { - sectorID := abi.SectorID{Miner: l.minerID, Number: sectorNum} - - si, err := l.si.StorageFindSector(ctx, sectorID, storiface.FTUnsealed, 0, false) - if err != nil { - return false, err - } - - // yes, yes, technically sectors can be partially unsealed, but that is never done in practice - // and can't even be easily done with the current implementation - return len(si) > 0, nil -} - -func (l *LMRPCProvider) ComputeDataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (abi.PieceInfo, error) { - return abi.PieceInfo{}, xerrors.Errorf("not supported") -} - -func (l *LMRPCProvider) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d api.PieceDealInfo) (api.SectorOffset, error) { - if d.DealProposal.PieceSize != abi.PaddedPieceSize(l.ssize) { - return api.SectorOffset{}, xerrors.Errorf("only full-sector pieces are supported") - } - - return api.SectorOffset{}, xerrors.Errorf("not supported, use AllocatePieceToSector") -} - -func (l *LMRPCProvider) AllocatePieceToSector(ctx context.Context, maddr address.Address, piece lpiece.PieceDealInfo, rawSize int64, source url.URL, header http.Header) (api.SectorOffset, error) { - return l.pi.AllocatePieceToSector(ctx, maddr, piece, rawSize, source, header) -} - -func (l *LMRPCProvider) AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) { - type jwtPayload struct { - Allow []auth.Permission - } - - p := jwtPayload{ - Allow: perms, - } - - sk, err := base64.StdEncoding.DecodeString(l.conf.Apis.StorageRPCSecret) - if err != nil { - return nil, xerrors.Errorf("decode secret: %w", err) - } - - return jwt.Sign(&p, jwt.NewHS256(sk)) -} - -var _ MinimalLMApi = &LMRPCProvider{} diff --git a/curiosrc/market/lmrpc/lmrpc.go b/curiosrc/market/lmrpc/lmrpc.go deleted file mode 
100644 index b111d515d10..00000000000 --- a/curiosrc/market/lmrpc/lmrpc.go +++ /dev/null @@ -1,620 +0,0 @@ -package lmrpc - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "sync" - "time" - - "github.com/google/uuid" - logging "github.com/ipfs/go-log/v2" - manet "github.com/multiformats/go-multiaddr/net" - "github.com/yugabyte/pgx/v5" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/types" - cumarket "github.com/filecoin-project/lotus/curiosrc/market" - "github.com/filecoin-project/lotus/curiosrc/market/fakelm" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/nullreader" - "github.com/filecoin-project/lotus/metrics/proxy" - "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/storage/paths" - lpiece "github.com/filecoin-project/lotus/storage/pipeline/piece" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var log = logging.Logger("lmrpc") - -const backpressureWaitTime = 30 * time.Second - -func ServeCurioMarketRPCFromConfig(db *harmonydb.DB, full api.FullNode, cfg *config.CurioConfig) error { - return forEachMarketRPC(cfg, func(maddr string, listen string) error { - addr, err := address.NewFromString(maddr) - if err != nil { - return xerrors.Errorf("parsing actor address: %w", err) - } - - go func() { - err := ServeCurioMarketRPC(db, full, addr, cfg, listen) - if err != nil { - log.Errorf("failed to serve market rpc: %s", err) - } - }() - - return nil - }) -} - -func MakeTokens(cfg *config.CurioConfig) (map[address.Address]string, error) { - out := map[address.Address]string{} - - err := forEachMarketRPC(cfg, func(smaddr string, listen string) error { - 
ctx := context.Background() - - laddr, err := net.ResolveTCPAddr("tcp", listen) - if err != nil { - return xerrors.Errorf("net resolve: %w", err) - } - - if len(laddr.IP) == 0 || laddr.IP.IsUnspecified() { - return xerrors.Errorf("market rpc server listen address must be a specific address, not %s (probably missing bind IP)", listen) - } - - // need minimal provider with just the config - lp := fakelm.NewLMRPCProvider(nil, nil, address.Undef, 0, 0, nil, nil, cfg) - - tok, err := lp.AuthNew(ctx, api.AllPermissions) - if err != nil { - return err - } - - // parse listen into multiaddr - ma, err := manet.FromNetAddr(laddr) - if err != nil { - return xerrors.Errorf("net from addr (%v): %w", laddr, err) - } - - maddr, err := address.NewFromString(smaddr) - if err != nil { - return xerrors.Errorf("parsing actor address: %w", err) - } - - token := fmt.Sprintf("%s:%s", tok, ma) - out[maddr] = token - - return nil - }) - - return out, err -} - -func forEachMarketRPC(cfg *config.CurioConfig, cb func(string, string) error) error { - for n, server := range cfg.Subsystems.BoostAdapters { - n := n - - // server: [f0.. actor address]:[bind address] - // bind address is either a numeric port or a full address - - // first split at first : to get the actor address and the bind address - split := strings.SplitN(server, ":", 2) - - // if the split length is not 2, return an error - if len(split) != 2 { - return fmt.Errorf("bad market rpc server config %d %s, expected [f0.. actor address]:[bind address]", n, server) - } - - // get the actor address and the bind address - strMaddr, strListen := split[0], split[1] - - maddr, err := address.NewFromString(strMaddr) - if err != nil { - return xerrors.Errorf("parsing actor address: %w", err) - } - - // check the listen address - if strListen == "" { - return fmt.Errorf("bad market rpc server config %d %s, expected [f0.. 
actor address]:[bind address]", n, server) - } - // if listen address is numeric, prepend the default host - if _, err := strconv.Atoi(strListen); err == nil { - strListen = "0.0.0.0:" + strListen - } - // check if the listen address is a valid address - if _, _, err := net.SplitHostPort(strListen); err != nil { - return fmt.Errorf("bad market rpc server config %d %s, expected [f0.. actor address]:[bind address]", n, server) - } - - log.Infow("Starting market RPC server", "actor", maddr, "listen", strListen) - - if err := cb(strMaddr, strListen); err != nil { - return err - } - } - - return nil -} - -func ServeCurioMarketRPC(db *harmonydb.DB, full api.FullNode, maddr address.Address, conf *config.CurioConfig, listen string) error { - ctx := context.Background() - - pin, err := cumarket.NewPieceIngester(ctx, db, full, maddr, false, time.Duration(conf.Ingest.MaxDealWaitTime)) - if err != nil { - return xerrors.Errorf("starting piece ingestor") - } - - si := paths.NewDBIndex(nil, db) - - mid, err := address.IDFromAddress(maddr) - if err != nil { - return xerrors.Errorf("getting miner id: %w", err) - } - - mi, err := full.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - lp := fakelm.NewLMRPCProvider(si, full, maddr, abi.ActorID(mid), mi.SectorSize, pin, db, conf) - - laddr, err := net.ResolveTCPAddr("tcp", listen) - if err != nil { - return xerrors.Errorf("net resolve: %w", err) - } - - if len(laddr.IP) == 0 || laddr.IP.IsUnspecified() { - return xerrors.Errorf("market rpc server listen address must be a specific address, not %s (probably missing bind IP)", listen) - } - rootUrl := url.URL{ - Scheme: "http", - Host: laddr.String(), - } - - ast := api.StorageMinerStruct{} - - ast.CommonStruct.Internal.Version = func(ctx context.Context) (api.APIVersion, error) { - return api.APIVersion{ - Version: "curio-proxy-v0", - APIVersion: api.MinerAPIVersion0, - BlockDelay: build.BlockDelaySecs, - }, nil - 
} - - pieceInfoLk := new(sync.Mutex) - pieceInfos := map[uuid.UUID][]pieceInfo{} - - ast.CommonStruct.Internal.AuthNew = lp.AuthNew - ast.Internal.ActorAddress = lp.ActorAddress - ast.Internal.WorkerJobs = lp.WorkerJobs - ast.Internal.SectorsStatus = lp.SectorsStatus - ast.Internal.SectorsList = lp.SectorsList - ast.Internal.SectorsSummary = lp.SectorsSummary - ast.Internal.SectorsListInStates = lp.SectorsListInStates - ast.Internal.StorageRedeclareLocal = lp.StorageRedeclareLocal - ast.Internal.ComputeDataCid = lp.ComputeDataCid - ast.Internal.SectorAddPieceToAny = sectorAddPieceToAnyOperation(maddr, rootUrl, conf, pieceInfoLk, pieceInfos, pin, db, mi.SectorSize) - ast.Internal.StorageList = si.StorageList - ast.Internal.StorageDetach = si.StorageDetach - ast.Internal.StorageReportHealth = si.StorageReportHealth - ast.Internal.StorageDeclareSector = si.StorageDeclareSector - ast.Internal.StorageDropSector = si.StorageDropSector - ast.Internal.StorageFindSector = si.StorageFindSector - ast.Internal.StorageInfo = si.StorageInfo - ast.Internal.StorageBestAlloc = si.StorageBestAlloc - ast.Internal.StorageLock = si.StorageLock - ast.Internal.StorageTryLock = si.StorageTryLock - ast.Internal.StorageGetLocks = si.StorageGetLocks - ast.Internal.SectorStartSealing = pin.SectorStartSealing - - var pieceHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) { - // /piece?piece_id=xxxx - pieceUUID := r.URL.Query().Get("piece_id") - - pu, err := uuid.Parse(pieceUUID) - if err != nil { - http.Error(w, "bad piece id", http.StatusBadRequest) - return - } - - if r.Method != http.MethodGet { - http.Error(w, "bad method", http.StatusMethodNotAllowed) - return - } - - fmt.Printf("%s request for piece from %s\n", pieceUUID, r.RemoteAddr) - - pieceInfoLk.Lock() - pis, ok := pieceInfos[pu] - if !ok { - http.Error(w, "piece not found", http.StatusNotFound) - log.Warnw("piece not found", "piece_uuid", pu) - pieceInfoLk.Unlock() - return - } - - // pop - pi := pis[0] - pis 
= pis[1:] - - pieceInfos[pu] = pis - if len(pis) == 0 { - delete(pieceInfos, pu) - } - - pieceInfoLk.Unlock() - - start := time.Now() - - pieceData := io.LimitReader(io.MultiReader( - pi.data, - nullreader.Reader{}, - ), int64(pi.size)) - - n, err := io.Copy(w, pieceData) - close(pi.done) - - took := time.Since(start) - mbps := float64(n) / (1024 * 1024) / took.Seconds() - - if err != nil { - log.Errorf("copying piece data: %s", err) - return - } - - log.Infow("piece served", "piece_uuid", pu, "size", float64(n)/(1024*1024), "duration", took, "speed", mbps) - } - - finalApi := proxy.LoggingAPI[api.StorageMiner, api.StorageMinerStruct](&ast) - - mh, err := node.MinerHandler(finalApi, false) // todo permissioned - if err != nil { - return err - } - - mux := http.NewServeMux() - mux.Handle("/piece", pieceHandler) - mux.Handle("/", mh) // todo: create a method for sealNow for sectors - - server := &http.Server{ - Addr: listen, - Handler: mux, - ReadTimeout: 48 * time.Hour, - WriteTimeout: 48 * time.Hour, // really high because we block until pieces are saved in PiecePark - } - - return server.ListenAndServe() -} - -type pieceInfo struct { - data storiface.Data - size abi.UnpaddedPieceSize - - done chan struct{} -} - -func sectorAddPieceToAnyOperation(maddr address.Address, rootUrl url.URL, conf *config.CurioConfig, pieceInfoLk *sync.Mutex, pieceInfos map[uuid.UUID][]pieceInfo, pin *cumarket.PieceIngester, db *harmonydb.DB, ssize abi.SectorSize) func(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data, deal lpiece.PieceDealInfo) (api.SectorOffset, error) { - return func(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data, deal lpiece.PieceDealInfo) (api.SectorOffset, error) { - if (deal.PieceActivationManifest == nil && deal.DealProposal == nil) || (deal.PieceActivationManifest != nil && deal.DealProposal != nil) { - return api.SectorOffset{}, xerrors.Errorf("deal info must have either deal proposal or piece 
manifest") - } - - origPieceData := pieceData - defer func() { - closer, ok := origPieceData.(io.Closer) - if !ok { - log.Warnf("DataCid: cannot close pieceData reader %T because it is not an io.Closer", origPieceData) - return - } - if err := closer.Close(); err != nil { - log.Warnw("closing pieceData in DataCid", "error", err) - } - }() - - pi := pieceInfo{ - data: pieceData, - size: pieceSize, - - done: make(chan struct{}), - } - - pieceUUID := uuid.New() - - if deal.DealProposal != nil { - log.Infow("piece assign request", "piece_cid", deal.PieceCID().String(), "provider", deal.DealProposal.Provider, "piece_uuid", pieceUUID) - } - - pieceInfoLk.Lock() - pieceInfos[pieceUUID] = append(pieceInfos[pieceUUID], pi) - pieceInfoLk.Unlock() - - // /piece?piece_cid=xxxx - dataUrl := rootUrl - dataUrl.Path = "/piece" - dataUrl.RawQuery = "piece_id=" + pieceUUID.String() - - // add piece entry - refID, pieceWasCreated, err := addPieceEntry(ctx, db, conf, deal, pieceSize, dataUrl, ssize) - if err != nil { - return api.SectorOffset{}, err - } - - // wait for piece to be parked - if pieceWasCreated { - <-pi.done - } else { - // If the piece was not created, we need to close the done channel - close(pi.done) - - closeDataReader(pieceData) - } - - { - // piece park is either done or currently happening from another AP call - // now we need to make sure that the piece is definitely parked successfully - // - in case of errors we return, and boost should be able to retry the call - - // * If piece is completed, return - // * If piece is not completed but has null taskID, wait - // * If piece has a non-null taskID - // * If the task is in harmony_tasks, wait - // * Otherwise look for an error in harmony_task_history and return that - - for { - var taskID *int64 - var complete bool - err := db.QueryRow(ctx, `SELECT pp.task_id, pp.complete - FROM parked_pieces pp - JOIN parked_piece_refs ppr ON pp.id = ppr.piece_id - WHERE ppr.ref_id = $1;`, refID).Scan(&taskID, &complete) - if err 
!= nil { - return api.SectorOffset{}, xerrors.Errorf("getting piece park status: %w", err) - } - - if complete { - break - } - - if taskID == nil { - // piece is not parked yet - time.Sleep(5 * time.Second) - continue - } - - // check if task is in harmony_tasks - var taskName string - err = db.QueryRow(ctx, `SELECT name FROM harmony_task WHERE id = $1`, *taskID).Scan(&taskName) - if err == nil { - // task is in harmony_tasks, wait - time.Sleep(5 * time.Second) - continue - } - if err != pgx.ErrNoRows { - return api.SectorOffset{}, xerrors.Errorf("checking park-piece task in harmony_tasks: %w", err) - } - - // task is not in harmony_tasks, check harmony_task_history (latest work_end) - var taskError string - var taskResult bool - err = db.QueryRow(ctx, `SELECT result, err FROM harmony_task_history WHERE task_id = $1 ORDER BY work_end DESC LIMIT 1`, *taskID).Scan(&taskResult, &taskError) - if err != nil { - return api.SectorOffset{}, xerrors.Errorf("checking park-piece task history: %w", err) - } - if !taskResult { - return api.SectorOffset{}, xerrors.Errorf("park-piece task failed: %s", taskError) - } - return api.SectorOffset{}, xerrors.Errorf("park task succeeded but piece is not marked as complete") - } - } - - pieceIDUrl := url.URL{ - Scheme: "pieceref", - Opaque: fmt.Sprintf("%d", refID), - } - - // make a sector - so, err := pin.AllocatePieceToSector(ctx, maddr, deal, int64(pieceSize), pieceIDUrl, nil) - if err != nil { - return api.SectorOffset{}, err - } - - log.Infow("piece assigned to sector", "piece_cid", deal.PieceCID().String(), "sector", so.Sector, "offset", so.Offset) - - return so, nil - } -} - -func addPieceEntry(ctx context.Context, db *harmonydb.DB, conf *config.CurioConfig, deal lpiece.PieceDealInfo, pieceSize abi.UnpaddedPieceSize, dataUrl url.URL, ssize abi.SectorSize) (int64, bool, error) { - var refID int64 - var pieceWasCreated bool - - for { - var backpressureWait bool - - comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) 
(commit bool, err error) { - // BACKPRESSURE - wait, err := maybeApplyBackpressure(tx, conf.Ingest, ssize) - if err != nil { - return false, xerrors.Errorf("backpressure checks: %w", err) - } - if wait { - backpressureWait = true - return false, nil - } - - var pieceID int64 - // Attempt to select the piece ID first - err = tx.QueryRow(`SELECT id FROM parked_pieces WHERE piece_cid = $1`, deal.PieceCID().String()).Scan(&pieceID) - - if err != nil { - if errors.Is(err, pgx.ErrNoRows) { - // Piece does not exist, attempt to insert - err = tx.QueryRow(` - INSERT INTO parked_pieces (piece_cid, piece_padded_size, piece_raw_size) - VALUES ($1, $2, $3) - ON CONFLICT (piece_cid) DO NOTHING - RETURNING id`, deal.PieceCID().String(), int64(pieceSize.Padded()), int64(pieceSize)).Scan(&pieceID) - if err != nil { - return false, xerrors.Errorf("inserting new parked piece and getting id: %w", err) - } - pieceWasCreated = true // New piece was created - } else { - // Some other error occurred during select - return false, xerrors.Errorf("checking existing parked piece: %w", err) - } - } else { - pieceWasCreated = false // Piece already exists, no new piece was created - } - - // Add parked_piece_ref - err = tx.QueryRow(`INSERT INTO parked_piece_refs (piece_id, data_url) - VALUES ($1, $2) RETURNING ref_id`, pieceID, dataUrl.String()).Scan(&refID) - if err != nil { - return false, xerrors.Errorf("inserting parked piece ref: %w", err) - } - - // If everything went well, commit the transaction - return true, nil // This will commit the transaction - }, harmonydb.OptionRetry()) - if err != nil { - return refID, pieceWasCreated, xerrors.Errorf("inserting parked piece: %w", err) - } - if !comm { - if backpressureWait { - // Backpressure was applied, wait and try again - select { - case <-time.After(backpressureWaitTime): - case <-ctx.Done(): - return refID, pieceWasCreated, xerrors.Errorf("context done while waiting for backpressure: %w", ctx.Err()) - } - continue - } - - return refID, 
pieceWasCreated, xerrors.Errorf("piece tx didn't commit") - } - - break - } - return refID, pieceWasCreated, nil -} - -func closeDataReader(pieceData storiface.Data) { - go func() { - // close the data reader (drain to eof if it's not a closer) - if closer, ok := pieceData.(io.Closer); ok { - if err := closer.Close(); err != nil { - log.Warnw("closing pieceData in DataCid", "error", err) - } - } else { - log.Warnw("pieceData is not an io.Closer", "type", fmt.Sprintf("%T", pieceData)) - - _, err := io.Copy(io.Discard, pieceData) - if err != nil { - log.Warnw("draining pieceData in DataCid", "error", err) - } - } - }() -} - -func maybeApplyBackpressure(tx *harmonydb.Tx, cfg config.CurioIngestConfig, ssize abi.SectorSize) (wait bool, err error) { - var bufferedSDR, bufferedTrees, bufferedPoRep, waitDealSectors int - err = tx.QueryRow(` - WITH BufferedSDR AS ( - SELECT COUNT(p.task_id_sdr) - COUNT(t.owner_id) AS buffered_sdr_count - FROM sectors_sdr_pipeline p - LEFT JOIN harmony_task t ON p.task_id_sdr = t.id - WHERE p.after_sdr = false - ), - BufferedTrees AS ( - SELECT COUNT(p.task_id_tree_r) - COUNT(t.owner_id) AS buffered_trees_count - FROM sectors_sdr_pipeline p - LEFT JOIN harmony_task t ON p.task_id_tree_r = t.id - WHERE p.after_sdr = true AND p.after_tree_r = false - ), - BufferedPoRep AS ( - SELECT COUNT(p.task_id_porep) - COUNT(t.owner_id) AS buffered_porep_count - FROM sectors_sdr_pipeline p - LEFT JOIN harmony_task t ON p.task_id_porep = t.id - WHERE p.after_tree_r = true AND p.after_porep = false - ), - WaitDealSectors AS ( - SELECT COUNT(DISTINCT sip.sector_number) AS wait_deal_sectors_count - FROM sectors_sdr_initial_pieces sip - LEFT JOIN sectors_sdr_pipeline sp ON sip.sp_id = sp.sp_id AND sip.sector_number = sp.sector_number - WHERE sp.sector_number IS NULL - ) - SELECT - (SELECT buffered_sdr_count FROM BufferedSDR) AS total_buffered_sdr, - (SELECT buffered_trees_count FROM BufferedTrees) AS buffered_trees_count, - (SELECT buffered_porep_count FROM 
BufferedPoRep) AS buffered_porep_count, - (SELECT wait_deal_sectors_count FROM WaitDealSectors) AS wait_deal_sectors_count -`).Scan(&bufferedSDR, &bufferedTrees, &bufferedPoRep, &waitDealSectors) - if err != nil { - return false, xerrors.Errorf("counting buffered sectors: %w", err) - } - - var pieceSizes []abi.PaddedPieceSize - - err = tx.Select(&pieceSizes, `SELECT piece_padded_size FROM parked_pieces WHERE complete = false;`) - if err != nil { - return false, xerrors.Errorf("getting in-process pieces") - } - - sectors := sectorCount(pieceSizes, abi.PaddedPieceSize(ssize)) - if cfg.MaxQueueDealSector != 0 && waitDealSectors+sectors > cfg.MaxQueueDealSector { - log.Debugw("backpressure", "reason", "too many wait deal sectors", "wait_deal_sectors", waitDealSectors, "max", cfg.MaxQueueDealSector) - return true, nil - } - - if bufferedSDR > cfg.MaxQueueSDR { - log.Debugw("backpressure", "reason", "too many SDR tasks", "buffered", bufferedSDR, "max", cfg.MaxQueueSDR) - return true, nil - } - if cfg.MaxQueueTrees != 0 && bufferedTrees > cfg.MaxQueueTrees { - log.Debugw("backpressure", "reason", "too many tree tasks", "buffered", bufferedTrees, "max", cfg.MaxQueueTrees) - return true, nil - } - if cfg.MaxQueuePoRep != 0 && bufferedPoRep > cfg.MaxQueuePoRep { - log.Debugw("backpressure", "reason", "too many PoRep tasks", "buffered", bufferedPoRep, "max", cfg.MaxQueuePoRep) - return true, nil - } - - return false, nil -} - -func sectorCount(sizes []abi.PaddedPieceSize, targetSize abi.PaddedPieceSize) int { - sort.Slice(sizes, func(i, j int) bool { - return sizes[i] > sizes[j] - }) - - sectors := make([]abi.PaddedPieceSize, 0) - - for _, size := range sizes { - placed := false - for i := range sectors { - if sectors[i]+size <= targetSize { - sectors[i] += size - placed = true - break - } - } - if !placed { - sectors = append(sectors, size) - } - } - - return len(sectors) -} diff --git a/curiosrc/message/sender.go b/curiosrc/message/sender.go deleted file mode 100644 index 
614bc0be23a..00000000000 --- a/curiosrc/message/sender.go +++ /dev/null @@ -1,396 +0,0 @@ -package message - -import ( - "bytes" - "context" - "time" - - "github.com/google/uuid" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "go.uber.org/multierr" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/big" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/promise" -) - -var log = logging.Logger("curio/message") - -var SendLockedWait = 100 * time.Millisecond - -type SenderAPI interface { - StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) - GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) - WalletBalance(ctx context.Context, addr address.Address) (big.Int, error) - MpoolGetNonce(context.Context, address.Address) (uint64, error) - MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error) -} - -type SignerAPI interface { - WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) -} - -// Sender abstracts away highly-available message sending with coordination through -// HarmonyDB. It make sure that nonces are assigned transactionally, and that -// messages are correctly broadcasted to the network. It ensures that messages -// are sent serially, and that failures to send don't cause nonce gaps. 
-type Sender struct { - api SenderAPI - - sendTask *SendTask - - db *harmonydb.DB -} - -type SendTask struct { - sendTF promise.Promise[harmonytask.AddTaskFunc] - - api SenderAPI - signer SignerAPI - - db *harmonydb.DB -} - -func (s *SendTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - ctx := context.TODO() - - // get message from db - - var dbMsg struct { - FromKey string `db:"from_key"` - ToAddr string `db:"to_addr"` - - UnsignedData []byte `db:"unsigned_data"` - UnsignedCid string `db:"unsigned_cid"` - - // may not be null if we have somehow already signed but failed to send this message - Nonce *uint64 `db:"nonce"` - SignedData []byte `db:"signed_data"` - } - - err = s.db.QueryRow(ctx, ` - SELECT from_key, nonce, to_addr, unsigned_data, unsigned_cid - FROM message_sends - WHERE send_task_id = $1`, taskID).Scan( - &dbMsg.FromKey, &dbMsg.Nonce, &dbMsg.ToAddr, &dbMsg.UnsignedData, &dbMsg.UnsignedCid) - if err != nil { - return false, xerrors.Errorf("getting message from db: %w", err) - } - - // deserialize the message - var msg types.Message - err = msg.UnmarshalCBOR(bytes.NewReader(dbMsg.UnsignedData)) - if err != nil { - return false, xerrors.Errorf("unmarshaling unsigned db message: %w", err) - } - - // get db send lock - for { - // check if we still own the task - if !stillOwned() { - return false, xerrors.Errorf("lost ownership of task") - } - - // try to acquire lock - cn, err := s.db.Exec(ctx, ` - INSERT INTO message_send_locks (from_key, task_id, claimed_at) - VALUES ($1, $2, CURRENT_TIMESTAMP) ON CONFLICT (from_key) DO UPDATE - SET task_id = EXCLUDED.task_id, claimed_at = CURRENT_TIMESTAMP - WHERE message_send_locks.task_id = $2;`, dbMsg.FromKey, taskID) - if err != nil { - return false, xerrors.Errorf("acquiring send lock: %w", err) - } - - if cn == 1 { - // we got the lock - break - } - - // we didn't get the lock, wait a bit and try again - log.Infow("waiting for send lock", "task_id", taskID, "from", 
dbMsg.FromKey) - time.Sleep(SendLockedWait) - } - - // defer release db send lock - defer func() { - _, err2 := s.db.Exec(ctx, ` - DELETE from message_send_locks WHERE from_key = $1 AND task_id = $2`, dbMsg.FromKey, taskID) - if err2 != nil { - log.Errorw("releasing send lock", "task_id", taskID, "from", dbMsg.FromKey, "error", err2) - - // make sure harmony retries this task so that we eventually release this lock - done = false - err = multierr.Append(err, xerrors.Errorf("releasing send lock: %w", err2)) - } - }() - - // assign nonce IF NOT ASSIGNED (max(api.MpoolGetNonce, db nonce+1)) - var sigMsg *types.SignedMessage - - if dbMsg.Nonce == nil { - msgNonce, err := s.api.MpoolGetNonce(ctx, msg.From) - if err != nil { - return false, xerrors.Errorf("getting nonce from mpool: %w", err) - } - - // get nonce from db - var dbNonce *uint64 - r := s.db.QueryRow(ctx, ` - SELECT MAX(nonce) FROM message_sends WHERE from_key = $1 AND send_success = true`, msg.From.String()) - if err := r.Scan(&dbNonce); err != nil { - return false, xerrors.Errorf("getting nonce from db: %w", err) - } - - if dbNonce != nil && *dbNonce+1 > msgNonce { - msgNonce = *dbNonce + 1 - } - - msg.Nonce = msgNonce - - // sign message - sigMsg, err = s.signer.WalletSignMessage(ctx, msg.From, &msg) - if err != nil { - return false, xerrors.Errorf("signing message: %w", err) - } - - data, err := sigMsg.Serialize() - if err != nil { - return false, xerrors.Errorf("serializing message: %w", err) - } - - jsonBytes, err := sigMsg.MarshalJSON() - if err != nil { - return false, xerrors.Errorf("marshaling message: %w", err) - } - - // write to db - - n, err := s.db.Exec(ctx, ` - UPDATE message_sends SET nonce = $1, signed_data = $2, signed_json = $3, signed_cid = $4 - WHERE send_task_id = $5`, - msg.Nonce, data, string(jsonBytes), sigMsg.Cid().String(), taskID) - if err != nil { - return false, xerrors.Errorf("updating db record: %w", err) - } - if n != 1 { - log.Errorw("updating db record: expected 1 row to be 
affected, got %d", n) - return false, xerrors.Errorf("updating db record: expected 1 row to be affected, got %d", n) - } - } else { - // Note: this handles an unlikely edge-case: - // We have previously signed the message but either failed to send it or failed to update the db - // note that when that happens the likely cause is the curio process losing its db connection - // or getting killed before it can update the db. In that case the message lock will still be held - // so it will be safe to rebroadcast the signed message - - // deserialize the signed message - sigMsg = new(types.SignedMessage) - err = sigMsg.UnmarshalCBOR(bytes.NewReader(dbMsg.SignedData)) - if err != nil { - return false, xerrors.Errorf("unmarshaling signed db message: %w", err) - } - } - - // send! - _, err = s.api.MpoolPush(ctx, sigMsg) - - // persist send result - var sendSuccess = err == nil - var sendError string - if err != nil { - sendError = err.Error() - } - - _, err = s.db.Exec(ctx, ` - UPDATE message_sends SET send_success = $1, send_error = $2, send_time = CURRENT_TIMESTAMP - WHERE send_task_id = $3`, sendSuccess, sendError, taskID) - if err != nil { - return false, xerrors.Errorf("updating db record: %w", err) - } - - return true, nil -} - -func (s *SendTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - if len(ids) == 0 { - // probably can't happen, but panicking is bad - return nil, nil - } - - if s.signer == nil { - // can't sign messages here - return nil, nil - } - - return &ids[0], nil -} - -func (s *SendTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: 1024, - Name: "SendMessage", - Cost: resources.Resources{ - Cpu: 0, - Gpu: 0, - Ram: 1 << 20, - }, - MaxFailures: 1000, - Follows: nil, - } -} - -func (s *SendTask) Adder(taskFunc harmonytask.AddTaskFunc) { - s.sendTF.Set(taskFunc) -} - -var _ harmonytask.TaskInterface = &SendTask{} - -// NewSender creates a new Sender. 
-func NewSender(api SenderAPI, signer SignerAPI, db *harmonydb.DB) (*Sender, *SendTask) { - st := &SendTask{ - api: api, - signer: signer, - db: db, - } - - return &Sender{ - api: api, - db: db, - - sendTask: st, - }, st -} - -// Send atomically assigns a nonce, signs, and pushes a message -// to mempool. -// maxFee is only used when GasFeeCap/GasPremium fields aren't specified -// -// When maxFee is set to 0, Send will guess appropriate fee -// based on current chain conditions -// -// Send behaves much like fullnodeApi.MpoolPushMessage, but it coordinates -// through HarmonyDB, making it safe to broadcast messages from multiple independent -// API nodes -// -// Send is also currently more strict about required parameters than MpoolPushMessage -func (s *Sender) Send(ctx context.Context, msg *types.Message, mss *api.MessageSendSpec, reason string) (cid.Cid, error) { - if mss == nil { - return cid.Undef, xerrors.Errorf("MessageSendSpec cannot be nil") - } - if (mss.MsgUuid != uuid.UUID{}) { - return cid.Undef, xerrors.Errorf("MessageSendSpec.MsgUuid must be zero") - } - - fromA, err := s.api.StateAccountKey(ctx, msg.From, types.EmptyTSK) - if err != nil { - return cid.Undef, xerrors.Errorf("getting key address: %w", err) - } - - msg.From = fromA - - if msg.Nonce != 0 { - return cid.Undef, xerrors.Errorf("Send expects message nonce to be 0, was %d", msg.Nonce) - } - - msg, err = s.api.GasEstimateMessageGas(ctx, msg, mss, types.EmptyTSK) - if err != nil { - return cid.Undef, xerrors.Errorf("GasEstimateMessageGas error: %w", err) - } - - b, err := s.api.WalletBalance(ctx, msg.From) - if err != nil { - return cid.Undef, xerrors.Errorf("mpool push: getting origin balance: %w", err) - } - - requiredFunds := big.Add(msg.Value, msg.RequiredFunds()) - if b.LessThan(requiredFunds) { - return cid.Undef, xerrors.Errorf("mpool push: not enough funds: %s < %s", b, requiredFunds) - } - - // push the task - taskAdder := s.sendTask.sendTF.Val(ctx) - - unsBytes := new(bytes.Buffer) - 
err = msg.MarshalCBOR(unsBytes) - if err != nil { - return cid.Undef, xerrors.Errorf("marshaling message: %w", err) - } - - var sendTaskID *harmonytask.TaskID - taskAdder(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - _, err := tx.Exec(`insert into message_sends (from_key, to_addr, send_reason, unsigned_data, unsigned_cid, send_task_id) values ($1, $2, $3, $4, $5, $6)`, - msg.From.String(), msg.To.String(), reason, unsBytes.Bytes(), msg.Cid().String(), id) - if err != nil { - return false, xerrors.Errorf("inserting message into db: %w", err) - } - - sendTaskID = &id - - return true, nil - }) - - if sendTaskID == nil { - return cid.Undef, xerrors.Errorf("failed to add task") - } - - // wait for exec - var ( - pollInterval = 50 * time.Millisecond - pollIntervalMul = 2 - maxPollInterval = 5 * time.Second - pollLoops = 0 - - sigCid cid.Cid - sendErr error - ) - - for { - var err error - var sigCidStr, sendError *string - var sendSuccess *bool - - err = s.db.QueryRow(ctx, `select signed_cid, send_success, send_error from message_sends where send_task_id = $1`, &sendTaskID).Scan(&sigCidStr, &sendSuccess, &sendError) - if err != nil { - return cid.Undef, xerrors.Errorf("getting cid for task: %w", err) - } - - if sendSuccess == nil { - time.Sleep(pollInterval) - pollLoops++ - pollInterval *= time.Duration(pollIntervalMul) - if pollInterval > maxPollInterval { - pollInterval = maxPollInterval - } - - continue - } - - if sigCidStr == nil || sendError == nil { - // should never happen because sendSuccess is already not null here - return cid.Undef, xerrors.Errorf("got null values for sigCidStr or sendError, this should never happen") - } - - if !*sendSuccess { - sendErr = xerrors.Errorf("send error: %s", *sendError) - } else { - sigCid, err = cid.Parse(*sigCidStr) - if err != nil { - return cid.Undef, xerrors.Errorf("parsing signed cid: %w", err) - } - } - - break - } - - log.Infow("sent message", "cid", sigCid, "task_id", 
sendTaskID, "send_error", sendErr, "poll_loops", pollLoops) - - return sigCid, sendErr -} diff --git a/curiosrc/message/watch.go b/curiosrc/message/watch.go deleted file mode 100644 index 2253df28434..00000000000 --- a/curiosrc/message/watch.go +++ /dev/null @@ -1,214 +0,0 @@ -package message - -import ( - "context" - "encoding/json" - "sync/atomic" - - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/chainsched" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" -) - -const MinConfidence = 6 - -type MessageWaiterApi interface { - StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) - ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) - ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) - StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) - ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) -} - -type MessageWatcher struct { - db *harmonydb.DB - ht *harmonytask.TaskEngine - api MessageWaiterApi - - stopping, stopped chan struct{} - - updateCh chan struct{} - bestTs atomic.Pointer[types.TipSetKey] -} - -func NewMessageWatcher(db *harmonydb.DB, ht *harmonytask.TaskEngine, pcs *chainsched.CurioChainSched, api MessageWaiterApi) (*MessageWatcher, error) { - mw := &MessageWatcher{ - db: db, - ht: ht, - api: api, - stopping: make(chan struct{}), - stopped: make(chan struct{}), - updateCh: make(chan struct{}), - } - go mw.run() - if err := pcs.AddHandler(mw.processHeadChange); err != nil { - return nil, err - } - return mw, nil -} - -func (mw *MessageWatcher) run() { - defer 
close(mw.stopped) - - for { - select { - case <-mw.stopping: - // todo cleanup assignments - return - case <-mw.updateCh: - mw.update() - } - } -} - -func (mw *MessageWatcher) update() { - ctx := context.Background() - - tsk := *mw.bestTs.Load() - - ts, err := mw.api.ChainGetTipSet(ctx, tsk) - if err != nil { - log.Errorf("failed to get tipset: %+v", err) - return - } - - lbts, err := mw.api.ChainGetTipSetByHeight(ctx, ts.Height()-MinConfidence, tsk) - if err != nil { - log.Errorf("failed to get tipset: %+v", err) - return - } - lbtsk := lbts.Key() - - machineID := mw.ht.ResourcesAvailable().MachineID - - // first if we see pending messages with null owner, assign them to ourselves - { - n, err := mw.db.Exec(ctx, `UPDATE message_waits SET waiter_machine_id = $1 WHERE waiter_machine_id IS NULL AND executed_tsk_cid IS NULL`, machineID) - if err != nil { - log.Errorf("failed to assign pending messages: %+v", err) - return - } - if n > 0 { - log.Debugw("assigned pending messages to ourselves", "assigned", n) - } - } - - // get messages assigned to us - var msgs []struct { - Cid string `db:"signed_message_cid"` - From string `db:"from_key"` - Nonce uint64 `db:"nonce"` - - FromAddr address.Address `db:"-"` - } - - // really large limit in case of things getting stuck and backlogging severely - err = mw.db.Select(ctx, &msgs, `SELECT signed_message_cid, from_key, nonce FROM message_waits - JOIN message_sends ON signed_message_cid = signed_cid - WHERE waiter_machine_id = $1 LIMIT 10000`, machineID) - if err != nil { - log.Errorf("failed to get assigned messages: %+v", err) - return - } - - // get address/nonce set to check - toCheck := make(map[address.Address]uint64) - - for i := range msgs { - msgs[i].FromAddr, err = address.NewFromString(msgs[i].From) - if err != nil { - log.Errorf("failed to parse from address: %+v", err) - return - } - toCheck[msgs[i].FromAddr] = 0 - } - - // get the nonce for each address - for addr := range toCheck { - act, err := 
mw.api.StateGetActor(ctx, addr, lbtsk) - if err != nil { - log.Errorf("failed to get actor: %+v", err) - return - } - - toCheck[addr] = act.Nonce - } - - // check if any of the messages we have assigned to us are now on chain, and have been for MinConfidence epochs - for _, msg := range msgs { - if msg.Nonce > toCheck[msg.FromAddr] { - continue // definitely not on chain yet - } - - look, err := mw.api.StateSearchMsg(ctx, lbtsk, cid.MustParse(msg.Cid), api.LookbackNoLimit, false) - if err != nil { - log.Errorf("failed to search for message: %+v", err) - return - } - - if look == nil { - continue // not on chain yet (or not executed yet) - } - - tskCid, err := look.TipSet.Cid() - if err != nil { - log.Errorf("failed to get tipset cid: %+v", err) - return - } - - emsg, err := mw.api.ChainGetMessage(ctx, look.Message) - if err != nil { - log.Errorf("failed to get message: %+v", err) - return - } - - execMsg, err := json.Marshal(emsg) - if err != nil { - log.Errorf("failed to marshal message: %+v", err) - return - } - - // record in db - _, err = mw.db.Exec(ctx, `UPDATE message_waits SET - waiter_machine_id = NULL, - executed_tsk_cid = $1, executed_tsk_epoch = $2, - executed_msg_cid = $3, executed_msg_data = $4, - executed_rcpt_exitcode = $5, executed_rcpt_return = $6, executed_rcpt_gas_used = $7 - WHERE signed_message_cid = $8`, tskCid, look.Height, - look.Message, execMsg, - look.Receipt.ExitCode, look.Receipt.Return, look.Receipt.GasUsed, - msg.Cid) - if err != nil { - log.Errorf("failed to update message wait: %+v", err) - return - } - } -} - -func (mw *MessageWatcher) Stop(ctx context.Context) error { - close(mw.stopping) - select { - case <-mw.stopped: - case <-ctx.Done(): - return ctx.Err() - } - - return nil -} - -func (mw *MessageWatcher) processHeadChange(ctx context.Context, revert *types.TipSet, apply *types.TipSet) error { - best := apply.Key() - mw.bestTs.Store(&best) - select { - case mw.updateCh <- struct{}{}: - default: - } - return nil -} diff --git 
a/curiosrc/multictladdr/multiaddresses.go b/curiosrc/multictladdr/multiaddresses.go deleted file mode 100644 index af751ff17e7..00000000000 --- a/curiosrc/multictladdr/multiaddresses.go +++ /dev/null @@ -1,81 +0,0 @@ -package multictladdr - -import ( - "context" - - logging "github.com/ipfs/go-log/v2" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/storage/ctladdr" -) - -var log = logging.Logger("curio/multictladdr") - -type MultiAddressSelector struct { - MinerMap map[address.Address]api.AddressConfig -} - -func (as *MultiAddressSelector) AddressFor(ctx context.Context, a ctladdr.NodeApi, minerID address.Address, mi api.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { - if as == nil { - // should only happen in some tests - log.Warnw("smart address selection disabled, using worker address") - return mi.Worker, big.Zero(), nil - } - - tmp := as.MinerMap[minerID] - - var addrs []address.Address - switch use { - case api.PreCommitAddr: - addrs = append(addrs, tmp.PreCommitControl...) - case api.CommitAddr: - addrs = append(addrs, tmp.CommitControl...) - case api.TerminateSectorsAddr: - addrs = append(addrs, tmp.TerminateControl...) - case api.DealPublishAddr: - addrs = append(addrs, tmp.DealPublishControl...) - default: - defaultCtl := map[address.Address]struct{}{} - for _, a := range mi.ControlAddresses { - defaultCtl[a] = struct{}{} - } - delete(defaultCtl, mi.Owner) - delete(defaultCtl, mi.Worker) - - configCtl := append([]address.Address{}, tmp.PreCommitControl...) - configCtl = append(configCtl, tmp.CommitControl...) - configCtl = append(configCtl, tmp.TerminateControl...) - configCtl = append(configCtl, tmp.DealPublishControl...) 
- - for _, addr := range configCtl { - if addr.Protocol() != address.ID { - var err error - addr, err = a.StateLookupID(ctx, addr, types.EmptyTSK) - if err != nil { - log.Warnw("looking up control address", "address", addr, "error", err) - continue - } - } - - delete(defaultCtl, addr) - } - - for a := range defaultCtl { - addrs = append(addrs, a) - } - } - - if len(addrs) == 0 || !tmp.DisableWorkerFallback { - addrs = append(addrs, mi.Worker) - } - if !tmp.DisableOwnerFallback { - addrs = append(addrs, mi.Owner) - } - - return ctladdr.PickAddress(ctx, a, mi, goodFunds, minFunds, addrs) -} diff --git a/curiosrc/piece/task_cleanup_piece.go b/curiosrc/piece/task_cleanup_piece.go deleted file mode 100644 index e4110e3eed9..00000000000 --- a/curiosrc/piece/task_cleanup_piece.go +++ /dev/null @@ -1,135 +0,0 @@ -package piece - -import ( - "context" - "time" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/curiosrc/ffi" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/promise" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type CleanupPieceTask struct { - max int - db *harmonydb.DB - sc *ffi.SealCalls - - TF promise.Promise[harmonytask.AddTaskFunc] -} - -func NewCleanupPieceTask(db *harmonydb.DB, sc *ffi.SealCalls, max int) *CleanupPieceTask { - pt := &CleanupPieceTask{ - db: db, - sc: sc, - - max: max, - } - go pt.pollCleanupTasks(context.Background()) - return pt -} - -func (c *CleanupPieceTask) pollCleanupTasks(ctx context.Context) { - for { - // select pieces with no refs and null cleanup_task_id - var pieceIDs []struct { - ID storiface.PieceNumber `db:"id"` - } - - err := c.db.Select(ctx, &pieceIDs, `SELECT id FROM parked_pieces WHERE cleanup_task_id IS NULL AND (SELECT count(*) FROM parked_piece_refs WHERE piece_id = parked_pieces.id) = 0`) - if err != nil 
{ - log.Errorf("failed to get parked pieces: %s", err) - time.Sleep(PieceParkPollInterval) - continue - } - - if len(pieceIDs) == 0 { - time.Sleep(PieceParkPollInterval) - continue - } - - for _, pieceID := range pieceIDs { - pieceID := pieceID - - // create a task for each piece - c.TF.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, err error) { - // update - n, err := tx.Exec(`UPDATE parked_pieces SET cleanup_task_id = $1 WHERE id = $2 AND (SELECT count(*) FROM parked_piece_refs WHERE piece_id = parked_pieces.id) = 0`, id, pieceID.ID) - if err != nil { - return false, xerrors.Errorf("updating parked piece: %w", err) - } - - // commit only if we updated the piece - return n > 0, nil - }) - } - } -} - -func (c *CleanupPieceTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - ctx := context.Background() - - // select by cleanup_task_id - var pieceID int64 - - err = c.db.QueryRow(ctx, "SELECT id FROM parked_pieces WHERE cleanup_task_id = $1", taskID).Scan(&pieceID) - if err != nil { - return false, xerrors.Errorf("query parked_piece: %w", err) - } - - // delete from parked_pieces where id = $1 where ref count = 0 - // note: we delete from the db first because that guarantees that the piece is no longer in use - // if storage delete fails, it will be retried later is other cleanup tasks - n, err := c.db.Exec(ctx, "DELETE FROM parked_pieces WHERE id = $1 AND (SELECT count(*) FROM parked_piece_refs WHERE piece_id = $1) = 0", pieceID) - if err != nil { - return false, xerrors.Errorf("delete parked_piece: %w", err) - } - - if n == 0 { - _, err = c.db.Exec(ctx, `UPDATE parked_pieces SET cleanup_task_id = NULL WHERE id = $1`, pieceID) - if err != nil { - return false, xerrors.Errorf("marking piece as complete: %w", err) - } - - return true, nil - } - - // remove from storage - err = c.sc.RemovePiece(ctx, storiface.PieceNumber(pieceID)) - if err != nil { - log.Errorw("remove piece", "piece_id", pieceID, "error", 
err) - } - - return true, nil -} - -func (c *CleanupPieceTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - // the remove call runs on paths.Remote storage, so it doesn't really matter where it runs - - id := ids[0] - return &id, nil -} - -func (c *CleanupPieceTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: c.max, - Name: "DropPiece", - Cost: resources.Resources{ - Cpu: 1, - Gpu: 0, - Ram: 64 << 20, - Storage: nil, - }, - MaxFailures: 10, - } -} - -func (c *CleanupPieceTask) Adder(taskFunc harmonytask.AddTaskFunc) { - c.TF.Set(taskFunc) -} - -var _ harmonytask.TaskInterface = &CleanupPieceTask{} diff --git a/curiosrc/piece/task_park_piece.go b/curiosrc/piece/task_park_piece.go deleted file mode 100644 index 80632e99cf8..00000000000 --- a/curiosrc/piece/task_park_piece.go +++ /dev/null @@ -1,239 +0,0 @@ -package piece - -import ( - "context" - "encoding/json" - "strconv" - "time" - - "github.com/hashicorp/go-multierror" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/curiosrc/ffi" - "github.com/filecoin-project/lotus/curiosrc/seal" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/promise" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var log = logging.Logger("cu-piece") -var PieceParkPollInterval = time.Second * 15 - -// ParkPieceTask gets a piece from some origin, and parks it in storage -// Pieces are always f00, piece ID is mapped to pieceCID in the DB -type ParkPieceTask struct { - db *harmonydb.DB - sc *ffi.SealCalls - - TF promise.Promise[harmonytask.AddTaskFunc] - - max int -} - -func NewParkPieceTask(db *harmonydb.DB, sc *ffi.SealCalls, max int) 
(*ParkPieceTask, error) { - pt := &ParkPieceTask{ - db: db, - sc: sc, - - max: max, - } - - ctx := context.Background() - - // We should delete all incomplete pieces before we start - // as we would have lost reader for these. The RPC caller will get an error - // when Curio shuts down before parking a piece. They can always retry. - // Leaving these pieces we utilise unnecessary resources in the form of ParkPieceTask - - _, err := db.Exec(ctx, `DELETE FROM parked_pieces WHERE complete = FALSE AND task_id IS NULL`) - if err != nil { - return nil, xerrors.Errorf("failed to delete incomplete parked pieces: %w", err) - } - - go pt.pollPieceTasks(ctx) - return pt, nil -} - -func (p *ParkPieceTask) pollPieceTasks(ctx context.Context) { - for { - // select parked pieces with no task_id - var pieceIDs []struct { - ID storiface.PieceNumber `db:"id"` - } - - err := p.db.Select(ctx, &pieceIDs, `SELECT id FROM parked_pieces WHERE complete = FALSE AND task_id IS NULL`) - if err != nil { - log.Errorf("failed to get parked pieces: %s", err) - time.Sleep(PieceParkPollInterval) - continue - } - - if len(pieceIDs) == 0 { - time.Sleep(PieceParkPollInterval) - continue - } - - for _, pieceID := range pieceIDs { - pieceID := pieceID - - // create a task for each piece - p.TF.Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, err error) { - // update - n, err := tx.Exec(`UPDATE parked_pieces SET task_id = $1 WHERE id = $2 AND complete = FALSE AND task_id IS NULL`, id, pieceID.ID) - if err != nil { - return false, xerrors.Errorf("updating parked piece: %w", err) - } - - // commit only if we updated the piece - return n > 0, nil - }) - } - } -} - -func (p *ParkPieceTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - ctx := context.Background() - - // Define a struct to hold piece data. 
- var piecesData []struct { - PieceID int64 `db:"id"` - PieceCreatedAt time.Time `db:"created_at"` - PieceCID string `db:"piece_cid"` - Complete bool `db:"complete"` - PiecePaddedSize int64 `db:"piece_padded_size"` - PieceRawSize string `db:"piece_raw_size"` - } - - // Select the piece data using the task ID. - err = p.db.Select(ctx, &piecesData, ` - SELECT id, created_at, piece_cid, complete, piece_padded_size, piece_raw_size - FROM parked_pieces - WHERE task_id = $1 - `, taskID) - if err != nil { - return false, xerrors.Errorf("fetching piece data: %w", err) - } - - if len(piecesData) == 0 { - return false, xerrors.Errorf("no piece data found for task_id: %d", taskID) - } - - pieceData := piecesData[0] - - if pieceData.Complete { - log.Warnw("park piece task already complete", "task_id", taskID, "piece_cid", pieceData.PieceCID) - return true, nil - } - - // Define a struct for reference data. - var refData []struct { - DataURL string `db:"data_url"` - DataHeaders json.RawMessage `db:"data_headers"` - } - - // Now, select the first reference data that has a URL. - err = p.db.Select(ctx, &refData, ` - SELECT data_url, data_headers - FROM parked_piece_refs - WHERE piece_id = $1 AND data_url IS NOT NULL`, pieceData.PieceID) - if err != nil { - return false, xerrors.Errorf("fetching reference data: %w", err) - } - - if len(refData) == 0 { - return false, xerrors.Errorf("no refs found for piece_id: %d", pieceData.PieceID) - } - - // Convert piece_raw_size from string to int64. 
- pieceRawSize, err := strconv.ParseInt(pieceData.PieceRawSize, 10, 64) - if err != nil { - return false, xerrors.Errorf("parsing piece raw size: %w", err) - } - - var merr error - - for i := range refData { - if refData[i].DataURL != "" { - upr := &seal.UrlPieceReader{ - Url: refData[0].DataURL, - RawSize: pieceRawSize, - } - defer func() { - _ = upr.Close() - }() - - pnum := storiface.PieceNumber(pieceData.PieceID) - - if err := p.sc.WritePiece(ctx, &taskID, pnum, pieceRawSize, upr); err != nil { - merr = multierror.Append(merr, xerrors.Errorf("write piece: %w", err)) - continue - } - - // Update the piece as complete after a successful write. - _, err = p.db.Exec(ctx, `UPDATE parked_pieces SET complete = TRUE task_id = NULL WHERE id = $1`, pieceData.PieceID) - if err != nil { - return false, xerrors.Errorf("marking piece as complete: %w", err) - } - - return true, nil - } - return false, merr - } - - // If no URL is found, this indicates an issue since at least one URL is expected. - return false, xerrors.Errorf("no data URL found for piece_id: %d", pieceData.PieceID) -} - -func (p *ParkPieceTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - id := ids[0] - return &id, nil -} - -func (p *ParkPieceTask) TypeDetails() harmonytask.TaskTypeDetails { - const maxSizePiece = 64 << 30 - - return harmonytask.TaskTypeDetails{ - Max: p.max, - Name: "ParkPiece", - Cost: resources.Resources{ - Cpu: 1, - Gpu: 0, - Ram: 64 << 20, - Storage: p.sc.Storage(p.taskToRef, storiface.FTPiece, storiface.FTNone, maxSizePiece, storiface.PathSealing, paths.MinFreeStoragePercentage), - }, - MaxFailures: 10, - } -} - -func (p *ParkPieceTask) taskToRef(id harmonytask.TaskID) (ffi.SectorRef, error) { - var pieceIDs []struct { - ID storiface.PieceNumber `db:"id"` - } - - err := p.db.Select(context.Background(), &pieceIDs, `SELECT id FROM parked_pieces WHERE task_id = $1`, id) - if err != nil { - return ffi.SectorRef{}, 
xerrors.Errorf("getting piece id: %w", err) - } - - if len(pieceIDs) != 1 { - return ffi.SectorRef{}, xerrors.Errorf("expected 1 piece id, got %d", len(pieceIDs)) - } - - pref := pieceIDs[0].ID.Ref() - - return ffi.SectorRef{ - SpID: int64(pref.ID.Miner), - SectorNumber: int64(pref.ID.Number), - RegSealProof: pref.ProofType, - }, nil -} - -func (p *ParkPieceTask) Adder(taskFunc harmonytask.AddTaskFunc) { - p.TF.Set(taskFunc) -} - -var _ harmonytask.TaskInterface = &ParkPieceTask{} diff --git a/curiosrc/proof/treed_build.go b/curiosrc/proof/treed_build.go deleted file mode 100644 index 7145c9257ca..00000000000 --- a/curiosrc/proof/treed_build.go +++ /dev/null @@ -1,292 +0,0 @@ -package proof - -import ( - "io" - "math/bits" - "os" - "runtime" - "sync" - "time" - - "github.com/hashicorp/go-multierror" - "github.com/ipfs/go-cid" - pool "github.com/libp2p/go-buffer-pool" - "github.com/minio/sha256-simd" - "golang.org/x/xerrors" - - commcid "github.com/filecoin-project/go-fil-commcid" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/storage/sealer/fr32" -) - -const nodeSize = 32 -const threadChunkSize = 1 << 20 - -func hashChunk(data [][]byte) { - l1Nodes := len(data[0]) / nodeSize / 2 - - d := sha256.New() - - sumBuf := make([]byte, nodeSize) - - for i := 0; i < l1Nodes; i++ { - levels := bits.TrailingZeros(^uint(i)) + 1 - - inNode := i * 2 // at level 0 - outNode := i - - for l := 0; l < levels; l++ { - d.Reset() - inNodeData := data[l][inNode*nodeSize : (inNode+2)*nodeSize] - d.Write(inNodeData) - copy(data[l+1][outNode*nodeSize:(outNode+1)*nodeSize], d.Sum(sumBuf[:0])) - // set top bits to 00 - data[l+1][outNode*nodeSize+nodeSize-1] &= 0x3f - - inNode-- - inNode >>= 1 - outNode >>= 1 - } - } -} - -func BuildTreeD(data io.Reader, unpaddedData bool, outPath string, size abi.PaddedPieceSize) (_ cid.Cid, err error) { - out, err := os.Create(outPath) - if err != nil { - return cid.Undef, err - } - defer func() { - cerr := 
out.Close() - - if err != nil { - // remove the file, it's probably bad - rerr := os.Remove(outPath) - if rerr != nil { - err = multierror.Append(err, rerr) - } - } - - if cerr != nil { - err = multierror.Append(err, cerr) - } - }() - - outSize := treeSize(size) - - // allocate space for the tree - err = out.Truncate(int64(outSize)) - if err != nil { - return cid.Undef, err - } - - // setup buffers - maxThreads := int64(size) / threadChunkSize - if maxThreads > int64(runtime.NumCPU())*15/10 { - maxThreads = int64(runtime.NumCPU()) * 15 / 10 - } - if maxThreads < 1 { - maxThreads = 1 - } - - // allocate buffers - var bufLk sync.Mutex - workerBuffers := make([][][]byte, maxThreads) // [worker][level][levelSize] - - for i := range workerBuffers { - workerBuffer := make([][]byte, 1) - - bottomBufSize := int64(threadChunkSize) - if bottomBufSize > int64(size) { - bottomBufSize = int64(size) - } - workerBuffer[0] = pool.Get(int(bottomBufSize)) - - // append levels until we get to a 32 byte level - for len(workerBuffer[len(workerBuffer)-1]) > 32 { - newLevel := pool.Get(len(workerBuffer[len(workerBuffer)-1]) / 2) - workerBuffer = append(workerBuffer, newLevel) - } - workerBuffers[i] = workerBuffer - } - - // prepare apex buffer - var apexBuf [][]byte - { - apexBottomSize := uint64(size) / uint64(len(workerBuffers[0][0])) - if apexBottomSize == 0 { - apexBottomSize = 1 - } - - apexBuf = make([][]byte, 1) - apexBuf[0] = pool.Get(int(apexBottomSize * nodeSize)) - for len(apexBuf[len(apexBuf)-1]) > 32 { - newLevel := pool.Get(len(apexBuf[len(apexBuf)-1]) / 2) - apexBuf = append(apexBuf, newLevel) - } - } - - // defer free pool buffers - defer func() { - for _, workerBuffer := range workerBuffers { - for _, level := range workerBuffer { - pool.Put(level) - } - } - for _, level := range apexBuf { - pool.Put(level) - } - }() - - // start processing - var processed uint64 - var workWg sync.WaitGroup - var errLock sync.Mutex - var oerr error - - for processed < uint64(size) { - // 
get a buffer - bufLk.Lock() - if len(workerBuffers) == 0 { - bufLk.Unlock() - time.Sleep(50 * time.Microsecond) - continue - } - - // pop last - workBuffer := workerBuffers[len(workerBuffers)-1] - workerBuffers = workerBuffers[:len(workerBuffers)-1] - - bufLk.Unlock() - - // before reading check that we didn't get a write error - errLock.Lock() - if oerr != nil { - errLock.Unlock() - return cid.Undef, oerr - } - errLock.Unlock() - - // read data into the bottom level - // note: the bottom level will never be too big; data is power of two - // size, and if it's smaller than a single buffer, we only have one - // smaller buffer - - processedSize := uint64(len(workBuffer[0])) - if unpaddedData { - workBuffer[0] = workBuffer[0][:abi.PaddedPieceSize(len(workBuffer[0])).Unpadded()] - } - - _, err := io.ReadFull(data, workBuffer[0]) - if err != nil && err != io.EOF { - return cid.Undef, err - } - - // start processing - workWg.Add(1) - go func(startOffset uint64) { - defer workWg.Done() - - if unpaddedData { - paddedBuf := pool.Get(int(abi.UnpaddedPieceSize(len(workBuffer[0])).Padded())) - fr32.PadSingle(workBuffer[0], paddedBuf) - pool.Put(workBuffer[0]) - workBuffer[0] = paddedBuf - } - hashChunk(workBuffer) - - // persist apex - { - apexHash := workBuffer[len(workBuffer)-1] - hashPos := startOffset / uint64(len(workBuffer[0])) * nodeSize - - copy(apexBuf[0][hashPos:hashPos+nodeSize], apexHash) - } - - // write results - offsetInLayer := startOffset - for layer, layerData := range workBuffer { - - // layerOff is outSize:bits[most significant bit - layer] - layerOff := layerOffset(uint64(size), layer) - dataOff := offsetInLayer + layerOff - offsetInLayer /= 2 - - _, werr := out.WriteAt(layerData, int64(dataOff)) - if werr != nil { - errLock.Lock() - oerr = multierror.Append(oerr, werr) - errLock.Unlock() - return - } - } - - // return buffer - bufLk.Lock() - workerBuffers = append(workerBuffers, workBuffer) - bufLk.Unlock() - }(processed) - - processed += processedSize - 
} - - workWg.Wait() - - if oerr != nil { - return cid.Undef, oerr - } - - threadLayers := bits.Len(uint(len(workerBuffers[0][0])) / nodeSize) - - if len(apexBuf) > 0 { - // hash the apex - hashChunk(apexBuf) - - // write apex - for apexLayer, layerData := range apexBuf { - if apexLayer == 0 { - continue - } - layer := apexLayer + threadLayers - 1 - - layerOff := layerOffset(uint64(size), layer) - _, werr := out.WriteAt(layerData, int64(layerOff)) - if werr != nil { - return cid.Undef, xerrors.Errorf("write apex: %w", werr) - } - } - } - - var commp [32]byte - copy(commp[:], apexBuf[len(apexBuf)-1]) - - commCid, err := commcid.DataCommitmentV1ToCID(commp[:]) - if err != nil { - return cid.Undef, err - } - - return commCid, nil -} - -func treeSize(data abi.PaddedPieceSize) uint64 { - bytesToAlloc := uint64(data) - - // append bytes until we get to nodeSize - for todo := bytesToAlloc; todo > nodeSize; todo /= 2 { - bytesToAlloc += todo / 2 - } - - return bytesToAlloc -} - -func layerOffset(size uint64, layer int) uint64 { - allOnes := uint64(0xffff_ffff_ffff_ffff) - - // get 'layer' bits set to 1 - layerOnes := allOnes >> uint64(64-layer) - - // shift layerOnes to the left such that the highest bit is at the same position as the highest bit in size (which is power-of-two) - sizeBitPos := bits.Len64(size) - 1 - layerOnes <<= sizeBitPos - (layer - 1) - return layerOnes -} diff --git a/curiosrc/proof/treed_build_test.go b/curiosrc/proof/treed_build_test.go deleted file mode 100644 index f69e9832247..00000000000 --- a/curiosrc/proof/treed_build_test.go +++ /dev/null @@ -1,516 +0,0 @@ -package proof - -import ( - "bufio" - "bytes" - "crypto/rand" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "testing" - - pool "github.com/libp2p/go-buffer-pool" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader" -) - -func TestTreeSize(t *testing.T) { - require.Equal(t, 
uint64(32), treeSize(abi.PaddedPieceSize(32))) - require.Equal(t, uint64(64+32), treeSize(abi.PaddedPieceSize(64))) - require.Equal(t, uint64(128+64+32), treeSize(abi.PaddedPieceSize(128))) - require.Equal(t, uint64(256+128+64+32), treeSize(abi.PaddedPieceSize(256))) -} - -func TestTreeLayerOffset(t *testing.T) { - require.Equal(t, uint64(0), layerOffset(128, 0)) - require.Equal(t, uint64(128), layerOffset(128, 1)) - require.Equal(t, uint64(128+64), layerOffset(128, 2)) - require.Equal(t, uint64(128+64+32), layerOffset(128, 3)) -} - -func TestHashChunk(t *testing.T) { - chunk := make([]byte, 64) - chunk[0] = 0x01 - - out := make([]byte, 32) - - data := [][]byte{chunk, out} - hashChunk(data) - - // 16 ab ab 34 1f b7 f3 70 e2 7e 4d ad cf 81 76 6d - // d0 df d0 ae 64 46 94 77 bb 2c f6 61 49 38 b2 2f - expect := []byte{ - 0x16, 0xab, 0xab, 0x34, 0x1f, 0xb7, 0xf3, 0x70, - 0xe2, 0x7e, 0x4d, 0xad, 0xcf, 0x81, 0x76, 0x6d, - 0xd0, 0xdf, 0xd0, 0xae, 0x64, 0x46, 0x94, 0x77, - 0xbb, 0x2c, 0xf6, 0x61, 0x49, 0x38, 0xb2, 0x2f, - } - - require.Equal(t, expect, out) -} - -func TestHashChunk2L(t *testing.T) { - data0 := make([]byte, 128) - data0[0] = 0x01 - - l1 := make([]byte, 64) - l2 := make([]byte, 32) - - data := [][]byte{data0, l1, l2} - hashChunk(data) - - // 16 ab ab 34 1f b7 f3 70 e2 7e 4d ad cf 81 76 6d - // d0 df d0 ae 64 46 94 77 bb 2c f6 61 49 38 b2 2f - expectL1Left := []byte{ - 0x16, 0xab, 0xab, 0x34, 0x1f, 0xb7, 0xf3, 0x70, - 0xe2, 0x7e, 0x4d, 0xad, 0xcf, 0x81, 0x76, 0x6d, - 0xd0, 0xdf, 0xd0, 0xae, 0x64, 0x46, 0x94, 0x77, - 0xbb, 0x2c, 0xf6, 0x61, 0x49, 0x38, 0xb2, 0x2f, - } - - // f5 a5 fd 42 d1 6a 20 30 27 98 ef 6e d3 09 97 9b - // 43 00 3d 23 20 d9 f0 e8 ea 98 31 a9 27 59 fb 0b - expectL1Rest := []byte{ - 0xf5, 0xa5, 0xfd, 0x42, 0xd1, 0x6a, 0x20, 0x30, - 0x27, 0x98, 0xef, 0x6e, 0xd3, 0x09, 0x97, 0x9b, - 0x43, 0x00, 0x3d, 0x23, 0x20, 0xd9, 0xf0, 0xe8, - 0xea, 0x98, 0x31, 0xa9, 0x27, 0x59, 0xfb, 0x0b, - } - - require.Equal(t, expectL1Left, l1[:32]) - 
require.Equal(t, expectL1Rest, l1[32:]) - - // 0d d6 da e4 1c 2f 75 55 01 29 59 4f b6 44 e4 a8 - // 42 cf af b3 16 a2 d5 93 21 e3 88 fe 84 a1 ec 2f - expectL2 := []byte{ - 0x0d, 0xd6, 0xda, 0xe4, 0x1c, 0x2f, 0x75, 0x55, - 0x01, 0x29, 0x59, 0x4f, 0xb6, 0x44, 0xe4, 0xa8, - 0x42, 0xcf, 0xaf, 0xb3, 0x16, 0xa2, 0xd5, 0x93, - 0x21, 0xe3, 0x88, 0xfe, 0x84, 0xa1, 0xec, 0x2f, - } - - require.Equal(t, expectL2, l2) -} - -func Test2K(t *testing.T) { - data := make([]byte, 2048) - data[0] = 0x01 - - tempFile := filepath.Join(t.TempDir(), "tree.dat") - - commd, err := BuildTreeD(bytes.NewReader(data), false, tempFile, 2048) - require.NoError(t, err) - fmt.Println(commd) - - // dump tree.dat - dat, err := os.ReadFile(tempFile) - require.NoError(t, err) - - for i, b := range dat { - // 32 values per line - if i%32 == 0 { - fmt.Println() - - // line offset hexdump style - fmt.Printf("%04x: ", i) - } - fmt.Printf("%02x ", b) - } - fmt.Println() - - require.Equal(t, "baga6ea4seaqovgk4kr4eoifujh6jfmdqvw3m6zrvyjqzu6s6abkketui6jjoydi", commd.String()) - -} - -const expectD8M = `00000000: 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 -00000020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 -* -00800000: 16 ab ab 34 1f b7 f3 70 e2 7e 4d ad cf 81 76 6d d0 df d0 ae 64 46 94 77 bb 2c f6 61 49 38 b2 2f -00800020: f5 a5 fd 42 d1 6a 20 30 27 98 ef 6e d3 09 97 9b 43 00 3d 23 20 d9 f0 e8 ea 98 31 a9 27 59 fb 0b -* -00c00000: 0d d6 da e4 1c 2f 75 55 01 29 59 4f b6 44 e4 a8 42 cf af b3 16 a2 d5 93 21 e3 88 fe 84 a1 ec 2f -00c00020: 37 31 bb 99 ac 68 9f 66 ee f5 97 3e 4a 94 da 18 8f 4d dc ae 58 07 24 fc 6f 3f d6 0d fd 48 83 33 -* -00e00000: 11 b1 c4 80 05 21 d5 e5 83 4a de b3 70 7c 74 15 9f f3 37 b0 96 16 3c 94 31 16 73 40 e7 b1 17 1d -00e00020: 64 2a 60 7e f8 86 b0 04 bf 2c 19 78 46 3a e1 d4 69 3a c0 f4 10 eb 2d 1b 7a 47 fe 20 5e 5e 75 0f -* -00f00000: ec 69 25 55 9b cc 52 84 0a 22 38 5b 2b 6b 35 b4 
50 14 50 04 28 f4 59 fe c1 23 01 0f e7 ef 18 1c -00f00020: 57 a2 38 1a 28 65 2b f4 7f 6b ef 7a ca 67 9b e4 ae de 58 71 ab 5c f3 eb 2c 08 11 44 88 cb 85 26 -* -00f80000: 3d d2 eb 19 3e e2 f0 47 34 87 bf 4b 83 aa 3a bd a9 c8 4e fa e5 52 6d 8a fd 61 2d 5d 9e 3d 79 34 -00f80020: 1f 7a c9 59 55 10 e0 9e a4 1c 46 0b 17 64 30 bb 32 2c d6 fb 41 2e c5 7c b1 7d 98 9a 43 10 37 2f -* -00fc0000: ea 99 5c 54 78 47 20 b4 49 fc 92 b0 70 ad b6 cf 66 35 c2 61 9a 7a 5e 00 54 a2 4e 88 f2 52 ec 0d -00fc0020: fc 7e 92 82 96 e5 16 fa ad e9 86 b2 8f 92 d4 4a 4f 24 b9 35 48 52 23 37 6a 79 90 27 bc 18 f8 33 -* -00fe0000: b9 97 02 8b 06 d7 2e 96 07 86 79 58 e1 5f 8d 07 b7 ae 37 ab 29 ab 3f a9 de fe c9 8e aa 37 6e 28 -00fe0020: 08 c4 7b 38 ee 13 bc 43 f4 1b 91 5c 0e ed 99 11 a2 60 86 b3 ed 62 40 1b f9 d5 8b 8d 19 df f6 24 -* -00ff0000: a0 c4 4f 7b a4 4c d2 3c 2e bf 75 98 7b e8 98 a5 63 80 73 b2 f9 11 cf ee ce 14 5a 77 58 0c 6c 12 -00ff0020: b2 e4 7b fb 11 fa cd 94 1f 62 af 5c 75 0f 3e a5 cc 4d f5 17 d5 c4 f1 6d b2 b4 d7 7b ae c1 a3 2f -* -00ff8000: 89 2d 2b 00 a5 c1 54 10 94 ca 65 de 21 3b bd 45 90 14 15 ed d1 10 17 cd 29 f3 ed 75 73 02 a0 3f -00ff8020: f9 22 61 60 c8 f9 27 bf dc c4 18 cd f2 03 49 31 46 00 8e ae fb 7d 02 19 4d 5e 54 81 89 00 51 08 -* -00ffc000: 22 48 54 8b ba a5 8f e2 db 0b 07 18 c1 d7 20 1f ed 64 c7 8d 7d 22 88 36 b2 a1 b2 f9 42 0b ef 3c -00ffc020: 2c 1a 96 4b b9 0b 59 eb fe 0f 6d a2 9a d6 5a e3 e4 17 72 4a 8f 7c 11 74 5a 40 ca c1 e5 e7 40 11 -* -00ffe000: 1c 6a 48 08 3e 17 49 90 ef c0 56 ec b1 44 75 1d e2 76 d8 a5 1c 3d 93 d7 4c 81 92 48 ab 78 cc 30 -00ffe020: fe e3 78 ce f1 64 04 b1 99 ed e0 b1 3e 11 b6 24 ff 9d 78 4f bb ed 87 8d 83 29 7e 79 5e 02 4f 02 -* -00fff000: 0a b4 26 38 1b 72 cd 3b b3 e3 c7 82 18 fe 1f 18 3b 3a 19 db c4 d9 26 94 30 03 cd 01 b6 d1 8d 0b -00fff020: 8e 9e 24 03 fa 88 4c f6 23 7f 60 df 25 f8 3e e4 0d ca 9e d8 79 eb 6f 63 52 d1 50 84 f5 ad 0d 3f -* -00fff800: 16 0d 87 17 1b e7 ae e4 20 a3 54 24 cf df 4f fe a2 fd 7b 94 58 89 58 f3 45 11 57 fc 39 8f 34 
26 -00fff820: 75 2d 96 93 fa 16 75 24 39 54 76 e3 17 a9 85 80 f0 09 47 af b7 a3 05 40 d6 25 a9 29 1c c1 2a 07 -* -00fffc00: 1f 40 60 11 da 08 f8 09 80 63 97 dc 1c 57 b9 87 83 37 5a 59 5d d6 81 42 6c 1e cd d4 3c ab e3 3c -00fffc20: 70 22 f6 0f 7e f6 ad fa 17 11 7a 52 61 9e 30 ce a8 2c 68 07 5a df 1c 66 77 86 ec 50 6e ef 2d 19 -* -00fffe00: 51 4e dd 2f 6f 8f 6d fd 54 b0 d1 20 7b b7 06 df 85 c5 a3 19 0e af 38 72 37 20 c5 07 56 67 7f 14 -00fffe20: d9 98 87 b9 73 57 3a 96 e1 13 93 64 52 36 c1 7b 1f 4c 70 34 d7 23 c7 a9 9f 70 9b b4 da 61 16 2b -* -00ffff00: 5a 1d 84 74 85 a3 4b 28 08 93 a9 cf b2 8b 54 44 67 12 8b eb c0 22 bd de c1 04 be ca b4 f4 81 31 -00ffff20: d0 b5 30 db b0 b4 f2 5c 5d 2f 2a 28 df ee 80 8b 53 41 2a 02 93 1f 18 c4 99 f5 a2 54 08 6b 13 26 -* -00ffff80: c5 fb f3 f9 4c c2 2b 3c 51 ad c1 ea af e9 4b a0 9f b2 73 f3 73 d2 10 1f 12 0b 11 c6 85 21 66 2f -00ffffa0: 84 c0 42 1b a0 68 5a 01 bf 79 5a 23 44 06 4f e4 24 bd 52 a9 d2 43 77 b3 94 ff 4c 4b 45 68 e8 11 -00ffffc0: 23 40 4a 88 80 f9 cb c7 20 39 cb 86 14 35 9c 28 34 84 55 70 fe 95 19 0b bd 4d 93 41 42 e8 25 2c -` - -func Test8MiB(t *testing.T) { - data := make([]byte, 8<<20) - data[0] = 0x01 - - tempFile := filepath.Join(t.TempDir(), "tree.dat") - - commd, err := BuildTreeD(bytes.NewReader(data), false, tempFile, 8<<20) - require.NoError(t, err) - fmt.Println(commd) - - // dump tree.dat - dat, err := os.ReadFile(tempFile) - require.NoError(t, err) - - actualD := hexPrint32LDedup(bytes.NewReader(dat)) - fmt.Println(actualD) - - require.EqualValues(t, expectD8M, actualD) - require.Equal(t, "baga6ea4seaqcgqckrcapts6hea44xbqugwocqneekvyp5fizbo6u3e2biluckla", commd.String()) -} - -func Test8MiBUnpad(t *testing.T) { - data := make([]byte, abi.PaddedPieceSize(8<<20).Unpadded()) - data[0] = 0x01 - - tempFile := filepath.Join(t.TempDir(), "tree.dat") - - commd, err := BuildTreeD(bytes.NewReader(data), true, tempFile, 8<<20) - require.NoError(t, err) - fmt.Println(commd) - - // dump tree.dat - dat, err := 
os.ReadFile(tempFile) - require.NoError(t, err) - - actualD := hexPrint32LDedup(bytes.NewReader(dat)) - fmt.Println(actualD) - - require.EqualValues(t, expectD8M, actualD) - require.Equal(t, "baga6ea4seaqcgqckrcapts6hea44xbqugwocqneekvyp5fizbo6u3e2biluckla", commd.String()) -} - -/*func Test32Golden(t *testing.T) { - datFile, err := os.Open("../../seal/cac/sc-02-data-tree-d.dat") - require.NoError(t, err) - - bufReader := bufio.NewReaderSize(datFile, 1<<20) - - actualD := hexPrint32LDedup(bufReader) - fmt.Println(actualD) -} -*/ - -var expect32Null = `00000000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 -* -800000000: f5 a5 fd 42 d1 6a 20 30 27 98 ef 6e d3 09 97 9b 43 00 3d 23 20 d9 f0 e8 ea 98 31 a9 27 59 fb 0b -* -c00000000: 37 31 bb 99 ac 68 9f 66 ee f5 97 3e 4a 94 da 18 8f 4d dc ae 58 07 24 fc 6f 3f d6 0d fd 48 83 33 -* -e00000000: 64 2a 60 7e f8 86 b0 04 bf 2c 19 78 46 3a e1 d4 69 3a c0 f4 10 eb 2d 1b 7a 47 fe 20 5e 5e 75 0f -* -f00000000: 57 a2 38 1a 28 65 2b f4 7f 6b ef 7a ca 67 9b e4 ae de 58 71 ab 5c f3 eb 2c 08 11 44 88 cb 85 26 -* -f80000000: 1f 7a c9 59 55 10 e0 9e a4 1c 46 0b 17 64 30 bb 32 2c d6 fb 41 2e c5 7c b1 7d 98 9a 43 10 37 2f -* -fc0000000: fc 7e 92 82 96 e5 16 fa ad e9 86 b2 8f 92 d4 4a 4f 24 b9 35 48 52 23 37 6a 79 90 27 bc 18 f8 33 -* -fe0000000: 08 c4 7b 38 ee 13 bc 43 f4 1b 91 5c 0e ed 99 11 a2 60 86 b3 ed 62 40 1b f9 d5 8b 8d 19 df f6 24 -* -ff0000000: b2 e4 7b fb 11 fa cd 94 1f 62 af 5c 75 0f 3e a5 cc 4d f5 17 d5 c4 f1 6d b2 b4 d7 7b ae c1 a3 2f -* -ff8000000: f9 22 61 60 c8 f9 27 bf dc c4 18 cd f2 03 49 31 46 00 8e ae fb 7d 02 19 4d 5e 54 81 89 00 51 08 -* -ffc000000: 2c 1a 96 4b b9 0b 59 eb fe 0f 6d a2 9a d6 5a e3 e4 17 72 4a 8f 7c 11 74 5a 40 ca c1 e5 e7 40 11 -* -ffe000000: fe e3 78 ce f1 64 04 b1 99 ed e0 b1 3e 11 b6 24 ff 9d 78 4f bb ed 87 8d 83 29 7e 79 5e 02 4f 02 -* -fff000000: 8e 9e 24 03 fa 88 4c f6 23 7f 60 df 25 f8 3e e4 0d ca 9e d8 79 eb 6f 63 52 d1 50 84 f5 ad 0d 3f -* 
-fff800000: 75 2d 96 93 fa 16 75 24 39 54 76 e3 17 a9 85 80 f0 09 47 af b7 a3 05 40 d6 25 a9 29 1c c1 2a 07 -* -fffc00000: 70 22 f6 0f 7e f6 ad fa 17 11 7a 52 61 9e 30 ce a8 2c 68 07 5a df 1c 66 77 86 ec 50 6e ef 2d 19 -* -fffe00000: d9 98 87 b9 73 57 3a 96 e1 13 93 64 52 36 c1 7b 1f 4c 70 34 d7 23 c7 a9 9f 70 9b b4 da 61 16 2b -* -ffff00000: d0 b5 30 db b0 b4 f2 5c 5d 2f 2a 28 df ee 80 8b 53 41 2a 02 93 1f 18 c4 99 f5 a2 54 08 6b 13 26 -* -ffff80000: 84 c0 42 1b a0 68 5a 01 bf 79 5a 23 44 06 4f e4 24 bd 52 a9 d2 43 77 b3 94 ff 4c 4b 45 68 e8 11 -* -ffffc0000: 65 f2 9e 5d 98 d2 46 c3 8b 38 8c fc 06 db 1f 6b 02 13 03 c5 a2 89 00 0b dc e8 32 a9 c3 ec 42 1c -* -ffffe0000: a2 24 75 08 28 58 50 96 5b 7e 33 4b 31 27 b0 c0 42 b1 d0 46 dc 54 40 21 37 62 7c d8 79 9c e1 3a -* -fffff0000: da fd ab 6d a9 36 44 53 c2 6d 33 72 6b 9f ef e3 43 be 8f 81 64 9e c0 09 aa d3 fa ff 50 61 75 08 -* -fffff8000: d9 41 d5 e0 d6 31 4a 99 5c 33 ff bd 4f be 69 11 8d 73 d4 e5 fd 2c d3 1f 0f 7c 86 eb dd 14 e7 06 -* -fffffc000: 51 4c 43 5c 3d 04 d3 49 a5 36 5f bd 59 ff c7 13 62 91 11 78 59 91 c1 a3 c5 3a f2 20 79 74 1a 2f -* -fffffe000: ad 06 85 39 69 d3 7d 34 ff 08 e0 9f 56 93 0a 4a d1 9a 89 de f6 0c bf ee 7e 1d 33 81 c1 e7 1c 37 -* -ffffff000: 39 56 0e 7b 13 a9 3b 07 a2 43 fd 27 20 ff a7 cb 3e 1d 2e 50 5a b3 62 9e 79 f4 63 13 51 2c da 06 -* -ffffff800: cc c3 c0 12 f5 b0 5e 81 1a 2b bf dd 0f 68 33 b8 42 75 b4 7b f2 29 c0 05 2a 82 48 4f 3c 1a 5b 3d -* -ffffffc00: 7d f2 9b 69 77 31 99 e8 f2 b4 0b 77 91 9d 04 85 09 ee d7 68 e2 c7 29 7b 1f 14 37 03 4f c3 c6 2c -* -ffffffe00: 66 ce 05 a3 66 75 52 cf 45 c0 2b cc 4e 83 92 91 9b de ac 35 de 2f f5 62 71 84 8e 9f 7b 67 51 07 -* -fffffff00: d8 61 02 18 42 5a b5 e9 5b 1c a6 23 9d 29 a2 e4 20 d7 06 a9 6f 37 3e 2f 9c 9a 91 d7 59 d1 9b 01 -* -fffffff80: 6d 36 4b 1e f8 46 44 1a 5a 4a 68 86 23 14 ac c0 a4 6f 01 67 17 e5 34 43 e8 39 ee df 83 c2 85 3c -* -fffffffc0: 07 7e 5f de 35 c5 0a 93 03 a5 50 09 e3 49 8a 4e be df f3 9c 42 b7 10 b7 30 d8 ec 7a c7 af a6 3e -` - 
-func Test32G(t *testing.T) { - if os.Getenv("LOTUS_TEST_LARGE_SECTORS") != "1" { - t.Skip("skipping large sector test without env LOTUS_TEST_LARGE_SECTORS=1") - } - - data := nullreader.NewNullReader(abi.PaddedPieceSize(32 << 30).Unpadded()) - - tempFile := filepath.Join(t.TempDir(), "tree.dat") - - commd, err := BuildTreeD(data, true, tempFile, 32<<30) - require.NoError(t, err) - fmt.Println(commd) - - // dump tree.dat - datFile, err := os.Open(tempFile) - require.NoError(t, err) - defer func() { - require.NoError(t, datFile.Close()) - }() - - actualD := hexPrint32LDedup(bufio.NewReaderSize(datFile, 1<<20)) - fmt.Println(actualD) - - require.EqualValues(t, expect32Null, actualD) - require.Equal(t, "baga6ea4seaqao7s73y24kcutaosvacpdjgfe5pw76ooefnyqw4ynr3d2y6x2mpq", commd.String()) -} - -func hexPrint32LDedup(r io.Reader) string { - var prevLine []byte - var outStr string - var duplicateLine bool - buffer := make([]byte, 32) - offset := 0 - - for { - n, err := r.Read(buffer) - if err == io.EOF { - break - } - if err != nil { - // Handle the error according to your application's requirements - fmt.Println("Error reading:", err) - break - } - - if string(prevLine) == string(buffer) { - // Mark as duplicate and skip processing - duplicateLine = true - } else { - if duplicateLine { - // Output a marker for the previous duplicate line - outStr += "*\n" - duplicateLine = false - } - // Convert to hex and output - outStr += fmt.Sprintf("%08x: %s\n", offset, toHex(buffer)) - - // Update prevLine - if len(prevLine) != 32 { - prevLine = make([]byte, 32) - } - copy(prevLine, buffer) - } - - offset += n - } - - // If the last line was a duplicate, ensure we mark it - if duplicateLine { - outStr += "*\n" - } - - return outStr -} - -func toHex(data []byte) string { - var hexStr string - for _, b := range data { - hexStr += fmt.Sprintf("%02x ", b) - } - return hexStr -} - -func BenchmarkHashChunk(b *testing.B) { - const benchSize = 1024 * 1024 - - // Generate 1 MiB of random data 
- randomData := make([]byte, benchSize) - if _, err := rand.Read(randomData); err != nil { - b.Fatalf("Failed to generate random data: %v", err) - } - - // Prepare data structure for hashChunk - data := make([][]byte, 1) - data[0] = randomData - - // append levels until we get to a 32 byte level - for len(data[len(data)-1]) > 32 { - newLevel := make([]byte, len(data[len(data)-1])/2) - data = append(data, newLevel) - } - - b.SetBytes(benchSize) // Set the number of bytes for the benchmark - - b.ResetTimer() // Start the timer after setup - - for i := 0; i < b.N; i++ { - hashChunk(data) - // Use the result in some way to avoid compiler optimization - _ = data[1] - } -} - -func BenchmarkBuildTreeD512M(b *testing.B) { - const dataSize = 512 * 1024 * 1024 // 512 MiB - - // Generate 512 MiB of random data - data := make([]byte, dataSize) - if _, err := rand.Read(data); err != nil { - b.Fatalf("Failed to generate random data: %v", err) - } - - // preallocate NumCPU+1 1MiB/512k/256k/... - // with Pool.Get / Pool.Put, so that they are in the pool - { - nc := runtime.NumCPU() - bufs := [][]byte{} - for i := 0; i < nc+1; i++ { - for sz := 1 << 20; sz > 32; sz >>= 1 { - b := pool.Get(sz) - bufs = append(bufs, b) - } - } - for _, b := range bufs { - pool.Put(b) - } - } - - /*if b.N == 1 { - b.N = 10 - }*/ - - b.SetBytes(int64(dataSize)) // Set the number of bytes for the benchmark - - for i := 0; i < b.N; i++ { - // Create a temporary file for each iteration - tempFile, err := os.CreateTemp("", "tree.dat") - if err != nil { - b.Fatalf("Failed to create temporary file: %v", err) - } - tempFilePath := tempFile.Name() - err = tempFile.Close() - if err != nil { - b.Fatalf("Failed to close temporary file: %v", err) - } - - b.StartTimer() // Start the timer for the BuildTreeD operation - - _, err = BuildTreeD(bytes.NewReader(data), false, tempFilePath, dataSize) - if err != nil { - b.Fatalf("BuildTreeD failed: %v", err) - } - - b.StopTimer() // Stop the timer after BuildTreeD 
completes - - // Clean up the temporary file - err = os.Remove(tempFilePath) - if err != nil { - b.Fatalf("Failed to remove temporary file: %v", err) - } - } -} - -func TestLayerOffset(t *testing.T) { - { - size := uint64(2048) - - require.Equal(t, uint64(0), layerOffset(size, 0)) - require.Equal(t, size, layerOffset(size, 1)) - require.Equal(t, size+(size/2), layerOffset(size, 2)) - require.Equal(t, size+(size/2)+(size/4), layerOffset(size, 3)) - require.Equal(t, size+(size/2)+(size/4)+(size/8), layerOffset(size, 4)) - require.Equal(t, size+(size/2)+(size/4)+(size/8)+(size/16), layerOffset(size, 5)) - } - - { - size := uint64(32 << 30) - maxLayers := 30 - - for i := 0; i <= maxLayers; i++ { - var expect uint64 - for j := 0; j < i; j++ { - expect += size >> uint64(j) - } - - fmt.Printf("layer %d: %d\n", i, expect) - require.Equal(t, expect, layerOffset(size, i)) - } - } - - { - size := uint64(64 << 30) - maxLayers := 31 - - for i := 0; i <= maxLayers; i++ { - var expect uint64 - for j := 0; j < i; j++ { - expect += size >> uint64(j) - } - - fmt.Printf("layer %d: %d\n", i, expect) - require.Equal(t, expect, layerOffset(size, i)) - } - } -} diff --git a/curiosrc/seal/README.md b/curiosrc/seal/README.md deleted file mode 100644 index b148e4204d1..00000000000 --- a/curiosrc/seal/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# Curio Sealer - -## Overview - -The Curio sealer is a collection of harmony tasks and a common poller -which implement the sealing functionality of the Filecoin protocol. - -## Pipeline Tasks - -* SDR pipeline - * `SDR` - Generate SDR layers - * `SDRTrees` - Generate tree files (TreeD, TreeR, TreeC) - * `PreCommitSubmit` - Submit precommit message to the network - * `PoRep` - Generate PoRep proof - * `CommitSubmit` - Submit commit message to the network - -# Poller - -The poller is a background process running on every node which runs any of the -SDR pipeline tasks. 
It periodically checks the state of sectors in the SDR pipeline -and schedules any tasks to run which will move the sector along the pipeline. - -# Error Handling - -* Pipeline tasks are expected to always finish successfully as harmonytask tasks. - If a sealing task encounters an error, it should mark the sector pipeline entry - as failed and exit without erroring. The poller will then figure out a recovery - strategy for the sector. diff --git a/curiosrc/seal/finalize_pieces.go b/curiosrc/seal/finalize_pieces.go deleted file mode 100644 index 354eed1413e..00000000000 --- a/curiosrc/seal/finalize_pieces.go +++ /dev/null @@ -1,51 +0,0 @@ -package seal - -import ( - "context" - "net/url" - "strconv" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" -) - -func DropSectorPieceRefs(ctx context.Context, db *harmonydb.DB, sid abi.SectorID) error { - //_, err := db.Exec(ctx, `SELECT FROM sectors_sdr_initial_pieces WHERE sp_id = $1 AND sector_number = $2`, sid.Miner, sid.Number) - - var PieceURL []struct { - URL string `db:"data_url"` - } - - err := db.Select(ctx, &PieceURL, `SELECT data_url FROM sectors_sdr_initial_pieces WHERE sp_id = $1 AND sector_number = $2`, sid.Miner, sid.Number) - if err != nil { - return xerrors.Errorf("getting piece url: %w", err) - } - - for _, pu := range PieceURL { - gourl, err := url.Parse(pu.URL) - if err != nil { - log.Errorw("failed to parse piece url", "url", pu.URL, "error", err, "miner", sid.Miner, "sector", sid.Number) - continue - } - - if gourl.Scheme == "pieceref" { - refID, err := strconv.ParseInt(gourl.Opaque, 10, 64) - if err != nil { - log.Errorw("failed to parse piece ref id", "url", pu.URL, "error", err, "miner", sid.Miner, "sector", sid.Number) - continue - } - - n, err := db.Exec(ctx, `DELETE FROM parked_piece_refs WHERE ref_id = $1`, refID) - if err != nil { - log.Errorw("failed to delete piece ref", "url", pu.URL, "error", err, 
"miner", sid.Miner, "sector", sid.Number) - } - - log.Debugw("deleted piece ref", "url", pu.URL, "miner", sid.Miner, "sector", sid.Number, "rows", n) - } - } - - return err -} diff --git a/curiosrc/seal/poller.go b/curiosrc/seal/poller.go deleted file mode 100644 index 75aed876231..00000000000 --- a/curiosrc/seal/poller.go +++ /dev/null @@ -1,304 +0,0 @@ -package seal - -import ( - "context" - "time" - - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/promise" -) - -var log = logging.Logger("cu/seal") - -const ( - pollerSDR = iota - pollerTreeD - pollerTreeRC - pollerPrecommitMsg - pollerPoRep - pollerCommitMsg - pollerFinalize - pollerMoveStorage - - numPollers -) - -const sealPollerInterval = 10 * time.Second -const seedEpochConfidence = 3 - -type SealPollerAPI interface { - StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error) - StateSectorGetInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) - ChainHead(context.Context) (*types.TipSet, error) -} - -type SealPoller struct { - db *harmonydb.DB - api SealPollerAPI - - pollers [numPollers]promise.Promise[harmonytask.AddTaskFunc] -} - -func NewPoller(db *harmonydb.DB, api SealPollerAPI) *SealPoller { - return &SealPoller{ - db: db, - api: api, - } -} - -func (s *SealPoller) RunPoller(ctx context.Context) { - ticker := time.NewTicker(sealPollerInterval) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if err := s.poll(ctx); err 
!= nil { - log.Errorw("polling failed", "error", err) - } - } - } -} - -/* -NOTE: TaskIDs are ONLY set while the tasks are executing or waiting to execute. - This means that there are ~4 states each task can be in: -* Not run, and dependencies not solved (dependencies are 'After' fields of previous stages), task is null, After is false -* Not run, and dependencies solved, task is null, After is false -* Running or queued, task is set, After is false -* Finished, task is null, After is true -*/ - -type pollTask struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - - TaskSDR *int64 `db:"task_id_sdr"` - AfterSDR bool `db:"after_sdr"` - - TaskTreeD *int64 `db:"task_id_tree_d"` - AfterTreeD bool `db:"after_tree_d"` - - TaskTreeC *int64 `db:"task_id_tree_c"` - AfterTreeC bool `db:"after_tree_c"` - - TaskTreeR *int64 `db:"task_id_tree_r"` - AfterTreeR bool `db:"after_tree_r"` - - TaskPrecommitMsg *int64 `db:"task_id_precommit_msg"` - AfterPrecommitMsg bool `db:"after_precommit_msg"` - - AfterPrecommitMsgSuccess bool `db:"after_precommit_msg_success"` - SeedEpoch *int64 `db:"seed_epoch"` - - TaskPoRep *int64 `db:"task_id_porep"` - PoRepProof []byte `db:"porep_proof"` - AfterPoRep bool `db:"after_porep"` - - TaskFinalize *int64 `db:"task_id_finalize"` - AfterFinalize bool `db:"after_finalize"` - - TaskMoveStorage *int64 `db:"task_id_move_storage"` - AfterMoveStorage bool `db:"after_move_storage"` - - TaskCommitMsg *int64 `db:"task_id_commit_msg"` - AfterCommitMsg bool `db:"after_commit_msg"` - - AfterCommitMsgSuccess bool `db:"after_commit_msg_success"` - - Failed bool `db:"failed"` - FailedReason string `db:"failed_reason"` -} - -func (s *SealPoller) poll(ctx context.Context) error { - var tasks []pollTask - - err := s.db.Select(ctx, &tasks, `SELECT - sp_id, sector_number, - task_id_sdr, after_sdr, - task_id_tree_d, after_tree_d, - task_id_tree_c, after_tree_c, - task_id_tree_r, after_tree_r, - task_id_precommit_msg, after_precommit_msg, - 
after_precommit_msg_success, seed_epoch, - task_id_porep, porep_proof, after_porep, - task_id_finalize, after_finalize, - task_id_move_storage, after_move_storage, - task_id_commit_msg, after_commit_msg, - after_commit_msg_success, - failed, failed_reason - FROM sectors_sdr_pipeline WHERE after_commit_msg_success != TRUE OR after_move_storage != TRUE`) - if err != nil { - return err - } - - for _, task := range tasks { - task := task - if task.Failed { - continue - } - - ts, err := s.api.ChainHead(ctx) - if err != nil { - return xerrors.Errorf("getting chain head: %w", err) - } - - s.pollStartSDR(ctx, task) - s.pollStartSDRTreeD(ctx, task) - s.pollStartSDRTreeRC(ctx, task) - s.pollStartPrecommitMsg(ctx, task) - s.mustPoll(s.pollPrecommitMsgLanded(ctx, task)) - s.pollStartPoRep(ctx, task, ts) - s.pollStartFinalize(ctx, task, ts) - s.pollStartMoveStorage(ctx, task) - s.pollStartCommitMsg(ctx, task) - s.mustPoll(s.pollCommitMsgLanded(ctx, task)) - } - - return nil -} - -func (s *SealPoller) pollStartSDR(ctx context.Context, task pollTask) { - if !task.AfterSDR && task.TaskSDR == nil && s.pollers[pollerSDR].IsSet() { - s.pollers[pollerSDR].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_sdr = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_sdr IS NULL`, id, task.SpID, task.SectorNumber) - if err != nil { - return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("expected to update 1 row, updated %d", n) - } - - return true, nil - }) - } -} - -func (t pollTask) afterSDR() bool { - return t.AfterSDR -} - -func (s *SealPoller) pollStartSDRTreeD(ctx context.Context, task pollTask) { - if !task.AfterTreeD && task.TaskTreeD == nil && s.pollers[pollerTreeD].IsSet() && task.afterSDR() { - s.pollers[pollerTreeD].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError 
error) { - n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_tree_d = $1 WHERE sp_id = $2 AND sector_number = $3 AND after_sdr = TRUE AND task_id_tree_d IS NULL`, id, task.SpID, task.SectorNumber) - if err != nil { - return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("expected to update 1 row, updated %d", n) - } - - return true, nil - }) - } -} - -func (t pollTask) afterTreeD() bool { - return t.AfterTreeD && t.afterSDR() -} - -func (s *SealPoller) pollStartSDRTreeRC(ctx context.Context, task pollTask) { - if !task.AfterTreeC && !task.AfterTreeR && task.TaskTreeC == nil && task.TaskTreeR == nil && s.pollers[pollerTreeRC].IsSet() && task.afterTreeD() { - s.pollers[pollerTreeRC].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_tree_c = $1, task_id_tree_r = $1 - WHERE sp_id = $2 AND sector_number = $3 AND after_tree_d = TRUE AND task_id_tree_c IS NULL AND task_id_tree_r IS NULL`, id, task.SpID, task.SectorNumber) - if err != nil { - return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("expected to update 1 row, updated %d", n) - } - - return true, nil - }) - } -} - -func (t pollTask) afterTreeRC() bool { - return t.AfterTreeC && t.AfterTreeR && t.afterTreeD() -} - -func (t pollTask) afterPrecommitMsg() bool { - return t.AfterPrecommitMsg && t.afterTreeRC() -} - -func (t pollTask) afterPrecommitMsgSuccess() bool { - return t.AfterPrecommitMsgSuccess && t.afterPrecommitMsg() -} - -func (s *SealPoller) pollStartPoRep(ctx context.Context, task pollTask, ts *types.TipSet) { - if s.pollers[pollerPoRep].IsSet() && task.afterPrecommitMsgSuccess() && task.SeedEpoch != nil && - task.TaskPoRep == nil && !task.AfterPoRep && - ts.Height() >= abi.ChainEpoch(*task.SeedEpoch+seedEpochConfidence) { - - 
s.pollers[pollerPoRep].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_porep = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_porep IS NULL`, id, task.SpID, task.SectorNumber) - if err != nil { - return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("expected to update 1 row, updated %d", n) - } - - return true, nil - }) - } -} - -func (t pollTask) afterPoRep() bool { - return t.AfterPoRep && t.afterPrecommitMsgSuccess() -} - -func (s *SealPoller) pollStartFinalize(ctx context.Context, task pollTask, ts *types.TipSet) { - if s.pollers[pollerFinalize].IsSet() && task.afterPoRep() && !task.AfterFinalize && task.TaskFinalize == nil { - s.pollers[pollerFinalize].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_finalize = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_finalize IS NULL`, id, task.SpID, task.SectorNumber) - if err != nil { - return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("expected to update 1 row, updated %d", n) - } - - return true, nil - }) - } -} - -func (t pollTask) afterFinalize() bool { - return t.AfterFinalize && t.afterPoRep() -} - -func (s *SealPoller) pollStartMoveStorage(ctx context.Context, task pollTask) { - if s.pollers[pollerMoveStorage].IsSet() && task.afterFinalize() && !task.AfterMoveStorage && task.TaskMoveStorage == nil { - s.pollers[pollerMoveStorage].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_move_storage = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_move_storage IS NULL`, id, task.SpID, task.SectorNumber) - if err != nil { - return false, 
xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("expected to update 1 row, updated %d", n) - } - - return true, nil - }) - } -} - -func (s *SealPoller) mustPoll(err error) { - if err != nil { - log.Errorw("poller operation failed", "error", err) - } -} diff --git a/curiosrc/seal/poller_commit_msg.go b/curiosrc/seal/poller_commit_msg.go deleted file mode 100644 index 9a88129b04e..00000000000 --- a/curiosrc/seal/poller_commit_msg.go +++ /dev/null @@ -1,108 +0,0 @@ -package seal - -import ( - "context" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/exitcode" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" -) - -func (s *SealPoller) pollStartCommitMsg(ctx context.Context, task pollTask) { - if task.afterPoRep() && len(task.PoRepProof) > 0 && task.TaskCommitMsg == nil && !task.AfterCommitMsg && s.pollers[pollerCommitMsg].IsSet() { - s.pollers[pollerCommitMsg].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_commit_msg = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_commit_msg IS NULL`, id, task.SpID, task.SectorNumber) - if err != nil { - return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("expected to update 1 row, updated %d", n) - } - - return true, nil - }) - } -} - -func (s *SealPoller) pollCommitMsgLanded(ctx context.Context, task pollTask) error { - if task.AfterCommitMsg && !task.AfterCommitMsgSuccess && s.pollers[pollerCommitMsg].IsSet() { - var execResult []dbExecResult - - err := s.db.Select(ctx, &execResult, `SELECT spipeline.precommit_msg_cid, spipeline.commit_msg_cid, 
executed_tsk_cid, executed_tsk_epoch, executed_msg_cid, executed_rcpt_exitcode, executed_rcpt_gas_used - FROM sectors_sdr_pipeline spipeline - JOIN message_waits ON spipeline.commit_msg_cid = message_waits.signed_message_cid - WHERE sp_id = $1 AND sector_number = $2 AND executed_tsk_epoch IS NOT NULL`, task.SpID, task.SectorNumber) - if err != nil { - log.Errorw("failed to query message_waits", "error", err) - } - - if len(execResult) > 0 { - maddr, err := address.NewIDAddress(uint64(task.SpID)) - if err != nil { - return err - } - - if exitcode.ExitCode(execResult[0].ExecutedRcptExitCode) != exitcode.Ok { - return s.pollCommitMsgFail(ctx, task, execResult[0]) - } - - si, err := s.api.StateSectorGetInfo(ctx, maddr, abi.SectorNumber(task.SectorNumber), types.EmptyTSK) - if err != nil { - return xerrors.Errorf("get sector info: %w", err) - } - - if si == nil { - log.Errorw("todo handle missing sector info (not found after cron)", "sp", task.SpID, "sector", task.SectorNumber, "exec_epoch", execResult[0].ExecutedTskEpoch, "exec_tskcid", execResult[0].ExecutedTskCID, "msg_cid", execResult[0].ExecutedMsgCID) - // todo handdle missing sector info (not found after cron) - } else { - // yay! 
- - _, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET - after_commit_msg_success = TRUE, commit_msg_tsk = $1 - WHERE sp_id = $2 AND sector_number = $3 AND after_commit_msg_success = FALSE`, - execResult[0].ExecutedTskCID, task.SpID, task.SectorNumber) - if err != nil { - return xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - } - } - } - - return nil -} - -func (s *SealPoller) pollCommitMsgFail(ctx context.Context, task pollTask, execResult dbExecResult) error { - switch exitcode.ExitCode(execResult.ExecutedRcptExitCode) { - case exitcode.SysErrInsufficientFunds: - fallthrough - case exitcode.SysErrOutOfGas: - // just retry - return s.pollRetryCommitMsgSend(ctx, task, execResult) - default: - return xerrors.Errorf("commit message failed with exit code %s", exitcode.ExitCode(execResult.ExecutedRcptExitCode)) - } -} - -func (s *SealPoller) pollRetryCommitMsgSend(ctx context.Context, task pollTask, execResult dbExecResult) error { - if execResult.CommitMsgCID == nil { - return xerrors.Errorf("commit msg cid was nil") - } - - // make the pipeline entry seem like precommit send didn't happen, next poll loop will retry - - _, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET - commit_msg_cid = NULL, task_id_commit_msg = NULL, after_commit_msg = FALSE - WHERE commit_msg_cid = $1 AND sp_id = $2 AND sector_number = $3 AND after_commit_msg_success = FALSE`, - *execResult.CommitMsgCID, task.SpID, task.SectorNumber) - if err != nil { - return xerrors.Errorf("update sectors_sdr_pipeline to retry precommit msg send: %w", err) - } - - return nil -} diff --git a/curiosrc/seal/poller_precommit_msg.go b/curiosrc/seal/poller_precommit_msg.go deleted file mode 100644 index 42986499f61..00000000000 --- a/curiosrc/seal/poller_precommit_msg.go +++ /dev/null @@ -1,119 +0,0 @@ -package seal - -import ( - "context" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - 
"github.com/filecoin-project/go-state-types/exitcode" - - "github.com/filecoin-project/lotus/chain/actors/policy" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" -) - -func (s *SealPoller) pollStartPrecommitMsg(ctx context.Context, task pollTask) { - if task.TaskPrecommitMsg == nil && !task.AfterPrecommitMsg && task.afterTreeRC() && s.pollers[pollerPrecommitMsg].IsSet() { - s.pollers[pollerPrecommitMsg].Val(ctx)(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) { - n, err := tx.Exec(`UPDATE sectors_sdr_pipeline SET task_id_precommit_msg = $1 WHERE sp_id = $2 AND sector_number = $3 AND task_id_precommit_msg IS NULL AND after_tree_r = TRUE AND after_tree_d = TRUE`, id, task.SpID, task.SectorNumber) - if err != nil { - return false, xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("expected to update 1 row, updated %d", n) - } - - return true, nil - }) - } -} - -type dbExecResult struct { - PrecommitMsgCID *string `db:"precommit_msg_cid"` - CommitMsgCID *string `db:"commit_msg_cid"` - - ExecutedTskCID string `db:"executed_tsk_cid"` - ExecutedTskEpoch int64 `db:"executed_tsk_epoch"` - ExecutedMsgCID string `db:"executed_msg_cid"` - - ExecutedRcptExitCode int64 `db:"executed_rcpt_exitcode"` - ExecutedRcptGasUsed int64 `db:"executed_rcpt_gas_used"` -} - -func (s *SealPoller) pollPrecommitMsgLanded(ctx context.Context, task pollTask) error { - if task.AfterPrecommitMsg && !task.AfterPrecommitMsgSuccess { - var execResult []dbExecResult - - err := s.db.Select(ctx, &execResult, `SELECT spipeline.precommit_msg_cid, spipeline.commit_msg_cid, executed_tsk_cid, executed_tsk_epoch, executed_msg_cid, executed_rcpt_exitcode, executed_rcpt_gas_used - FROM sectors_sdr_pipeline spipeline - JOIN message_waits ON spipeline.precommit_msg_cid = message_waits.signed_message_cid 
- WHERE sp_id = $1 AND sector_number = $2 AND executed_tsk_epoch IS NOT NULL`, task.SpID, task.SectorNumber) - if err != nil { - log.Errorw("failed to query message_waits", "error", err) - } - - if len(execResult) > 0 { - if exitcode.ExitCode(execResult[0].ExecutedRcptExitCode) != exitcode.Ok { - return s.pollPrecommitMsgFail(ctx, task, execResult[0]) - } - - maddr, err := address.NewIDAddress(uint64(task.SpID)) - if err != nil { - return err - } - - pci, err := s.api.StateSectorPreCommitInfo(ctx, maddr, abi.SectorNumber(task.SectorNumber), types.EmptyTSK) - if err != nil { - return xerrors.Errorf("get precommit info: %w", err) - } - - if pci != nil { - randHeight := pci.PreCommitEpoch + policy.GetPreCommitChallengeDelay() - - _, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET - seed_epoch = $1, precommit_msg_tsk = $2, after_precommit_msg_success = TRUE - WHERE sp_id = $3 AND sector_number = $4 AND seed_epoch IS NULL`, - randHeight, execResult[0].ExecutedTskCID, task.SpID, task.SectorNumber) - if err != nil { - return xerrors.Errorf("update sectors_sdr_pipeline: %w", err) - } - } // todo handle missing precommit info (eg expired precommit) - - } - } - - return nil -} - -func (s *SealPoller) pollPrecommitMsgFail(ctx context.Context, task pollTask, execResult dbExecResult) error { - switch exitcode.ExitCode(execResult.ExecutedRcptExitCode) { - case exitcode.SysErrInsufficientFunds: - fallthrough - case exitcode.SysErrOutOfGas: - // just retry - return s.pollRetryPrecommitMsgSend(ctx, task, execResult) - default: - return xerrors.Errorf("precommit message failed with exit code %s", exitcode.ExitCode(execResult.ExecutedRcptExitCode)) - } -} - -func (s *SealPoller) pollRetryPrecommitMsgSend(ctx context.Context, task pollTask, execResult dbExecResult) error { - if execResult.PrecommitMsgCID == nil { - return xerrors.Errorf("precommit msg cid was nil") - } - - // make the pipeline entry seem like precommit send didn't happen, next poll loop will retry - - _, err := 
s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET - precommit_msg_cid = NULL, task_id_precommit_msg = NULL, after_precommit_msg = FALSE - WHERE precommit_msg_cid = $1 AND sp_id = $2 AND sector_number = $3 AND after_precommit_msg_success = FALSE`, - *execResult.PrecommitMsgCID, task.SpID, task.SectorNumber) - if err != nil { - return xerrors.Errorf("update sectors_sdr_pipeline to retry precommit msg send: %w", err) - } - - return nil -} diff --git a/curiosrc/seal/sector_num_alloc.go b/curiosrc/seal/sector_num_alloc.go deleted file mode 100644 index 010ebee395d..00000000000 --- a/curiosrc/seal/sector_num_alloc.go +++ /dev/null @@ -1,127 +0,0 @@ -package seal - -import ( - "context" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - rlepluslazy "github.com/filecoin-project/go-bitfield/rle" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" -) - -type AllocAPI interface { - StateMinerAllocated(context.Context, address.Address, types.TipSetKey) (*bitfield.BitField, error) -} - -func AllocateSectorNumbers(ctx context.Context, a AllocAPI, db *harmonydb.DB, maddr address.Address, count int, txcb ...func(*harmonydb.Tx, []abi.SectorNumber) (bool, error)) ([]abi.SectorNumber, error) { - chainAlloc, err := a.StateMinerAllocated(ctx, maddr, types.EmptyTSK) - if err != nil { - return nil, xerrors.Errorf("getting on-chain allocated sector numbers: %w", err) - } - - mid, err := address.IDFromAddress(maddr) - if err != nil { - return nil, xerrors.Errorf("getting miner id: %w", err) - } - - var res []abi.SectorNumber - - comm, err := db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { - res = nil // reset result in case of retry - - // query from db, if exists unmarsal to bitfield - var dbAllocated bitfield.BitField - var rawJson []byte - - err = tx.QueryRow("SELECT COALESCE(allocated, 
'[0]') from sectors_allocated_numbers sa FULL OUTER JOIN (SELECT 1) AS d ON TRUE WHERE sp_id = $1 OR sp_id IS NULL", mid).Scan(&rawJson) - if err != nil { - return false, xerrors.Errorf("querying allocated sector numbers: %w", err) - } - - if rawJson != nil { - err = dbAllocated.UnmarshalJSON(rawJson) - if err != nil { - return false, xerrors.Errorf("unmarshaling allocated sector numbers: %w", err) - } - } - - if err := dbAllocated.UnmarshalJSON(rawJson); err != nil { - return false, xerrors.Errorf("unmarshaling allocated sector numbers: %w", err) - } - - merged, err := bitfield.MergeBitFields(*chainAlloc, dbAllocated) - if err != nil { - return false, xerrors.Errorf("merging allocated sector numbers: %w", err) - } - - allAssignable, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{Runs: []rlepluslazy.Run{ - { - Val: true, - Len: abi.MaxSectorNumber, - }, - }}) - if err != nil { - return false, xerrors.Errorf("creating assignable sector numbers: %w", err) - } - - inverted, err := bitfield.SubtractBitField(allAssignable, merged) - if err != nil { - return false, xerrors.Errorf("subtracting allocated sector numbers: %w", err) - } - - toAlloc, err := inverted.Slice(0, uint64(count)) - if err != nil { - return false, xerrors.Errorf("getting slice of allocated sector numbers: %w", err) - } - - err = toAlloc.ForEach(func(u uint64) error { - res = append(res, abi.SectorNumber(u)) - return nil - }) - if err != nil { - return false, xerrors.Errorf("iterating allocated sector numbers: %w", err) - } - - toPersist, err := bitfield.MergeBitFields(merged, toAlloc) - if err != nil { - return false, xerrors.Errorf("merging allocated sector numbers: %w", err) - } - - rawJson, err = toPersist.MarshalJSON() - if err != nil { - return false, xerrors.Errorf("marshaling allocated sector numbers: %w", err) - } - - _, err = tx.Exec("INSERT INTO sectors_allocated_numbers(sp_id, allocated) VALUES($1, $2) ON CONFLICT(sp_id) DO UPDATE SET allocated = $2", mid, rawJson) - if err != 
nil { - return false, xerrors.Errorf("persisting allocated sector numbers: %w", err) - } - - for i, f := range txcb { - commit, err = f(tx, res) - if err != nil { - return false, xerrors.Errorf("executing tx callback %d: %w", i, err) - } - - if !commit { - return false, nil - } - } - - return true, nil - }, harmonydb.OptionRetry()) - - if err != nil { - return nil, xerrors.Errorf("allocating sector numbers: %w", err) - } - if !comm { - return nil, xerrors.Errorf("allocating sector numbers: commit failed") - } - - return res, nil -} diff --git a/curiosrc/seal/task_finalize.go b/curiosrc/seal/task_finalize.go deleted file mode 100644 index 9fbc6cf18fd..00000000000 --- a/curiosrc/seal/task_finalize.go +++ /dev/null @@ -1,156 +0,0 @@ -package seal - -import ( - "context" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/curiosrc/ffi" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type FinalizeTask struct { - max int - sp *SealPoller - sc *ffi.SealCalls - db *harmonydb.DB -} - -func NewFinalizeTask(max int, sp *SealPoller, sc *ffi.SealCalls, db *harmonydb.DB) *FinalizeTask { - return &FinalizeTask{ - max: max, - sp: sp, - sc: sc, - db: db, - } -} - -func (f *FinalizeTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - var tasks []struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - RegSealProof int64 `db:"reg_seal_proof"` - } - - ctx := context.Background() - - err = f.db.Select(ctx, &tasks, ` - SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_finalize = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("getting task: %w", err) - } - - if len(tasks) != 1 { - return false, xerrors.Errorf("expected 
one task") - } - task := tasks[0] - - var keepUnsealed bool - - if err := f.db.QueryRow(ctx, `SELECT COALESCE(BOOL_OR(NOT data_delete_on_finalize), FALSE) FROM sectors_sdr_initial_pieces WHERE sp_id = $1 AND sector_number = $2`, task.SpID, task.SectorNumber).Scan(&keepUnsealed); err != nil { - return false, err - } - - sector := storiface.SectorRef{ - ID: abi.SectorID{ - Miner: abi.ActorID(task.SpID), - Number: abi.SectorNumber(task.SectorNumber), - }, - ProofType: abi.RegisteredSealProof(task.RegSealProof), - } - - err = f.sc.FinalizeSector(ctx, sector, keepUnsealed) - if err != nil { - return false, xerrors.Errorf("finalizing sector: %w", err) - } - - if err := DropSectorPieceRefs(ctx, f.db, sector.ID); err != nil { - return false, xerrors.Errorf("dropping sector piece refs: %w", err) - } - - // set after_finalize - _, err = f.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET after_finalize = TRUE, task_id_finalize = NULL WHERE task_id_finalize = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("updating task: %w", err) - } - - return true, nil -} - -func (f *FinalizeTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - var tasks []struct { - TaskID harmonytask.TaskID `db:"task_id_finalize"` - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - StorageID string `db:"storage_id"` - } - - if storiface.FTCache != 4 { - panic("storiface.FTCache != 4") - } - - ctx := context.Background() - - indIDs := make([]int64, len(ids)) - for i, id := range ids { - indIDs[i] = int64(id) - } - - err := f.db.Select(ctx, &tasks, ` - SELECT p.task_id_finalize, p.sp_id, p.sector_number, l.storage_id FROM sectors_sdr_pipeline p - INNER JOIN sector_location l ON p.sp_id = l.miner_id AND p.sector_number = l.sector_num - WHERE task_id_finalize = ANY ($1) AND l.sector_filetype = 4 -`, indIDs) - if err != nil { - return nil, xerrors.Errorf("getting tasks: %w", err) - } - - ls, err := f.sc.LocalStorage(ctx) - if 
err != nil { - return nil, xerrors.Errorf("getting local storage: %w", err) - } - - acceptables := map[harmonytask.TaskID]bool{} - - for _, t := range ids { - acceptables[t] = true - } - - for _, t := range tasks { - if _, ok := acceptables[t.TaskID]; !ok { - continue - } - - for _, l := range ls { - if string(l.ID) == t.StorageID { - return &t.TaskID, nil - } - } - } - - return nil, nil -} - -func (f *FinalizeTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: f.max, - Name: "Finalize", - Cost: resources.Resources{ - Cpu: 1, - Gpu: 0, - Ram: 100 << 20, - }, - MaxFailures: 10, - } -} - -func (f *FinalizeTask) Adder(taskFunc harmonytask.AddTaskFunc) { - f.sp.pollers[pollerFinalize].Set(taskFunc) -} - -var _ harmonytask.TaskInterface = &FinalizeTask{} diff --git a/curiosrc/seal/task_movestorage.go b/curiosrc/seal/task_movestorage.go deleted file mode 100644 index f4bcfd863e1..00000000000 --- a/curiosrc/seal/task_movestorage.go +++ /dev/null @@ -1,177 +0,0 @@ -package seal - -import ( - "context" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/curiosrc/ffi" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type MoveStorageTask struct { - sp *SealPoller - sc *ffi.SealCalls - db *harmonydb.DB - - max int -} - -func NewMoveStorageTask(sp *SealPoller, sc *ffi.SealCalls, db *harmonydb.DB, max int) *MoveStorageTask { - return &MoveStorageTask{ - max: max, - sp: sp, - sc: sc, - db: db, - } -} - -func (m *MoveStorageTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - var tasks []struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - RegSealProof int64 
`db:"reg_seal_proof"` - } - - ctx := context.Background() - - err = m.db.Select(ctx, &tasks, ` - SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_move_storage = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("getting task: %w", err) - } - if len(tasks) != 1 { - return false, xerrors.Errorf("expected one task") - } - task := tasks[0] - - sector := storiface.SectorRef{ - ID: abi.SectorID{ - Miner: abi.ActorID(task.SpID), - Number: abi.SectorNumber(task.SectorNumber), - }, - ProofType: abi.RegisteredSealProof(task.RegSealProof), - } - - err = m.sc.MoveStorage(ctx, sector, &taskID) - if err != nil { - return false, xerrors.Errorf("moving storage: %w", err) - } - - _, err = m.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET after_move_storage = TRUE, task_id_move_storage = NULL WHERE task_id_move_storage = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("updating task: %w", err) - } - - return true, nil -} - -func (m *MoveStorageTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - - ctx := context.Background() - /* - - var tasks []struct { - TaskID harmonytask.TaskID `db:"task_id_finalize"` - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - StorageID string `db:"storage_id"` - } - - indIDs := make([]int64, len(ids)) - for i, id := range ids { - indIDs[i] = int64(id) - } - err := m.db.Select(ctx, &tasks, ` - select p.task_id_move_storage, p.sp_id, p.sector_number, l.storage_id from sectors_sdr_pipeline p - inner join sector_location l on p.sp_id=l.miner_id and p.sector_number=l.sector_num - where task_id_move_storage in ($1) and l.sector_filetype=4`, indIDs) - if err != nil { - return nil, xerrors.Errorf("getting tasks: %w", err) - } - - ls, err := m.sc.LocalStorage(ctx) - if err != nil { - return nil, xerrors.Errorf("getting local storage: %w", err) - } - - acceptables := map[harmonytask.TaskID]bool{} - - for _, t := range ids { - 
acceptables[t] = true - } - - for _, t := range tasks { - - } - - todo some smarts - * yield a schedule cycle/s if we have moves already in progress - */ - - //// - ls, err := m.sc.LocalStorage(ctx) - if err != nil { - return nil, xerrors.Errorf("getting local storage: %w", err) - } - var haveStorage bool - for _, l := range ls { - if l.CanStore { - haveStorage = true - break - } - } - - if !haveStorage { - return nil, nil - } - - id := ids[0] - return &id, nil -} - -func (m *MoveStorageTask) TypeDetails() harmonytask.TaskTypeDetails { - ssize := abi.SectorSize(32 << 30) // todo task details needs taskID to get correct sector size - if isDevnet { - ssize = abi.SectorSize(2 << 20) - } - - return harmonytask.TaskTypeDetails{ - Max: m.max, - Name: "MoveStorage", - Cost: resources.Resources{ - Cpu: 1, - Gpu: 0, - Ram: 128 << 20, - Storage: m.sc.Storage(m.taskToSector, storiface.FTNone, storiface.FTCache|storiface.FTSealed|storiface.FTUnsealed, ssize, storiface.PathStorage, paths.MinFreeStoragePercentage), - }, - MaxFailures: 10, - } -} - -func (m *MoveStorageTask) taskToSector(id harmonytask.TaskID) (ffi.SectorRef, error) { - var refs []ffi.SectorRef - - err := m.db.Select(context.Background(), &refs, `SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_move_storage = $1`, id) - if err != nil { - return ffi.SectorRef{}, xerrors.Errorf("getting sector ref: %w", err) - } - - if len(refs) != 1 { - return ffi.SectorRef{}, xerrors.Errorf("expected 1 sector ref, got %d", len(refs)) - } - - return refs[0], nil -} - -func (m *MoveStorageTask) Adder(taskFunc harmonytask.AddTaskFunc) { - m.sp.pollers[pollerMoveStorage].Set(taskFunc) -} - -var _ harmonytask.TaskInterface = &MoveStorageTask{} diff --git a/curiosrc/seal/task_porep.go b/curiosrc/seal/task_porep.go deleted file mode 100644 index f2ede4376ab..00000000000 --- a/curiosrc/seal/task_porep.go +++ /dev/null @@ -1,177 +0,0 @@ -package seal - -import ( - "bytes" - "context" - - 
"github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/ffi" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type PoRepAPI interface { - ChainHead(context.Context) (*types.TipSet, error) - StateGetRandomnessFromBeacon(context.Context, crypto.DomainSeparationTag, abi.ChainEpoch, []byte, types.TipSetKey) (abi.Randomness, error) -} - -type PoRepTask struct { - db *harmonydb.DB - api PoRepAPI - sp *SealPoller - sc *ffi.SealCalls - - max int -} - -func NewPoRepTask(db *harmonydb.DB, api PoRepAPI, sp *SealPoller, sc *ffi.SealCalls, maxPoRep int) *PoRepTask { - return &PoRepTask{ - db: db, - api: api, - sp: sp, - sc: sc, - max: maxPoRep, - } -} - -func (p *PoRepTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - ctx := context.Background() - - var sectorParamsArr []struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"` - TicketEpoch abi.ChainEpoch `db:"ticket_epoch"` - TicketValue []byte `db:"ticket_value"` - SeedEpoch abi.ChainEpoch `db:"seed_epoch"` - SealedCID string `db:"tree_r_cid"` - UnsealedCID string `db:"tree_d_cid"` - } - - err = p.db.Select(ctx, §orParamsArr, ` - SELECT sp_id, sector_number, reg_seal_proof, ticket_epoch, ticket_value, seed_epoch, tree_r_cid, tree_d_cid - FROM sectors_sdr_pipeline - WHERE task_id_porep = $1`, taskID) - if err != nil { - return false, err - } - if len(sectorParamsArr) != 1 { - return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr)) - } - 
sectorParams := sectorParamsArr[0] - - sealed, err := cid.Parse(sectorParams.SealedCID) - if err != nil { - return false, xerrors.Errorf("failed to parse sealed cid: %w", err) - } - - unsealed, err := cid.Parse(sectorParams.UnsealedCID) - if err != nil { - return false, xerrors.Errorf("failed to parse unsealed cid: %w", err) - } - - ts, err := p.api.ChainHead(ctx) - if err != nil { - return false, xerrors.Errorf("failed to get chain head: %w", err) - } - - maddr, err := address.NewIDAddress(uint64(sectorParams.SpID)) - if err != nil { - return false, xerrors.Errorf("failed to create miner address: %w", err) - } - - buf := new(bytes.Buffer) - if err := maddr.MarshalCBOR(buf); err != nil { - return false, xerrors.Errorf("failed to marshal miner address: %w", err) - } - - rand, err := p.api.StateGetRandomnessFromBeacon(ctx, crypto.DomainSeparationTag_InteractiveSealChallengeSeed, sectorParams.SeedEpoch, buf.Bytes(), ts.Key()) - if err != nil { - return false, xerrors.Errorf("failed to get randomness for computing seal proof: %w", err) - } - - sr := storiface.SectorRef{ - ID: abi.SectorID{ - Miner: abi.ActorID(sectorParams.SpID), - Number: abi.SectorNumber(sectorParams.SectorNumber), - }, - ProofType: sectorParams.RegSealProof, - } - - // COMPUTE THE PROOF! - - proof, err := p.sc.PoRepSnark(ctx, sr, sealed, unsealed, sectorParams.TicketValue, abi.InteractiveSealRandomness(rand)) - if err != nil { - //end, rerr := p.recoverErrors(ctx, sectorParams.SpID, sectorParams.SectorNumber, err) - //if rerr != nil { - // return false, xerrors.Errorf("recover errors: %w", rerr) - //} - //if end { - // // done, but the error handling has stored a different than success state - // return true, nil - //} - - return false, xerrors.Errorf("failed to compute seal proof: %w", err) - } - - // store success! 
- n, err := p.db.Exec(ctx, `UPDATE sectors_sdr_pipeline - SET after_porep = TRUE, seed_value = $3, porep_proof = $4, task_id_porep = NULL - WHERE sp_id = $1 AND sector_number = $2`, - sectorParams.SpID, sectorParams.SectorNumber, []byte(rand), proof) - if err != nil { - return false, xerrors.Errorf("store sdr success: updating pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("store sdr success: updated %d rows", n) - } - - return true, nil -} - -func (p *PoRepTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - // todo sort by priority - - id := ids[0] - return &id, nil -} - -func (p *PoRepTask) TypeDetails() harmonytask.TaskTypeDetails { - gpu := 1.0 - if isDevnet { - gpu = 0 - } - res := harmonytask.TaskTypeDetails{ - Max: p.max, - Name: "PoRep", - Cost: resources.Resources{ - Cpu: 1, - Gpu: gpu, - Ram: 50 << 30, // todo correct value - MachineID: 0, - }, - MaxFailures: 5, - Follows: nil, - } - - if isDevnet { - res.Cost.Ram = 1 << 30 - } - - return res -} - -func (p *PoRepTask) Adder(taskFunc harmonytask.AddTaskFunc) { - p.sp.pollers[pollerPoRep].Set(taskFunc) -} - -var _ harmonytask.TaskInterface = &PoRepTask{} diff --git a/curiosrc/seal/task_sdr.go b/curiosrc/seal/task_sdr.go deleted file mode 100644 index c43bf5b5198..00000000000 --- a/curiosrc/seal/task_sdr.go +++ /dev/null @@ -1,279 +0,0 @@ -package seal - -import ( - "bytes" - "context" - - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-commp-utils/nonffi" - "github.com/filecoin-project/go-commp-utils/zerocomm" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/policy" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/ffi" - 
"github.com/filecoin-project/lotus/lib/filler" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var isDevnet = build.BlockDelaySecs < 30 - -func SetDevnet(value bool) { - isDevnet = value -} - -func GetDevnet() bool { - return isDevnet -} - -type SDRAPI interface { - ChainHead(context.Context) (*types.TipSet, error) - StateGetRandomnessFromTickets(context.Context, crypto.DomainSeparationTag, abi.ChainEpoch, []byte, types.TipSetKey) (abi.Randomness, error) -} - -type SDRTask struct { - api SDRAPI - db *harmonydb.DB - sp *SealPoller - - sc *ffi.SealCalls - - max int -} - -func NewSDRTask(api SDRAPI, db *harmonydb.DB, sp *SealPoller, sc *ffi.SealCalls, maxSDR int) *SDRTask { - return &SDRTask{ - api: api, - db: db, - sp: sp, - sc: sc, - max: maxSDR, - } -} - -func (s *SDRTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - ctx := context.Background() - - var sectorParamsArr []struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"` - } - - err = s.db.Select(ctx, §orParamsArr, ` - SELECT sp_id, sector_number, reg_seal_proof - FROM sectors_sdr_pipeline - WHERE task_id_sdr = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("getting sector params: %w", err) - } - - if len(sectorParamsArr) != 1 { - return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr)) - } - sectorParams := sectorParamsArr[0] - - var pieces []struct { - PieceIndex int64 `db:"piece_index"` - PieceCID string `db:"piece_cid"` - PieceSize int64 `db:"piece_size"` - DataRawSize *int64 `db:"data_raw_size"` - } - - err = s.db.Select(ctx, 
&pieces, ` - SELECT piece_index, piece_cid, piece_size, data_raw_size - FROM sectors_sdr_initial_pieces - WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index ASC`, sectorParams.SpID, sectorParams.SectorNumber) - if err != nil { - return false, xerrors.Errorf("getting pieces: %w", err) - } - - ssize, err := sectorParams.RegSealProof.SectorSize() - if err != nil { - return false, xerrors.Errorf("getting sector size: %w", err) - } - - var commd cid.Cid - - var offset abi.UnpaddedPieceSize - var allocated abi.UnpaddedPieceSize - var pieceInfos []abi.PieceInfo - - if len(pieces) > 0 { - for _, p := range pieces { - c, err := cid.Parse(p.PieceCID) - if err != nil { - return false, xerrors.Errorf("parsing piece cid: %w", err) - } - - allocated += abi.UnpaddedPieceSize(*p.DataRawSize) - - pads, padLength := ffiwrapper.GetRequiredPadding(offset.Padded(), abi.PaddedPieceSize(p.PieceSize)) - offset += padLength.Unpadded() - - for _, pad := range pads { - pieceInfos = append(pieceInfos, abi.PieceInfo{ - Size: pad, - PieceCID: zerocomm.ZeroPieceCommitment(pad.Unpadded()), - }) - } - - pieceInfos = append(pieceInfos, abi.PieceInfo{ - Size: abi.PaddedPieceSize(p.PieceSize), - PieceCID: c, - }) - offset += abi.UnpaddedPieceSize(*p.DataRawSize) - } - - fillerSize, err := filler.FillersFromRem(abi.PaddedPieceSize(ssize).Unpadded() - allocated) - if err != nil { - return false, xerrors.Errorf("failed to calculate the final padding: %w", err) - } - for _, fil := range fillerSize { - pieceInfos = append(pieceInfos, abi.PieceInfo{ - Size: fil.Padded(), - PieceCID: zerocomm.ZeroPieceCommitment(fil), - }) - } - - commd, err = nonffi.GenerateUnsealedCID(sectorParams.RegSealProof, pieceInfos) - if err != nil { - return false, xerrors.Errorf("computing CommD: %w", err) - } - } else { - commd = zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()) - } - - sref := storiface.SectorRef{ - ID: abi.SectorID{ - Miner: abi.ActorID(sectorParams.SpID), - Number: 
abi.SectorNumber(sectorParams.SectorNumber), - }, - ProofType: sectorParams.RegSealProof, - } - - // get ticket - maddr, err := address.NewIDAddress(uint64(sectorParams.SpID)) - if err != nil { - return false, xerrors.Errorf("getting miner address: %w", err) - } - - // FAIL: api may be down - // FAIL-RESP: rely on harmony retry - ticket, ticketEpoch, err := s.getTicket(ctx, maddr) - if err != nil { - return false, xerrors.Errorf("getting ticket: %w", err) - } - - // do the SDR!! - - // FAIL: storage may not have enough space - // FAIL-RESP: rely on harmony retry - - // LATEFAIL: compute error in sdr - // LATEFAIL-RESP: Check in Trees task should catch this; Will retry computing - // Trees; After one retry, it should return the sector to the - // SDR stage; max number of retries should be configurable - - err = s.sc.GenerateSDR(ctx, taskID, sref, ticket, commd) - if err != nil { - return false, xerrors.Errorf("generating sdr: %w", err) - } - - // store success! - n, err := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline - SET after_sdr = true, ticket_epoch = $3, ticket_value = $4, task_id_sdr = NULL - WHERE sp_id = $1 AND sector_number = $2`, - sectorParams.SpID, sectorParams.SectorNumber, ticketEpoch, []byte(ticket)) - if err != nil { - return false, xerrors.Errorf("store sdr success: updating pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("store sdr success: updated %d rows", n) - } - - return true, nil -} - -func (s *SDRTask) getTicket(ctx context.Context, maddr address.Address) (abi.SealRandomness, abi.ChainEpoch, error) { - ts, err := s.api.ChainHead(ctx) - if err != nil { - return nil, 0, xerrors.Errorf("getting chain head: %w", err) - } - - ticketEpoch := ts.Height() - policy.SealRandomnessLookback - buf := new(bytes.Buffer) - if err := maddr.MarshalCBOR(buf); err != nil { - return nil, 0, xerrors.Errorf("marshaling miner address: %w", err) - } - - rand, err := s.api.StateGetRandomnessFromTickets(ctx, 
crypto.DomainSeparationTag_SealRandomness, ticketEpoch, buf.Bytes(), ts.Key()) - if err != nil { - return nil, 0, xerrors.Errorf("getting randomness from tickets: %w", err) - } - - return abi.SealRandomness(rand), ticketEpoch, nil -} - -func (s *SDRTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - id := ids[0] - return &id, nil -} - -func (s *SDRTask) TypeDetails() harmonytask.TaskTypeDetails { - ssize := abi.SectorSize(32 << 30) // todo task details needs taskID to get correct sector size - if isDevnet { - ssize = abi.SectorSize(2 << 20) - } - - res := harmonytask.TaskTypeDetails{ - Max: s.max, - Name: "SDR", - Cost: resources.Resources{ // todo offset for prefetch? - Cpu: 4, // todo multicore sdr - Gpu: 0, - Ram: 54 << 30, - Storage: s.sc.Storage(s.taskToSector, storiface.FTCache, storiface.FTNone, ssize, storiface.PathSealing, paths.MinFreeStoragePercentage), - }, - MaxFailures: 2, - Follows: nil, - } - - if isDevnet { - res.Cost.Ram = 1 << 30 - res.Cost.Cpu = 1 - } - - return res -} - -func (s *SDRTask) Adder(taskFunc harmonytask.AddTaskFunc) { - s.sp.pollers[pollerSDR].Set(taskFunc) -} - -func (s *SDRTask) taskToSector(id harmonytask.TaskID) (ffi.SectorRef, error) { - var refs []ffi.SectorRef - - err := s.db.Select(context.Background(), &refs, `SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_sdr = $1`, id) - if err != nil { - return ffi.SectorRef{}, xerrors.Errorf("getting sector ref: %w", err) - } - - if len(refs) != 1 { - return ffi.SectorRef{}, xerrors.Errorf("expected 1 sector ref, got %d", len(refs)) - } - - return refs[0], nil -} - -var _ harmonytask.TaskInterface = &SDRTask{} diff --git a/curiosrc/seal/task_submit_commit.go b/curiosrc/seal/task_submit_commit.go deleted file mode 100644 index 8c99f0b0a82..00000000000 --- a/curiosrc/seal/task_submit_commit.go +++ /dev/null @@ -1,423 +0,0 @@ -package seal - -import ( - "bytes" - "context" - "encoding/json" - "fmt" 
- - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - miner2 "github.com/filecoin-project/go-state-types/builtin/v13/miner" - verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" - verifregtypes9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/message" - "github.com/filecoin-project/lotus/curiosrc/multictladdr" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/storage/ctladdr" -) - -type SubmitCommitAPI interface { - ChainHead(context.Context) (*types.TipSet, error) - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) - StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (big.Int, error) - StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error) - StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes9.AllocationId, tsk types.TipSetKey) (*verifregtypes9.Allocation, error) - StateGetAllocationIdForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (verifregtypes9.AllocationId, error) - ctladdr.NodeApi -} - -type commitConfig struct { - maxFee types.FIL - RequireActivationSuccess bool 
- RequireNotificationSuccess bool -} - -type SubmitCommitTask struct { - sp *SealPoller - db *harmonydb.DB - api SubmitCommitAPI - - sender *message.Sender - as *multictladdr.MultiAddressSelector - cfg commitConfig -} - -func NewSubmitCommitTask(sp *SealPoller, db *harmonydb.DB, api SubmitCommitAPI, sender *message.Sender, as *multictladdr.MultiAddressSelector, cfg *config.CurioConfig) *SubmitCommitTask { - - cnfg := commitConfig{ - maxFee: cfg.Fees.MaxCommitGasFee, - RequireActivationSuccess: cfg.Subsystems.RequireActivationSuccess, - RequireNotificationSuccess: cfg.Subsystems.RequireNotificationSuccess, - } - - return &SubmitCommitTask{ - sp: sp, - db: db, - api: api, - sender: sender, - as: as, - cfg: cnfg, - } -} - -func (s *SubmitCommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - ctx := context.Background() - - var sectorParamsArr []struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - Proof []byte `db:"porep_proof"` - } - - err = s.db.Select(ctx, §orParamsArr, ` - SELECT sp_id, sector_number, porep_proof - FROM sectors_sdr_pipeline - WHERE task_id_commit_msg = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("getting sector params: %w", err) - } - - if len(sectorParamsArr) != 1 { - return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr)) - } - sectorParams := sectorParamsArr[0] - - var pieces []struct { - PieceIndex int64 `db:"piece_index"` - PieceCID string `db:"piece_cid"` - PieceSize int64 `db:"piece_size"` - Proposal json.RawMessage `db:"f05_deal_proposal"` - Manifest json.RawMessage `db:"direct_piece_activation_manifest"` - DealID abi.DealID `db:"f05_deal_id"` - } - - err = s.db.Select(ctx, &pieces, ` - SELECT piece_index, - piece_cid, - piece_size, - f05_deal_proposal, - direct_piece_activation_manifest, - COALESCE(f05_deal_id, 0) AS f05_deal_id - FROM sectors_sdr_initial_pieces - WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index 
ASC`, sectorParams.SpID, sectorParams.SectorNumber) - if err != nil { - return false, xerrors.Errorf("getting pieces: %w", err) - } - - maddr, err := address.NewIDAddress(uint64(sectorParams.SpID)) - if err != nil { - return false, xerrors.Errorf("getting miner address: %w", err) - } - - ts, err := s.api.ChainHead(ctx) - if err != nil { - return false, xerrors.Errorf("getting chain head: %w", err) - } - - pci, err := s.api.StateSectorPreCommitInfo(ctx, maddr, abi.SectorNumber(sectorParams.SectorNumber), ts.Key()) - if err != nil { - return false, xerrors.Errorf("getting precommit info: %w", err) - } - if pci == nil { - return false, xerrors.Errorf("precommit info not found on chain") - } - - mi, err := s.api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return false, xerrors.Errorf("getting miner info: %w", err) - } - - params := miner.ProveCommitSectors3Params{ - RequireActivationSuccess: s.cfg.RequireActivationSuccess, - RequireNotificationSuccess: s.cfg.RequireNotificationSuccess, - } - - var pams []miner.PieceActivationManifest - - for _, piece := range pieces { - if piece.Proposal != nil { - var prop *market.DealProposal - err = json.Unmarshal(piece.Proposal, &prop) - if err != nil { - return false, xerrors.Errorf("marshalling json to deal proposal: %w", err) - } - alloc, err := s.api.StateGetAllocationIdForPendingDeal(ctx, piece.DealID, types.EmptyTSK) - if err != nil { - return false, xerrors.Errorf("getting allocation for deal %d: %w", piece.DealID, err) - } - clid, err := s.api.StateLookupID(ctx, prop.Client, types.EmptyTSK) - if err != nil { - return false, xerrors.Errorf("getting client address for deal %d: %w", piece.DealID, err) - } - - clientId, err := address.IDFromAddress(clid) - if err != nil { - return false, xerrors.Errorf("getting client address for deal %d: %w", piece.DealID, err) - } - - var vac *miner2.VerifiedAllocationKey - if alloc != verifregtypes9.NoAllocationID { - vac = &miner2.VerifiedAllocationKey{ - Client: 
abi.ActorID(clientId), - ID: verifreg13.AllocationId(alloc), - } - } - - payload, err := cborutil.Dump(piece.DealID) - if err != nil { - return false, xerrors.Errorf("serializing deal id: %w", err) - } - - pams = append(pams, miner.PieceActivationManifest{ - CID: prop.PieceCID, - Size: prop.PieceSize, - VerifiedAllocationKey: vac, - Notify: []miner2.DataActivationNotification{ - { - Address: market.Address, - Payload: payload, - }, - }, - }) - } else { - var pam *miner.PieceActivationManifest - err = json.Unmarshal(piece.Manifest, &pam) - if err != nil { - return false, xerrors.Errorf("marshalling json to PieceManifest: %w", err) - } - err = s.allocationCheck(ctx, pam, pci, abi.ActorID(sectorParams.SpID), ts) - if err != nil { - return false, err - } - pams = append(pams, *pam) - } - } - - params.SectorActivations = append(params.SectorActivations, miner.SectorActivationManifest{ - SectorNumber: abi.SectorNumber(sectorParams.SectorNumber), - Pieces: pams, - }) - params.SectorProofs = append(params.SectorProofs, sectorParams.Proof) - - enc := new(bytes.Buffer) - if err := params.MarshalCBOR(enc); err != nil { - return false, xerrors.Errorf("could not serialize commit params: %w", err) - } - - collateral, err := s.api.StateMinerInitialPledgeCollateral(ctx, maddr, pci.Info, ts.Key()) - if err != nil { - return false, xerrors.Errorf("getting initial pledge collateral: %w", err) - } - - collateral = big.Sub(collateral, pci.PreCommitDeposit) - if collateral.LessThan(big.Zero()) { - collateral = big.Zero() - } - - a, _, err := s.as.AddressFor(ctx, s.api, maddr, mi, api.CommitAddr, collateral, big.Zero()) - if err != nil { - return false, xerrors.Errorf("getting address for precommit: %w", err) - } - - msg := &types.Message{ - To: maddr, - From: a, - Method: builtin.MethodsMiner.ProveCommitSectors3, - Params: enc.Bytes(), - Value: collateral, // todo config for pulling from miner balance!! 
- } - - mss := &api.MessageSendSpec{ - MaxFee: abi.TokenAmount(s.cfg.maxFee), - } - - mcid, err := s.sender.Send(ctx, msg, mss, "commit") - if err != nil { - return false, xerrors.Errorf("pushing message to mpool: %w", err) - } - - _, err = s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline SET commit_msg_cid = $1, after_commit_msg = TRUE, task_id_commit_msg = NULL WHERE sp_id = $2 AND sector_number = $3`, mcid, sectorParams.SpID, sectorParams.SectorNumber) - if err != nil { - return false, xerrors.Errorf("updating commit_msg_cid: %w", err) - } - - _, err = s.db.Exec(ctx, `INSERT INTO message_waits (signed_message_cid) VALUES ($1)`, mcid) - if err != nil { - return false, xerrors.Errorf("inserting into message_waits: %w", err) - } - - if err := s.transferFinalizedSectorData(ctx, sectorParams.SpID, sectorParams.SectorNumber); err != nil { - return false, xerrors.Errorf("transferring finalized sector data: %w", err) - } - - return true, nil -} - -func (s *SubmitCommitTask) transferFinalizedSectorData(ctx context.Context, spID, sectorNum int64) error { - if _, err := s.db.Exec(ctx, ` - INSERT INTO sectors_meta ( - sp_id, - sector_num, - reg_seal_proof, - ticket_epoch, - ticket_value, - orig_sealed_cid, - orig_unsealed_cid, - cur_sealed_cid, - cur_unsealed_cid, - msg_cid_precommit, - msg_cid_commit, - seed_epoch, - seed_value - ) - SELECT - sp_id, - sector_number as sector_num, - reg_seal_proof, - ticket_epoch, - ticket_value, - tree_r_cid as orig_sealed_cid, - tree_d_cid as orig_unsealed_cid, - tree_r_cid as cur_sealed_cid, - tree_d_cid as cur_unsealed_cid, - precommit_msg_cid, - commit_msg_cid, - seed_epoch, - seed_value - FROM - sectors_sdr_pipeline - WHERE - sp_id = $1 AND - sector_number = $2 - ON CONFLICT (sp_id, sector_num) DO UPDATE SET - reg_seal_proof = excluded.reg_seal_proof, - ticket_epoch = excluded.ticket_epoch, - ticket_value = excluded.ticket_value, - orig_sealed_cid = excluded.orig_sealed_cid, - cur_sealed_cid = excluded.cur_sealed_cid, - msg_cid_precommit = 
excluded.msg_cid_precommit, - msg_cid_commit = excluded.msg_cid_commit, - seed_epoch = excluded.seed_epoch, - seed_value = excluded.seed_value; - `, spID, sectorNum); err != nil { - return fmt.Errorf("failed to insert/update sectors_meta: %w", err) - } - - // Execute the query for piece metadata - if _, err := s.db.Exec(ctx, ` - INSERT INTO sectors_meta_pieces ( - sp_id, - sector_num, - piece_num, - piece_cid, - piece_size, - requested_keep_data, - raw_data_size, - start_epoch, - orig_end_epoch, - f05_deal_id, - ddo_pam - ) - SELECT - sp_id, - sector_number AS sector_num, - piece_index AS piece_num, - piece_cid, - piece_size, - not data_delete_on_finalize as requested_keep_data, - data_raw_size, - COALESCE(f05_deal_start_epoch, direct_start_epoch) as start_epoch, - COALESCE(f05_deal_end_epoch, direct_end_epoch) as orig_end_epoch, - f05_deal_id, - direct_piece_activation_manifest as ddo_pam - FROM - sectors_sdr_initial_pieces - WHERE - sp_id = $1 AND - sector_number = $2 - ON CONFLICT (sp_id, sector_num, piece_num) DO UPDATE SET - piece_cid = excluded.piece_cid, - piece_size = excluded.piece_size, - requested_keep_data = excluded.requested_keep_data, - raw_data_size = excluded.raw_data_size, - start_epoch = excluded.start_epoch, - orig_end_epoch = excluded.orig_end_epoch, - f05_deal_id = excluded.f05_deal_id, - ddo_pam = excluded.ddo_pam; - `, spID, sectorNum); err != nil { - return fmt.Errorf("failed to insert/update sector_meta_pieces: %w", err) - } - - return nil -} - -func (s *SubmitCommitTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - id := ids[0] - return &id, nil -} - -func (s *SubmitCommitTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: 128, - Name: "CommitSubmit", - Cost: resources.Resources{ - Cpu: 0, - Gpu: 0, - Ram: 1 << 20, - }, - MaxFailures: 16, - } -} - -func (s *SubmitCommitTask) Adder(taskFunc harmonytask.AddTaskFunc) { - 
s.sp.pollers[pollerCommitMsg].Set(taskFunc) -} - -func (s *SubmitCommitTask) allocationCheck(ctx context.Context, piece *miner.PieceActivationManifest, precomitInfo *miner.SectorPreCommitOnChainInfo, miner abi.ActorID, ts *types.TipSet) error { - // skip pieces not claiming an allocation - if piece.VerifiedAllocationKey == nil { - return nil - } - addr, err := address.NewIDAddress(uint64(piece.VerifiedAllocationKey.Client)) - if err != nil { - return err - } - - alloc, err := s.api.StateGetAllocation(ctx, addr, verifregtypes9.AllocationId(piece.VerifiedAllocationKey.ID), ts.Key()) - if err != nil { - return err - } - if alloc == nil { - return xerrors.Errorf("no allocation found for piece %s with allocation ID %d", piece.CID.String(), piece.VerifiedAllocationKey.ID) - } - if alloc.Provider != miner { - return xerrors.Errorf("provider id mismatch for piece %s: expected %d and found %d", piece.CID.String(), miner, alloc.Provider) - } - if alloc.Size != piece.Size { - return xerrors.Errorf("size mismatch for piece %s: expected %d and found %d", piece.CID.String(), piece.Size, alloc.Size) - } - if precomitInfo.Info.Expiration < ts.Height()+alloc.TermMin { - return xerrors.Errorf("sector expiration %d is before than allocation TermMin %d for piece %s", precomitInfo.Info.Expiration, ts.Height()+alloc.TermMin, piece.CID.String()) - } - if precomitInfo.Info.Expiration > ts.Height()+alloc.TermMax { - return xerrors.Errorf("sector expiration %d is later than allocation TermMax %d for piece %s", precomitInfo.Info.Expiration, ts.Height()+alloc.TermMax, piece.CID.String()) - } - - return nil -} - -var _ harmonytask.TaskInterface = &SubmitCommitTask{} diff --git a/curiosrc/seal/task_submit_precommit.go b/curiosrc/seal/task_submit_precommit.go deleted file mode 100644 index c55a48bd706..00000000000 --- a/curiosrc/seal/task_submit_precommit.go +++ /dev/null @@ -1,297 +0,0 @@ -package seal - -import ( - "bytes" - "context" - - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - 
"github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - actorstypes "github.com/filecoin-project/go-state-types/actors" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner" - "github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/actors/policy" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/message" - "github.com/filecoin-project/lotus/curiosrc/multictladdr" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/storage/ctladdr" -) - -type SubmitPrecommitTaskApi interface { - ChainHead(context.Context) (*types.TipSet, error) - StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (big.Int, error) - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) - StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) - ctladdr.NodeApi -} - -type SubmitPrecommitTask struct { - sp *SealPoller - db *harmonydb.DB - api SubmitPrecommitTaskApi - sender *message.Sender - as *multictladdr.MultiAddressSelector - - maxFee types.FIL -} - -func NewSubmitPrecommitTask(sp *SealPoller, db *harmonydb.DB, api SubmitPrecommitTaskApi, sender *message.Sender, as *multictladdr.MultiAddressSelector, maxFee types.FIL) *SubmitPrecommitTask { - return &SubmitPrecommitTask{ - sp: sp, - db: db, - api: api, - sender: sender, - as: as, - - maxFee: maxFee, - } -} - -func (s *SubmitPrecommitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) 
{ - ctx := context.Background() - - // 1. Load sector info - - var sectorParamsArr []struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"` - TicketEpoch abi.ChainEpoch `db:"ticket_epoch"` - SealedCID string `db:"tree_r_cid"` - UnsealedCID string `db:"tree_d_cid"` - } - - err = s.db.Select(ctx, §orParamsArr, ` - SELECT sp_id, sector_number, reg_seal_proof, ticket_epoch, tree_r_cid, tree_d_cid - FROM sectors_sdr_pipeline - WHERE task_id_precommit_msg = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("getting sector params: %w", err) - } - - if len(sectorParamsArr) != 1 { - return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr)) - } - sectorParams := sectorParamsArr[0] - - maddr, err := address.NewIDAddress(uint64(sectorParams.SpID)) - if err != nil { - return false, xerrors.Errorf("getting miner address: %w", err) - } - - sealedCID, err := cid.Parse(sectorParams.SealedCID) - if err != nil { - return false, xerrors.Errorf("parsing sealed CID: %w", err) - } - - unsealedCID, err := cid.Parse(sectorParams.UnsealedCID) - if err != nil { - return false, xerrors.Errorf("parsing unsealed CID: %w", err) - } - - // 2. 
Prepare message params - - head, err := s.api.ChainHead(ctx) - if err != nil { - return false, xerrors.Errorf("getting chain head: %w", err) - } - - params := miner.PreCommitSectorBatchParams2{} - - expiration := sectorParams.TicketEpoch + miner12.MaxSectorExpirationExtension - - params.Sectors = append(params.Sectors, miner.SectorPreCommitInfo{ - SealProof: sectorParams.RegSealProof, - SectorNumber: abi.SectorNumber(sectorParams.SectorNumber), - SealedCID: sealedCID, - SealRandEpoch: sectorParams.TicketEpoch, - Expiration: expiration, - }) - - { - var pieces []struct { - PieceIndex int64 `db:"piece_index"` - PieceCID string `db:"piece_cid"` - PieceSize int64 `db:"piece_size"` - DealStartEpoch int64 `db:"deal_start_epoch"` - DealEndEpoch int64 `db:"deal_end_epoch"` - } - - err = s.db.Select(ctx, &pieces, ` - SELECT piece_index, - piece_cid, - piece_size, - COALESCE(f05_deal_end_epoch, direct_end_epoch, 0) AS deal_end_epoch, - COALESCE(f05_deal_start_epoch, direct_start_epoch, 0) AS deal_start_epoch - FROM sectors_sdr_initial_pieces - WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index ASC`, sectorParams.SpID, sectorParams.SectorNumber) - if err != nil { - return false, xerrors.Errorf("getting pieces: %w", err) - } - - if len(pieces) > 0 { - params.Sectors[0].UnsealedCid = &unsealedCID - for _, p := range pieces { - if p.DealStartEpoch > 0 && abi.ChainEpoch(p.DealStartEpoch) < head.Height() { - // deal start epoch is in the past, can't precommit this sector anymore - _, perr := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline - SET failed = TRUE, failed_at = NOW(), failed_reason = 'past-start-epoch', failed_reason_msg = 'precommit: start epoch is in the past', task_id_precommit_msg = NULL - WHERE task_id_precommit_msg = $1`, taskID) - if perr != nil { - return false, xerrors.Errorf("persisting precommit start epoch expiry: %w", perr) - } - return true, xerrors.Errorf("deal start epoch is in the past") - } - if p.DealEndEpoch > 0 && abi.ChainEpoch(p.DealEndEpoch) 
> params.Sectors[0].Expiration { - params.Sectors[0].Expiration = abi.ChainEpoch(p.DealEndEpoch) - } - } - } - } - - nv, err := s.api.StateNetworkVersion(ctx, types.EmptyTSK) - if err != nil { - return false, xerrors.Errorf("getting network version: %w", err) - } - av, err := actorstypes.VersionForNetwork(nv) - if err != nil { - return false, xerrors.Errorf("failed to get actors version: %w", err) - } - msd, err := policy.GetMaxProveCommitDuration(av, sectorParams.RegSealProof) - if err != nil { - return false, xerrors.Errorf("failed to get max prove commit duration: %w", err) - } - - if minExpiration := sectorParams.TicketEpoch + policy.MaxPreCommitRandomnessLookback + msd + miner.MinSectorExpiration; params.Sectors[0].Expiration < minExpiration { - params.Sectors[0].Expiration = minExpiration - } - - // 3. Check precommit - - { - record, err := s.checkPrecommit(ctx, params) - if err != nil { - if record { - _, perr := s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline - SET failed = TRUE, failed_at = NOW(), failed_reason = 'precommit-check', failed_reason_msg = $1, task_id_precommit_msg = NULL - WHERE task_id_precommit_msg = $2`, err.Error(), taskID) - if perr != nil { - return false, xerrors.Errorf("persisting precommit check error: %w", perr) - } - } - - return record, xerrors.Errorf("checking precommit: %w", err) - } - } - - // 4. 
Prepare and send message - - var pbuf bytes.Buffer - if err := params.MarshalCBOR(&pbuf); err != nil { - return false, xerrors.Errorf("serializing params: %w", err) - } - - collateral, err := s.api.StateMinerPreCommitDepositForPower(ctx, maddr, params.Sectors[0], types.EmptyTSK) - if err != nil { - return false, xerrors.Errorf("getting precommit deposit: %w", err) - } - - mi, err := s.api.StateMinerInfo(ctx, maddr, types.EmptyTSK) - if err != nil { - return false, xerrors.Errorf("getting miner info: %w", err) - } - - a, _, err := s.as.AddressFor(ctx, s.api, maddr, mi, api.PreCommitAddr, collateral, big.Zero()) - if err != nil { - return false, xerrors.Errorf("getting address for precommit: %w", err) - } - - msg := &types.Message{ - To: maddr, - From: a, - Method: builtin.MethodsMiner.PreCommitSectorBatch2, - Params: pbuf.Bytes(), - Value: collateral, // todo config for pulling from miner balance!! - } - - mss := &api.MessageSendSpec{ - MaxFee: abi.TokenAmount(s.maxFee), - } - - mcid, err := s.sender.Send(ctx, msg, mss, "precommit") - if err != nil { - return false, xerrors.Errorf("sending message: %w", err) - } - - // set precommit_msg_cid - _, err = s.db.Exec(ctx, `UPDATE sectors_sdr_pipeline - SET precommit_msg_cid = $1, after_precommit_msg = TRUE, task_id_precommit_msg = NULL - WHERE task_id_precommit_msg = $2`, mcid, taskID) - if err != nil { - return false, xerrors.Errorf("updating precommit_msg_cid: %w", err) - } - - _, err = s.db.Exec(ctx, `INSERT INTO message_waits (signed_message_cid) VALUES ($1)`, mcid) - if err != nil { - return false, xerrors.Errorf("inserting into message_waits: %w", err) - } - - return true, nil -} - -func (s *SubmitPrecommitTask) checkPrecommit(ctx context.Context, params miner.PreCommitSectorBatchParams2) (record bool, err error) { - if len(params.Sectors) != 1 { - return false, xerrors.Errorf("expected 1 sector") - } - - preCommitInfo := params.Sectors[0] - - head, err := s.api.ChainHead(ctx) - if err != nil { - return false, 
xerrors.Errorf("getting chain head: %w", err) - } - height := head.Height() - - //never commit P2 message before, check ticket expiration - ticketEarliest := height - policy.MaxPreCommitRandomnessLookback - - if preCommitInfo.SealRandEpoch < ticketEarliest { - return true, xerrors.Errorf("ticket expired: seal height: %d, head: %d", preCommitInfo.SealRandEpoch+policy.SealRandomnessLookback, height) - } - - return true, nil -} - -func (s *SubmitPrecommitTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - id := ids[0] - return &id, nil -} - -func (s *SubmitPrecommitTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: 1024, - Name: "PreCommitSubmit", - Cost: resources.Resources{ - Cpu: 0, - Gpu: 0, - Ram: 1 << 20, - }, - MaxFailures: 16, - } -} - -func (s *SubmitPrecommitTask) Adder(taskFunc harmonytask.AddTaskFunc) { - s.sp.pollers[pollerPrecommitMsg].Set(taskFunc) -} - -var _ harmonytask.TaskInterface = &SubmitPrecommitTask{} diff --git a/curiosrc/seal/task_treed.go b/curiosrc/seal/task_treed.go deleted file mode 100644 index 548e54c7a7b..00000000000 --- a/curiosrc/seal/task_treed.go +++ /dev/null @@ -1,366 +0,0 @@ -package seal - -import ( - "context" - "io" - "net/http" - "net/url" - "strconv" - - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-commp-utils/nonffi" - "github.com/filecoin-project/go-commp-utils/zerocomm" - "github.com/filecoin-project/go-padreader" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/curiosrc/ffi" - "github.com/filecoin-project/lotus/lib/filler" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader" - "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" - 
"github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type TreeDTask struct { - sp *SealPoller - db *harmonydb.DB - sc *ffi.SealCalls - - max int -} - -func (t *TreeDTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - if isDevnet { - return &ids[0], nil - } - if engine.Resources().Gpu > 0 { - return &ids[0], nil - } - return nil, nil -} - -func (t *TreeDTask) TypeDetails() harmonytask.TaskTypeDetails { - ssize := abi.SectorSize(32 << 30) // todo task details needs taskID to get correct sector size - if isDevnet { - ssize = abi.SectorSize(2 << 20) - } - - return harmonytask.TaskTypeDetails{ - Max: t.max, - Name: "TreeD", - Cost: resources.Resources{ - Cpu: 1, - Ram: 1 << 30, - Gpu: 0, - Storage: t.sc.Storage(t.taskToSector, storiface.FTNone, storiface.FTCache, ssize, storiface.PathSealing, 1.0), - }, - MaxFailures: 3, - Follows: nil, - } -} - -func (t *TreeDTask) taskToSector(id harmonytask.TaskID) (ffi.SectorRef, error) { - var refs []ffi.SectorRef - - err := t.db.Select(context.Background(), &refs, `SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_tree_d = $1`, id) - if err != nil { - return ffi.SectorRef{}, xerrors.Errorf("getting sector ref: %w", err) - } - - if len(refs) != 1 { - return ffi.SectorRef{}, xerrors.Errorf("expected 1 sector ref, got %d", len(refs)) - } - - return refs[0], nil -} - -func (t *TreeDTask) Adder(taskFunc harmonytask.AddTaskFunc) { - t.sp.pollers[pollerTreeD].Set(taskFunc) -} - -func NewTreeDTask(sp *SealPoller, db *harmonydb.DB, sc *ffi.SealCalls, maxTrees int) *TreeDTask { - return &TreeDTask{ - sp: sp, - db: db, - sc: sc, - - max: maxTrees, - } -} - -func (t *TreeDTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - ctx := context.Background() - - var sectorParamsArr []struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - RegSealProof abi.RegisteredSealProof 
`db:"reg_seal_proof"` - } - - err = t.db.Select(ctx, §orParamsArr, ` - SELECT sp_id, sector_number, reg_seal_proof - FROM sectors_sdr_pipeline - WHERE task_id_tree_d = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("getting sector params: %w", err) - } - - if len(sectorParamsArr) != 1 { - return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr)) - } - sectorParams := sectorParamsArr[0] - - sref := storiface.SectorRef{ - ID: abi.SectorID{ - Miner: abi.ActorID(sectorParams.SpID), - Number: abi.SectorNumber(sectorParams.SectorNumber), - }, - ProofType: sectorParams.RegSealProof, - } - - // Fetch the Sector to local storage - fsPaths, pathIds, release, err := t.sc.PreFetch(ctx, sref, &taskID) - if err != nil { - return false, xerrors.Errorf("failed to prefetch sectors: %w", err) - } - defer release() - - var pieces []struct { - PieceIndex int64 `db:"piece_index"` - PieceCID string `db:"piece_cid"` - PieceSize int64 `db:"piece_size"` - - DataUrl *string `db:"data_url"` - DataHeaders *[]byte `db:"data_headers"` - DataRawSize *int64 `db:"data_raw_size"` - } - - err = t.db.Select(ctx, &pieces, ` - SELECT piece_index, piece_cid, piece_size, data_url, data_headers, data_raw_size - FROM sectors_sdr_initial_pieces - WHERE sp_id = $1 AND sector_number = $2 ORDER BY piece_index ASC`, sectorParams.SpID, sectorParams.SectorNumber) - if err != nil { - return false, xerrors.Errorf("getting pieces: %w", err) - } - - ssize, err := sectorParams.RegSealProof.SectorSize() - if err != nil { - return false, xerrors.Errorf("getting sector size: %w", err) - } - - var commd cid.Cid - var dataReader io.Reader - var unpaddedData bool - - var closers []io.Closer - defer func() { - for _, c := range closers { - if err := c.Close(); err != nil { - log.Errorw("error closing piece reader", "error", err) - } - } - }() - - if len(pieces) > 0 { - var pieceInfos []abi.PieceInfo - var pieceReaders []io.Reader - var offset abi.UnpaddedPieceSize - var allocated 
abi.UnpaddedPieceSize - - for _, p := range pieces { - // make pieceInfo - c, err := cid.Parse(p.PieceCID) - if err != nil { - return false, xerrors.Errorf("parsing piece cid: %w", err) - } - - allocated += abi.UnpaddedPieceSize(*p.DataRawSize) - - pads, padLength := ffiwrapper.GetRequiredPadding(offset.Padded(), abi.PaddedPieceSize(p.PieceSize)) - offset += padLength.Unpadded() - - for _, pad := range pads { - pieceInfos = append(pieceInfos, abi.PieceInfo{ - Size: pad, - PieceCID: zerocomm.ZeroPieceCommitment(pad.Unpadded()), - }) - pieceReaders = append(pieceReaders, nullreader.NewNullReader(pad.Unpadded())) - } - - pieceInfos = append(pieceInfos, abi.PieceInfo{ - Size: abi.PaddedPieceSize(p.PieceSize), - PieceCID: c, - }) - - offset += abi.UnpaddedPieceSize(*p.DataRawSize) - - // make pieceReader - if p.DataUrl != nil { - dataUrl := *p.DataUrl - - goUrl, err := url.Parse(dataUrl) - if err != nil { - return false, xerrors.Errorf("parsing data URL: %w", err) - } - - if goUrl.Scheme == "pieceref" { - // url is to a piece reference - - refNum, err := strconv.ParseInt(goUrl.Opaque, 10, 64) - if err != nil { - return false, xerrors.Errorf("parsing piece reference number: %w", err) - } - - // get pieceID - var pieceID []struct { - PieceID storiface.PieceNumber `db:"piece_id"` - } - err = t.db.Select(ctx, &pieceID, `SELECT piece_id FROM parked_piece_refs WHERE ref_id = $1`, refNum) - if err != nil { - return false, xerrors.Errorf("getting pieceID: %w", err) - } - - if len(pieceID) != 1 { - return false, xerrors.Errorf("expected 1 pieceID, got %d", len(pieceID)) - } - - pr, err := t.sc.PieceReader(ctx, pieceID[0].PieceID) - if err != nil { - return false, xerrors.Errorf("getting piece reader: %w", err) - } - - closers = append(closers, pr) - - reader, _ := padreader.New(pr, uint64(*p.DataRawSize)) - pieceReaders = append(pieceReaders, reader) - } else { - reader, _ := padreader.New(&UrlPieceReader{ - Url: dataUrl, - RawSize: *p.DataRawSize, - }, uint64(*p.DataRawSize)) - 
pieceReaders = append(pieceReaders, reader) - } - - } else { // padding piece (w/o fr32 padding, added in TreeD) - pieceReaders = append(pieceReaders, nullreader.NewNullReader(abi.PaddedPieceSize(p.PieceSize).Unpadded())) - } - } - - fillerSize, err := filler.FillersFromRem(abi.PaddedPieceSize(ssize).Unpadded() - allocated) - if err != nil { - return false, xerrors.Errorf("failed to calculate the final padding: %w", err) - } - for _, fil := range fillerSize { - pieceInfos = append(pieceInfos, abi.PieceInfo{ - Size: fil.Padded(), - PieceCID: zerocomm.ZeroPieceCommitment(fil), - }) - pieceReaders = append(pieceReaders, nullreader.NewNullReader(fil)) - } - - commd, err = nonffi.GenerateUnsealedCID(sectorParams.RegSealProof, pieceInfos) - if err != nil { - return false, xerrors.Errorf("computing CommD: %w", err) - } - - dataReader = io.MultiReader(pieceReaders...) - unpaddedData = true - } else { - commd = zerocomm.ZeroPieceCommitment(abi.PaddedPieceSize(ssize).Unpadded()) - dataReader = nullreader.NewNullReader(abi.UnpaddedPieceSize(ssize)) - unpaddedData = false // nullreader includes fr32 zero bits - } - - // Generate Tree D - err = t.sc.TreeD(ctx, sref, commd, abi.PaddedPieceSize(ssize), dataReader, unpaddedData, fsPaths, pathIds) - if err != nil { - return false, xerrors.Errorf("failed to generate TreeD: %w", err) - } - - n, err := t.db.Exec(ctx, `UPDATE sectors_sdr_pipeline - SET after_tree_d = true, tree_d_cid = $3, task_id_tree_d = NULL WHERE sp_id = $1 AND sector_number = $2`, - sectorParams.SpID, sectorParams.SectorNumber, commd) - if err != nil { - return false, xerrors.Errorf("store TreeD success: updating pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("store TreeD success: updated %d rows", n) - } - - return true, nil -} - -type UrlPieceReader struct { - Url string - RawSize int64 // the exact number of bytes read, if we read more or less that's an error - - readSoFar int64 - closed bool - active io.ReadCloser // auto-closed on EOF -} 
- -func (u *UrlPieceReader) Read(p []byte) (n int, err error) { - // Check if we have already read the required amount of data - if u.readSoFar >= u.RawSize { - return 0, io.EOF - } - - // If 'active' is nil, initiate the HTTP request - if u.active == nil { - resp, err := http.Get(u.Url) - if err != nil { - return 0, err - } - - // Set 'active' to the response body - u.active = resp.Body - } - - // Calculate the maximum number of bytes we can read without exceeding RawSize - toRead := u.RawSize - u.readSoFar - if int64(len(p)) > toRead { - p = p[:toRead] - } - - n, err = u.active.Read(p) - - // Update the number of bytes read so far - u.readSoFar += int64(n) - - // If the number of bytes read exceeds RawSize, return an error - if u.readSoFar > u.RawSize { - return n, xerrors.New("read beyond the specified RawSize") - } - - // If EOF is reached, close the reader - if err == io.EOF { - cerr := u.active.Close() - u.closed = true - if cerr != nil { - log.Errorf("error closing http piece reader: %s", cerr) - } - - // if we're below the RawSize, return an unexpected EOF error - if u.readSoFar < u.RawSize { - log.Errorw("unexpected EOF", "readSoFar", u.readSoFar, "rawSize", u.RawSize, "url", u.Url) - return n, io.ErrUnexpectedEOF - } - } - - return n, err -} - -func (u *UrlPieceReader) Close() error { - if !u.closed { - u.closed = true - return u.active.Close() - } - - return nil -} - -var _ harmonytask.TaskInterface = &TreeDTask{} diff --git a/curiosrc/seal/task_treed_test.go b/curiosrc/seal/task_treed_test.go deleted file mode 100644 index b65ddd4e858..00000000000 --- a/curiosrc/seal/task_treed_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package seal - -import ( - "io" - "net/http" - "net/http/httptest" - "testing" - - "github.com/stretchr/testify/require" -) - -// TestUrlPieceReader_Read tests various scenarios of reading data from UrlPieceReader -func TestUrlPieceReader_Read(t *testing.T) { - // Create a test server - testData := "This is a test string." 
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - _, err := io.WriteString(w, testData) - require.NoError(t, err) - })) - defer ts.Close() - - tests := []struct { - name string - rawSize int64 - expected string - expectError bool - expectEOF bool - }{ - {"ReadExact", int64(len(testData)), testData, false, true}, - {"ReadLess", 10, testData[:10], false, false}, - {"ReadMore", int64(len(testData)) + 10, "", true, false}, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - reader := UrlPieceReader{ - Url: ts.URL, - RawSize: tt.rawSize, - } - buffer, err := io.ReadAll(&reader) - if err != nil { - if (err != io.EOF && !tt.expectError) || (err == io.EOF && !tt.expectEOF) { - t.Errorf("Read() error = %v, expectError %v, expectEOF %v", err, tt.expectError, tt.expectEOF) - } - } else { - if got := string(buffer); got != tt.expected { - t.Errorf("Read() got = %v, expected %v", got, tt.expected) - } - } - }) - } -} - -// TestUrlPieceReader_Read_Error tests the error handling of UrlPieceReader -func TestUrlPieceReader_Read_Error(t *testing.T) { - // Simulate a server that returns an error - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - http.Error(w, "error", http.StatusInternalServerError) - })) - defer ts.Close() - - reader := UrlPieceReader{ - Url: ts.URL, - RawSize: 100, - } - buffer := make([]byte, 200) - - _, err := reader.Read(buffer) - if err == nil { - t.Errorf("Expected an error, but got nil") - } -} diff --git a/curiosrc/seal/task_treerc.go b/curiosrc/seal/task_treerc.go deleted file mode 100644 index 2f20815f39e..00000000000 --- a/curiosrc/seal/task_treerc.go +++ /dev/null @@ -1,200 +0,0 @@ -package seal - -import ( - "context" - - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/curiosrc/ffi" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" 
- "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type TreeRCTask struct { - sp *SealPoller - db *harmonydb.DB - sc *ffi.SealCalls - - max int -} - -func NewTreeRCTask(sp *SealPoller, db *harmonydb.DB, sc *ffi.SealCalls, maxTrees int) *TreeRCTask { - return &TreeRCTask{ - sp: sp, - db: db, - sc: sc, - - max: maxTrees, - } -} - -func (t *TreeRCTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - ctx := context.Background() - - var sectorParamsArr []struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - RegSealProof abi.RegisteredSealProof `db:"reg_seal_proof"` - CommD string `db:"tree_d_cid"` - } - - err = t.db.Select(ctx, §orParamsArr, ` - SELECT sp_id, sector_number, reg_seal_proof, tree_d_cid - FROM sectors_sdr_pipeline - WHERE task_id_tree_c = $1 AND task_id_tree_r = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("getting sector params: %w", err) - } - - if len(sectorParamsArr) != 1 { - return false, xerrors.Errorf("expected 1 sector params, got %d", len(sectorParamsArr)) - } - sectorParams := sectorParamsArr[0] - - commd, err := cid.Parse(sectorParams.CommD) - if err != nil { - return false, xerrors.Errorf("parsing unsealed CID: %w", err) - } - - sref := storiface.SectorRef{ - ID: abi.SectorID{ - Miner: abi.ActorID(sectorParams.SpID), - Number: abi.SectorNumber(sectorParams.SectorNumber), - }, - ProofType: sectorParams.RegSealProof, - } - - // R / C - sealed, unsealed, err := t.sc.TreeRC(ctx, &taskID, sref, commd) - if err != nil { - return false, xerrors.Errorf("computing tree r and c: %w", err) - } - - if unsealed != commd { - return false, xerrors.Errorf("commd %s does match unsealed %s", commd.String(), unsealed.String()) - } - - // todo synth porep - - // todo porep challenge check - - n, err := 
t.db.Exec(ctx, `UPDATE sectors_sdr_pipeline - SET after_tree_r = true, after_tree_c = true, tree_r_cid = $3, task_id_tree_r = NULL, task_id_tree_c = NULL - WHERE sp_id = $1 AND sector_number = $2`, - sectorParams.SpID, sectorParams.SectorNumber, sealed) - if err != nil { - return false, xerrors.Errorf("store sdr-trees success: updating pipeline: %w", err) - } - if n != 1 { - return false, xerrors.Errorf("store sdr-trees success: updated %d rows", n) - } - - return true, nil -} - -func (t *TreeRCTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - var tasks []struct { - TaskID harmonytask.TaskID `db:"task_id_tree_c"` - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - StorageID string `db:"storage_id"` - } - - if storiface.FTCache != 4 { - panic("storiface.FTCache != 4") - } - - ctx := context.Background() - - indIDs := make([]int64, len(ids)) - for i, id := range ids { - indIDs[i] = int64(id) - } - - err := t.db.Select(ctx, &tasks, ` - SELECT p.task_id_tree_c, p.sp_id, p.sector_number, l.storage_id FROM sectors_sdr_pipeline p - INNER JOIN sector_location l ON p.sp_id = l.miner_id AND p.sector_number = l.sector_num - WHERE task_id_tree_r = ANY ($1) AND l.sector_filetype = 4 -`, indIDs) - if err != nil { - return nil, xerrors.Errorf("getting tasks: %w", err) - } - - ls, err := t.sc.LocalStorage(ctx) - if err != nil { - return nil, xerrors.Errorf("getting local storage: %w", err) - } - - acceptables := map[harmonytask.TaskID]bool{} - - for _, t := range ids { - acceptables[t] = true - } - - for _, t := range tasks { - if _, ok := acceptables[t.TaskID]; !ok { - continue - } - - for _, l := range ls { - if string(l.ID) == t.StorageID { - return &t.TaskID, nil - } - } - } - - return nil, nil -} - -func (t *TreeRCTask) TypeDetails() harmonytask.TaskTypeDetails { - ssize := abi.SectorSize(32 << 30) // todo task details needs taskID to get correct sector size - if isDevnet { - ssize = abi.SectorSize(2 
<< 20) - } - gpu := 1.0 - ram := uint64(8 << 30) - if isDevnet { - gpu = 0 - ram = 512 << 20 - } - - return harmonytask.TaskTypeDetails{ - Max: t.max, - Name: "TreeRC", - Cost: resources.Resources{ - Cpu: 1, - Gpu: gpu, - Ram: ram, - Storage: t.sc.Storage(t.taskToSector, storiface.FTSealed, storiface.FTCache, ssize, storiface.PathSealing, paths.MinFreeStoragePercentage), - }, - MaxFailures: 3, - Follows: nil, - } -} - -func (t *TreeRCTask) Adder(taskFunc harmonytask.AddTaskFunc) { - t.sp.pollers[pollerTreeRC].Set(taskFunc) -} - -func (t *TreeRCTask) taskToSector(id harmonytask.TaskID) (ffi.SectorRef, error) { - var refs []ffi.SectorRef - - err := t.db.Select(context.Background(), &refs, `SELECT sp_id, sector_number, reg_seal_proof FROM sectors_sdr_pipeline WHERE task_id_tree_r = $1`, id) - if err != nil { - return ffi.SectorRef{}, xerrors.Errorf("getting sector ref: %w", err) - } - - if len(refs) != 1 { - return ffi.SectorRef{}, xerrors.Errorf("expected 1 sector ref, got %d", len(refs)) - } - - return refs[0], nil -} - -var _ harmonytask.TaskInterface = &TreeRCTask{} diff --git a/curiosrc/web/api/apihelper/apihelper.go b/curiosrc/web/api/apihelper/apihelper.go deleted file mode 100644 index 4729d66a44a..00000000000 --- a/curiosrc/web/api/apihelper/apihelper.go +++ /dev/null @@ -1,19 +0,0 @@ -package apihelper - -import ( - "net/http" - "runtime/debug" - - logging "github.com/ipfs/go-log/v2" -) - -var log = logging.Logger("cu/web/apihelper") - -func OrHTTPFail(w http.ResponseWriter, err error) { - if err != nil { - w.WriteHeader(500) - _, _ = w.Write([]byte(err.Error())) - log.Errorw("http fail", "err", err, "stack", string(debug.Stack())) - panic(err) - } -} diff --git a/curiosrc/web/api/config/config.go b/curiosrc/web/api/config/config.go deleted file mode 100644 index 414ee7e6ff6..00000000000 --- a/curiosrc/web/api/config/config.go +++ /dev/null @@ -1,202 +0,0 @@ -package config - -import ( - "bytes" - "context" - "encoding/json" - "net/http" - "reflect" - "time" 
- - "github.com/BurntSushi/toml" - "github.com/gorilla/mux" - "github.com/invopop/jsonschema" - logging "github.com/ipfs/go-log/v2" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/curiosrc/web/api/apihelper" - "github.com/filecoin-project/lotus/node/config" -) - -var log = logging.Logger("curio/web/config") - -type cfg struct { - *deps.Deps -} - -func Routes(r *mux.Router, deps *deps.Deps) { - c := &cfg{deps} - // At menu.html: - r.Methods("GET").Path("/layers").HandlerFunc(c.getLayers) - r.Methods("GET").Path("/topo").HandlerFunc(c.topo) - - // At edit.html: - r.Methods("GET").Path("/schema").HandlerFunc(getSch) - r.Methods("GET").Path("/layers/{layer}").HandlerFunc(c.getLayer) - r.Methods("POST").Path("/addlayer").HandlerFunc(c.addLayer) - r.Methods("POST").Path("/layers/{layer}").HandlerFunc(c.setLayer) - r.Methods("GET").Path("/default").HandlerFunc(c.def) -} - -func (c *cfg) addLayer(w http.ResponseWriter, r *http.Request) { - var layer struct { - Name string - } - apihelper.OrHTTPFail(w, json.NewDecoder(r.Body).Decode(&layer)) - ct, err := c.DB.Exec(context.Background(), `INSERT INTO harmony_config (title, config) VALUES ($1, $2)`, layer.Name, "") - apihelper.OrHTTPFail(w, err) - if ct != 1 { - w.WriteHeader(http.StatusConflict) - _, err = w.Write([]byte("Layer already exists")) - if err != nil { - log.Errorf("Failed to write response: %s", err) - } - return - } - w.WriteHeader(200) -} - -func getSch(w http.ResponseWriter, r *http.Request) { - ref := jsonschema.Reflector{ - Mapper: func(i reflect.Type) *jsonschema.Schema { - if i == reflect.TypeOf(config.Duration(time.Second)) { - return &jsonschema.Schema{ - Type: "string", - Format: "duration", - } - } - return nil - }, - } - sch := ref.Reflect(config.CurioConfig{}) - // add comments - for k, doc := range config.Doc { - item, ok := sch.Definitions[k] - if !ok { - continue - } - for _, line := range doc { - item, ok := item.Properties.Get(line.Name) - if !ok { - 
continue - } - if line.Comment != "" { - extra := make(map[string]any) - type options struct { - InfoText string `json:"infoText"` - } - opt := options{ - InfoText: line.Comment, - } - extra["options"] = opt - item.Extras = extra - } - } - } - - var allOpt func(s *jsonschema.Schema) - allOpt = func(s *jsonschema.Schema) { - s.Required = []string{} - for _, v := range s.Definitions { - v.Required = []string{} - - allOpt(v) - } - } - allOpt(sch) - - apihelper.OrHTTPFail(w, json.NewEncoder(w).Encode(sch)) -} - -func (c *cfg) getLayers(w http.ResponseWriter, r *http.Request) { - var layers []string - apihelper.OrHTTPFail(w, c.DB.Select(context.Background(), &layers, `SELECT title FROM harmony_config ORDER BY title`)) - apihelper.OrHTTPFail(w, json.NewEncoder(w).Encode(layers)) -} - -func (c *cfg) getLayer(w http.ResponseWriter, r *http.Request) { - var layer string - apihelper.OrHTTPFail(w, c.DB.QueryRow(context.Background(), `SELECT config FROM harmony_config WHERE title = $1`, mux.Vars(r)["layer"]).Scan(&layer)) - - // Read the TOML into a struct - configStruct := map[string]any{} // NOT CurioConfig b/c we want to preserve unsets - _, err := toml.Decode(layer, &configStruct) - apihelper.OrHTTPFail(w, err) - - // Encode the struct as JSON - jsonData, err := json.Marshal(configStruct) - apihelper.OrHTTPFail(w, err) - - // Write the JSON response - w.Header().Set("Content-Type", "application/json") - _, err = w.Write(jsonData) - apihelper.OrHTTPFail(w, err) -} - -func (c *cfg) setLayer(w http.ResponseWriter, r *http.Request) { - layer := mux.Vars(r)["layer"] - var configStruct map[string]any - dec := json.NewDecoder(r.Body) - dec.UseNumber() // JSON lib by default treats number is float64() - apihelper.OrHTTPFail(w, dec.Decode(&configStruct)) - - //Encode the struct as TOML - var tomlData bytes.Buffer - err := toml.NewEncoder(&tomlData).Encode(configStruct) - apihelper.OrHTTPFail(w, err) - - configStr := tomlData.String() - - // Generate a full commented string if this 
is base layer - if layer == "base" { - // Parse the into CurioConfig TOML - curioCfg := config.DefaultCurioConfig() - _, err = deps.LoadConfigWithUpgrades(tomlData.String(), curioCfg) - apihelper.OrHTTPFail(w, err) - cb, err := config.ConfigUpdate(curioCfg, config.DefaultCurioConfig(), config.Commented(true), config.DefaultKeepUncommented(), config.NoEnv()) - apihelper.OrHTTPFail(w, err) - configStr = string(cb) - } - - //Write the TOML to the database - _, err = c.DB.Exec(context.Background(), `INSERT INTO harmony_config (title, config) VALUES ($1, $2) ON CONFLICT (title) DO UPDATE SET config = $2`, layer, configStr) - apihelper.OrHTTPFail(w, err) -} - -func (c *cfg) topo(w http.ResponseWriter, r *http.Request) { - var topology []struct { - Server string `db:"server"` - CPU int `db:"cpu"` - GPU int `db:"gpu"` - RAM int `db:"ram"` - LayersCSV string `db:"layers"` - TasksCSV string `db:"tasks"` - } - apihelper.OrHTTPFail(w, c.DB.Select(context.Background(), &topology, ` - SELECT - m.host_and_port as server, - cpu, gpu, ram, layers, tasks - FROM harmony_machines m JOIN harmony_machine_details d ON m.id=d.machine_id - ORDER BY server`)) - w.Header().Set("Content-Type", "application/json") - apihelper.OrHTTPFail(w, json.NewEncoder(w).Encode(topology)) -} - -func (c *cfg) def(w http.ResponseWriter, r *http.Request) { - cb, err := config.ConfigUpdate(config.DefaultCurioConfig(), nil, config.Commented(false), config.DefaultKeepUncommented(), config.NoEnv()) - apihelper.OrHTTPFail(w, err) - - // Read the TOML into a struct - configStruct := map[string]any{} // NOT CurioConfig b/c we want to preserve unsets - _, err = toml.Decode(string(cb), &configStruct) - apihelper.OrHTTPFail(w, err) - - // Encode the struct as JSON - jsonData, err := json.Marshal(configStruct) - apihelper.OrHTTPFail(w, err) - - // Write the JSON response - w.Header().Set("Content-Type", "application/json") - _, err = w.Write(jsonData) - apihelper.OrHTTPFail(w, err) -} diff --git 
a/curiosrc/web/api/routes.go b/curiosrc/web/api/routes.go deleted file mode 100644 index b0dec6d5069..00000000000 --- a/curiosrc/web/api/routes.go +++ /dev/null @@ -1,17 +0,0 @@ -// Package api provides the HTTP API for the lotus curio web gui. -package api - -import ( - "github.com/gorilla/mux" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/curiosrc/web/api/config" - "github.com/filecoin-project/lotus/curiosrc/web/api/sector" - "github.com/filecoin-project/lotus/curiosrc/web/api/webrpc" -) - -func Routes(r *mux.Router, deps *deps.Deps) { - webrpc.Routes(r.PathPrefix("/webrpc").Subrouter(), deps) - config.Routes(r.PathPrefix("/config").Subrouter(), deps) - sector.Routes(r.PathPrefix("/sector").Subrouter(), deps) -} diff --git a/curiosrc/web/api/sector/sector.go b/curiosrc/web/api/sector/sector.go deleted file mode 100644 index 31113fd4345..00000000000 --- a/curiosrc/web/api/sector/sector.go +++ /dev/null @@ -1,375 +0,0 @@ -package sector - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "sync" - "time" - - "github.com/docker/go-units" - "github.com/gorilla/mux" - "github.com/samber/lo" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin/v9/market" - - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/cli/spcli" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/curiosrc/web/api/apihelper" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -const verifiedPowerGainMul = 9 - -type cfg struct { - *deps.Deps -} - -func Routes(r *mux.Router, deps *deps.Deps) { - c := &cfg{deps} - // At menu.html: - r.Methods("GET").Path("/all").HandlerFunc(c.getSectors) - 
r.Methods("POST").Path("/terminate").HandlerFunc(c.terminateSectors) -} - -func (c *cfg) terminateSectors(w http.ResponseWriter, r *http.Request) { - var in []struct { - MinerID int - Sector int - } - apihelper.OrHTTPFail(w, json.NewDecoder(r.Body).Decode(&in)) - toDel := map[int][]int{} - for _, s := range in { - toDel[s.MinerID] = append(toDel[s.MinerID], s.Sector) - } - - for minerInt, sectors := range toDel { - maddr, err := address.NewIDAddress(uint64(minerInt)) - apihelper.OrHTTPFail(w, err) - mi, err := c.Full.StateMinerInfo(r.Context(), maddr, types.EmptyTSK) - apihelper.OrHTTPFail(w, err) - _, err = spcli.TerminateSectors(r.Context(), c.Full, maddr, sectors, mi.Worker) - apihelper.OrHTTPFail(w, err) - for _, sectorNumber := range sectors { - id := abi.SectorID{Miner: abi.ActorID(minerInt), Number: abi.SectorNumber(sectorNumber)} - apihelper.OrHTTPFail(w, c.Stor.Remove(r.Context(), id, storiface.FTAll, true, nil)) - } - } -} - -func (c *cfg) getSectors(w http.ResponseWriter, r *http.Request) { - // TODO get sector info from chain and from database, then fold them together - // and return the result. - type sector struct { - MinerID int64 `db:"miner_id"` - SectorNum int64 `db:"sector_num"` - SectorFiletype int `db:"sector_filetype" json:"-"` // Useless? - HasSealed bool - HasUnsealed bool - HasSnap bool - ExpiresAt abi.ChainEpoch // map to Duration - IsOnChain bool - IsFilPlus bool - SealInfo string - Proving bool - Flag bool - DealWeight string - Deals string - //StorageID string `db:"storage_id"` // map to serverName - // Activation abi.ChainEpoch // map to time.Time. 
advanced view only - // DealIDs []abi.DealID // advanced view only - //ExpectedDayReward abi.TokenAmount - //SealProof abi.RegisteredSealProof - } - - type piece struct { - Size int64 `db:"piece_size"` - DealID uint64 `db:"f05_deal_id"` - Proposal json.RawMessage `db:"f05_deal_proposal"` - Manifest json.RawMessage `db:"direct_piece_activation_manifest"` - Miner int64 `db:"sp_id"` - Sector int64 `db:"sector_number"` - } - var sectors []sector - var pieces []piece - apihelper.OrHTTPFail(w, c.DB.Select(r.Context(), §ors, `SELECT - miner_id, sector_num, SUM(sector_filetype) as sector_filetype - FROM sector_location WHERE sector_filetype != 32 - GROUP BY miner_id, sector_num - ORDER BY miner_id, sector_num`)) - minerToAddr := map[int64]address.Address{} - head, err := c.Full.ChainHead(r.Context()) - apihelper.OrHTTPFail(w, err) - - type sectorID struct { - mID int64 - sNum uint64 - } - sectorIdx := map[sectorID]int{} - for i, s := range sectors { - sectors[i].HasSealed = s.SectorFiletype&int(storiface.FTSealed) != 0 || s.SectorFiletype&int(storiface.FTUpdate) != 0 - sectors[i].HasUnsealed = s.SectorFiletype&int(storiface.FTUnsealed) != 0 - sectors[i].HasSnap = s.SectorFiletype&int(storiface.FTUpdate) != 0 - sectorIdx[sectorID{s.MinerID, uint64(s.SectorNum)}] = i - if _, ok := minerToAddr[s.MinerID]; !ok { - minerToAddr[s.MinerID], err = address.NewIDAddress(uint64(s.MinerID)) - apihelper.OrHTTPFail(w, err) - } - } - - // Get all pieces - apihelper.OrHTTPFail(w, c.DB.Select(r.Context(), &pieces, `SELECT - sp_id, - sector_number, - piece_size, - COALESCE(f05_deal_id, 0) AS f05_deal_id, - f05_deal_proposal, - direct_piece_activation_manifest - FROM sectors_sdr_initial_pieces - ORDER BY sp_id, sector_number`)) - pieceIndex := map[sectorID][]int{} - for i, piece := range pieces { - piece := piece - cur := pieceIndex[sectorID{mID: piece.Miner, sNum: uint64(piece.Sector)}] - pieceIndex[sectorID{mID: piece.Miner, sNum: uint64(piece.Sector)}] = append(cur, i) - } - - for 
minerID, maddr := range minerToAddr { - onChainInfo, err := c.getCachedSectorInfo(w, r, maddr, head.Key()) - apihelper.OrHTTPFail(w, err) - for _, chainy := range onChainInfo { - st := chainy.onChain - if i, ok := sectorIdx[sectorID{minerID, uint64(st.SectorNumber)}]; ok { - sectors[i].IsOnChain = true - sectors[i].ExpiresAt = st.Expiration - sectors[i].IsFilPlus = st.VerifiedDealWeight.GreaterThan(big.NewInt(0)) - if ss, err := st.SealProof.SectorSize(); err == nil { - sectors[i].SealInfo = ss.ShortString() - } - sectors[i].Proving = chainy.active - if st.Expiration < head.Height() { - sectors[i].Flag = true // Flag expired sectors - } - - dw, vp := .0, .0 - f05, ddo := 0, 0 - var pi []piece - if j, ok := pieceIndex[sectorID{sectors[i].MinerID, uint64(sectors[i].SectorNum)}]; ok { - for _, k := range j { - pi = append(pi, pieces[k]) - } - } - estimate := st.Expiration-st.Activation <= 0 || sectors[i].HasSnap - if estimate { - for _, p := range pi { - if p.Proposal != nil { - var prop *market.DealProposal - apihelper.OrHTTPFail(w, json.Unmarshal(p.Proposal, &prop)) - dw += float64(prop.PieceSize) - if prop.VerifiedDeal { - vp += float64(prop.PieceSize) * verifiedPowerGainMul - } - f05++ - } - if p.Manifest != nil { - var pam *miner.PieceActivationManifest - apihelper.OrHTTPFail(w, json.Unmarshal(p.Manifest, &pam)) - dw += float64(pam.Size) - if pam.VerifiedAllocationKey != nil { - vp += float64(pam.Size) * verifiedPowerGainMul - } - ddo++ - } - } - } else { - rdw := big.Add(st.DealWeight, st.VerifiedDealWeight) - dw = float64(big.Div(rdw, big.NewInt(int64(st.Expiration-st.Activation))).Uint64()) - vp = float64(big.Div(big.Mul(st.VerifiedDealWeight, big.NewInt(verifiedPowerGainMul)), big.NewInt(int64(st.Expiration-st.Activation))).Uint64()) - // DDO sectors don't have deal info on chain - for _, p := range pi { - if p.Manifest != nil { - ddo++ - } - if p.Proposal != nil { - f05++ - } - } - } - sectors[i].DealWeight = "CC" - if dw > 0 { - sectors[i].DealWeight = 
fmt.Sprintf("%s", units.BytesSize(dw)) - } - if vp > 0 { - sectors[i].DealWeight = fmt.Sprintf("%s", units.BytesSize(vp)) - } - sectors[i].Deals = fmt.Sprintf("Market: %d, DDO: %d", f05, ddo) - } else { - // sector is on chain but not in db - s := sector{ - MinerID: minerID, - SectorNum: int64(chainy.onChain.SectorNumber), - IsOnChain: true, - ExpiresAt: chainy.onChain.Expiration, - IsFilPlus: chainy.onChain.VerifiedDealWeight.GreaterThan(big.NewInt(0)), - Proving: chainy.active, - Flag: true, // All such sectors should be flagged to be terminated - } - if ss, err := chainy.onChain.SealProof.SectorSize(); err == nil { - s.SealInfo = ss.ShortString() - } - sectors = append(sectors, s) - } - /* - info, err := c.Full.StateSectorGetInfo(r.Context(), minerToAddr[s], abi.SectorNumber(uint64(sectors[i].SectorNum)), headKey) - if err != nil { - sectors[i].IsValid = false - continue - }*/ - } - } - - // Add deal details to sectors which are not on chain - for i := range sectors { - if !sectors[i].IsOnChain { - var pi []piece - dw, vp := .0, .0 - f05, ddo := 0, 0 - - // Find if there are any deals for this sector - if j, ok := pieceIndex[sectorID{sectors[i].MinerID, uint64(sectors[i].SectorNum)}]; ok { - for _, k := range j { - pi = append(pi, pieces[k]) - } - } - - if len(pi) > 0 { - for _, p := range pi { - if p.Proposal != nil { - var prop *market.DealProposal - apihelper.OrHTTPFail(w, json.Unmarshal(p.Proposal, &prop)) - dw += float64(prop.PieceSize) - if prop.VerifiedDeal { - vp += float64(prop.PieceSize) * verifiedPowerGainMul - } - f05++ - } - if p.Manifest != nil { - var pam *miner.PieceActivationManifest - apihelper.OrHTTPFail(w, json.Unmarshal(p.Manifest, &pam)) - dw += float64(pam.Size) - if pam.VerifiedAllocationKey != nil { - vp += float64(pam.Size) * verifiedPowerGainMul - } - ddo++ - } - } - } - sectors[i].IsFilPlus = vp > 0 - if dw > 0 { - sectors[i].DealWeight = fmt.Sprintf("%s", units.BytesSize(dw)) - } else if vp > 0 { - sectors[i].DealWeight = 
fmt.Sprintf("%s", units.BytesSize(vp)) - } else { - sectors[i].DealWeight = "CC" - } - sectors[i].Deals = fmt.Sprintf("Market: %d, DDO: %d", f05, ddo) - } - } - apihelper.OrHTTPFail(w, json.NewEncoder(w).Encode(map[string]any{"data": sectors})) -} - -type sectorInfo struct { - onChain *miner.SectorOnChainInfo - active bool -} - -type sectorCacheEntry struct { - sectors []sectorInfo - loading chan struct{} - time.Time -} - -const cacheTimeout = 30 * time.Minute - -var mx sync.Mutex -var sectorInfoCache = map[address.Address]sectorCacheEntry{} - -// getCachedSectorInfo returns the sector info for the given miner address, -// either from the cache or by querying the chain. -// Cache can be invalidated by setting the "sector_refresh" cookie to "true". -// This is thread-safe. -// Parallel requests share the chain's first response. -func (c *cfg) getCachedSectorInfo(w http.ResponseWriter, r *http.Request, maddr address.Address, headKey types.TipSetKey) ([]sectorInfo, error) { - mx.Lock() - v, ok := sectorInfoCache[maddr] - mx.Unlock() - - if ok && v.loading != nil { - <-v.loading - mx.Lock() - v, ok = sectorInfoCache[maddr] - mx.Unlock() - } - - shouldRefreshCookie, found := lo.Find(r.Cookies(), func(item *http.Cookie) bool { return item.Name == "sector_refresh" }) - shouldRefresh := found && shouldRefreshCookie.Value == "true" - w.Header().Set("Set-Cookie", "sector_refresh=; Max-Age=0; Path=/") - - if !ok || time.Since(v.Time) > cacheTimeout || shouldRefresh { - v = sectorCacheEntry{nil, make(chan struct{}), time.Now()} - mx.Lock() - sectorInfoCache[maddr] = v - mx.Unlock() - - // Intentionally not using the context from the request, as this is a cache - onChainInfo, err := c.Full.StateMinerSectors(context.Background(), maddr, nil, headKey) - if err != nil { - mx.Lock() - delete(sectorInfoCache, maddr) - close(v.loading) - mx.Unlock() - return nil, err - } - active, err := c.Full.StateMinerActiveSectors(context.Background(), maddr, headKey) - if err != nil { - 
mx.Lock() - delete(sectorInfoCache, maddr) - close(v.loading) - mx.Unlock() - return nil, err - } - activebf := bitfield.New() - for i := range active { - activebf.Set(uint64(active[i].SectorNumber)) - } - infos := make([]sectorInfo, len(onChainInfo)) - for i, info := range onChainInfo { - info := info - set, err := activebf.IsSet(uint64(info.SectorNumber)) - if err != nil { - mx.Lock() - delete(sectorInfoCache, maddr) - close(v.loading) - mx.Unlock() - return nil, err - } - infos[i] = sectorInfo{ - onChain: info, - active: set, - } - } - mx.Lock() - sectorInfoCache[maddr] = sectorCacheEntry{infos, nil, time.Now()} - close(v.loading) - mx.Unlock() - return infos, nil - } - return v.sectors, nil -} diff --git a/curiosrc/web/api/webrpc/routes.go b/curiosrc/web/api/webrpc/routes.go deleted file mode 100644 index f5975e70147..00000000000 --- a/curiosrc/web/api/webrpc/routes.go +++ /dev/null @@ -1,37 +0,0 @@ -package webrpc - -import ( - "context" - - "github.com/gorilla/mux" - logging "github.com/ipfs/go-log/v2" - - "github.com/filecoin-project/go-jsonrpc" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/cmd/curio/deps" -) - -var log = logging.Logger("webrpc") - -type WebRPC struct { - deps *deps.Deps -} - -func (a *WebRPC) Version(context.Context) (string, error) { - return build.UserVersion(), nil -} - -func (a *WebRPC) BlockDelaySecs(context.Context) (uint64, error) { - return build.BlockDelaySecs, nil -} - -func Routes(r *mux.Router, deps *deps.Deps) { - handler := &WebRPC{ - deps: deps, - } - - rpcSrv := jsonrpc.NewServer() - rpcSrv.Register("CurioWeb", handler) - r.Handle("/v0", rpcSrv) -} diff --git a/curiosrc/web/api/webrpc/sync_state.go b/curiosrc/web/api/webrpc/sync_state.go deleted file mode 100644 index 533a52be19b..00000000000 --- a/curiosrc/web/api/webrpc/sync_state.go +++ /dev/null @@ -1,183 +0,0 @@ -package webrpc - -import ( - "context" - "fmt" - "sort" - "sync" - "time" - - "github.com/BurntSushi/toml" - 
"golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api/client" - "github.com/filecoin-project/lotus/build" - cliutil "github.com/filecoin-project/lotus/cli/util" -) - -func forEachConfig[T any](a *WebRPC, cb func(name string, v T) error) error { - confs, err := a.loadConfigs(context.Background()) - if err != nil { - return err - } - - for name, tomlStr := range confs { // todo for-each-config - var info T - if err := toml.Unmarshal([]byte(tomlStr), &info); err != nil { - return xerrors.Errorf("unmarshaling %s config: %w", name, err) - } - - if err := cb(name, info); err != nil { - return xerrors.Errorf("cb: %w", err) - } - } - - return nil -} - -func (a *WebRPC) loadConfigs(ctx context.Context) (map[string]string, error) { - //err := db.QueryRow(cctx.Context, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text) - - rows, err := a.deps.DB.Query(ctx, `SELECT title, config FROM harmony_config`) - if err != nil { - return nil, xerrors.Errorf("getting db configs: %w", err) - } - - configs := make(map[string]string) - for rows.Next() { - var title, config string - if err := rows.Scan(&title, &config); err != nil { - return nil, xerrors.Errorf("scanning db configs: %w", err) - } - configs[title] = config - } - - return configs, nil -} - -type RpcInfo struct { - Address string - CLayers []string - Reachable bool - SyncState string - Version string -} - -func (a *WebRPC) SyncerState(ctx context.Context) ([]RpcInfo, error) { - type minimalApiInfo struct { - Apis struct { - ChainApiInfo []string - } - } - - rpcInfos := map[string]minimalApiInfo{} // config name -> api info - confNameToAddr := map[string]string{} // config name -> api address - - err := forEachConfig[minimalApiInfo](a, func(name string, info minimalApiInfo) error { - if len(info.Apis.ChainApiInfo) == 0 { - return nil - } - - rpcInfos[name] = info - - for _, addr := range info.Apis.ChainApiInfo { - ai := cliutil.ParseApiInfo(addr) - confNameToAddr[name] = ai.Addr - } - - return nil 
- }) - if err != nil { - return nil, err - } - - dedup := map[string]bool{} // for dedup by address - - infos := map[string]RpcInfo{} // api address -> rpc info - var infosLk sync.Mutex - - var wg sync.WaitGroup - for _, info := range rpcInfos { - ai := cliutil.ParseApiInfo(info.Apis.ChainApiInfo[0]) - if dedup[ai.Addr] { - continue - } - dedup[ai.Addr] = true - wg.Add(1) - go func() { - defer wg.Done() - var clayers []string - for layer, a := range confNameToAddr { - if a == ai.Addr { - clayers = append(clayers, layer) - } - } - - myinfo := RpcInfo{ - Address: ai.Addr, - Reachable: false, - CLayers: clayers, - } - defer func() { - infosLk.Lock() - defer infosLk.Unlock() - infos[ai.Addr] = myinfo - }() - da, err := ai.DialArgs("v1") - if err != nil { - log.Warnw("DialArgs", "error", err) - return - } - - ah := ai.AuthHeader() - - v1api, closer, err := client.NewFullNodeRPCV1(ctx, da, ah) - if err != nil { - log.Warnf("Not able to establish connection to node with addr: %s", ai.Addr) - return - } - defer closer() - - ver, err := v1api.Version(ctx) - if err != nil { - log.Warnw("Version", "error", err) - return - } - - head, err := v1api.ChainHead(ctx) - if err != nil { - log.Warnw("ChainHead", "error", err) - return - } - - var syncState string - switch { - case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*3/2): // within 1.5 epochs - syncState = "ok" - case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*5): // within 5 epochs - syncState = fmt.Sprintf("slow (%s behind)", time.Since(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)) - default: - syncState = fmt.Sprintf("behind (%s behind)", time.Since(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)) - } - - myinfo = RpcInfo{ - Address: ai.Addr, - CLayers: clayers, - Reachable: true, - Version: ver.Version, - SyncState: syncState, - } - }() - } - wg.Wait() - - var infoList []RpcInfo - for _, i := range infos { - infoList = 
append(infoList, i) - } - sort.Slice(infoList, func(i, j int) bool { - return infoList[i].Address < infoList[j].Address - }) - - return infoList, nil -} diff --git a/curiosrc/web/hapi/robust_rpc.go b/curiosrc/web/hapi/robust_rpc.go deleted file mode 100644 index c10b43a03f3..00000000000 --- a/curiosrc/web/hapi/robust_rpc.go +++ /dev/null @@ -1,102 +0,0 @@ -package hapi - -import ( - "context" - "time" - - lru "github.com/hashicorp/golang-lru/v2" - blocks "github.com/ipfs/go-block-format" - - "github.com/filecoin-project/lotus/api/client" - "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/store" - cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/lib/must" -) - -var ChainBlockCache = must.One(lru.New[blockstore.MhString, blocks.Block](4096)) - -func (a *app) watchRpc() { - ticker := time.NewTicker(watchInterval) - for { - err := a.updateRpc(context.TODO()) - if err != nil { - log.Errorw("updating rpc info", "error", err) - } - select { - case <-ticker.C: - } - } -} - -type minimalApiInfo struct { - Apis struct { - ChainApiInfo []string - } -} - -func (a *app) updateRpc(ctx context.Context) error { - rpcInfos := map[string]minimalApiInfo{} // config name -> api info - confNameToAddr := map[string]string{} // config name -> api address - - err := forEachConfig[minimalApiInfo](a, func(name string, info minimalApiInfo) error { - if len(info.Apis.ChainApiInfo) == 0 { - return nil - } - - rpcInfos[name] = info - - for _, addr := range info.Apis.ChainApiInfo { - ai := cliutil.ParseApiInfo(addr) - confNameToAddr[name] = ai.Addr - } - - return nil - }) - if err != nil { - return err - } - - apiInfos := map[string][]byte{} // api address -> token - - // for dedup by address - for _, info := range rpcInfos { - ai := cliutil.ParseApiInfo(info.Apis.ChainApiInfo[0]) - apiInfos[ai.Addr] = ai.Token - } - - a.rpcInfoLk.Lock() - - // todo improve this shared rpc logic - if a.workingApi == nil { - for 
addr, token := range apiInfos { - ai := cliutil.APIInfo{ - Addr: addr, - Token: token, - } - - da, err := ai.DialArgs("v1") - if err != nil { - continue - } - - ah := ai.AuthHeader() - - v1api, closer, err := client.NewFullNodeRPCV1(ctx, da, ah) - if err != nil { - continue - } - go func() { - <-ctx.Done() - closer() - }() - - a.workingApi = v1api - a.stor = store.ActorStore(ctx, blockstore.NewReadCachedBlockstore(blockstore.NewAPIBlockstore(a.workingApi), ChainBlockCache)) - } - } - - a.rpcInfoLk.Unlock() - - return nil -} diff --git a/curiosrc/web/hapi/routes.go b/curiosrc/web/hapi/routes.go deleted file mode 100644 index 61724ec0ae5..00000000000 --- a/curiosrc/web/hapi/routes.go +++ /dev/null @@ -1,58 +0,0 @@ -package hapi - -import ( - "embed" - "text/template" - - "github.com/gorilla/mux" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/cmd/curio/deps" -) - -//go:embed web/* -var templateFS embed.FS - -func Routes(r *mux.Router, deps *deps.Deps) error { - t, err := makeTemplate().ParseFS(templateFS, "web/*") - if err != nil { - return xerrors.Errorf("parse templates: %w", err) - } - - a := &app{ - db: deps.DB, - t: t, - } - - go a.watchRpc() - go a.watchActor() - - // index page (simple info) - r.HandleFunc("/simpleinfo/actorsummary", a.actorSummary) - r.HandleFunc("/simpleinfo/machines", a.indexMachines) - r.HandleFunc("/simpleinfo/tasks", a.indexTasks) - r.HandleFunc("/simpleinfo/taskhistory", a.indexTasksHistory) - r.HandleFunc("/simpleinfo/pipeline-porep", a.indexPipelinePorep) - - // pipeline-porep page - r.HandleFunc("/pipeline-porep/sectors", a.pipelinePorepSectors) - - // node info page - r.HandleFunc("/node/{id}", a.nodeInfo) - - // sector info page - r.HandleFunc("/sector/{sp}/{id}", a.sectorInfo) - return nil -} - -func makeTemplate() *template.Template { - return template.New("").Funcs(template.FuncMap{ - "toHumanBytes": func(b int64) string { - 
return types.SizeStr(types.NewInt(uint64(b))) - }, - }) -} - -var log = logging.Logger("curio/web") diff --git a/curiosrc/web/hapi/simpleinfo.go b/curiosrc/web/hapi/simpleinfo.go deleted file mode 100644 index 351137db40e..00000000000 --- a/curiosrc/web/hapi/simpleinfo.go +++ /dev/null @@ -1,962 +0,0 @@ -package hapi - -import ( - "bytes" - "context" - "fmt" - "net/http" - "os" - "sort" - "strconv" - "strings" - "sync" - "text/template" - "time" - - "github.com/dustin/go-humanize" - "github.com/gorilla/mux" - "github.com/samber/lo" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/api/v1api" - "github.com/filecoin-project/lotus/chain/actors/adt" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/must" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type app struct { - db *harmonydb.DB - t *template.Template - - rpcInfoLk sync.Mutex - workingApi v1api.FullNode - stor adt.Store - - actorInfoLk sync.Mutex - actorInfos []actorInfo -} - -type actorInfo struct { - Address string - CLayers []string - - QualityAdjustedPower string - RawBytePower string - - ActorBalance, ActorAvailable, WorkerBalance string - - Win1, Win7, Win30 int64 - - Deadlines []actorDeadline -} - -type actorDeadline struct { - Empty bool - Current bool - Proven bool - PartFaulty bool - Faulty bool -} - -func (a *app) actorSummary(w http.ResponseWriter, r *http.Request) { - a.actorInfoLk.Lock() - defer a.actorInfoLk.Unlock() - - a.executeTemplate(w, "actor_summary", a.actorInfos) -} - -func (a *app) indexMachines(w http.ResponseWriter, r *http.Request) { - s, err := a.clusterMachineSummary(r.Context()) - if err != nil { - log.Errorf("cluster machine summary: %v", err) - http.Error(w, "internal server error", http.StatusInternalServerError) - return - } - - 
a.executeTemplate(w, "cluster_machines", s) -} - -func (a *app) indexTasks(w http.ResponseWriter, r *http.Request) { - s, err := a.clusterTaskSummary(r.Context()) - if err != nil { - log.Errorf("cluster task summary: %v", err) - http.Error(w, "internal server error", http.StatusInternalServerError) - return - } - - a.executeTemplate(w, "cluster_tasks", s) -} - -func (a *app) indexTasksHistory(w http.ResponseWriter, r *http.Request) { - s, err := a.clusterTaskHistorySummary(r.Context()) - if err != nil { - log.Errorf("cluster task history summary: %v", err) - http.Error(w, "internal server error", http.StatusInternalServerError) - return - } - - a.executeTemplate(w, "cluster_task_history", s) -} - -func (a *app) indexPipelinePorep(w http.ResponseWriter, r *http.Request) { - s, err := a.porepPipelineSummary(r.Context()) - if err != nil { - log.Errorf("porep pipeline summary: %v", err) - http.Error(w, "internal server error", http.StatusInternalServerError) - return - } - - a.executeTemplate(w, "pipeline_porep", s) -} - -func (a *app) nodeInfo(writer http.ResponseWriter, request *http.Request) { - params := mux.Vars(request) - - id, ok := params["id"] - if !ok { - http.Error(writer, "missing id", http.StatusBadRequest) - return - } - - intid, err := strconv.ParseInt(id, 10, 64) - if err != nil { - http.Error(writer, "invalid id", http.StatusBadRequest) - return - } - - mi, err := a.clusterNodeInfo(request.Context(), intid) - if err != nil { - log.Errorf("machine info: %v", err) - http.Error(writer, "internal server error", http.StatusInternalServerError) - return - } - - a.executePageTemplate(writer, "node_info", "Node Info", mi) -} - -func (a *app) sectorInfo(w http.ResponseWriter, r *http.Request) { - params := mux.Vars(r) - - id, ok := params["id"] - if !ok { - http.Error(w, "missing id", http.StatusBadRequest) - return - } - - intid, err := strconv.ParseInt(id, 10, 64) - if err != nil { - http.Error(w, "invalid id", http.StatusBadRequest) - return - } - - sp, ok 
:= params["sp"] - if !ok { - http.Error(w, "missing sp", http.StatusBadRequest) - return - } - - maddr, err := address.NewFromString(sp) - if err != nil { - http.Error(w, "invalid sp", http.StatusBadRequest) - return - } - - spid, err := address.IDFromAddress(maddr) - if err != nil { - http.Error(w, "invalid sp", http.StatusBadRequest) - return - } - - ctx := r.Context() - var tasks []PipelineTask - - err = a.db.Select(ctx, &tasks, `SELECT - sp_id, sector_number, - create_time, - task_id_sdr, after_sdr, - task_id_tree_d, after_tree_d, - task_id_tree_c, after_tree_c, - task_id_tree_r, after_tree_r, - task_id_precommit_msg, after_precommit_msg, - after_precommit_msg_success, seed_epoch, - task_id_porep, porep_proof, after_porep, - task_id_finalize, after_finalize, - task_id_move_storage, after_move_storage, - task_id_commit_msg, after_commit_msg, - after_commit_msg_success, - failed, failed_reason - FROM sectors_sdr_pipeline WHERE sp_id = $1 AND sector_number = $2`, spid, intid) - if err != nil { - http.Error(w, xerrors.Errorf("failed to fetch pipeline task info: %w", err).Error(), http.StatusInternalServerError) - return - } - - if len(tasks) == 0 { - http.Error(w, "sector not found", http.StatusInternalServerError) - return - } - - head, err := a.workingApi.ChainHead(ctx) - if err != nil { - http.Error(w, xerrors.Errorf("failed to fetch chain head: %w", err).Error(), http.StatusInternalServerError) - return - } - epoch := head.Height() - - mbf, err := a.getMinerBitfields(ctx, maddr, a.stor) - if err != nil { - http.Error(w, xerrors.Errorf("failed to load miner bitfields: %w", err).Error(), http.StatusInternalServerError) - return - } - - task := tasks[0] - - afterSeed := task.SeedEpoch != nil && *task.SeedEpoch <= int64(epoch) - - var sectorLocations []struct { - CanSeal, CanStore bool - FileType storiface.SectorFileType `db:"sector_filetype"` - StorageID string `db:"storage_id"` - Urls string `db:"urls"` - } - - err = a.db.Select(ctx, §orLocations, `SELECT 
p.can_seal, p.can_store, l.sector_filetype, l.storage_id, p.urls FROM sector_location l - JOIN storage_path p ON l.storage_id = p.storage_id - WHERE l.sector_num = $1 and l.miner_id = $2 ORDER BY p.can_seal, p.can_store, l.storage_id`, intid, spid) - if err != nil { - http.Error(w, xerrors.Errorf("failed to fetch sector locations: %w", err).Error(), http.StatusInternalServerError) - return - } - - type fileLocations struct { - StorageID string - Urls []string - } - - type locationTable struct { - PathType *string - PathTypeRowSpan int - - FileType *string - FileTypeRowSpan int - - Locations []fileLocations - } - locs := []locationTable{} - - for i, loc := range sectorLocations { - loc := loc - - urlList := strings.Split(loc.Urls, paths.URLSeparator) - - fLoc := fileLocations{ - StorageID: loc.StorageID, - Urls: urlList, - } - - var pathTypeStr *string - var fileTypeStr *string - pathTypeRowSpan := 1 - fileTypeRowSpan := 1 - - pathType := "None" - if loc.CanSeal && loc.CanStore { - pathType = "Seal/Store" - } else if loc.CanSeal { - pathType = "Seal" - } else if loc.CanStore { - pathType = "Store" - } - pathTypeStr = &pathType - - fileType := loc.FileType.String() - fileTypeStr = &fileType - - if i > 0 { - prevNonNilPathTypeLoc := i - 1 - for prevNonNilPathTypeLoc > 0 && locs[prevNonNilPathTypeLoc].PathType == nil { - prevNonNilPathTypeLoc-- - } - if *locs[prevNonNilPathTypeLoc].PathType == *pathTypeStr { - pathTypeRowSpan = 0 - pathTypeStr = nil - locs[prevNonNilPathTypeLoc].PathTypeRowSpan++ - // only if we have extended path type we may need to extend file type - - prevNonNilFileTypeLoc := i - 1 - for prevNonNilFileTypeLoc > 0 && locs[prevNonNilFileTypeLoc].FileType == nil { - prevNonNilFileTypeLoc-- - } - if *locs[prevNonNilFileTypeLoc].FileType == *fileTypeStr { - fileTypeRowSpan = 0 - fileTypeStr = nil - locs[prevNonNilFileTypeLoc].FileTypeRowSpan++ - } - } - } - - locTable := locationTable{ - PathType: pathTypeStr, - PathTypeRowSpan: pathTypeRowSpan, - 
FileType: fileTypeStr, - FileTypeRowSpan: fileTypeRowSpan, - Locations: []fileLocations{fLoc}, - } - - locs = append(locs, locTable) - - } - - // Pieces - type sectorPieceMeta struct { - PieceIndex int64 `db:"piece_index"` - PieceCid string `db:"piece_cid"` - PieceSize int64 `db:"piece_size"` - - DataUrl string `db:"data_url"` - DataRawSize int64 `db:"data_raw_size"` - DeleteOnFinalize bool `db:"data_delete_on_finalize"` - - F05PublishCid *string `db:"f05_publish_cid"` - F05DealID *int64 `db:"f05_deal_id"` - - DDOPam *string `db:"direct_piece_activation_manifest"` - - // display - StrPieceSize string `db:"-"` - StrDataRawSize string `db:"-"` - - // piece park - IsParkedPiece bool `db:"-"` - IsParkedPieceFound bool `db:"-"` - PieceParkID int64 `db:"-"` - PieceParkDataUrl string `db:"-"` - PieceParkCreatedAt time.Time `db:"-"` - PieceParkComplete bool `db:"-"` - PieceParkTaskID *int64 `db:"-"` - PieceParkCleanupTaskID *int64 `db:"-"` - } - var pieces []sectorPieceMeta - - err = a.db.Select(ctx, &pieces, `SELECT piece_index, piece_cid, piece_size, - data_url, data_raw_size, data_delete_on_finalize, - f05_publish_cid, f05_deal_id, direct_piece_activation_manifest FROM sectors_sdr_initial_pieces WHERE sp_id = $1 AND sector_number = $2`, spid, intid) - if err != nil { - http.Error(w, xerrors.Errorf("failed to fetch sector pieces: %w", err).Error(), http.StatusInternalServerError) - return - } - - for i := range pieces { - pieces[i].StrPieceSize = types.SizeStr(types.NewInt(uint64(pieces[i].PieceSize))) - pieces[i].StrDataRawSize = types.SizeStr(types.NewInt(uint64(pieces[i].DataRawSize))) - - id, isPiecePark := strings.CutPrefix(pieces[i].DataUrl, "pieceref:") - if !isPiecePark { - continue - } - - intID, err := strconv.ParseInt(id, 10, 64) - if err != nil { - log.Errorw("failed to parse piece park id", "id", id, "error", err) - continue - } - - var parkedPiece []struct { - // parked_piece_refs - PieceID int64 `db:"piece_id"` - DataUrl string `db:"data_url"` - - // 
parked_pieces - CreatedAt time.Time `db:"created_at"` - Complete bool `db:"complete"` - ParkTaskID *int64 `db:"task_id"` - CleanupTaskID *int64 `db:"cleanup_task_id"` - } - - err = a.db.Select(ctx, &parkedPiece, `SELECT ppr.piece_id, ppr.data_url, pp.created_at, pp.complete, pp.task_id, pp.cleanup_task_id FROM parked_piece_refs ppr - INNER JOIN parked_pieces pp ON pp.id = ppr.piece_id - WHERE ppr.ref_id = $1`, intID) - if err != nil { - http.Error(w, xerrors.Errorf("failed to fetch parked piece: %w", err).Error(), http.StatusInternalServerError) - return - } - - if len(parkedPiece) == 0 { - pieces[i].IsParkedPieceFound = false - continue - } - - pieces[i].IsParkedPieceFound = true - pieces[i].IsParkedPiece = true - - pieces[i].PieceParkID = parkedPiece[0].PieceID - pieces[i].PieceParkDataUrl = parkedPiece[0].DataUrl - pieces[i].PieceParkCreatedAt = parkedPiece[0].CreatedAt.Local() - pieces[i].PieceParkComplete = parkedPiece[0].Complete - pieces[i].PieceParkTaskID = parkedPiece[0].ParkTaskID - pieces[i].PieceParkCleanupTaskID = parkedPiece[0].CleanupTaskID - } - - // TaskIDs - taskIDs := map[int64]struct{}{} - var htasks []taskSummary - { - // get non-nil task IDs - appendNonNil := func(id *int64) { - if id != nil { - taskIDs[*id] = struct{}{} - } - } - appendNonNil(task.TaskSDR) - appendNonNil(task.TaskTreeD) - appendNonNil(task.TaskTreeC) - appendNonNil(task.TaskTreeR) - appendNonNil(task.TaskPrecommitMsg) - appendNonNil(task.TaskPoRep) - appendNonNil(task.TaskFinalize) - appendNonNil(task.TaskMoveStorage) - appendNonNil(task.TaskCommitMsg) - - if len(taskIDs) > 0 { - ids := lo.Keys(taskIDs) - - var dbtasks []struct { - OwnerID *string `db:"owner_id"` - HostAndPort *string `db:"host_and_port"` - TaskID int64 `db:"id"` - Name string `db:"name"` - UpdateTime time.Time `db:"update_time"` - } - err = a.db.Select(ctx, &dbtasks, `SELECT t.owner_id, hm.host_and_port, t.id, t.name, t.update_time FROM harmony_task t LEFT JOIN curio.harmony_machines hm ON hm.id = t.owner_id 
WHERE t.id = ANY($1)`, ids) - if err != nil { - http.Error(w, fmt.Sprintf("failed to fetch task names: %v", err), http.StatusInternalServerError) - return - } - - for _, tn := range dbtasks { - htasks = append(htasks, taskSummary{ - Name: tn.Name, - SincePosted: time.Since(tn.UpdateTime.Local()).Round(time.Second).String(), - Owner: tn.HostAndPort, - OwnerID: tn.OwnerID, - ID: tn.TaskID, - }) - } - } - } - - mi := struct { - SectorNumber int64 - PipelinePoRep sectorListEntry - - Pieces []sectorPieceMeta - Locations []locationTable - Tasks []taskSummary - }{ - SectorNumber: intid, - PipelinePoRep: sectorListEntry{ - PipelineTask: tasks[0], - AfterSeed: afterSeed, - - ChainAlloc: must.One(mbf.alloc.IsSet(uint64(task.SectorNumber))), - ChainSector: must.One(mbf.sectorSet.IsSet(uint64(task.SectorNumber))), - ChainActive: must.One(mbf.active.IsSet(uint64(task.SectorNumber))), - ChainUnproven: must.One(mbf.unproven.IsSet(uint64(task.SectorNumber))), - ChainFaulty: must.One(mbf.faulty.IsSet(uint64(task.SectorNumber))), - }, - - Pieces: pieces, - Locations: locs, - Tasks: htasks, - } - - a.executePageTemplate(w, "sector_info", "Sector Info", mi) -} - -var templateDev = os.Getenv("CURIO_WEB_DEV") == "1" - -func (a *app) executeTemplate(w http.ResponseWriter, name string, data interface{}) { - if templateDev { - fs := os.DirFS("./curiosrc/web/hapi/web") - a.t = template.Must(makeTemplate().ParseFS(fs, "*")) - } - if err := a.t.ExecuteTemplate(w, name, data); err != nil { - log.Errorf("execute template %s: %v", name, err) - http.Error(w, "internal server error", http.StatusInternalServerError) - } -} - -func (a *app) executePageTemplate(w http.ResponseWriter, name, title string, data interface{}) { - if templateDev { - fs := os.DirFS("./curiosrc/web/hapi/web") - a.t = template.Must(makeTemplate().ParseFS(fs, "*")) - } - var contentBuf bytes.Buffer - if err := a.t.ExecuteTemplate(&contentBuf, name, data); err != nil { - log.Errorf("execute template %s: %v", name, err) - 
http.Error(w, "internal server error", http.StatusInternalServerError) - } - a.executeTemplate(w, "root", map[string]interface{}{ - "PageTitle": title, - "Content": contentBuf.String(), - }) -} - -type machineRecentTask struct { - TaskName string - Success int64 - Fail int64 -} - -type machineSummary struct { - Address string - ID int64 - SinceContact string - - RecentTasks []*machineRecentTask - Cpu int - RamHumanized string - Gpu int -} - -type taskSummary struct { - Name string - SincePosted string - Owner, OwnerID *string - ID int64 -} - -type taskHistorySummary struct { - Name string - TaskID int64 - - Posted, Start, Queued, Took string - - Result bool - Err string - - CompletedBy string -} - -func (a *app) clusterMachineSummary(ctx context.Context) ([]machineSummary, error) { - // First get task summary for tasks completed in the last 24 hours - // NOTE: This query uses harmony_task_history_work_index, task history may get big - tsrows, err := a.db.Query(ctx, `SELECT hist.completed_by_host_and_port, hist.name, hist.result, count(1) FROM harmony_task_history hist - WHERE hist.work_end > now() - INTERVAL '1 day' - GROUP BY hist.completed_by_host_and_port, hist.name, hist.result - ORDER BY completed_by_host_and_port ASC`) - if err != nil { - return nil, err - } - defer tsrows.Close() - - // Map of machine -> task -> recent task - taskSummaries := map[string]map[string]*machineRecentTask{} - - for tsrows.Next() { - var taskName string - var result bool - var count int64 - var machine string - - if err := tsrows.Scan(&machine, &taskName, &result, &count); err != nil { - return nil, err - } - - if _, ok := taskSummaries[machine]; !ok { - taskSummaries[machine] = map[string]*machineRecentTask{} - } - - if _, ok := taskSummaries[machine][taskName]; !ok { - taskSummaries[machine][taskName] = &machineRecentTask{TaskName: taskName} - } - - if result { - taskSummaries[machine][taskName].Success = count - } else { - taskSummaries[machine][taskName].Fail = count - } - } - 
- // Then machine summary - rows, err := a.db.Query(ctx, "SELECT id, host_and_port, CURRENT_TIMESTAMP - last_contact AS last_contact, cpu, ram, gpu FROM harmony_machines order by host_and_port asc") - if err != nil { - return nil, err // Handle error - } - defer rows.Close() - - var summaries []machineSummary - for rows.Next() { - var m machineSummary - var lastContact time.Duration - var ram int64 - - if err := rows.Scan(&m.ID, &m.Address, &lastContact, &m.Cpu, &ram, &m.Gpu); err != nil { - return nil, err // Handle error - } - m.SinceContact = lastContact.Round(time.Second).String() - m.RamHumanized = humanize.Bytes(uint64(ram)) - - // Add recent tasks - if ts, ok := taskSummaries[m.Address]; ok { - for _, t := range ts { - m.RecentTasks = append(m.RecentTasks, t) - } - sort.Slice(m.RecentTasks, func(i, j int) bool { - return m.RecentTasks[i].TaskName < m.RecentTasks[j].TaskName - }) - } - - summaries = append(summaries, m) - } - return summaries, nil -} - -func (a *app) clusterTaskSummary(ctx context.Context) ([]taskSummary, error) { - rows, err := a.db.Query(ctx, "SELECT t.id, t.name, t.update_time, t.owner_id, hm.host_and_port FROM harmony_task t LEFT JOIN curio.harmony_machines hm ON hm.id = t.owner_id ORDER BY t.update_time ASC, t.owner_id") - if err != nil { - return nil, err // Handle error - } - defer rows.Close() - - var summaries []taskSummary - for rows.Next() { - var t taskSummary - var posted time.Time - - if err := rows.Scan(&t.ID, &t.Name, &posted, &t.OwnerID, &t.Owner); err != nil { - return nil, err // Handle error - } - - t.SincePosted = time.Since(posted).Round(time.Second).String() - - summaries = append(summaries, t) - } - return summaries, nil -} - -func (a *app) clusterTaskHistorySummary(ctx context.Context) ([]taskHistorySummary, error) { - rows, err := a.db.Query(ctx, "SELECT id, name, task_id, posted, work_start, work_end, result, err, completed_by_host_and_port FROM harmony_task_history ORDER BY work_end DESC LIMIT 15") - if err != nil 
{ - return nil, err // Handle error - } - defer rows.Close() - - var summaries []taskHistorySummary - for rows.Next() { - var t taskHistorySummary - var posted, start, end time.Time - - if err := rows.Scan(&t.TaskID, &t.Name, &t.TaskID, &posted, &start, &end, &t.Result, &t.Err, &t.CompletedBy); err != nil { - return nil, err // Handle error - } - - t.Posted = posted.Local().Round(time.Second).Format("02 Jan 06 15:04") - t.Start = start.Local().Round(time.Second).Format("02 Jan 06 15:04") - //t.End = end.Local().Round(time.Second).Format("02 Jan 06 15:04") - - t.Queued = start.Sub(posted).Round(time.Second).String() - if t.Queued == "0s" { - t.Queued = start.Sub(posted).Round(time.Millisecond).String() - } - - t.Took = end.Sub(start).Round(time.Second).String() - if t.Took == "0s" { - t.Took = end.Sub(start).Round(time.Millisecond).String() - } - - summaries = append(summaries, t) - } - return summaries, nil -} - -type porepPipelineSummary struct { - Actor string - - CountSDR int - CountTrees int - CountPrecommitMsg int - CountWaitSeed int - CountPoRep int - CountCommitMsg int - CountDone int - CountFailed int -} - -func (a *app) porepPipelineSummary(ctx context.Context) ([]porepPipelineSummary, error) { - rows, err := a.db.Query(ctx, ` - SELECT - sp_id, - COUNT(*) FILTER (WHERE after_sdr = false) as CountSDR, - COUNT(*) FILTER (WHERE (after_tree_d = false OR after_tree_c = false OR after_tree_r = false) AND after_sdr = true) as CountTrees, - COUNT(*) FILTER (WHERE after_tree_r = true and after_precommit_msg = false) as CountPrecommitMsg, - COUNT(*) FILTER (WHERE after_precommit_msg_success = false AND after_precommit_msg = true) as CountWaitSeed, - COUNT(*) FILTER (WHERE after_porep = false AND after_precommit_msg_success = true) as CountPoRep, - COUNT(*) FILTER (WHERE after_commit_msg_success = false AND after_porep = true) as CountCommitMsg, - COUNT(*) FILTER (WHERE after_commit_msg_success = true) as CountDone, - COUNT(*) FILTER (WHERE failed = true) as 
CountFailed - FROM - sectors_sdr_pipeline - GROUP BY sp_id`) - if err != nil { - return nil, xerrors.Errorf("query: %w", err) - } - defer rows.Close() - - var summaries []porepPipelineSummary - for rows.Next() { - var summary porepPipelineSummary - var actor int64 - if err := rows.Scan(&actor, &summary.CountSDR, &summary.CountTrees, &summary.CountPrecommitMsg, &summary.CountWaitSeed, &summary.CountPoRep, &summary.CountCommitMsg, &summary.CountDone, &summary.CountFailed); err != nil { - return nil, xerrors.Errorf("scan: %w", err) - } - - sactor, err := address.NewIDAddress(uint64(actor)) - if err != nil { - return nil, xerrors.Errorf("failed to create actor address: %w", err) - } - - summary.Actor = sactor.String() - - summaries = append(summaries, summary) - } - return summaries, nil -} - -type machineInfo struct { - Info struct { - Host string - ID int64 - LastContact string - CPU int64 - Memory int64 - GPU int64 - } - - // Storage - Storage []struct { - ID string - Weight int64 - MaxStorage int64 - CanSeal bool - CanStore bool - Groups string - AllowTo string - AllowTypes string - DenyTypes string - Capacity int64 - Available int64 - FSAvailable int64 - Reserved int64 - Used int64 - AllowMiners string - DenyMiners string - LastHeartbeat time.Time - HeartbeatErr *string - - UsedPercent float64 - ReservedPercent float64 - } - - /*TotalStorage struct { - MaxStorage int64 - UsedStorage int64 - - MaxSealStorage int64 - UsedSealStorage int64 - - MaxStoreStorage int64 - UsedStoreStorage int64 - }*/ - - // Tasks - RunningTasks []struct { - ID int64 - Task string - Posted string - - PoRepSector, PoRepSectorSP *int64 - } - - FinishedTasks []struct { - ID int64 - Task string - Posted string - Start string - Queued string - Took string - Outcome string - Message string - } -} - -func (a *app) clusterNodeInfo(ctx context.Context, id int64) (*machineInfo, error) { - rows, err := a.db.Query(ctx, "SELECT id, host_and_port, last_contact, cpu, ram, gpu FROM harmony_machines WHERE 
id=$1 ORDER BY host_and_port ASC", id) - if err != nil { - return nil, err // Handle error - } - defer rows.Close() - - var summaries []machineInfo - if rows.Next() { - var m machineInfo - var lastContact time.Time - - if err := rows.Scan(&m.Info.ID, &m.Info.Host, &lastContact, &m.Info.CPU, &m.Info.Memory, &m.Info.GPU); err != nil { - return nil, err - } - - m.Info.LastContact = time.Since(lastContact).Round(time.Second).String() - - summaries = append(summaries, m) - } - - if len(summaries) == 0 { - return nil, xerrors.Errorf("machine not found") - } - - // query storage info - rows2, err := a.db.Query(ctx, "SELECT storage_id, weight, max_storage, can_seal, can_store, groups, allow_to, allow_types, deny_types, capacity, available, fs_available, reserved, used, allow_miners, deny_miners, last_heartbeat, heartbeat_err FROM storage_path WHERE urls LIKE '%' || $1 || '%'", summaries[0].Info.Host) - if err != nil { - return nil, err - } - - defer rows2.Close() - - for rows2.Next() { - var s struct { - ID string - Weight int64 - MaxStorage int64 - CanSeal bool - CanStore bool - Groups string - AllowTo string - AllowTypes string - DenyTypes string - Capacity int64 - Available int64 - FSAvailable int64 - Reserved int64 - Used int64 - AllowMiners string - DenyMiners string - LastHeartbeat time.Time - HeartbeatErr *string - - UsedPercent float64 - ReservedPercent float64 - } - if err := rows2.Scan(&s.ID, &s.Weight, &s.MaxStorage, &s.CanSeal, &s.CanStore, &s.Groups, &s.AllowTo, &s.AllowTypes, &s.DenyTypes, &s.Capacity, &s.Available, &s.FSAvailable, &s.Reserved, &s.Used, &s.AllowMiners, &s.DenyMiners, &s.LastHeartbeat, &s.HeartbeatErr); err != nil { - return nil, err - } - - s.UsedPercent = float64(s.Capacity-s.FSAvailable) * 100 / float64(s.Capacity) - s.ReservedPercent = float64(s.Capacity-(s.FSAvailable+s.Reserved))*100/float64(s.Capacity) - s.UsedPercent - - summaries[0].Storage = append(summaries[0].Storage, s) - } - - // tasks - rows3, err := a.db.Query(ctx, "SELECT id, 
name, posted_time FROM harmony_task WHERE owner_id=$1", summaries[0].Info.ID) - if err != nil { - return nil, err - } - - defer rows3.Close() - - for rows3.Next() { - var t struct { - ID int64 - Task string - Posted string - - PoRepSector *int64 - PoRepSectorSP *int64 - } - - var posted time.Time - if err := rows3.Scan(&t.ID, &t.Task, &posted); err != nil { - return nil, err - } - t.Posted = time.Since(posted).Round(time.Second).String() - - { - // try to find in the porep pipeline - rows4, err := a.db.Query(ctx, `SELECT sp_id, sector_number FROM sectors_sdr_pipeline - WHERE task_id_sdr=$1 - OR task_id_tree_d=$1 - OR task_id_tree_c=$1 - OR task_id_tree_r=$1 - OR task_id_precommit_msg=$1 - OR task_id_porep=$1 - OR task_id_commit_msg=$1 - OR task_id_finalize=$1 - OR task_id_move_storage=$1 - `, t.ID) - if err != nil { - return nil, err - } - - if rows4.Next() { - var spid int64 - var sector int64 - if err := rows4.Scan(&spid, §or); err != nil { - return nil, err - } - t.PoRepSector = §or - t.PoRepSectorSP = &spid - } - - rows4.Close() - } - - summaries[0].RunningTasks = append(summaries[0].RunningTasks, t) - } - - rows5, err := a.db.Query(ctx, `SELECT name, task_id, posted, work_start, work_end, result, err FROM harmony_task_history WHERE completed_by_host_and_port = $1 ORDER BY work_end DESC LIMIT 15`, summaries[0].Info.Host) - if err != nil { - return nil, err - } - defer rows5.Close() - - for rows5.Next() { - var ft struct { - ID int64 - Task string - Posted string - Start string - Queued string - Took string - Outcome string - - Message string - } - - var posted, start, end time.Time - var result bool - if err := rows5.Scan(&ft.Task, &ft.ID, &posted, &start, &end, &result, &ft.Message); err != nil { - return nil, err - } - - ft.Outcome = "Success" - if !result { - ft.Outcome = "Failed" - } - - // Format the times and durations - ft.Posted = posted.Format("02 Jan 06 15:04 MST") - ft.Start = start.Format("02 Jan 06 15:04 MST") - ft.Queued = fmt.Sprintf("%s", 
start.Sub(posted).Round(time.Second).String()) - ft.Took = fmt.Sprintf("%s", end.Sub(start).Round(time.Second)) - - summaries[0].FinishedTasks = append(summaries[0].FinishedTasks, ft) - } - - return &summaries[0], nil -} diff --git a/curiosrc/web/hapi/simpleinfo_pipeline_porep.go b/curiosrc/web/hapi/simpleinfo_pipeline_porep.go deleted file mode 100644 index f6920ca1eff..00000000000 --- a/curiosrc/web/hapi/simpleinfo_pipeline_porep.go +++ /dev/null @@ -1,195 +0,0 @@ -package hapi - -import ( - "context" - "net/http" - "time" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - - "github.com/filecoin-project/lotus/chain/actors/adt" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/must" -) - -type PipelineTask struct { - SpID int64 `db:"sp_id"` - SectorNumber int64 `db:"sector_number"` - - CreateTime time.Time `db:"create_time"` - - TaskSDR *int64 `db:"task_id_sdr"` - AfterSDR bool `db:"after_sdr"` - - TaskTreeD *int64 `db:"task_id_tree_d"` - AfterTreeD bool `db:"after_tree_d"` - - TaskTreeC *int64 `db:"task_id_tree_c"` - AfterTreeC bool `db:"after_tree_c"` - - TaskTreeR *int64 `db:"task_id_tree_r"` - AfterTreeR bool `db:"after_tree_r"` - - TaskPrecommitMsg *int64 `db:"task_id_precommit_msg"` - AfterPrecommitMsg bool `db:"after_precommit_msg"` - - AfterPrecommitMsgSuccess bool `db:"after_precommit_msg_success"` - SeedEpoch *int64 `db:"seed_epoch"` - - TaskPoRep *int64 `db:"task_id_porep"` - PoRepProof []byte `db:"porep_proof"` - AfterPoRep bool `db:"after_porep"` - - TaskFinalize *int64 `db:"task_id_finalize"` - AfterFinalize bool `db:"after_finalize"` - - TaskMoveStorage *int64 `db:"task_id_move_storage"` - AfterMoveStorage bool `db:"after_move_storage"` - - TaskCommitMsg *int64 `db:"task_id_commit_msg"` - AfterCommitMsg bool `db:"after_commit_msg"` - - AfterCommitMsgSuccess bool 
`db:"after_commit_msg_success"` - - Failed bool `db:"failed"` - FailedReason string `db:"failed_reason"` -} - -type sectorListEntry struct { - PipelineTask - - Address address.Address - CreateTime string - AfterSeed bool - - ChainAlloc, ChainSector, ChainActive, ChainUnproven, ChainFaulty bool -} - -type minerBitfields struct { - alloc, sectorSet, active, unproven, faulty bitfield.BitField -} - -func (a *app) pipelinePorepSectors(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - var tasks []PipelineTask - - err := a.db.Select(ctx, &tasks, `SELECT - sp_id, sector_number, - create_time, - task_id_sdr, after_sdr, - task_id_tree_d, after_tree_d, - task_id_tree_c, after_tree_c, - task_id_tree_r, after_tree_r, - task_id_precommit_msg, after_precommit_msg, - after_precommit_msg_success, seed_epoch, - task_id_porep, porep_proof, after_porep, - task_id_finalize, after_finalize, - task_id_move_storage, after_move_storage, - task_id_commit_msg, after_commit_msg, - after_commit_msg_success, - failed, failed_reason - FROM sectors_sdr_pipeline order by sp_id, sector_number`) // todo where constrain list - if err != nil { - http.Error(w, xerrors.Errorf("failed to fetch pipeline tasks: %w", err).Error(), http.StatusInternalServerError) - return - } - - head, err := a.workingApi.ChainHead(ctx) - if err != nil { - http.Error(w, xerrors.Errorf("failed to fetch chain head: %w", err).Error(), http.StatusInternalServerError) - return - } - epoch := head.Height() - - minerBitfieldCache := map[address.Address]minerBitfields{} - - sectorList := make([]sectorListEntry, 0, len(tasks)) - for _, task := range tasks { - task := task - - task.CreateTime = task.CreateTime.Local() - - addr, err := address.NewIDAddress(uint64(task.SpID)) - if err != nil { - http.Error(w, xerrors.Errorf("failed to create actor address: %w", err).Error(), http.StatusInternalServerError) - return - } - - mbf, ok := minerBitfieldCache[addr] - if !ok { - mbf, err = a.getMinerBitfields(ctx, addr, a.stor) 
- if err != nil { - http.Error(w, xerrors.Errorf("failed to load miner bitfields: %w", err).Error(), http.StatusInternalServerError) - return - } - minerBitfieldCache[addr] = mbf - } - - afterSeed := task.SeedEpoch != nil && *task.SeedEpoch <= int64(epoch) - - sectorList = append(sectorList, sectorListEntry{ - PipelineTask: task, - Address: addr, - CreateTime: task.CreateTime.Format(time.DateTime), - AfterSeed: afterSeed, - - ChainAlloc: must.One(mbf.alloc.IsSet(uint64(task.SectorNumber))), - ChainSector: must.One(mbf.sectorSet.IsSet(uint64(task.SectorNumber))), - ChainActive: must.One(mbf.active.IsSet(uint64(task.SectorNumber))), - ChainUnproven: must.One(mbf.unproven.IsSet(uint64(task.SectorNumber))), - ChainFaulty: must.One(mbf.faulty.IsSet(uint64(task.SectorNumber))), - }) - } - - a.executeTemplate(w, "pipeline_porep_sectors", sectorList) -} - -func (a *app) getMinerBitfields(ctx context.Context, addr address.Address, stor adt.Store) (minerBitfields, error) { - act, err := a.workingApi.StateGetActor(ctx, addr, types.EmptyTSK) - if err != nil { - return minerBitfields{}, xerrors.Errorf("failed to load actor: %w", err) - } - - mas, err := miner.Load(stor, act) - if err != nil { - return minerBitfields{}, xerrors.Errorf("failed to load miner actor: %w", err) - } - - activeSectors, err := miner.AllPartSectors(mas, miner.Partition.ActiveSectors) - if err != nil { - return minerBitfields{}, xerrors.Errorf("failed to load active sectors: %w", err) - } - - allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors) - if err != nil { - return minerBitfields{}, xerrors.Errorf("failed to load all sectors: %w", err) - } - - unproved, err := miner.AllPartSectors(mas, miner.Partition.UnprovenSectors) - if err != nil { - return minerBitfields{}, xerrors.Errorf("failed to load unproven sectors: %w", err) - } - - faulty, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors) - if err != nil { - return minerBitfields{}, xerrors.Errorf("failed to load faulty 
sectors: %w", err) - } - - alloc, err := mas.GetAllocatedSectors() - if err != nil { - return minerBitfields{}, xerrors.Errorf("failed to load allocated sectors: %w", err) - } - - return minerBitfields{ - alloc: *alloc, - sectorSet: allSectors, - active: activeSectors, - unproven: unproved, - faulty: faulty, - }, nil -} diff --git a/curiosrc/web/hapi/watch_actor.go b/curiosrc/web/hapi/watch_actor.go deleted file mode 100644 index 51e1f51e74d..00000000000 --- a/curiosrc/web/hapi/watch_actor.go +++ /dev/null @@ -1,286 +0,0 @@ -package hapi - -import ( - "context" - "sort" - "time" - - "github.com/BurntSushi/toml" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" -) - -const watchInterval = time.Second * 10 - -func (a *app) watchActor() { - ticker := time.NewTicker(watchInterval) - for { - err := a.updateActor(context.TODO()) - if err != nil { - log.Errorw("updating rpc info", "error", err) - } - select { - case <-ticker.C: - } - } -} - -type minimalActorInfo struct { - Addresses []struct { - MinerAddresses []string - } -} - -var startedAt = time.Now() - -func (a *app) updateActor(ctx context.Context) error { - a.rpcInfoLk.Lock() - api := a.workingApi - a.rpcInfoLk.Unlock() - - stor := store.ActorStore(ctx, blockstore.NewReadCachedBlockstore(blockstore.NewAPIBlockstore(a.workingApi), ChainBlockCache)) - - if api == nil { - if time.Since(startedAt) > time.Second*10 { - log.Warnw("no working api yet") - } - return nil - } - - var actorInfos []actorInfo - - confNameToAddr := map[address.Address][]string{} // address -> config names - - err := forEachConfig[minimalActorInfo](a, func(name string, info minimalActorInfo) error { - for _, aset := range info.Addresses { - for _, addr := range aset.MinerAddresses { - a, err := 
address.NewFromString(addr) - if err != nil { - return xerrors.Errorf("parsing address: %w", err) - } - confNameToAddr[a] = append(confNameToAddr[a], name) - } - } - - return nil - }) - if err != nil { - return err - } - - wins, err := a.spWins(ctx) - if err != nil { - return xerrors.Errorf("getting sp wins: %w", err) - } - - for addr, cnames := range confNameToAddr { - p, err := api.StateMinerPower(ctx, addr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting miner power: %w", err) - } - - dls, err := api.StateMinerDeadlines(ctx, addr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting deadlines: %w", err) - } - - mact, err := api.StateGetActor(ctx, addr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting actor: %w", err) - } - - mas, err := miner.Load(stor, mact) - if err != nil { - return err - } - - outDls := []actorDeadline{} - - for dlidx := range dls { - p, err := api.StateMinerPartitions(ctx, addr, uint64(dlidx), types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting partition: %w", err) - } - - dl := actorDeadline{ - Empty: false, - Current: false, // todo - Proven: false, - PartFaulty: false, - Faulty: false, - } - - var live, faulty uint64 - - for _, part := range p { - l, err := part.LiveSectors.Count() - if err != nil { - return xerrors.Errorf("getting live sectors: %w", err) - } - live += l - - f, err := part.FaultySectors.Count() - if err != nil { - return xerrors.Errorf("getting faulty sectors: %w", err) - } - faulty += f - } - - dl.Empty = live == 0 - dl.Proven = live > 0 && faulty == 0 - dl.PartFaulty = faulty > 0 - dl.Faulty = faulty > 0 && faulty == live - - outDls = append(outDls, dl) - } - - pd, err := api.StateMinerProvingDeadline(ctx, addr, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("getting proving deadline: %w", err) - } - - if len(outDls) != 48 { - return xerrors.Errorf("expected 48 deadlines, got %d", len(outDls)) - } - - outDls[pd.Index].Current = true - - avail, 
err := mas.AvailableBalance(mact.Balance) - if err != nil { - return xerrors.Errorf("getting available balance: %w", err) - } - - mi, err := mas.Info() - if err != nil { - return xerrors.Errorf("getting miner info: %w", err) - } - - wbal, err := api.WalletBalance(ctx, mi.Worker) - if err != nil { - return xerrors.Errorf("getting worker balance: %w", err) - } - - sort.Strings(cnames) - - actorInfos = append(actorInfos, actorInfo{ - Address: addr.String(), - CLayers: cnames, - QualityAdjustedPower: types.DeciStr(p.MinerPower.QualityAdjPower), - RawBytePower: types.DeciStr(p.MinerPower.RawBytePower), - Deadlines: outDls, - - ActorBalance: types.FIL(mact.Balance).Short(), - ActorAvailable: types.FIL(avail).Short(), - WorkerBalance: types.FIL(wbal).Short(), - - Win1: wins[addr].Win1, // note: zero values are fine here - Win7: wins[addr].Win7, - Win30: wins[addr].Win30, - }) - } - - sort.Slice(actorInfos, func(i, j int) bool { - return actorInfos[i].Address < actorInfos[j].Address - }) - - a.actorInfoLk.Lock() - a.actorInfos = actorInfos - a.actorInfoLk.Unlock() - - return nil -} - -func (a *app) loadConfigs(ctx context.Context) (map[string]string, error) { - rows, err := a.db.Query(ctx, `SELECT title, config FROM harmony_config`) - if err != nil { - return nil, xerrors.Errorf("getting db configs: %w", err) - } - - configs := make(map[string]string) - for rows.Next() { - var title, config string - if err := rows.Scan(&title, &config); err != nil { - return nil, xerrors.Errorf("scanning db configs: %w", err) - } - configs[title] = config - } - - return configs, nil -} - -type wins struct { - SpID int64 `db:"sp_id"` - Win1 int64 `db:"win1"` - Win7 int64 `db:"win7"` - Win30 int64 `db:"win30"` -} - -func (a *app) spWins(ctx context.Context) (map[address.Address]wins, error) { - var w []wins - - // note: this query uses mining_tasks_won_sp_id_base_compute_time_index - err := a.db.Select(ctx, &w, `WITH wins AS ( - SELECT - sp_id, - base_compute_time, - won - FROM - 
mining_tasks - WHERE - won = true - AND base_compute_time > NOW() - INTERVAL '30 days' - ) - - SELECT - sp_id, - COUNT(*) FILTER (WHERE base_compute_time > NOW() - INTERVAL '1 day') AS "win1", - COUNT(*) FILTER (WHERE base_compute_time > NOW() - INTERVAL '7 days') AS "win7", - COUNT(*) FILTER (WHERE base_compute_time > NOW() - INTERVAL '30 days') AS "win30" - FROM - wins - GROUP BY - sp_id - ORDER BY - sp_id`) - if err != nil { - return nil, xerrors.Errorf("query win counts: %w", err) - } - - wm := make(map[address.Address]wins) - for _, wi := range w { - ma, err := address.NewIDAddress(uint64(wi.SpID)) - if err != nil { - return nil, xerrors.Errorf("parsing miner address: %w", err) - } - - wm[ma] = wi - } - - return wm, nil -} - -func forEachConfig[T any](a *app, cb func(name string, v T) error) error { - confs, err := a.loadConfigs(context.Background()) - if err != nil { - return err - } - - for name, tomlStr := range confs { - var info T - if err := toml.Unmarshal([]byte(tomlStr), &info); err != nil { - return xerrors.Errorf("unmarshaling %s config: %w", name, err) - } - - if err := cb(name, info); err != nil { - return xerrors.Errorf("cb: %w", err) - } - } - - return nil -} diff --git a/curiosrc/web/hapi/web/actor_summary.gohtml b/curiosrc/web/hapi/web/actor_summary.gohtml deleted file mode 100644 index bf577d802e0..00000000000 --- a/curiosrc/web/hapi/web/actor_summary.gohtml +++ /dev/null @@ -1,30 +0,0 @@ -{{define "actor_summary"}} -{{range .}} - - {{.Address}} - - {{range .CLayers}} - {{.}} - {{end}} - - {{.QualityAdjustedPower}} - -
- {{range .Deadlines}} -
- {{end}} -
- - {{.ActorBalance}} - {{.ActorAvailable}} - {{.WorkerBalance}} - - - - - -
1day:  {{.Win1}}
7day:  {{.Win7}}
30day: {{.Win30}}
- - -{{end}} -{{end}} \ No newline at end of file diff --git a/curiosrc/web/hapi/web/chain_rpcs.gohtml b/curiosrc/web/hapi/web/chain_rpcs.gohtml deleted file mode 100644 index 5705da39517..00000000000 --- a/curiosrc/web/hapi/web/chain_rpcs.gohtml +++ /dev/null @@ -1,15 +0,0 @@ -{{define "chain_rpcs"}} -{{range .}} - - {{.Address}} - - {{range .CLayers}} - {{.}} - {{end}} - - {{if .Reachable}}ok{{else}}FAIL{{end}} - {{if eq "ok" .SyncState}}ok{{else}}{{.SyncState}}{{end}} - {{.Version}} - -{{end}} -{{end}} diff --git a/curiosrc/web/hapi/web/cluster_machines.gohtml b/curiosrc/web/hapi/web/cluster_machines.gohtml deleted file mode 100644 index 6c04b2871ab..00000000000 --- a/curiosrc/web/hapi/web/cluster_machines.gohtml +++ /dev/null @@ -1,15 +0,0 @@ -{{define "cluster_machines"}} -{{range .}} - - {{.Address}} - {{.ID}} - {{.Cpu}} - {{.RamHumanized}} - {{.Gpu}} - {{.SinceContact}} - {{range .RecentTasks}} - {{.TaskName}}:{{.Success}}{{if ne 0 .Fail}}({{.Fail}}){{end}} - {{end}} - -{{end}} -{{end}} diff --git a/curiosrc/web/hapi/web/cluster_task_history.gohtml b/curiosrc/web/hapi/web/cluster_task_history.gohtml deleted file mode 100644 index f95dbb2b26b..00000000000 --- a/curiosrc/web/hapi/web/cluster_task_history.gohtml +++ /dev/null @@ -1,19 +0,0 @@ -{{define "cluster_task_history"}} - {{range .}} - - {{.Name}} - {{.TaskID}} - {{.CompletedBy}} - {{.Posted}} - {{.Start}} - {{.Queued}} - {{.Took}} - {{if .Result}}success{{else}}error{{end}} - -
- {{.Err}} -
- - - {{end}} -{{end}} diff --git a/curiosrc/web/hapi/web/cluster_tasks.gohtml b/curiosrc/web/hapi/web/cluster_tasks.gohtml deleted file mode 100644 index b7b3faec0ef..00000000000 --- a/curiosrc/web/hapi/web/cluster_tasks.gohtml +++ /dev/null @@ -1,10 +0,0 @@ -{{define "cluster_tasks"}} - {{range .}} - - {{.Name}} - {{.ID}} - {{.SincePosted}} - {{if ne nil .OwnerID}}{{.Owner}}{{end}} - - {{end}} -{{end}} diff --git a/curiosrc/web/hapi/web/node_info.gohtml b/curiosrc/web/hapi/web/node_info.gohtml deleted file mode 100644 index 98e56afd8bc..00000000000 --- a/curiosrc/web/hapi/web/node_info.gohtml +++ /dev/null @@ -1,100 +0,0 @@ -{{define "node_info"}} -

Info

- - - - - - - - - - - - - - - - - - - -
HostIDLast ContactCPUMemoryGPUDebug
{{.Info.Host}}{{.Info.ID}}{{.Info.LastContact}}{{.Info.CPU}}{{toHumanBytes .Info.Memory}}{{.Info.GPU}}[pprof]
-
-

Storage

- - - - - - - - - - {{range .Storage}} - - - - - - - - - {{end}} - -
IDTypeCapacityAvailableReserved
{{.ID}} - {{if and (not .CanSeal) (not .CanStore)}}ReadOnly{{end}} - {{if and (.CanSeal) (not .CanStore)}}Seal{{end}} - {{if and (not .CanSeal) (.CanStore)}}Store{{end}} - {{if and (.CanSeal) (.CanStore)}}Seal+Store{{end}} - {{toHumanBytes .Capacity}}{{toHumanBytes .Available}}{{toHumanBytes .Reserved}} -
-
-
-
-
-
-

Tasks

-

Running

- - - - - - - - {{range .RunningTasks}} - - - - - - - {{end}} -
IDTaskPostedSector
{{.ID}}{{.Task}}{{.Posted}}{{if ne nil .PoRepSector}}f0{{.PoRepSectorSP}}:{{.PoRepSector}}{{end}}
-

Recently Finished

- - - - - - - - - - - - {{range .FinishedTasks}} - - - - - - - - - - - {{end}} -
IDTaskPostedStartQueuedTookOutcomeMessage
{{.ID}}{{.Task}}{{.Posted}}{{.Start}}{{.Queued}}{{.Took}}{{.Outcome}}{{.Message}}
-{{end}} diff --git a/curiosrc/web/hapi/web/pipeline_porep_sectors.gohtml b/curiosrc/web/hapi/web/pipeline_porep_sectors.gohtml deleted file mode 100644 index 82f0ad19671..00000000000 --- a/curiosrc/web/hapi/web/pipeline_porep_sectors.gohtml +++ /dev/null @@ -1,200 +0,0 @@ -{{define "sector_porep_state"}} - - - - - - - - - - - - - - - - - - - - - - - - - -
-
SDR
-
- {{if .AfterSDR}}done{{else}} - {{if ne .TaskSDR nil}}T:{{.TaskSDR}}{{else}}--{{end}} - {{end}} -
-
-
TreeC
-
- {{if .AfterTreeC}}done{{else}} - {{if ne .TaskTreeC nil}}T:{{.TaskTreeC}}{{else}}--{{end}} - {{end}} -
-
-
PComm Msg
-
- {{if .AfterPrecommitMsg}}done{{else}} - {{if ne .TaskPrecommitMsg nil}}T:{{.TaskPrecommitMsg}}{{else}}--{{end}} - {{end}} -
-
-
PComm Wait
-
- {{if .AfterPrecommitMsgSuccess}}done{{else}} - -- - {{end}} -
-
-
Wait Seed
-
- {{if .AfterSeed}}done{{else}} - {{if ne .SeedEpoch nil}}@{{.SeedEpoch}}{{else}}--{{end}} - {{end}} -
-
-
PoRep
-
- {{if .AfterPoRep}}done{{else}} - {{if ne .TaskPoRep nil}}T:{{.TaskPoRep}}{{else}}--{{end}} - {{end}} -
-
-
Clear Cache
-
- {{if .AfterFinalize}}done{{else}} - {{if ne .TaskFinalize nil}}T:{{.TaskFinalize}}{{else}}--{{end}} - {{end}} -
-
-
Move Storage
-
- {{if .AfterMoveStorage}}done{{else}} - {{if ne .TaskMoveStorage nil}}T:{{.TaskMoveStorage}}{{else}}--{{end}} - {{end}} -
-
-
On Chain
-
{{if .ChainSector}}yes{{else}}{{if .ChainAlloc}}allocated{{else}}no{{end}}{{end}}
-
-
TreeD
-
- {{if .AfterTreeD}}done{{else}} - {{if ne .TaskTreeD nil}}T:{{.TaskTreeD}}{{else}}--{{end}} - {{end}} -
-
-
TreeR
-
- {{if .AfterTreeR}}done{{else}} - {{if ne .TaskTreeR nil}}T:{{.TaskTreeR}}{{else}}--{{end}} - {{end}} -
-
-
Commit Msg
-
- {{if .AfterCommitMsg}}done{{else}} - {{if ne .TaskCommitMsg nil}}T:{{.TaskCommitMsg}}{{else}}--{{end}} - {{end}} -
-
-
Commit Wait
-
- {{if .AfterCommitMsgSuccess}}done{{else}} - -- - {{end}} -
-
-
Active
-
{{if .ChainActive}}yes{{else}} - {{if .ChainUnproven}}unproven{{else}} - {{if .ChainFaulty}}faulty{{else}}no{{end}} - {{end}} - {{end}} -
-
-{{end}} - -{{define "pipeline_porep_sectors"}} - {{range .}} - - {{.Address}} - {{.CreateTime}} - - {{template "sector_porep_state" .}} - - - DETAILS - - - - - {{.SectorNumber}} - - - {{end}} -{{end}} diff --git a/curiosrc/web/hapi/web/pipline_porep.gohtml b/curiosrc/web/hapi/web/pipline_porep.gohtml deleted file mode 100644 index 5e7c7f7c63e..00000000000 --- a/curiosrc/web/hapi/web/pipline_porep.gohtml +++ /dev/null @@ -1,15 +0,0 @@ -{{define "pipeline_porep"}} - {{range .}} - - {{.Actor}} - {{.CountSDR}} - {{.CountTrees}} - {{.CountPrecommitMsg}} - {{.CountWaitSeed}} - {{.CountPoRep}} - {{.CountCommitMsg}} - {{.CountDone}} - {{.CountFailed}} - - {{end}} -{{end}} diff --git a/curiosrc/web/hapi/web/root.gohtml b/curiosrc/web/hapi/web/root.gohtml deleted file mode 100644 index 3766158a349..00000000000 --- a/curiosrc/web/hapi/web/root.gohtml +++ /dev/null @@ -1,28 +0,0 @@ -{{define "root"}} - - - - {{.PageTitle}} - - - - - - - - - - -
-
-

{{.PageTitle}}

-
-
-
-
- {{.Content}} -
-
- - -{{end}} diff --git a/curiosrc/web/hapi/web/sector_info.gohtml b/curiosrc/web/hapi/web/sector_info.gohtml deleted file mode 100644 index 49c6fabb35f..00000000000 --- a/curiosrc/web/hapi/web/sector_info.gohtml +++ /dev/null @@ -1,105 +0,0 @@ -{{define "sector_info"}} -

Sector {{.SectorNumber}}

-
-

PoRep Pipeline

- {{template "sector_porep_state" .PipelinePoRep}} -
-
-

Pieces

- - - - - - - - - - - - - - - - - - - {{range .Pieces}} - - - - - - - - - - - {{if .IsParkedPiece}} - - - - - - {{else}} - - - - - - {{end}} - - {{end}} -
Piece IndexPiece CIDPiece SizeData URLData Raw SizeDelete On FinalizeF05 Publish CIDF05 Deal IDDirect Piece Activation ManifestPiecePark IDPP URLPP Created AtPP CompletePP Cleanup Task
{{.PieceIndex}}{{.PieceCid}}{{.PieceSize}}{{.DataUrl}}{{.DataRawSize}}{{.DeleteOnFinalize}}{{.F05PublishCid}}{{.F05DealID}}{{.DDOPam}}{{.PieceParkID}}{{.PieceParkDataUrl}}{{.PieceParkCreatedAt}}{{.PieceParkComplete}}{{.PieceParkCleanupTaskID}}{{if not .IsParkedPieceFound}}ERR:RefNotFound{{end}}
-
-
-

Storage

- - - - - - - - {{range .Locations}} - - {{if .PathType}} - - {{end}} - {{if .FileType}} - - {{end}} - - - - {{range $i, $loc := .Locations}} - {{if gt $i 0}} - - - - - {{end}} - {{end}} - {{end}} -
Path TypeFile TypePath IDHost
{{.PathType}}{{.FileType}}{{(index .Locations 0).StorageID}}{{range (index .Locations 0).Urls}}

{{.}}

{{end}}
{{$loc.StorageID}}{{range $loc.Urls}}

{{.}}

{{end}}
-
-
-

Tasks

- - - - - - - - {{range .Tasks}} - - - - - - - {{end}} -
Task TypeTask IDPostedWorker
{{.Name}}{{.ID}}{{.SincePosted}}{{if ne nil .OwnerID}}{{.Owner}}{{end}}
-
-{{end}} diff --git a/curiosrc/web/srv.go b/curiosrc/web/srv.go deleted file mode 100644 index b16a9f9afcb..00000000000 --- a/curiosrc/web/srv.go +++ /dev/null @@ -1,82 +0,0 @@ -// Package web defines the HTTP web server for static files and endpoints. -package web - -import ( - "context" - "embed" - "io" - "io/fs" - "net" - "net/http" - "os" - "path" - "strings" - "time" - - "github.com/gorilla/mux" - "go.opencensus.io/tag" - - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/curiosrc/web/api" - "github.com/filecoin-project/lotus/curiosrc/web/hapi" - "github.com/filecoin-project/lotus/metrics" -) - -//go:embed static -var static embed.FS - -var basePath = "/static/" - -// An dev mode hack for no-restart changes to static and templates. -// You still need to recomplie the binary for changes to go code. -var webDev = os.Getenv("CURIO_WEB_DEV") == "1" - -func GetSrv(ctx context.Context, deps *deps.Deps) (*http.Server, error) { - mx := mux.NewRouter() - err := hapi.Routes(mx.PathPrefix("/hapi").Subrouter(), deps) - if err != nil { - return nil, err - } - api.Routes(mx.PathPrefix("/api").Subrouter(), deps) - - var static fs.FS = static - if webDev { - basePath = "" - static = os.DirFS("curiosrc/web/static") - } - - mx.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // If the request is for a directory, redirect to the index file. 
- if strings.HasSuffix(r.URL.Path, "/") { - r.URL.Path += "index.html" - } - - file, err := static.Open(path.Join(basePath, r.URL.Path)[1:]) - if err != nil { - w.WriteHeader(http.StatusNotFound) - _, _ = w.Write([]byte("404 Not Found")) - return - } - defer func() { _ = file.Close() }() - - fileInfo, err := file.Stat() - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - _, _ = w.Write([]byte("500 Internal Server Error")) - return - } - - http.ServeContent(w, r, fileInfo.Name(), fileInfo.ModTime(), file.(io.ReadSeeker)) - }) - - return &http.Server{ - Handler: http.HandlerFunc(mx.ServeHTTP), - BaseContext: func(listener net.Listener) context.Context { - ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "curio")) - return ctx - }, - Addr: deps.Cfg.Subsystems.GuiAddress, - ReadTimeout: time.Minute * 3, - ReadHeaderTimeout: time.Minute * 3, // lint - }, nil -} diff --git a/curiosrc/web/static/chain-connectivity.mjs b/curiosrc/web/static/chain-connectivity.mjs deleted file mode 100644 index 60e6888ab81..00000000000 --- a/curiosrc/web/static/chain-connectivity.mjs +++ /dev/null @@ -1,63 +0,0 @@ -import { LitElement, html, css } from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; -import RPCCall from '/lib/jsonrpc.mjs'; - -window.customElements.define('chain-connectivity', class MyElement extends LitElement { - constructor() { - super(); - this.data = []; - this.loadData(); - } - - async loadData() { - const blockDelay = await RPCCall('BlockDelaySecs') - await this.updateData(); - setInterval(this.update, blockDelay * 1000); - }; - - async updateData() { - this.data = await RPCCall('SyncerState'); - console.log(this.data); - super.requestUpdate(); - } - - static get styles() { - return [css` - :host { - box-sizing: border-box; /* Don't forgert this to include padding/border inside width calculation */ - } - .success { - color: green; - } - .warning { - color: yellow; - } - .error { - color: red; - } - `]; - } - render 
= () => html` - - - - - - - - - - - - - - ${this.data.map(item => html` - - - - - - - `)} - -
RPC AddressReachabilitySync StatusVersion
${item.Address}${item.Reachable ? html`ok` : html`FAIL`}${item.SyncState === "ok" ? html`ok` : html`${item.SyncState}`}${item.Version}
` -}); diff --git a/curiosrc/web/static/config/edit.html b/curiosrc/web/static/config/edit.html deleted file mode 100644 index d365745f5fe..00000000000 --- a/curiosrc/web/static/config/edit.html +++ /dev/null @@ -1,164 +0,0 @@ - - - - JSON Schema Editor - - - - - - - - - - - - - -
-
-
-
- -
- -
- - -
- -
- -
-
-
- - diff --git a/curiosrc/web/static/config/index.html b/curiosrc/web/static/config/index.html deleted file mode 100644 index 28a11f6d9e4..00000000000 --- a/curiosrc/web/static/config/index.html +++ /dev/null @@ -1,125 +0,0 @@ - - - - - Configuration Editor - - - - - - - - -
-
-
-
-

Configuration Editor

-

Click on a layer to edit its configuration

- -
-
-
-
-
- - - \ No newline at end of file diff --git a/curiosrc/web/static/favicon.svg b/curiosrc/web/static/favicon.svg deleted file mode 100644 index 91f132959f2..00000000000 --- a/curiosrc/web/static/favicon.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/curiosrc/web/static/index.html b/curiosrc/web/static/index.html deleted file mode 100644 index 57c6d485657..00000000000 --- a/curiosrc/web/static/index.html +++ /dev/null @@ -1,204 +0,0 @@ - - - - Curio Cluster Overview - - - - - - - -
-
-
-
-

Chain Connectivity

- -
-
-
-
-
- -
-
-
-

Cluster Machines

- - - - - - - - - - - - - - -
HostIDCPUsRAMGPUsLast ContactTasks (24h)
-
-
-
-
-
- -
-
-
-

PoRep Pipeline

- - - - - - - - - - - - - - - - -
AddressSDRTreesPrecommit MsgWait SeedPoRepCommit MsgDoneFailed
-
-
-
-
-
- -
-
-
-

Actor Summary

- - - - - - - - - - - - - - - -
AddressConfig Layers AvailableQaPDeadlinesBalanceAvailableWorkerWins
-
-
-
-
-
- - -
-
-
-

Recently Finished Tasks

- - - - - - - - - - - - - - - - -
NameIDExecutorPostedStartQueuedTookOutcomeMessage
-
-
-
-
-
- -
-
-
-

Cluster Tasks

- - - - - - - - - - - -
TaskIDPostedOwner
-
-
-
-
-
-
-
- - - \ No newline at end of file diff --git a/curiosrc/web/static/lib/jsonrpc.mjs b/curiosrc/web/static/lib/jsonrpc.mjs deleted file mode 100644 index 4f560f0bb32..00000000000 --- a/curiosrc/web/static/lib/jsonrpc.mjs +++ /dev/null @@ -1,96 +0,0 @@ -class JsonRpcClient { - static instance = null; - - static async getInstance() { - if (!JsonRpcClient.instance) { - JsonRpcClient.instance = (async () => { - const client = new JsonRpcClient('/api/webrpc/v0'); - await client.connect(); - return client; - })(); - } - return await JsonRpcClient.instance; - } - - - constructor(url) { - if (JsonRpcClient.instance) { - throw new Error("Error: Instantiation failed: Use getInstance() instead of new."); - } - this.url = url; - this.requestId = 0; - this.pendingRequests = new Map(); - } - - async connect() { - return new Promise((resolve, reject) => { - this.ws = new WebSocket(this.url); - - this.ws.onopen = () => { - console.log("Connected to the server"); - resolve(); - }; - - this.ws.onclose = () => { - console.log("Connection closed, attempting to reconnect..."); - setTimeout(() => this.connect().then(resolve, reject), 1000); // Reconnect after 1 second - }; - - this.ws.onerror = (error) => { - console.error("WebSocket error:", error); - reject(error); - }; - - this.ws.onmessage = (message) => { - this.handleMessage(message); - }; - }); - } - - handleMessage(message) { - const response = JSON.parse(message.data); - const { id, result, error } = response; - - const resolver = this.pendingRequests.get(id); - if (resolver) { - if (error) { - resolver.reject(error); - } else { - resolver.resolve(result); - } - this.pendingRequests.delete(id); - } - } - - call(method, params = []) { - const id = ++this.requestId; - const request = { - jsonrpc: "2.0", - method: "CurioWeb." 
+ method, - params, - id, - }; - - return new Promise((resolve, reject) => { - this.pendingRequests.set(id, { resolve, reject }); - - if (this.ws.readyState === WebSocket.OPEN) { - this.ws.send(JSON.stringify(request)); - } else { - reject('WebSocket is not open'); - } - }); - } -} - -async function init() { - const client = await JsonRpcClient.getInstance(); - console.log("webrpc backend:", await client.call('Version', [])) -} - -init(); - -export default async function(method, params = []) { - const i = await JsonRpcClient.getInstance(); - return await i.call(method, params); -} diff --git a/curiosrc/web/static/pipeline_porep.html b/curiosrc/web/static/pipeline_porep.html deleted file mode 100644 index cfa6ade6a7f..00000000000 --- a/curiosrc/web/static/pipeline_porep.html +++ /dev/null @@ -1,98 +0,0 @@ - - - Curio PoRep Pipeline - - - - - - - - - -
-
-

Curio PoRep Pipeline

-
-
-
-
-
-
-
-

Sectors

- - - - - - - - - - -
Sector IDCreate TimeState
-
-
-
-
-
- - \ No newline at end of file diff --git a/curiosrc/web/static/sector/index.html b/curiosrc/web/static/sector/index.html deleted file mode 100644 index 9ac5559cd4a..00000000000 --- a/curiosrc/web/static/sector/index.html +++ /dev/null @@ -1,129 +0,0 @@ - - - - - Sector List - - - - - - - - - - - - - - - - - - - - - - - - -
-
-
- - - - -
Loading...
-
-
-
-
- - - - \ No newline at end of file diff --git a/curiosrc/web/static/ux/curio-ux.mjs b/curiosrc/web/static/ux/curio-ux.mjs deleted file mode 100644 index 2a3db4f019b..00000000000 --- a/curiosrc/web/static/ux/curio-ux.mjs +++ /dev/null @@ -1,99 +0,0 @@ -import {LitElement, css, html} from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; - -//import 'https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.esm.js'; - - -class CurioUX extends LitElement { - static styles = css` -\ .curio-slot { - } - :host { - display: block; - margin: 2px 3px; - } - - `; - connectedCallback() { - super.connectedCallback(); - //"https://unpkg.com/@cds/core/global.min.css", - //"https://unpkg.com/@cds/city/css/bundles/default.min.css", - //"https://unpkg.com/@cds/core/styles/theme.dark.min.css", - //"https://unpkg.com/@clr/ui/clr-ui.min.css", - - document.head.innerHTML += ` - - - - - -` - - document.documentElement.lang = 'en'; - - // how Bootstrap & DataTables expect dark mode declared. - document.documentElement.classList.add('dark'); - - this.messsage = this.getCookieMessage(); - } - - render() { - return html` -
- - - ${this.message? html``: html``} - -
- - `; - } - - getCookieMessage() { - const name = 'message'; - const cookies = document.cookie.split(';'); - for (let i = 0; i < cookies.length; i++) { - const cookie = cookies[i].trim(); - if (cookie.startsWith(name + '=')) { - var val = cookie.substring(name.length + 1); - document.cookie = name + '=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=/;'; - return val; - } - } - return null; - } - -}; - -customElements.define('curio-ux', CurioUX); \ No newline at end of file diff --git a/curiosrc/web/static/ux/fonts/Metropolis-Bold.woff b/curiosrc/web/static/ux/fonts/Metropolis-Bold.woff deleted file mode 100644 index 5c69ce440c6945a4913108d41ea531348f073137..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 17788 zcmZshV{m6pl!s&6&cwFuWMbR4ZEIrNnb^rcwr$(Cot<}UKW^9k-L5|M^zFV?r<%uI zUR+#WMMWM6NX!Qa4(MOfUi|O<|0NM|@n1kdP=`Q3oOnP$BKQd~ozN1ZV&XtRLjU2^ z{|VFrluklHUiqJm76=GT^dJ8mAyt_qp{y$WuMLNP49x!u7zjvSnSmJy2vp}^S?-?< zEHNszjcpD8G2mtY>}3D)kdf3LC<{|V6Cfa9(tpf99*7-g2sXvypYb2V`>*`#pGYB& zz(Oo+T|EAA8vk_gukD(i3;JqXLyv#o1OC@vAc+42OsfE7XJ~8suWr>p`{%zr?3%=!jW>U+(r{mCh^hbUHj+W9vQs8-Ao(1o{< zzShfx7CEl@BCA7O=i=}}moUs~!7P`jyGWaMmuNb=d%S30%Oh1uc1HO_#0bOej_tUO z;bO%t3oWNBCv_W}A_YU74PAA)%<2kDhMba@M^6wwNi!A@auN$yfK6fX8)CvN5iuoB zIbg+Bxx6!Y--7sUv8L!GI3R(0d|2hUqhQ^lY zXdxf@wo1)MP1i^JQnAZ97X0G9=3N~=Pk4O-gBEti!p7_?gp;m6hF$RTGX?n%CC^+h z5k7DnWKo5O3Jtu+l+}%YoI%Qte8H03Ii z;bAttpu*}kB=PExx#v@#Qaq7AXW+*anfcO>D0(i_W#5o+mMaba=NAU4-}aNey-tTc z$3LQ}o`T3|y=Cw~0XyK1|M`vCAH(&5-tpeP#UZr5-rimK0_+1UQLHJffPi3NgjMW# z!7}$&!eEf#phTp`M}K1o2s6vDMi@A_Bq1l6Dw&JGWDrnPpurbtzyEy7HeN_>Ll7%e zKtxc%VofE*Ah2YsIXZ_DI>JSaS`{6P_-}~oe!7R~W&I^l1t#C`xLzu+KlI)a1o>76 zW0b1J=Q}>npZ43Gke&B>yTCy9uV1Na?6Ev zPS;Gzgo_@OHj5KZ#ZcK$e3k#uu#`{yg5qvTb6?5vPfU_h(xk;Zmi{YOR6MV$Hi!j> z4Sw<(l~Kqz$jdB_QYd)~9XY0mMv6v>QaM}v`WV4@Xu-_N(m~c*$9r~OHMzvfzAwod zqkmx>hyANue+dvk5~>^2$WkbMI2X*#aZQ{PDW2-eE_W1ZHtF#6 zS}(NTQCwdxS=PM`$rlpLVGpX8UDRcx!Kz}@Wwi3h;16LGv#I<)R 
zEZhxiH~<;VHcU6{+}$=m&T;qS*r_}bN}ZI30jEqMV|JIr6A_Up0*-&1kz~D%Bta0h zLlcWEl7k==RsJBJ5X$pf(B5v|;MoR@=-qhkNW_X69!dm?DAKYni?U4p1>5TNvRzqBN{aIk@pWPMV~t?-(6)_kJz+92K&QW8W(-4>&=%Lu3HCM*h( z%~9!0Xi_N^D}*8ORo}Y_^YeDzx(ts+vrf^_6aF;MG0o5MbBktNR!Nr4>+^ZGXBZX? zrD~d1ua^;m&ry#T+PcIN%+b7y_F$oj7<7QdaSz8xYrLlLbBlT*%Pa(-BG0s`I5W%2pt8(u%Sfbe8g-JU1@t#q zlni5JvGDQP19Z1xYrcdu(BsyT-CMRxZFo` z?XN!6??WmQ-h(_n#&s+P+!IyfE)?6sOBYrALgl6x4xY~?Hxj`)w951hjfT$AXVm$%x}GS_?P!_zvN>5wo$Rt9PllFGGM#KW^=lwG0&vI0 zoS1Y#IC4G$wfn30mz{Z9yEcKZIo)Gv_OBh|+R`?)ujQ}p_4cPa$F*PAeRR8-cEWB3 zU-my9gnTFk!U$(5ZpS`tzP3;3nV%aM^L(aGr7Q;V9ra;Cit1aQLyM zViM~U=Zu}etoK0A7L{2Iazpkm~5K#FcWMIjKtw$YC)V|8*D%PD0rnN zJijAK%`jJNBV=^pJH3COxavc8)`Ai+k`vKWyD0UyuFi&x0h?Aqd_uYhfd2R77x9)= z;SpdX<41`)&c)!gSfIS>vj>{mfJ|s1b5e2BQ&aMKDS7m_&ebO<2jS@i6_SUNi$jQs zO33|*j*BHcFGKhuTxNt+D=VrU_^z!jFr^il02AdDXW0zIzP3;eIpDbexh45w8LlRo zC@cJ4=%9rBX?W2knMjxL_x{|8X0@7cGOY?$tP)X~^v4?H+I2+m*!15$m?#_han~T{ za-jHvv`!4RYXH?_kC#2b&wurocwN2Sx-qX6uX(3k zr@4>ur_H_$xiXisSW;AZdwI#&#Ny=Q^5PQfH0xZlQKt2KU(;yQX47!fP_o^+^*9?o zJ3dDadk%*#(-s>aBOgZ>dl#E!kPuSneNdwTvQ1Fhtnex2Ba#H=_a%DPD?gw3~>y!IORKyPVfj4QfdxQ^}O7 z#I(iVR#bo#A2NaR(nWQgU+`+yiG&eLS>tRG6e&wsAbM3WDke{wpe3<|v%L`LviRk1 zq8h!q({_{RLo!v+*6%zlfq>V|TvRT0%?in*ltr~PFvTo5Mfwl-o3$1OH+fM}8yS+~;nP48WyBYqbm6E>0ttt{G4Nh~bh3j3eiPH8nt6N1Zv!SSzo7#a1MOX^kI6mwK- z&~f_Tu4OiaXL7^q-Wa{8I*>!TFssz*)YLn2(7w3y7Ec4jf+JJUx{lTwyBa7RH8D1=cdrj^mJ8= zmpyD1@F9vgPxDouF^lt$i8P1!t;sJ+*buiWGPo2~1p?h~P_P?a+2-}k!~)FRIR8Vl z?gN@m6uwv`HKF8zVZ%K4p~KN<>O_ebxu?`t=Hs1$rYZT| zQ2bh*pAraRfykwAj0R6U^eUMw)jfY zPz*jeFXQl9L65kt>xY486=NHS1iY;uuYtY~O3XIQ9&{kE9zis%!J&|)yde70?PWGZ z(l>)T_Kneu4P$;PreKaZkx_lB1~JGf5iDgKiX{xB;|c9eCgL>29HisPVxqRP){G2$ zec{&`Fcj~P^3F`~E`Y&0HQ9t9 zo?2$&gi<&Z=zwUMo0z^t{Yly1_Rm(4A#MD-(CwbfveXx3bLGiVDIQK*V4vOYoUci=@&43sY@7$vC;Z_MdWma4-Fyv5wuSRNySHE-4h*a z99514c8+&it+eG#jO$@N`hMM=)glcwfS=MgfM(JZ3m5d(b!O?J?&3To9-@^|iY%>6 zrDv$x%tosMGDeW+beMY}nk^v=bDsgbRKX*%4ry;+sFW8JPXrFubiITp#u%vNq!^T 
zLPO>X$QWQqT*&)<@DRaS*`l6p(ksPtIi}JrA_Zwjg%{ooxd0imD&mJk)#q-mPE1oO z#uFWz1Y8Y&NL1F8D3VxsJRAaU6$BH$fR$WQ!xh^5D)V3z!=Z&C`GSu|Ps!;u!rq6k zoR{4vJw5q1*LCnBUN3c-R6H(vc42zU*0Axaed4kZz6w%nx~s0m9sBLC+nSFrEx_0M z$E%M0&lvel+^-hOX}cYC7vnL*qN?Xp0ea45a@Z<1dbaW%Whh8Q6_HmG*rhsnTM?UM zYgV$cH{z6>m;KEnK|alpik>atfgI$tb+a|JtIu2+O?;t4v%$_Fy-u-iR}F$hR4=&d zj5PUK5(|SQsdgxW!mXUIs<0uGoL)9wXcO;5$_Hzy!k7kuzQD`)L)w$eNEkRg4)Q_r z#&;?OV3pZnktDV+2h(h(i2mp zj3@A{OTTbI_#&Ug2mNY`86wuA2G^LZm}4t#q-fw zVbO%J76o1Ul5um-=gVSGNjv6Nnryg@K8b139PXHPXJBw!Q?~77kFLJl>v7)n4jwDU z%!j~Pk`X5a42>fN7044`1xP;-1{+X+IcetHq?b?#jWz5;MJI6sc+m$!1_c(_rU`LR zmgj33gp>l_(S~N-6htEYk)_?|TLA}bED%#8#>Rk&{f4$s9oZTTFGkolUmaUJ{2!8@ z^oPUe2LZ$Mh4mpg{n*wQeaU8?jNH!H~;&&!XodldlTJu_mH^pn9SCV(eOYgK>%|6+$ zIrFC3M(V5}SAux@at`GjOW$%n>HRo*!mY;u`!Dy{J{WmpfZ^V2xSlwk33F%Lx35hu z{_u;*oD)O`%+{z)GX6AyQQhMj2kAC?9d-UZfe^hc!j@o$-G^4YO}1|+_neMC!s9q6 z4kjO-vAo@OXTH~iPo^4*lNq{!_`39tp{-;2htgKPPwW~F|J;j#yn`91iMDPXApeA( zUj6-Rhp*NSEZ*%CzrEVqv^MljP&+cO7;UCqyixs=+WVDOAs^(Ic!uG4miV5&okIxc zIL1|<{(qy@UCSHKr!IX2ZvW1}?j7bga9H_3tNEw#HP=rB?zF(6-mN@SnNMHOah|co zTcZE)&-l-9&mOh&baub$p2~D+<+$pB4i_wi^A*!=b+7CZoDP(^FvD_=^l|hiRt>j$ zV|5!g2!=CGKnmD=wD-p-VGr(#A}CeZNk^_Eg@sMPBd+$of`c5+vPqc zZ(K*1$@-dp=5HHE0GI=6o9f%v{#tZ?H3PGxwu-jcwUtYI0y8~}Z6iOcW_6cZ&ieh# zKjt)gH6Eq=wJEPsM>Lu=;41Ik^X*bR)H!7?CG%HO3To9a@Pi#1BMQWMmC6+Y@mekt-5aoj5Ks>X}h z=j5)XUJ_qRLrX(5WaDJhWNW4w6LM*IX&m%mFx3r}D_vKUrPm57RsV`i zHMW-Xhl;$4j0)E(_6{vQfWJ;bYidtC$ux z+-0~5Y&B=wsUkM}#SpC}>!oV8IyJ4U-|~Hp73P>T$hmvDW4X?}93FO0n@)AiXS_`W%2{5X8*9q!19Wn6pQ6!`7@~|Zgy&s&n^a|SI@nR zTx^skXoW6~tgc-zv2g>l6zD2=C~RAQ1}q+~_vA;OhcAf4Ue~H~^v{lTCIx(4k~eTz z|5N5cT<{?MFbh4|rVDxi6o1W^U|*?A7zMT*fxtxR`gS5qH*4c#G(sz+9u$?nQw>LG z6LYrt?zG4)KZvF4=*0Y6E;sB>A~q4)vA^*>n&iFob#;N|UuV8C?cdxY7rEwGN|LrnCO^L*4af+Jks9yE`p>DdS;f-1M~^MBi6pHLcQi z-%;Y^9ollTnHNg+6dN;saew)urZ(KR)b|c9pkFv>jH)A?VBQ5Sg=!@Ls!m1a9(%62 zg*=ftl+Uz9heVmvTJD@TA7(%I9qP6fDE4}iA5|lW~`p$(`#+t{1 zeMU3+YSRR!`25FQxtIhNiGRRMLuV#t?Ky84PwjU 
z3%VmuX)zmfhIV*i=W?6ftpKwI=z+VRSnqBj@`-I;{sd9ELa`e5g>s74W}(hq_UXD3(%@K41OS-3>mBKw>{1cQ~UI@+@VB`V;Koo7$3WQZcTN4H=&8ge$xYoVAio zUP9i_?f_2cz$f-IFp67n)0ff(NjUb&d&C(iFH-jQ2Jt5y#RYPr-uxZOe_kZ1vR$tc z$lK1_$j>j#jJN^QLkCN^W$nu=@r05Hm&`_%TXq-50~=uLh18s)8(2LFEB4Q)X=rgf zrP_ZVUmb=`$h7BlK9^2DFS$|yvYO*@VJ-FnBcEp4MF!qZ;5hr{7WJI+ zN}m3CnS#GH0}zrRS^ZTwDq`psva=^gE_+RpAY~;cSQ|>2{L1j5>?(ce5oT`w~>=)@KjFo$oa_r z+Lpz|5QG-2&dheQH48hmn;=7UaACog=*BHUUl>j4psGC9GJZgKiqKKAQj1&-x@uP7 z4Y#dc_vOA^=4@e*o6677)Csa?6^o12 zt}{+UoH=N(VgT$^>Oxaq%BQlcj+q`=#hj_A$Aj8e{~)fnCLRijz6BWGd=dQJ@kWxp z67d-PtuOeKyIB8St z0@;4YL8)RM)-pLVOn)kUA%YGOT-p?!o>qtrqks?62M;IdK7I>6SUPztK4K3Jp{9*E z@udnSnz>(;@bwOlzqMwzJTk|k*a3v$3-=)H7v^q-`hF|-Uhtr1Iv&&AlJ~?-M)}5N zK3}ubuCLiaL>RehRo5gT8Hb4YWuy%w5kCDy881y(c6E`zH>`zkT2O z3ga=PMo40o(F^(X1KJkXulGT$fRS@s2Ec-~@@xs*nEBcXhB z2mgbaKzJg9P*q|NA9Rm&Ugh%*My)kDR;~_>2s^ir(H`KAgNz@4vIyL-yN|Bb$Kk=> z=90oWAbd;UB|XPlDUDN)xliCGm_0iYPPq;t^G=F1mAx;53Uzr~%F?s^T(G0~2!5;- z;bzgTx3a=C$y7X9fUppF)jH}ce6=#xD4tWPb$QC{+gxl@}Lbt&qGdY@lBJ&RoxrXmi5_d`-;~r?31oalgEnxaV+d%6%oldvtvx2EYBSpZX2HP^B;^h`0O99Pnt&>I<@iYGzGafAb1nEF51%h1?Sbw3UsM2+$HulMKG)A7q1URFm?tdEoHI2BzJ6U~DcD zRD8HyZRhdX$tT;cHN$T#G>2W)uv^Ry?vc<2yM>)-rtFvtyc|f-tHHNuB=-iT;$3)b zW_ENGJ%)hIW#+P4Hd*GAb-%vyZGy2S!O%YW>*F-(J6ThDWTsH^Q7<<4CJ6)(A(abJc+6m>M z7jn^OUKSfwZ02u3y}WF6%r7RRNjxYc`v=-JM9gd%e%xA2@<-p+Ua4J2Rj-z4d_>|9 zH;4X<@uH-LDDL+JDGW){>qE<^i;-O<{RB#o>xsGvY-TKb4&}4mrKh}+`*x)N+M(oF zkXp!GEnvxc(Z*f;FV|PJPH}tA5hl{=u%CjmLv_bT^m#brrOCN-zkhpZLS|}Na}<&R z*~8$$C<1bOu)ljx?{N@?PU8blBe9EiKAiR4CFlSu6xDEaF2;N#T! z`Lgxz%Q{JRvg>slYO1XGbCc=s#t}lrR^lAq)}KjeTE4{O1cjO^JOZXLo&KCN%phx3vMtshyac%v$cwt|l=J}?{g~sx@a>BaZlZ3VV1^raF?@Ve zhU>`1_xT@KLwYeO@~T<&@8H&3bM}(nC6UX@i@KmHa-`M_rQm&%5`i4eOr2ozec`BY z*@zs^D$3bUp^HKR&@|-HEvJy>YAo!RtYT*Z7-DBRb1GNE9`gg=br4G)HU>iq zpTU2oGt%WE=rTIfn$A)999o}D;_b~YyeavNnL{BEZ%lHio1O){c-|w8!=tW5m^3W! 
za@G^5xwM=)(`D1>3xEgTHZan9a6u=DSxp6QL%^*a#b(0E^$d*EtT{Dtq+B)_MA%Yo z!~V)hL=z*>uOw*TR@JJ$>4nfOzKnK|`6c=VR5xQgd}BLZlv+DB3i;Oi+c=&RRG17{ zR6iNLR~JeWnUGuWmR*<&p8^>#)FE@V+Z{DP&?pS5!Li9H6-!*Li{KVnZ7pT&R<69B zUrS~h5Hai6LG9_ZfnK5SRQ0L90nAU>W6s0>WlJj{EfXL|~I?C7V zt!W{`u2r0&IR$X`44nCbFy#Ny673;D;Dn%u4?N^2X)O|Tzx2yy2j2qo{gHLK>~Vbm zo(NmZE89t6vJWBtV}gKKOn^iXd2f$xXdlr|?>%z=2*~1S=XNNRyeI-5VyX5*nB+~g zas>doDEUD}M$^8#Eb4O8vqR0`&Bak}FY*GAd~Nw)33N{s^hcC3VEZf_!n$QRCpK6_N6wGH$YfpMyUa=R21&%8HH=|c%4Ppf;L&&eHW=96-fP@Oz8 z7l!4m12JbWb{a5=VG!;z>U<6)CXq<6ijBV?p(frIIp>9R5t7mHvb$ zBQZ3V4(JT8ut`4>OJ3FV=mCq8Ef*0_vuuY`B0ne>8E&AXEYPn-a5>{R(s-?1sT!eI zm!>{_Pl$Ris0=UGc@jSBPHf=NMbxZm*fEgw&B3-BLr9F`Za~UvXhZE>6iJ=(8VzET z_$QV0D!E^Emee2#bFXr%+_daNHQ8V@4Sy1jwISf<~1W{8%dpb$wpc z)-te}P%C@0Ss1W1dLn~C(B?A&RYN-`iJBNrdNq?L(_l;%F*>fI({>&^2OhRrT=bYxZ^_}u5-|Ggk1l_+H%G%GPYy6D zLeUj4vxZttYuT1fP%aMOosVTuvwnP{9jb4|y0053^KWlE?LYd$-nw2lhD~mIIkW&T zQ^bT0gu}e(xOJ!@vAC>`-sRX_VZctl@tbw(GfZbNt}@#!rpIo5x)b2q*&dTTaz1P@ zmK~bqhM!TomknBjDZ_&CKwQ`6E0I*k_|YAdbX{nX!`$ka1Q#Uc_OFxH$tHR}$|w+_ zQ3nC*ymc4^GwqtXur{FI*7l)pT5lR32NsnKOKhYH@&W(pUEG`_j=xTHi@X#lr32KYhLiHEkjwU?n6<5z+d-OrwmcRaDOcSN%`wzFg$d% z*K+Djn_*M#gfy1Rq63(m$lstPXKHIUgr@P`h8EUu$f^&j478u;HsG9%fO!a`kDwA? 
z9FsTKlBeNq_IkliU0&=FkJSjuY%%Oq@-K*OD6%M~1x-JFwFXd;FSv8)N+Oq9+(&yL zYE!t>w5CG1>VmTOZm2xr5vq>3X$5SGz3|aT;K}-BJ=IOk4Y$){4LB5wzIn2y#^2m3 z;SUm#+9Ayp`Kw`gIE&z4WsBhFnknvDu*$oMz#XyLF!wCAI$W&e9>0ImdvA~v(9^jQ zr@7ts&$k-|)~D%PG4L&*VWQ7%mL!J3CI0fp%9+B6UG&K->_DA7#RCvf&lwTbzMQQU z==Fb3i~*QRGkkrsTH&Fjr{Dl@Oi7IQi%+ekc8ow=i{bT1n^0Eu$jy0cuz-=?FLgNr zYTE8B{7t7ATaEdMRbac;(60ge_2eH!_nx2^YT$82N%_LCHwSFW+$`T!RXPP zbxtxN^D==}xA(z&ige55Hmmi);nA3Lb~3?1$*|ZE87JM;dVH4Y8mq0ZQGe4Rl_IWi zs<>u~pQHAE!?3N49-HTO6_uP+(VB9ate$}R%HpS|w>c-(SD|@yHiw8<0XjLT z=K=awojhQLeq2fx7wRKZE?ZuRkPK`yy|g`#x?CBWRSMXj0QrXN8<&YIUCc0M0&4%2 zsWI-XnDp1!ThR15?BqzLIkwgh{-dE~z#m$H{u}i(F&|8jC%t*Yy#f&%sdL|o5VSCS zxCR2l2~~T?s;-{g4iC&JC$1C<&mk-D!#XkxYnEN(HEhIMIaZ_)S@Bkm(-ehLrNx}-E z4>k3g`5c843bZ*e9fgrUL{~e=-hm`)msm_k$EM91ru4d^M}E~FJM1xZGOqyiR=?If z_p>Izkunjr+2Np}oi9lP!g_L6ID`35JDBOJl-D_tYuAZ1^rW*Su?Ishu;!nKm!~ZH zY~jA|S;(h8bIcsWyh>^w^#$gqNQtLNcMHV)=SQ=r+*{bDm0|Fz+=?zgBY(Qg8x#dW z)}S1KqIwS!@A9yHyQL;vN~m*ELomArEQieux49RPdB@pyI(c#s@Z@&PB)j8w_gnZ;*rENVE0wN#f4n z)rn11K_`$=-a*xXmbDZra4WWU$*U#?vpJE0_hP-}a=s<11Y~~lxbVs=6#=)r7pmsA z37g7q?*W`N51|7NT3Jy}u!AU_k*LHp+G+w;`X{3pF_w*C?&QxYOkrD+O9T8x?U&F@ z{V?A^EsQeX!-qV7P2U^nD)Fo3Rwma*#-8vhk7YrIxPSv9I6oY8(Al@DF82hHL|(Sg>aNJ)~Rdh{j8aE{tgv?s zjw!kCh~fec3)E#687gaBq5Ve==qEoH9D2O>%-kBksDvjhQjj3wm5Sy1nuI+YKh6hj zBiEaYZ6&AEBR-G1NGgU@Omf1NQ7Fje(=T8?iXYo77dXhfyjAEPX9KqQ9Jc;+?JkHI zrBEyy@+#BHc#gm+cy_9;&3=86oxP7Qcsn8TQ1xB9pO)uNb85w#nd1K4ZK@xc)|iB@ zL|H(Qh9LV(el|jjxy=Vmh?b$}Hmy`H#Q(5r9BQW2S2d92JhJ(vRQ;pa6KzJx{A!fL zkD{EY-7F4of}R^2GR~y7EPmc&HafivO2=ENi{OFw#yupdx%RLnCMgMdvm{R#yjB?9 zq)e&sVk5iLGd^A+U9>E_m24PT;-(62fA-nbRGyGJ=U5MVqB+HdOQ}NHtpOMjj)G$1$6}1+&&h5fa zL!aH|a11w*E!K!RQ~!LUL(4oPq7ZyHyJ?HV#-Ia+vU)w>k_@~wt2hB9369&;IrI2Y zt?mpvO~{AG*oFM96Z9-RGnzIw~mEGZKb#us1|Px|mJHruuJCH=z2 zKlX+()Yxq2amWxVvA;1|d;DYMf8ICr;#A{W9*sz?@Q{~EJ_3%sBlv+iI z_9Rt4ep<7Z%@TyVvLCI4*Rs54&N_(el$liknj2*8h8%6>q@X#Yr(6u%o?it(358sD;r2Ba0p{PJl`dIMlx&zIN^83y zf^HJ|17YINQ+C0et8C`hlP9$kGb# 
z)lc18in$d+A4fsG=7{|D_#|DvS#N;9z+%_Z8;Q&p6g_0Nvop{Yr0`ErDkM{V1;AYH zd_r3r5q&Mu4c2hWn+)J)EHsIGE+_yM_&keDY=Gp!IL_VhCl~@OR(3Nx15nQi%Dt}f zmO}Lv3VKK6{kg?hcbwi^Cd@s)XVV;*-!KiXUvu>0>eab?pF^cwh8SbrH#tImQw z2ENxnk``pW!40Vhl}xA`2d(Qxm*XyI+ayvK z)Og=M=$kjriS%-O!(V@qT{$vWPC^kb6YT;&6}QbnzX1JO6ppwJ@UigC%3$0Xcs;&4 zN6WgfT>&%QY--ps+`BjPk0k-4`}_%rb+PGYh&PS#vi_XLI7bXXKE19 zeiX7xGwgCdhfI$!^y0S;3RlQ*O=57@&(^A?`9!7$8u^A2SSFcXJ!>ay{9Tx8(OGsv zHdWQ)c0)Dp6Xz-;mAN)R6VmfK4waKp+n%_(Pi0oAecAro0hOmB5?I+*O7(}-{iIz|BRk~%AY*`X~21Ow3OYTim`-6q`?d@uY826>oMAr6A z{>>?Bd_oYa_28x<%VRnQI!b28KcwvB9a zW|Qq2_=N(p6H`CWeoTJ<bn9yCWb076-KI6K1uY{1OD?+} zBR})E?WYxUCiPy6oLAG;1lwDP~`DEeox%9d27zTW6mWmtmJVmjM-{ zcL_%5BQWzm)iSC@)J&KhFo8b(QwlJcKH&&PEi8&bSjm1H!-P7iRmx)Mkx-U?^gY0> z-YaPUWds;QqSBm%4P`S%M_)>GFdn5^q<>&S1cdi$!hUdvue87jH+;9!+hX!&j5hYy5f8d$T)6p-cP_Ce4)4a0x8Ch1une!Rs7BA% ze2v4a0QDw!t0f}da_wX9DV$%0MR2M^?hZF}9TW!8?dO_%S$9(#x z7Y$MonH(%!z~R7|OGqOUaPkOl#z&obvxxqJZBgnY;3ubqmuRjJqt!>lVU5pXVC#*k zjGqf>n}rwbqei7<*eo_)K~o9()>0XV4NbkBN0Yb10wUy9Px z-O*9%HS$<)+g>kG4%4-o2LJ!RyAh4Fb@=oNl7}2ce6SinaIr%w$(%4O$tVAdMZPNg)E*Ai|E}}tSTQlh#8Zbj z2*fO4io!e;3y%DxJK2PCo@T~M&=-3*`Vam7IUPI+MLZN^RC(b&evVAce)pLFaTuC)^rVa zmtYrA6kyAe$vY*(C|p%M+-Z{ce$251`4-eo94Q~gAxdqeWef>v3N|RTXhhgrLb6!E zBpNhjeTlYCWnH1`Y^SR^ZOx$bSLh?LTQ1%X?e-65pzqD~8IULhWcx@uw7gz-`|LLhtM57mj%z!cmY zeA!JPXCw3S{<}A4hdPmecylYR`i)t$f6l4_R|mE(l%y%#Z?2*zCy)9z9mN-p zICk!3loZ?x%BJ?5_j`&31D`~ki~bu#c7uwCeq=lnGQuYis8(}{lsIiGB;VItcsJ&KghQfUt>t-}e}(7M(0X9IOkdQKb4=nX=tX|b zTk-qq9KgYDTly`OftG_t$iZefHjll07L9u7(U38kcITre_cyD&%AB&{32q3CPlPFG zxac`l5^LJZ>LkCPILn9mx|dRuX7Q#XRre9^_ieGDpZgbxgIXEo9uHPjyM8o|fZFo9 zIA@LCn0ch%it2C(ighj}kFlQiG&%SnUj9x$PpEKmT9dTk5>Zic@K;7D3~kv(%yE#;2B7Xmowf%h_)gZu4O43nN~RZ*7P10q97mm)-8D&r*p{R=o*|5o{DDt}o7 z7Of>%;-rj<@2cL{|_Nh9NPc^0000C z0000003rYa00RI40N?-t0SN&c0099U0UQ7W0099U00962004Lab(4FD)zufp*WTxQ z?>z3z&*x7pZsQ{pHI-Y|{2^k4hKZPiCK4)PVG=Wrh2#Uvq#`r2F-jy8qCyAM$_FHs zA|a9%7l{ZnGaGt{)u25L)IU0{@4A<9vbiqK=dQE&IeUN4#(@E-#c<4k-92sQTvQ_g zfEW<~7#yMX;pNLPu|Wd=ua5> 
zuYFg1wPJ-|DrQbs%$$wI3YnepoA^7opO&sWY+mwUN+sp6YdOKTp?Y7JIQoHPc9kL@_$L&P7z7*5Q zY3t-VXI=J(beZd%{p|@?t@rgoRLZ;$_fgb%zk|$kO(dIq8r9@8eGWB~y^h!czuzAy zYcW^4Yw^c?CH15~;~UCq=hsrc*fpB}^}C7FW8P8|H^a)RM3?EH7(y zcq)~8Uz2vQ*yb6St@b_S?$DBYu{mWh#n+Xnc$0jK*rdq4^rZZ(xP#ov2$@Fka_Vuk zh>?{xmz5>;o5{HG-^E)`Fv^fSudSEAw*=t=oC^rZY=ozJHcZ!USoe-jrl zPRcjSkM)fBG0dNrA6+IR=tybf7$N0v)q7_%Lh8(h)Z>19Ds= zFor6i5tA?l(=iKkQT~4d)kLTGqj*tVFP>8Uf_#^_PdrOH3=d%mK$oMh1yGNE{2f5` z!FZ}KyWY;&L`=p&G_m^)xCb+kM>A$)C>CH5ZdR4ia8zRj1+2mv7}nthWUvD}S!Flg z#s9G%?_)5Ip~(9u@CAm$;ON2?7|inY1L$)4jv@RV#@Y?I1!L(7n7|%)Vk*XA8s=jX z7P9wj_FjQ`>ezy{)H3yb6ffc>#x`LKW83f^o@4h9u@#5$3EskS6tN2@(1|^81iQ(G z05;4<(vPyysDYyn_2>&jKgu-|*Gz(Dl4vFw%_O6lWHpm2ETF3Obv2qyEgbcFSCw95 zx|!w8spUJaJ3)66-N~R8D^RU>)#ydFnq*Fs%yrM&lr!aAIoFf}j!}u;nbEAXaP(0h zQy*8KQ10^tRipX!?3SovqFUJ=wW{zHI(hN}mBXJ@-9L!?u$lk?0000000000007Ys BeY*ev diff --git a/curiosrc/web/static/ux/fonts/Metropolis-ExtraLight.woff b/curiosrc/web/static/ux/fonts/Metropolis-ExtraLight.woff deleted file mode 100644 index 40ce2f3c0c589671025c1e2969349bb5445706e0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 17340 zcmZsCV{oU<6K%2^+qP}n+1R#id&6IBb7R}KZQHhW^Zsw$Z+EIrPxYzu^vs8@o|&qr z-Q>i?@gbN4=;wIE58z8QvBJ{IH&5s7^e*gvol2f8*0s{JN{Nu~~ zK;P`FMBT{7z#a$)bmoUA{h5P?le;0zO$>fCz$8B!#viaj6+)+(|Byc#ksqJn2P9xa zAb#dH&h9_DpZx=${j6P;JjKgqW8nT14;bf11NI+)Y2<-y4Qxz)zHI;k`i1#3?;yTw z1>4&>{p=UwOBYk6iAcNwC@D_L9jB1#P33`mbJ9ow8 zkqmEs3*B-wz7yj9+NNM+u@pd7yRf4_0o;M3=J?H#6>xfPuitg-A&5>0Qu9z)D=Tf{ zlT$;Q)JY%Z=Aq3T^AuSEP$)sb%S$5i{(ytC3p&`_pHZQ@AyVQ}sf?`T1{4P51xO+A z__-DIT7?+D+9wV>CSZW+^)&-_+92n};?^B^aFgaMI)>W|Aw@EZVL^bgA+`d|Qgd!`FAv#8U zg}`T8Npu?+!{1gOVCpapSCQ4j017ay0V<2C_9o<|u~3+41}rB!hw+vvtpeIrGxPRA zzak<_C0x=)uj@6H#w{fl-S<0%!Wd4CAiWa85SyzE-@oS%*h*E2G;Bbd>5NS6&!Ggn z=f3$lM>LcOyCkk_wayCr&-Qv$kqVWfYpcUzA=AiKlgT47Qc^RG1;98-lq>@(5;2m9 zh#fN<_%paYuezQADr6kd|mPI*F}pOF*p;<@;5mSK+_bA-CQ;nh8BZ0-7~Q&{t$j8INlhV?Cs%!s8=1 zh%I!Anj9u8;(_hPNHCe{7c(W7-ZOcw&(B#OHBFK#w;1VAwKx1!nvNd+Uw-H( 
zf7wm+^e8^_pM4Xd@&AK>(q8}r@>v9x`7eJ=>x?sddd7Ns7Y0#!dwO2LlHKB_%XH{T_mWnV5qyL&d-#3p_|wPh0v;2?|CAR9*xr^}pP* znU#}67Q`67y-wx_7R(DnI%K)+5j1dJZwQu$9~u*-U|~an*KJxaKu7#D`6jTg)1==o+~?xZzGfCF{xl-Cdc&gM>8*~!003ZOkUX|mv~ zzFB+U?HHRUYG<1_J9fdloAe086*`vDtsO?%y5qW;dcVA=Jyyw*vi846vU|Den2_@2 zsk=D$-|Lu?wOIBDEPEdHNTBP^#J?>V;WJ(Lng%xn+h8Cep&loIz(OZa zF^f{QX5w8EPP)e5PN(5f2^>gu@I83{Z4#f69qD`~E>U#HqISx$O#H1U_Av>8o^C=$ z#l)3yrjR-0gSuw##)ovAuikJW-!Q41neM9;Q=BFf&aD7B<}#ZRUPOzO$ke zSSW7z<$4(n5x|LTL&TfD!hf)9Knz=LWK(q@|43CwcD@)Gh$X{fRAfB(40VN|3_+nJ zKGV;}8nKSxjAko5M@mL4Zr~RY+-xx~s7^19%n}08&QFOH-@j@Ri{vO1x!EejBr=;H zP!Lien-sWO`U!lpTK!XWf9lTWyoUYaAuix{`Z@=JfCg3q0x~E;_%D!uowL6XG-%r_ z+M2UJU4#7ETpjDoFHr?VbbB@V1&0(|78)W;0W?ui1rY~qJLkN-oshcyUn7|YsRkW8 zTV_XDZocf><;Q}_6H-v16iK8^Zn8MS!s7YBv9Hq-EH`1qa3Z!SqG9>6VE7_R??mH* zIiB-cTTScSTaClI*B;yA(V_+i;{L)4G%QOZ%#($Uv$9^>3o35gw>mx_D{`L`^q*^X zu+QlEADi8{&$-C6V`j5c?%(W_pmeT{R@&EK&25voR}R&KsvbR9@LmgK>pYj7+?@ya z=BE+n(VkdiN(IebA?f<@mdG-UtIo`muBGD>tkY4pbt6dQw2c!`88)}6!-QC3Mm?Sc zn&O;Tm_P_J-btmFI8EED-jO)1U~h3raUvqF3lW(m_>(o`=6Q%_$h5}P$rK7@Lg2V6 zZ(aERa<*SP4UR-IPEb(e!kcCp=Vtl1MA9xQBueJ=cs<(E3~~pPHB2hkO7KBvsmAhb zoTKq(sozApF;IjJ-6NxK8HrDmtfh3P%l=GCR!Z4O<4U6!NzGNxqn=)!rr&)ekjE`; zdYhryL=P#Tlv2cuonZ0iyFEk!Gjfy}ibO`7DMe@{EX&ePNiP<$6~Glh&kdf=)uS0i zo)%$RMLO9VqTulF!MMDiJuTT(uPd?NMePeEosGDJj6{3X5?C6VR7A5XV4oWaTZiM; zGP@4@bBM&}pu{JVp-K!FFrD5to*DQJ9rint)E%0hnUlzs5)Dc!XS-7C*?ltk!Y;7i z!7$Jmt;&DjAfL-H2{uxarCXPsnr5U?T4cAT#nUy6I7-np_SKme4WVT)^YYp?>YTk- z(|phNe6zS@+i~dmN1&wVHc~e@wgh(3>`9R~K>c@#_+L#13jD5w zA%gk2Fpz!#XZ~h>W0^y1ZXw6hJ#R{P%^}<-8tU?_-1CiEwA~l4z_k4 zGZuTEX|Kr8(3mI<1Y}&yWz(6wmz9~_xccLPb6VQQwah2T>Pi-K*vJY483Q>PBfF&|>}3G<2<-lleL@R_EnP|v z?JnQ$l9N_T)`spCfmJ?-n1w_F!Le=7c9 z%yID@^DBifaLx$*F8}WGO~woEhwc|b&LH}o*~|Ecn>Xcm>UWr5zm<2AAX^JIO-NBl z3=A$TFU%)QAWS~&JPcxpdB2cwyg~3TbH7@MO^6)~0A?7*wV$+~wO^;-sGmDfi2>3D zM6Q?U1;W6_2#zJ$IVDTFG8UqA>x51YkDd^neBjGXs1YmtaP{a5C7RObWL06YyxbR~D zw7(!vGEveRh)#1BAX6} zkcmk48f;D9(~+tF29WIfX^?#oBYjW&(K{eP*O~Jpk3Y~HF7R9CTwUyX>AsB6 
zxg7o0F+dkupE)^etfFi6E#r1V)nTcc^TTW{`*z(gQ-&W`SpXbS4Ty_PJ1f9RhDOaE zuaz1try)A5+)sqA%=l%0F+!sapK%R%Nk-gI(spw1g}Vc2pPq7%u`!QvaX9S+ao`TR zHHX&=T{_AaWs9mLEc5VkOGG^#{k0J+V@Ee&Y{}4rxr`~FQB#I+cdqWPI)d#co&_pv zP*??l{qPfP!I;8?u)uQT?Nt-q(xtIpT%yty;2K#BBWe!N}pD~afL;N zowe};8yl-Dn=IQD!xYmL>kw8#)OchL7CTlmwi{+QBOcok#(gB)zNk^{ZP9I2?Zivp z%hF5nE&VJ3Qf;m|Vn!%_*ySKyUJeG)6ykAM?H-at>q}va$_DKfdQ52XAorfnZP<(A zhv)}EfI2BA3DIH#)*t1&IKKEcNuL7$IR=Xc78R%zQz_^I(YfI{DvQnr(KX7;B)C6m zidDu5vQlaV_yz5Xr4QXml z6C$dCji!}?rmVe0F+~Yn$VO zCAxxK%+SnXIz!}NGZ*4G&R3MvZnO0MHf)qdrSx>Ow$!RJMH*%0QbS)!9*`+()TL29 z9f`;kqq$bhZ3F8GM}i2iF7J_s!2+^~M!4~k0#8mO1Fxt^Bqf4KC7J4tBS+r0?sqH8 z?XJHuP`yVTUsByo%%3yibSLs{+{k>Lt)LpRj6kgxl5TvnJ3j+cXWGVVsflT4nN1e2 zB1;~YiQbSBHp0?X%-`mPclcjWYgJd5qLp#H?^ABzbxm_3X9D5mhJ!P&Bj{+d&uef- zQ0oJKKSkeIE}AJ<3pO#7ARByg4zbxa91qOyeg^*Pn#4B=OC$Y;RECLbYdpDC&7>Kn zBB_a+J7DZQdVaE$$>GUje#&E|mg-0+0+H6?*`TM zzY5L;Qzf2MZ~P(G2=Cytg$Ia?jKXRnK=1~#vl$$ZhYp>Wm6*;JnzY|8^o7s}m z488AcN#od-nnJkjDRH@yE0= zceF*BlQ40Nn4~G(!Wz|l&t4*=5NRSHc?P7YPk9N+dxfT4d)5XcE{(oC%5UT_VJ#rUt;NoXutF@M1kZk`?y;TvS+CTwE}y#fG<* zRr_6&0n?=)!Zo99+L;O(W6tg|Gyz;2*m3Y6s@&ASOJBkhmtEkv)vD8)mhC7a-P8|t zoohg71;Ix+Q9Qq3cF`#jO}dLM`YKhH`4LX>45sKbIL9a=n`i5Ya%h4uKK?JHaAu+H zIS3fKvhHTCA2=83=^RpgMd~(vfX<2uC-B_x-$BLY1%V|lS(^Q3Usy+=W?opP`fd(M zOdfWaVM;R%`5FR%Af=1Lk~;Qy_EIEU?j0?_VceOo@ku|OdUGa-|m-PnuH$ON>S!qZiU{)wA0yUp*Y? 
zKvKww@km>Exsn&tN=i%UCN?m0Zc_OPO(T2g;ed+A5vZ{!swr|h(8t{`*ciXH#HqG| zGuBtJ3rZ)xDgrVg^wxKCb>MUbH?9lMrE=2!r8=(H_bCLXWthrm8@ZV^Cn^Lgp4^ci>@wiTkg@V z>lfortsk6lI^`YyCMg%dRvQ^NTp_eV`Ck2q5#W zrHM{3Zh}}ku&_yRU#+sQYoGM*3ku$}a3JwGCc<1=eYw9&3luq;wyXrzpwuLD%7k}I z)SEQ1Pt!*G#_SFmKXPoDZY9%J`f~E6O&Nc6yF#* zu@kD!2kj2to@hMa+q-N1bw$#hM19bG)a+2%vh?9v1>2pl-CMc8=6LzS;)~v$!9Cb` zY@M0qWH0)G$CvdUz&+_R>ayeNfZHmIQS9A+z0Y;O?P&J$sg3>xtCSaf{%emD#truRIl4 zKFzv=<(QorIa`3P7NIqPIfKsP*vM%|7mVD(sZCr3x15vFC$sPAZIACX-a6eP*II~C zT-9Ulg4!IZHCcI}_}ctY?QONmIJtLczklV}eM6)dW7iwAdz@e6ZD($CVu+!S0mwaM z>vt7~14}N3L5FdJp@%^gvdK-GQMf@T3SR63-fg+bx=E;IYm4O@f*T?nk~eC&HQGls zD!nbeyL7> zI$0X5Y+grbwy@3EcxpRr?{x6GR#oZO?r#=*$Lh3-tf=i8iocE$`drr z%(D7j?#{H!?}f7fF0E~}XH(@F^6ZUQRc%jg{L7!6Rr8%yUQW-^S3aA=ttl;PRGUeq zk{$7tC=|zu!;&hzwYpiJljl*@DN5Idosx~6YdVi=!=(;qiY_VXrOBm9cu~BX?#{Om zccFKG9!73Bb{&THqlc|x;PIy39DScH{%^ zVP#;CS$TFwy@uR~iQnoALk2BRjWuj2 zYdad#D*`LVE6yw1D=?QLmK@J>w&LAjuVb*6Ek7Lgq4~sej`%jc^L)B)RbCPri(}|l#id~04!9F zb1{_jGzVfc^N>?orA0w@B8}wR~Z4(|t zD@*LZN=Erb<@TI6woZ_hV>UQv|Eu_W_eDHAV*e?eSr4H^rPT$*BPRJIKWspUdRlEu znZ_uyE!mlbT|=DFbTsc|6}j{tQn_plK;Xl}9m;LJE|@63@~Pp3I{K0!q2Yu8&7Fj1@5!0SUdS$&M1p_2*2K}{jT~$z~2QQ z5+8g038#sAx!2r^e(m0M?i2xuW}1R?5df88P@Qz28a zlcnLIi#BU|Ugr?QDu9Ioi)w*(+-_dh&qcvTD=8b5g)dWI+Su?nwRq+nT%z>1+ZLo% zx!ad8!&hJ+sB;nY9CGdhbh!6)8^^#Ip?Rt5_E81A1sU^#Y@>pu_ z+^OO88s~i5xeUh_IGysJc4*;%6R~6aM*h~)`sUuKf_!D{AeI!W4;9ybP6%7+vMX~0 zY8T~~jEQ%N(qU7g-TN(pz6q7ar8uQGjWHtV;W!r+GBuT!QTmp&%aJZhYfA}@%CXIF zfla>_R>hnhyX!|#72Uxz+7Smr&}S-L@weYWEPPbfM6AuOt(zT3rw9a`D#2@VeqeItXsn}{CYCrW z-IF^#ALC9marFa#JL)J+bdRDO9tw&5)7kNIF;H&kac^cfL7<$<#Q?MQAfYf=I{zp%)5p(e^Z-ap z9b?L{mDz_@;2_(<3UKJ0)=S4_$UJ<1s*nz&*o^56s;Sk^P)b2!9E}2EG=GnZtH5F> zp!}M2eD$X4@pmxxOUe_Lv^&dNu!9cn;y9+JwJ0AWrps$N}%roYYtEFEce0aBVFc3vELq0Zk_-HMO z2V~nv>nH$Vw%xq*RPJZVB8rDiAFfY10h$w(1*wMBb`6|Pqcvl&QMV|7w&*zSvju@= z!ExqV{iWbWgZ&kblRiy>8kKS3%ClYXvNj|Mm)vG*j~SpgEqsm~i6dT4cd>_$879b% zZ+AGC`O+lZ1RU9VD0srzM3D)vC7~T-?8J4let~C2z*0AaS+>72hnYFVxb&ZwigX4Q 
z#Ug>uFfzcs6e{+39Pr4YKdq)Sym8&)$Mo5$m0O)5Ylp)6?46f5jvCEa-dryxr;lCe z*r?QU9e($N@wEf$THMyGFY>Tv8q`p0{Xe?>!J4psLx6kQ^AW;jAY1oum!uB`hitR$ zv&ILQg=}%kXa%6jLP2^}_7nw&w>&P&fA&rq=;1Lz{>HW{fpp67 zLr_lNki$DbSOG^WR}e!zQtTirKoaC%f$76=z?LrI<8yInG44F~i(?@UMxODw8bmB> zHwV2r*+-M7%aHJ|Bjnrr7_(O9Ali=|xd*?;xPQUNVlRhr|8mQbXSvL^qIMF}mnnQ# zBOAm-o`b$s1L4*7R=c`6kadu!fJ`kx~a?WMt>yYxYj5d1E@qCDnO2B={W!~P=2!? zjQ*W?!Xv{XTEUjT#Vg?m!`hJCreCSG)ABTi5tFq|G^G}2J~-0LdC{myaDd2ilWXp2K&On5pj_+Nt;yd-JE(e* zIN&KSI@D=uvu?y#u8VhWZpdHM*<&AucV7~G6yEyoJ-2{{qKH+ty-q9A(G~pCPF-Q}<}hRIiw?lQE&gdDWDLZMnTfy5L@?!tStDJw`Ua z;=Sj|nB<=NFP0KST>$Yb_r;hntrWXY%*+Gl%!9!f_!*r06a%fTg@62qJxeV}YNB`! zk}EWrTpmQ4|CR|#ui*7eE6=u|>`KzS8G)YHo}60EAV>59a|UHVI`JH)GnWGpXskVu zV6A!|O{SwK2byQxy|mZ`i3pv!Y)_gd1T&OcPEd64+etPzi(b2;F5?UKT+{IY-#qOz zIvASILGbC@mNF%lKt>AKuh;yKk9=vX<^b~E>Q2RyMug9E(*@1}PFWl74d3-Ll+&;Y zDK$P>y1EG_%EK<7ch<(#hm_7GKXbpuJkGwnv$ZiW6^00gPjB52?8tlS_-Q}DHW2ug)a0#j1s33S1GvZ%~vBGv(jsN zmJ&yK-U^iO=3*1<_fJzYb{w3$=5ujD{kO0g(wn@$&$8+9+)Ry+YC=+qqgU`x1{u)$ zz1`0FJ905!zTk+Q>!|Gh<>D_G_t~KpTDR1TnAq}W3*j>GdqdgE-}{o2)wl6-F|wEQ zEx55Sw++4R>yS!G&2p7+EMWMQ5R#o7)Z(&t3&>q-h{=2#Hc80DhBPQ9kE8vtezGqfOZq4MK5a<51qWDWj4sk`7zO!2-w9?Jm6Ah#7jH$oS3 zI;D#g)%nALVbf6cC-DwusVDdiO<^4Ikv1=xxU9+<5xs3Q7|rz%H&e)@&g^(<(<}Mn zPlGK_s_VhJFryb%P_zyX;wh)waGnnu3^}0Lt=1Dzfk#;GHgcvw2P>OX49yWEq0%d; zuLprgLcwm}`iTrRhWot7P3$n!-MM~zHyU+*()>^*?h^2(<;gAJw%flvE9ac#ojs8u zu&g3GSSE!nzCF=&yn(XBPF#_E9~j|C2>d(-8TxnWng_GI)4}5&@4TeG7!TrdmQQBR ztcmG+hPG4dxZUDF`w&Lu)rxi%;KQ{nIo?$!+n=Z8mv-`-$IUE=uP5c*95Cjs;?W38 ztu|uPm!Im>3s$~FvnC%QOv_+?9xW7TkESv-!PK3j_O0MFG^mC8 z&O6b1{EvpG)$>r}G#KK{Gdo8XjOV5{m6>v-@e|sdY!OSA2vZ47ej+&KsI6|-{aN_%%r89!vWgBKj|J3Gv=lnrp??=+cq`^el=bb!$av@J~iDrjml!p z(<`Ggtd*#ai_^K`8c91miB-*Ytn{^-PHMjaNK z6W**1-qY2>nR#KEB3huRb`q0x1Bcq)7mU-_fpEiVsTn04UJ^jw;ip00d_it~50eKBpXv0V?=Y?n=HlDJIWn@csE#DsdtkVq_6qL;zuWqRW)azfDeO6%p< zc7a!V3z(LXCKAN^(p4k&n(R@fMI#HASxoPlKudl~EZnZs6Xq0^6kL!w--D+;cv&yD zk8_tqT)gE*^N(UZ*p9=XInnupX-3FV@>RJ^n1xM^M?Cflh%={zv}MKLUg)pQ{DO?L_|w

P#?e4%}3ly>Bq7Eesgy(W?k{kqy4Yt%e!r>i#mkl}*5v$Ucc@M=5`#ms!UkB(zq(7s zAft?_0vGk%BO2{jg{G%lgwFaodjg6nmE^!yb|E+gSKtlPiJ&!$`>7zKYfsQ9!V!>D z`fL)=T>_I#bB)T(`Gxk%A?5zgNqk1jGRG1e4E{{rW2I*r6IRZI)NyxE+5i&viWirCelYufJED#~p0}4pzV|nUop?yS=CVI7$ zG%_6|f_i`ccF_p3TrVlP0arru5!=%wl$|w`c{!IKwv7ihaUeOV=me>NBiTH7Z&Tp( zn+y_Azk=dMESRUD$}!A{2wc!kBkpI+Q%iX69W93_$@i3Y()K4WHNwn_ouHZf3P$yeA+&h0YY2+0hIuoMWK< ztXwM2PS7qZNf#yzxP?a3b;FnRh!wdJY(8nzlASBzZEdkgl&-U)*S`*X7e)VIF|BWR zg6sGr#RYkyt@iTjkohi*Ij2zwrj1l2hmnyT4j27Aiz#0dDfsI$7*^0RbBzfmtGP#f zaQZMgDy2f|j$uIR_5+@4SYCV(QnORW`B|JL>uDFe>H<-1uzY5Z z^ahxvWG5ovTC+x!LEQ&4VtFPMaw@sV7`dDxJ<=g^OOW4@6`UEPL_j$w#OsmLHsFs0 zK)-YYeo?u944|O({93E`oanbS)o{SDBdov0OD%y(SSQA!SxrCYOh*)DsHVmz8g{^fpTsvEXE&Hm923;Ad_yEx`r z<37tnwFpLTZNG3-1?x63>lBB{VgB4aE#S^~q`1veez!j9{2mO*zSc@%o}03>j6qDk zFu$Ejqi*H*{8VPDFk9|T=vdbHO|~5ytJ=lf>Xgz2Oxq5_>1&qfB3rO>FS)v)*Etcv z0Lwj4#M&8YDVYx^4Wc+f0t*}%h1r1FPx>^5+2l|LVMnTJt`w(`yQJ*Q#+qhUoE?x- zte>6rJ-w|BFxlAd*k*!5-)DhSeEp=QAZYd;8o6PVm}RcSCe#VaxsZjo;C!d$7o6a8 z;rpyuu73Vp_giRpi_rCPs*bD|Gf01!o1bCVJp#qJ`P7A|Wy8zI3*hma`B+%mB!8i= z@$K@sOrFeU_~3L8cwH+@EiI-2kB$)C2jJY^8&;WrSNl(#$_uhE`HA0?ClTm*Ag}YRKI1?JTgzqDnfKvnIU3yO%c_^{R_Ywa z&upldrlm3(X^CjrY2D6qbu!p1w%Z)X%V?##Ka3mKI}TD)M>v{c$=QxgJ*qbA43*C6 zP;)9OSDmMhLLUv1OMLF(C$<|6h0o69QmQq(_*~}C`bRlSIY;Z`iIXSwIt&-h#@T4T zEnk;&Y&5T3NiKVOBYkYk`&C6IZne8!6DrM3Q{Gjy57aDZLEX;*P???8Eulj_t1OKm zWs)!4q`xp4yUwy38afGC2x&d47daQ)Q@J)3rH07bx@nY5fu=W9NPQ4k>7^GT1t@?6 zWs|u=1!BeFe-ZP>{*$$o2#!6_)xCS(K2COEI|OX&XL<^)l~ndLB;UR0o5YqGStLkq z;1f-0-~yDU{(iu!%*|_*V3AciSM_Y9tVRQ@lz|6OdrC;YQlF$}zLv|@^jwJBbMMO`rl_?$|oGqo;}@lxK4o|B79@eTZS%F(U%?%U}QcM3OQ|RCDFGJU+Dli|} z$wjYxSXu`^jk#(VSYMyYYl1u+N&B@$!G|1Nxn5kkS$3QmhmkDX{M@^`lUUhaQDnfbQ6i?C<|J|lwmfoP4o=Y65}yYPg@ z#K)k5nM@T48FN6DD*%7JlPL%>(9oe=(L44)r8529dd@Ze1|0=IR=z}ZYX(W^EvfN( zD;u{&`2W?wku!Jwor8oh-a$-(ThWAnKC#eb*{_ta_78=irTo98m>I}5 z4n>6&Oe$xZ6d(%lCjd}rZNzZ@3)~n~jD+db<%PSqrjkA4|ED;AqB5F8^ z?UnU(MZr5$d)TnoLl)!K0+6~2E`-y7woq*rHBi>B2lYC4bs@j&F669t8Z}z780dlh 
z9H!&)@FN}Qdy!A`6z0rYd+0v=Y9}w^T6HW9gz}myZt6+gCJtXt6;pG~7Kxb6rqglQ z9bANMGE2=HY3ZC?RZRm=K?MOJKBq;}tvnVjVICySCOlRbqu#oVcs%j6l56tADhcrp zpX1{IC|RQPdc;UI!M4wjW*s!M*wR(H?_>Qt7J3cS49j_z3~pWc2FD&CfZ=yHbU;iU zt^I(rk%@z|!pN=>tXK|q>`yAKA4&+4iK#kYUtX?Wt)cvjLoO3bf>n9x;au4Z>#alf=y`m%bkjrnn~)*xoj(&Wm*?ll z#XzIgQq};EStafs)NWYRa5>_Mz8WE%DQca@j@7UJsQ2pY?NrW-zogW{9k{FQHI?Y) zo2{hv>S@;lrJ^)bEQGR=i)j0_`(2GQewMp-!Rd(k_~LPOC4E7!ucat#98pBrT&P6p zwGbaki3ugpGGj#cfKVZ{+l()6lzteBrrR4@G1u)LJu2wn8U(ZwoIJf|K~USwOjJ;%g3Qv%!45&_h}OF|1r2Nfi%(&hkg5=k6XgUR&PV}%D5NPno2QG2##kjW5YBQ zZ67V=CrfQ0v+h9t9<Jt@s9Nd-=;o|1y5Pj z&*Kq0X7wRu6$Vs_>~fGEP8_Eq^E)L{h)*XTB-DP=sGrANGif5kiY?*hnc94-I-ZkE z@Rjb%9U*k{0bZEYqGB2?KOVN{LXVDaS!tjAEM3y!6e_HVnTMik(s!+nl0mbg0q8yT zziN!z(GP@_*0;Uy%}D*69pR5|Y61TStTb~NE=#7&i6re?h#7!nor1Y0lL+Q31+e4n0QvXZxA#=+=#;`QmU zTB+p5T=DcTLO7?<37e76{mTBKKGk;NFFnHFXC-JVI$3erwT8ZZ5g7U~k5MpAj?JC5!Zx z?}*G=d7Go6FS5X%lYM8Ezk>jyC!4RQkikCa1DJ}+_!Ia9hN8`u1ACAowoOmFWZ(IC zC3llEn$Jt+gUvyD#e^|ux&{wIG+suSL3_07IJupp!eE0!jAR?JgDPrEN%T|b|G@8+ zmgT9tVB3=?wvnGP-FToJn5!4dxzKN%y=Jugba$6w!~*pC^FEGHN-GjKj(2)AN( z-iYj2s5@mQt_M3#6jiNCxp#(Ql6nmiOrRIr&NtZwMz8y>lGS>wOD8|r`Yl(~HWg|A zxo|X?On*vmq_Tdba%Yt*{YoDiX?5z-rnY5Z&(4^bJVdi!Z{6H*xI(o%p!JsOLf%fm z9|YXozf^wccKz)h*xs9E!i)?_4+#O&6ZR4&@MUcty=f$ztl5sWaktg~MUEhd+7UD) zM4j6?BXxre20b5SPpF_y^z&;j)Oqk^ADuzBy~qffERDPIq-4!fa&vh~>fdJ*C?$h&-h|QiU?Y#W*a9 zsKO&ftJ2N|rMrZoKl_EJiY+YSngpa$<%RTxv~#W&F%5EDG`I;+B<;>(Z8V#)wo=c9 z$8+>^c5~pRW5x`lWNu2o69yL+7UnY27gq~#2YXs^(}XM4^Jg+o?SNk24C>}ql2hI2 zECnm?E^qunvpQ1G`i&dpD*0;C|M9h}*dMp3R828H5ihQamTSDvq>e4J$0Tb^$&t-s zl7v4L{VwTX*7&fTe}Y}_c)C`PM?p_vq383RpCYjuvu1qFARdSF$eJ3bo6xZ*&|Vs9 zB&vh5b@Qyj!mgCngOt|F;rTTsrD|7QZ-&QZ46tw@T7dQnq+ggW@&$6hfz*zPIN&vvFr%1Ae67_`4l;< zVq3|{4bM!QH$;m!E!+_XSJv33eiM_%LU7IMHpodP6iwd|j8pGi=~UPlV(}{gbE2%oysbqKiV9fMR0e z;|4w-9?hK9Pv)!DxF!VY60QmyMA&RIay(e-7!k96cOUAIPNGuy-7Ae+wXC;=fx5JV ziHwuWwJS3eQ+p9QbHnpWyx*dJfdO~Q}`e+ zpWN1_cI<^P>1%a9{!wIxuYY|y7pZ;77okz5YF&)pSzYjN?S<2ASrB(B;h5%kBfL17 
zrQc?6@SL87rN*aZ0#D=-M#?-Y%A#BxNp+Flo;!bj-I4_o9xrEH%a&pqg&V1=2j=V+ zCkSWoKV~}dwywE9dNK#M!b(Jd@l+!A(YR8FxEIHNFvLr`%a)0TYWEMcSb-b7;zz+c z$p%OtReaQ4pI0`I=uDHEy0?C|xD&p^80fQgj|M07JY^LeDB+}-+nuvR`Z>62NKPSN zq9abr;w*<`tB+hkIKvntO3^-Ok2N4(g2|qP5;#7-}{L>VlG(h+x%Ko_(bbMCx-Co zc`1yRmUL9t6e$Q^hBs@xyeTJWcXSQ=!l=`kpGVMQF$|KQxRX1AjTHhuAAv(@!_iPlnk|N6T_p@36R)d_9$8n%)#wKQj2|-_Nn~!xTw}Tr;tJ-}YE^rrPTt-} zqtU===UI{(;?}*`t`;zDPo?ZO^x>l~myxb>!ob#67fd)nJ;kJkcTc+o9Y+ArWCC2u zv~8C+A`S+i+}c0BflSP4Ab@`Q+5d083De0w!RuLJKM2i!qS#>f)45C~_Q_H4C)N%`F!aqdg zjRa~MaEug#E((!(5fxZRq%M^wM8pO#bpa-B_J}^b*Y)sbb@hPnYo6_U-LLIu`duJn zh3G+hc4c~v=wBj02*Wo(I51tVI>Nk?XIJJWq82(fI<#)Ti&Rwic!-P{u}o462J(MV zYj<|kkA^;Q!&`v2EEu#rD4S3w>eCF}FR*Y}hiBZ+&7sK}i$(izLc+$0-mzgmUTY0kDEYn?)4?H$mM+!t?p{+|FO1Ka#l{`WW?Z;m%#w319K zZjD=qT=_|7T(lDr0K+91L-sN2M>QP!wYnv0rZ2)s60QjyuE{;;wo5x%O>(cgHn*Q@ zbMLx?u7h>db)MH(Vo17O59^HUbH7XHSZCb77O}E6+(uKR>$Xzv?6Gp298S>9U^dg{ zP%~}5Ra-T=#BO2TYPTm^%L-35cAsse9 z7?MNw8LNx>#=fI^#UJe~>reaJ!>6ghB0nH^A&z7RpBfBEUMC1`2~pl%C+?t(JmwHMWo3;cJ2yuMf1hl|MN^<%{I+B!nrTzC0Q1=`X?-q{-+DWb6G(hFk=t=px^rZY<^rZar zfmR$6yKcZM?-3U=C*{j6C4LNN`{gGlWP+yy-3%tA{B(Wkl_4RSU~TZ&miWNg;*0iS zpx;P-B)?9l#w855Rne33je&ktNbq?=CisV*)Sj{Qr2I0mMaid$58NP@GAHFz^nvs^ z=N=gR_lPp&SQVH`Rbm#ZumFp&6w8zULy`&L3-J^2q_{@hEB-2Wi@U{M(!wsR2k3J2 zSpdZt!QWBTFwCHav+GsN=3pL5FrVEo!;M&sQrwJX7>`w0jfuKt3Je8kq<~F$2o8_o zDWvck_VSk3@g^?C0lb4TID!*A--$0#4hKUY`r*PX=tt4z^d95*JArqX;R;NntHjmp zaSaw?I;yb}Rk)44m$7#vR;XhQ9;NEl_g-wpGt8dF3(R)lEj+>Q@8c!3<1k*qQJg>< zI?;oDFnD*l8wa=vZW8@uHwA?-6rmWy;V^-`|_HcGartol5_xPSTw&S2MK?HvJpRupSUSdirg1=)3|2m=m2S5M-IVE}~003b1Ut8uE`ex^) z`bO3Ub^rjNwZDGSUmZA-+yh~5Vqgpa07&x7Vf+OfR0VXh`LFYr!|`hq{DK5*62!yY z+R5#g_Ztsz_jl~-+-jaoYXi66cz`&+9I*cYNFxtmV_MsZR7X&|{04M+e@&JG|003@)M*7D30DEd4I*50lY6RR6@LTo{!^tiFGk5c}F+okz6EMwsqq&N!p^^+hVn+CcsqnNv@ z8ZSF>JgZtClN#xm8x#vyW=|7OekiTs>Jbl!xgR=NVNvf8-I+;N(r`6pnwHsCaMIov z2q8*EkIB4y%FJi7{$-`gQI2#B#BABCjG+j5!K8snXoa>! 
zO_pdho6iIBeeL$qeO@h<7TJ@*jN;Xi274?KPGzLaV0cBeRwq4QK1cp?#<-27PAyy&;IdG;e0*wy(qh^ld60LrSnJn4FBHzg8%<=LO=Q2cCxp3{*LeR$2b`x z7XnIu5e&fl8C3Sa>!uFInZ3Q^y?u*AD1E)XyRdnf2N=Q_Q~$hjQE+OR3H;=5?D)XI z!9Ym~P0xOYVPGcbVa!l5FvtQ9Q`OU!fhj@3$N;LNA%7pH-+Z#3o3dHpN9eulStve7 zSBc}8bg9OK-Gn4FXn7U?Z~hki}uwWu4Z1W+RH}RU(M?29V{3j`0s{&`1*% zbbNE?X0FHr)%Cc!1^CCVyRv4KLVNgYn`a!6P=SQH+ATy#L?aZTO$+NRy85H5p@LHIreX|QVb3|LA_&#UCdjA1-JgUrGg@P}yz|GhvP-3O z*!AunF3Y*rw#@|8wj^%C^5nUq$bF;Pi*&9er!`mNOaw&4Vr#Z_&$2x45EmtD7*T|c zPY?A%4!Xh-frKx@&Bt&SExh@}BPJVQa^6T^Utg}EO3W(@-YVwQrDYz^&M3($$BA^Pw z_Sm*g`T4uS^#?$snFgr_ox9s+$62mE>^l`F0?Ct7P@oh^q)e`|I6^{V1%R<{GZHMf zVZ?C4HYg%t1+rlH!b%@R69PFN3tHRF8-KQ&Ms#o7cf_Ja3=YNogcN94mW7$8ikjwR zJ$DvWTzBqtygygvz9#9v)@@;5&7s)obuQ>m79X^V2vvkwsZ%l>&IIn%P_7vF;BUaO-!=RMA_7j zB2CaXO+sZ@-=&TaVu>2{dJt%eabjTtAjo(nm0996@2q)6;vtxFcSJ3j$6msOYXsYMzD)!cIV6Il(*!ZX*9rRkC3lrSwH4A$vL?62^(8w zZ8s+S(M;>B5Bd9$l9>A-N0(t8gC6@t#i$F(I{(s11vg)*@rC`*=aLICPeqc(xb~5r zZY0UR6nP`mf18B=&A$KvKUFY9FuxlD(oew5U!RUAxHjs@!|PUu>x9~s+MiHcS=(X&Z`9!%Ca_vE@s5D`sGSV!JV>oLe-zRv- zFH>(N(Rq+hc#Jcwfe2} zuo~AzRZGfawU<^yu0Bqt_=tyQDtt zGae3rS_y1O*t0XnhwLBPNH-m9U|XU#hH#H$9no94ud!bKdG>eh`W$^9e)rMoyw13} zZYS7|w;XibetQA%MdD9|+$z1A^G4^4(C??*RlME2T7UBLCh<*`JgB`Hexv=w`i}n$ z{`C1WV89N_59qV9Ft9K>F*q?lGCeZGFnSq4unS|EVjW|nVXI;-V>V;EV8dZWV@+dT z24e?<2jj!`VBz>vD5GzJR_s!;pmRYO1zPYf2nm{rh`mW?qT=CSBlB=_QF)cynnU<| z2?-t;4t!;q@C`zSJvkg9Rq5oM7!+yly zBp|nmMdoE;;ifUf+7}UNwA{**sco=OFG3igYJ<%{MPzkf_0@=mh)PApV)veJyj>&C z)D{gC37mn72@Az!bXOJkdL8{~ig5xG{n~jI<-8NhWJXXN<14IhkvCh(n|@|E$qeQs z+)L_765mTA+56ey{!roNl5@Gw`pl_2Kd@YAHWDV)5hhKsn3gT(#FYDjcUbhDm|mtJ zH+3R)e)K)jPi`O`e*1I5=hTqtR5!gdE@6%RaD(Hk82nQowm{BatymfOBctV?VB6ob zkiMHa#Q9^Da{w|~l{tw>YXLWvcdv)m4=+X*?1q{0ct03xb_-$BZcxjk+7p;e5VFdLmd z%;nDSRoRR1o9dhLo7g87L?S*^SxAx8wJdv4+LGQm&O_Y0kl&(_MIk&kzewF8I+21l zE?L5=2zDOLBE(5tGJ&aZLNQyJCQ012Fl65GEa_1esH~qPRa;tR3Cx(VF1RQRHmq{F z0O3+KX1v2?IAQGx@s_oTl}IQ|h7R(|u{j`U$)Z z0*4xH;K62W%{yOWGau5Kr1Po%T@4NvwNT6e0G2oyyoPk)ktG-qp+nAPB4&) 
z?$6dSYX;3n^Q2s8L>4Y8`omT8S~I7?{$?VvL|O7>gBY_h>TCY>Q%@6T=;OkgvQUd50ba)Ne?*4mB)TU8E7mzE&vvbK%T zlx|rbCHiJVJv_RpcJ`mA_$RwyH}2M(YQW^+?SPG!ahQPf1Z$Iq>OmN1d`_?Womjw* z88A)ak?|Y9Y!V$!CvRt@k{@r4?-+V7#SGl~rjq^APqDcc+G9o(%jYes@gmy6zh%7g z_OT=tQ>C3R(Dpk)6JsefyAF*?@EP2DWSE%Dll_s_dO|9(zFx4ymy@fv2h9x@`$2Pn z;GSCQLZYdY>HG49aIFlaLm^B|f43;Cpg>mY^QJn}GE18M9#=DYm@AKG7Yv=0S3B*b z+*X@`6`E(|86PK$0{=$f@aHw$N!_D1jp_TmE`0RdG@Gypu{} zcDZStwA8JYmbQdmBko{0pE+coT)VnOZgR!uFyDOq=aY0wtbl$nT^wf%O=cJ zwF!60##Jc$iN&_`;uZ~>bRRC{aT=n=@|I1;a4J#|0Bj905`$k2f-?u9f3Sy-+4wu#ZHqiF|6N(V1xs-bN17 zWz6L|Qypja*@-!VI{}9O2~(;!YYF9uWaRHs!HHujAnWz_>&r(-m7Vsn4eiCoWWc({ z+7~j4*pZA02Gmt9F-$@q@}xX2*7$QO29+(zY(C)iHCOW(3qD&l**$`X-zfTha}l0% zirE~lFeg1Y*}BoQ5~ps4X^Nc0MU9QS$F%oKCa%4G!^vRO&b+}FLIrn7>*$);yp69} zf)=vLw`TrUb|8 zBP}Xl#9Dm^e0P#Hi9@xpwj);L!@TzC=%AgY6cJY+Z{}T_iRCYEEH>)Pw}h#buj=DG zI&U$OhV=qHbNNOYRgFCgio`=8>k0m`N*R_qm1sIk;2Xu=zxNB?Amh>wFf!7yf6#kj z6STpCnc7G$?sYO$Ic62iq0%^U<(``^PBhVB(U1BF*IFc}jZcoVaj#FPFSMk2ftO}R zkFKrkR4qBu%QM97YfjkQ@h&Vvj$N*p|2V*zdSA8g=DKz2J0DeAYgE@i3v4wwo@8$| zBp4Z*k`E4FW_E#_SPMs#O?{${72Hp|qcGJ#-|JBrrc0Rincd*V9wdZm1E@hA$_{#c zLnLXi>wtZF6U6ZUN&(;hWX_4uk0qDq zEs&4SUsFE5F|4W9cx$~q37v#vR#JV-r_lvF?64B!PD{Wcr)v#Jj6?R$XwgH)n~b(nHD*tg)wJzii$g^# znzoeBpPqyrKi6zkZLpLv6MOVG6y7wgDV_c9Jx#|yt!&y_KJ+~yJ5x;uuJ&u6ywyCu z|8{1acj+9}T5+bQ@%P~Gz~2V6#j25gQEd4}UJta*s-azz`1+3S$=j+wE$CwShMxBC z-E!g<_hi)W*$yjFEaUD0ltczs@YuwW_E2HNIyk*Gd{9s_L<+L zwDGIyebajSpAOy~FIu%Wt*_x+Q(7L>+5mV%uzOD&f#*)f50)JRTKT&nx@v%~@z_&4 zV(`|5t`U8s=#RUPB-?IU2{&yX_k-_O-jKe@&?k3x$6q%#!E7*oczh$Ur+J1mc9gC$ zeN%b{c1~{WiCgJ!@m~J<4yNp9*`vGw>kWU6@*Rsl$hDq)GI#EE_5RtG`Ud~z{2~5H z_Z|9~T-w#<45}C_J7r(qw{&#oaFrd-C;{ae1{yyy zzAm_~x-P-K*1nN(ycJbv@+OKt$~H>6F)Sd<<`C{K(oN}8<5R~;T%`z1k-Dq{g+wRC zhl!ODmc^Qhhp~&zFXDWDy(l4~W&$JSiOy4-uJv>Ge*I{MYMAO;rL(-TV!mTsO~plp zvQkm;$R1Vn96^pLJTUgm7;#M&E*|+1iIR|hOiPh&B_4Zh;0W?wnp4I{eyQrbvpl1` zSeZwYwc?yFXN`B`OCzly32x~_Rj$0d>OJq=tU|BmJNG>UYsu27lCftvg|)jZPP1S($&t0Ky`9bO zYJYu$b=A52!gHuNQYo22D@kjd_^2d(IQ_|zt$MB6bSM46YP;sGVx1qiI$DW%(fu6F 
zxyD0=*0JJzdDFUdgQ8{3Mz8(K<R3&TIa;B! zznuTsIxfjO8G=;y!I!l(>XT3{QcqX5BKs)E`ZcV)^rH!Kalh?ED;SlTN!5iLi17_u z*QQqv*$z?;tC3P5f09m_l%_&fPe)Mf;_XSu#6~ponEd7il$I-7xNbM8Md@6M#kq9I zC;F*#BSh6k@LnQ$EYi5SWR0LB+ytzrsIFNjk1GtGkizNalVX`M@z9RUpmqpL5-=u| zE1RqQP4Ta{BRq)fc7{n!z1EK^#=zGIiieubHE-P34`gkLUXLTJKS&N{$CS&$HECl- z9)Y?WSA6^Ix%=Dvyu52#c{{P@QBf_9?GVh0aCkZcpVb<|nR_|1{N9W}z`j{tO?+E0 ziG1}#5+8~ZU(-O$_Nlqv7%E`_1x}<@GrNF}cznPvu>_iTPeTMQ{|iTacVPU}6_l@r ztdp%x{k6okEl@3}iY!DdsK!mq0~S|U+!VJl6=u91N0(}Hg5kj+Iuv(a&e-8lbD-TW;>5S8%OdX(# z&yNfrCGfI9%#j(=w+!Q24`JN8X0fV}5wf6*5*mPh8S3+7%`o_xR4e+cwfNCVR~MIA zS%7a(lkSHrq|+rs_&rwc-vbC?>{<@Xr@o@|D`>e6Wy-pO*ZcmxEv9S?;%k@J9y*B2 zpNW>t2yZJ6T)CWNlZ6EAQUJ!*`KL6s4mk!pdQ=442EHItE+?`J&1aSs$mO(_yj?9u zkCg484kc1UC({@vdIeg^m0ivCar?C!rsdoEUKV~NPFV5aCow= z=nhlUwd?$?A!93}&ez&>)C@j9DM{~$YD5G(t~!HqxE+z~VZ>66I{`8Gj4|SpyQ3W+ zp0z)SL(P|lBVPjFy%&e${W*3xl}gJfJJAn)y`rR z$XW)4AS}LazZvWGLVyvHgcJT9m#_DtBbkP1Y~p7bPp0BYL)||y-87N}$4wy?ZMHx^ z^$?#6$Pj}*5UK;CDnLySO98)2af*{n5PY&$o}POf3*(-+|3wrLLlO0cL3Q&UheG&) zIlfq8c_AK&lG<^K6eRblAk37SK>;N9O}Gfz@tP8dCxEAcC66i$Lzg)E8YL~oj&X3^ z1I2ZBBGF!xR$MY+pQ;$r-=%nAv0>N2!P?+sDHnp_kru!| zE?=4-i>Kt8Xd!BwY-zTW6BjSN?Eh|GVZzhIEf)Xv#r+=0C8~%&=a^2e_%38%w^0mU zQrMak!^y%`ahSoCZIE{E-PIu%-Do`5#J}sm4fy8SA`V;16mN*9jLtPpU#`Un;Uf(P zQLe0(YP3Yo+K}LPd%+y$%p z8-pVf|CXWjn5TtU8A1Vh{{4lMn6)?@Eg=Gjjv7v|$dZ!UqX8)dj>d1$$B%Is?m1Hd zK~8{LW_Ow^mx;J)<0a{ox_1u?iOZH0A%!` zHF+VYa79mBk7}O`G-qkr5a0nTGx+XBG0rEK=>R3a*H{3WR5iV^$OY7WHd`ZJdI;d- zw)~iT1|k{1j^s6XYQ}%nOhQGe<{6jK9kfXo+#_%8!Wjdk_IFh*MrVi&51WX-lLoEv zFGj9^oe)|@hNZO(G+0v5_q|D0YHqjj1ta2lHMRF=N{74sxXk!~J<3_D)*K-bC*VmK zvO_+fdx&Q)QfG{AhlCE|_Z6r}K#NU?cu->>%2rDtRkUiU*t$nOvknVAK|0}F1RX&v z+rQ`!`H_g~TzJ1~!rI=l7j-eMVLob)R=@lv|1H_76-XAfMUQ(wj<6HXrtKRc>p`)B z;KPKUC3pJwaaqTbp_CUHJ8xBxC1wMqxO;WaP}LS(1v;jgl1OmUaW3EO{nb?S#7p~e z*FViRjn(QcHb)*4`j$*Jxu-0*;?y>I1pATL<7(5ysB5Bf?>0vRgbj}XLy-<@#-*A1 zl+k<|;m;+f^a5}mAf&GpFQ`)yl$S0SLg-4?teUY7jRfAOI5>z)0a!+jV8oOeF^+1H z?V){jS6{V=GOfpc-?JQ5dPd{wL#AD`sOQC~#7i2#HM1Jg!v) 
z7!oN3EV3BkfPE$%)g>qg=^QZ_s3P|9-&kVVs$RYK8Xf`K&e)<)V_F{OAPJ*`t7atr0V}!M)i*!Gt!PBa4XG_>Pw%-FZX1g zi&0gC@AV7ApzGfEFGKU!-4SKN6Yj*ENrl92n7lo1g0<)Kx21yUnfH`EAtX2kcOWX( zq~KXezsu5l8nEBmvt=D!9*Y)Fi($7X)ysOnE9(8h6}hJ@G_IzRA8$->1K=G7W`Kt0 zhk4_N?K!!8s3~hgtfMp$3Tvv7aPQ2l{lnna2nQ)*X5UWtftD_Z|2MA;biC*jyu0?(AKtTtIjuNr8DY;H}%A6S}^}5P*q_6}K*Mha#1KP11 zct<1Q=zb?*oCwBptuU}xVq+qgjhinB)+apEwv|sGrGO_H>I~I8s3YqMZd=d*4;dpV z*E#jhQoc<0K_-NCCPV^4`=@)SWzBG0o(sj=ojyDVY0S&!up@v?=Vu#}marWD*=~JzVz=RjQ4g|m#)N4%QRX&IqHk|e}k-a5>-joDTa%|mAWb#(T9y^ z3?G;kPl|_0mNA$q}6(YDEGww**Bbx^K*6>2F2}VXlqj7Sq^3WO0b_r z!1@M6a;l{)lJ9_rkoK^>D#+mkyCIhc)xJ&vgrC29o*Wf_k&SFPbKs@dIYUu)RgAWP z)ck`A8nq0HYwo{olcB&oedfn_!4G0vta^mucslRBW2??73|BU5nO9o`|+(t2R5o6I<~o46U-t$6F( zALm1lhdhwG56cD~ra)(ne*qc<;)#S7H-+arNN_>1G-fvw(l`nZZ>BwIt4%Ubf|7-w z!9}RYMSD%8j-DxljEH@cWh0CYu0>i&tXOvTecy^CH5<~)FW0?Nk(s^ZX3dv^rs`RF z{@>r#TFWJ{i}@fu`c!-w*Xo^Si-M{owkA;4n1vU9Q3FdXV{{5z1IF_oG3B3@_Ge$= zBj1gnO{dN~f=g}5PPRTc-RDsdwO5!I==Ng3b)~0GzX7qAVj7Lwq4G_>csi;Y#e8CR znYz~)m}{Gd3yjA>#ytKE0O!lf=&{bMc3p~jkgQhaj3R3V?2JVAu29AHXe!(1-3OK} zS-t$Jx4>sr?2--KBMGL8E2-v%9?3dDtYRU0on@S>s&=!FXq}GZ>&T|6jh?UF?5_pa zB|KyfdZvEw@#xeq)g#E)<`+^wO2&4q2Mm4GY6Nk-P%Vgp(QXvrmfJPvm3}i`(3Md= zN^_MGPTo?hGaHW;nICTiX^p6G`9QBCz2t|}NW@0NN`;zyo@~|T5`8(wwOxr>Eb8c; z%)l5!$Wqik+4z<;cc;W>Eco7Y)N2U)$5vT(dgME1))YMrDfIhwtLlC370Y4+L^?GAr^(-*#r#5z4iY_Sz$9Cx}xJLWpPzL9u+u4#%(ZlBlbykz-(*IK=Iij9A|???I{amf3P4JpTm>c#nY zvb14zcqs-d&41480Z_Gdy8#KW7X4F|sih27hR(>%%#1q*+|bH`1FhG1mcYjBWVl%@ zMo#HBHIkBLQu{1G5(y|rvZ$_JcTY3+XFdA z7m3ShC*E)iXE;GzGxWW?{nhd$mV&>39;uL9u)do9VR%IUIvh|QRNhZLUCuN|PDgZ{ zXNf=M-n+gT##!m`L;WtLFOD*=x$%_@(;!z8o_D=Sb~%FeAU3xT@g$2vwXwmH$Wa zScRWjiG=|;X1^U3kxQB{q6yChPfJseBgE7LLVhXDv$?q>MX-8`rqb!D1qXGfvLoYT zaBCCyK*iJ1z^M8nfoSz6QjMoi>Ou5O_j(CjRm3wE`|yB|rL@BOb~huQ8SUt5i?23o z%J&u;GEz>;uY>tnbLQamOpnK5^SkTg;~73REN$I=r6qk$m%D9a-M#S`NV}{0IG*I5 z{x-`7s1{JQxS;HLs6dZTVCU1*KY1T=986_VDPeTciBXMwL(Fl%Nu0cej}D?%$d#!% 
zd$q%KX85qmIa8cj{ANM#kgk3h#F>EYLAo4vwi$-#^)G^K?#ix!F`74oM=>+(!Q;tWC}9Lh!zBq-l`xo^ej&xbGsho)G84QC8GuHS-2~raf_Z-)#N{C8wv@U z;$lxJH%dsmM>F8YZ*@j`B82m0?1RsAf3ryuZX=I`w)S^i>K`Qft`IT#VkEaNT&07*0>>2F8QT@wVUrxN1G?;pbk$9>hD8TlFrY4G zopLYm`ZWb1&pl0v=9S3qv%?FhqJSb>-a z-HYQ}5?=B44W-~=nuRIOf-v|&k^df8bB#wdQ*FH5&xvPF7P_46M#!;O>(O(T<= z>w;W`1qg*c;4w9zY%M_s266fbtvZ z?TlEW8>I3uJ?QEl7F6O=zETvZ-4J zj!DizQqeX-W(l-|FnTq{)gU)Dgw?=+2KN#q*UFA-YYwv91?J`R$JAyFA7yZz@LI38 ztPGx-oIg>3pg|&-g$2$hCi%;Riv!w}6Mnphf!FDqv;u|>oo2JX=oKR2&$vtMf0q*Z zLBE+N`A~A%AgUwkit|?{hh%$_O43u`-OEURgcsS9{o34Ur_OwClWz_)ZYTMo{D~S* zBNFVH1j^d|0Va~~rESxyL&p8Zdpv1EZp+W=Id;i>)lqpSZ@D3Fze(y@$(43S3hd={ z)9kNM#t#+aZ82n7QM`Q?*9W}ieBR8NmC@e=LE)Utk%I5~R5L@45pp%=9kGGpIijO( zQAXvX2iNKlw)T!eUCv=|cI7~Jq(AW$L>a{C8cwM)$BY!)R}~CwIX|O8r1m2CZ_5%m z)0d%|}HKa|bJ$d4Xy&b>vi^N)}x zl_L#?khELw7Ve$H(1mIC)u+ie5vKu2jQT4QYGCw?DmfyEu&7AC(eO z!q?0NRrX^;06SoStoc{%TtxPhH!tKMZR*cm&;TDOzVL2R}7 z_KLhEXWPyA#PwqNuWq~jSum&-I<0e9CNe52=Y+{r8f=MNyRCL3q5WRIh{PVY;EHUc z=w%caLYw1VJE$J5&ZEhEde9Cn57&wDep#D=<3K!-8NF`%-@W%j=ZI#nr!B6OMO<)r zyseA-r9qLry=w_{eW}+yQs0jv0F@@T z^X)?&&yiS-G5Zhe=!TYkg?xNJ1Dh-e z7*`>umBEzt50I4+dQYFjc2fKCsUH2*bVBQ8voS|^&}hr!J73>pG#BF50P-kf<|h;=dR)^<)Uu)j>Xp5yDB6SLJ0t4T((!tSZCcfX*UBhn$T@Z+l3y!i|E!<7;mj^49p(+~2O5f7X zi%v{NUyiIFQY(%_`aWw_uULKo9JOj?9JSw*GKMOKf1IrkuHOooM{R9#=*Q}0#gV~d zaIu!R<_Sx8hhJi@npIV%+T-(q5ba4!>Dvn(iTr-)R91zmQYF zq%L&GCJK_`9+KbQTEovytuH6M%zZK1#)t@bT`{|K&O)1>7OO`{JB28jCK2SWG1=mU z_-b0`j^t0sCN}ESKcQFaPRtn78u}(_>Jsft{}V#bkDNN$UNIsYJ_)f8U>AUPJzI~U z%;0aFq>hT;=$EyrQSzq}RDbE6o^_H2O)?IPf!-dRR*>BF^bnX9L$ykYEKJ{6 zkx)rP%5~Wgz|lyQOmnIhrhELzahx5B^EOGdo3X{Pz!>i|&|*(E6)pP$LJU$#--0&> zI0KYp(>#&GL96o}+A?toRbGF82+7T-= ze-uXrQ>mUpUYXsLO5_J&yigzRgXdksNk?V$b1_@TJKx*DR%BZ^<=qilDHjDHX&>qX zl5=>jN?m<16EmUHxZ$!g^0it5U})ADZ>++ zr>!o@VZkYJ>J6*=HICEO(aPDlCdZIXc+c)t5siWgK<5=cs*j1SIY7M9aX+h+8t%{O z@oqLstMzMa*VSt{Vq8Khmq)Y5AxiJ94=_+xO-t4siOB#17lze5HN4^qhMnxZ`kC`& 
zfBSsc?2R?gvx_R~W*f(yCN2}}aMbNJjkHJ4_YN%rjeZJI3@diSj%{-J)WI)+S8&z?wNs;$bjVJ9A9oFW#bLW$pqD7p-@~&2lv=p<@2l>K?opqi0jZ7 z&=%iTHEw(P@TY$FI+A=P4=L!1xn{|Uy81>>#&f=X8C0X@i@5|x^a#5H-;{!5z6pznEP2KZYH zK0t1&iIZXAWdZuE1ufyjz4v>IzJ(O-N&z8H1?+3FRqkbkwLb1<5ZWVk3{?PY-lu}5 zVKA4;&@5QrqN?2+i-I7}8f9yi!SECrxG%=p!nOuNxKDIy`)Y#d=IHBksMdvAW`g4yu6&0u5hZ8`WS>GaTngcc)iik zKGn*<1r=}z4b4Wtl$|gKbQi4!oP$+Egw>Nr$TTYZ#hn|XO;k1+;tDnBY#|eX`Dzqw zYm>LAj+|iDGqSA^u5|uStru+x`cnbOC44?!<~%=f6>pXF!gwR0-OE{RrF>iB)19^% z4SF~`RSd7Yaq$J!Sw`a zwnC7rAh#msrL0g4KYxz*xA^e8Ue}7X&ox{u#XE#?3oQ0?tEXs>x58W|+n4G(vMD(f z00==PO*vEYgEL|OupX_}RW5c8um@k2@i@vq)><6A@8GyNJ4H{}n%R_e+4&y3cE}*> z?XVTNMDww)t`jxB&#|A}Rc%goh*_uksR89QIz!^2a$DyPT_QX^z`aTV-SVCn-B%FL zrZ-d7#1)O)vTCH+VKz1R>&H6LZH10)+DA;jbH^i%ur$ zV#M=<2qLcaj(*m{^cCvaz0WpJ`+BrAD7*CmV${oHU`kF+6X^hjeHFZ7&O-q95NF2Q zaqFjpuKRw+7H)OTB6|o*19D@=UwX^$pnb1w4Ud$phB@QX`!^CmE$10bIn__Qs)2sB z#$AY0xg}M+Lfz7NKUZgz)Z zl%XHNaMgU(lp`qTQHKo$yPJKJ{d4#8_M~esfB;I_oa_$`BQb1^co#L z!fa6bE@huxM;vnNl{Lo;E&N za-wKyp3NeqQH_ftD|Y_3>O-YWxSgIS*(cT~={s>;#{Z022{xf)Y~aws($doUd{6u_ESv(-;Gct1>uSBTw9gktBRWdYZ2AmPFNM z>Nn7$2L`PGezR^Taou;V0?5MjJQKnuZbEJnRNlOHzaXi`(5bpoqU22V$3__Q(l?J2 z0xNa1Km&fZ|K36eO!KnxrQ^er@UHy_J3FJ)Me$@(Tk|(4%`yqqOYyX;G?!qkwxE^* zN%rJt;awk(>AMD2!aP2aKmEpZ7J9_{3g4D7qx-|Rv_Tzga~&|5yRE}`HzUZ7=Z3kpE-!20ilNqeis5m~AD5ek(eoZ~Pow)E?}<8)6&fv%$8zSR2V86Y z0@bEo1=QpTOF2yofkXLztbHg|a5>KHMq;r9u>!Q2s4Py#^L71*P0$4gKM>-Kh83jSWlR5-f?{)@6sGj??y!Q;Z_X)oqH$RK zC-CY*8rGje9N8pzozP0w2@`nI)*99kexZOqc|*e&oGB4zx>-i;T>Ez*Tq{X~#AmRs zPIkjO5NP=zF9j>E*&PKq_dfM6?3m0|w^c_>EWNm;Yda2b@b(=GdaEtcefqRg#OKe_yTzR)wFsuJ;8D5i2weHzHOH=F5rDPN(6`(o{?b;m18kHaAck zmt(LeuzHVF43ipZ2X2uU(dZHOC1YuhtdYP4Z)pyTmVi+0ZUo=k23ifbF#GZ_q*i|C z)gioy7}De#L=NDUK{%w3(^6_nXJG03otT?FnGqYyWur#lL>XSWsyX51ofJ#Wx9&t7 z{uzv(d_ANL8gn<(QXP`yA-V`ne`fsmwzI7phEQ5GeDS|vh2i&5?ZKljK3wtkve^Rc zO7381*{IqMkE_LrU#jkO&P&8gdZWqD9HRM2DA4zFz~~ZAftliSv?;he-K6(v0bdTZ z?;+e|oE`$7ixNWeQ;Eo_oNZy4Ns4<#%X(2o_wrxf@@)XgClgdCruBjn!z*4aJwyW6 
zx2%l59+1#G@rfz4T5X1DGO}xOCXvEmqY+HU!vkfJRhYF@MwG{q&NDa3-^3p|-Hdx> zpy!`{4ay?mAR}YoL?;)5z1FLY~rElSz#(eoGzJOc7 zwHNr(0!|QyAG>$JChY4GbN4-$@lq8COqhi@MexC6c7{T9|4QS;KgDV zcQwZfilZ4@&_E+N7CJ#7nD798zz+EdqZt5C#A&H?-?{@=K%)=RM%Rt*!m%E0Sfo_% zyQ+4p8iPnNHx*OFo}0wudaK)dwpsaW2irsm4d=Mt9{@$s$N&Cfs45_fJU}+_15Ys- zpZ#0>_FG&3f3^GoL;yelz`yn`03aYFAoj1s`K^Wj_54b}|9EbI&Iz#QNk`bjH6K3P zj?Y&oC_lc-i>x9hYZM8!2KlU0gABrg)HITY3mU|%m(cb3=9r1%Wub8vG7|`qyo3ET zO@9m3!zdyJR2w75`KdiRvM)MI$EMrdd`>*7Pc~=1x?AADa&eqfAv^Nim$eg#0RORw z0O;EVE_-fl$afu9KdUa%Sm?}$a{*Kjeg{=Wda14aB|w32{&%(FIm^D#*D zJsEieEO!K!pQtQ{YWyD~Sk(Z-iga~RdcI$*D5)!HLj@{<^yhSMx-~tPoGA6;~!XD@cr8`VeUFDUZSul1z-_c)oZN>^QSl1(e!kZ$Pnxh0lg@ITvVT0`D)#|Clp^i8*YKC!Ugu+C1>JaN6MPai!KZk$HD_xF5{?>n#0Bb9vR-*BCvdi)3K zw0Oq<^fLh=A1cEjDsot;%~a17cjH21m`F8-rZAOi3Nym&Omn&7Zhlx87Ey~sdw3|* zW8ujt^}ZnOVCmDIgIT-pAYThD(Tg)8{S)P@3RFH_{tl8}$@TPP!KWXQYnYHR!P4ll zso=#M#kI^y`NPrM$^v!Ir{ZF%t%;tLUl|$ly6sp-Cip9QlHNwplkyA1^-2z9KJo!^By&<;carotYY$%d+e9rY zxau*EYQ#h|VJc=|Hs)skTe1n@u((hBQoK*xCH^fQ7q^H#q{DDG76Eh>^yL7mFp%Ga zss0#G4Pe)+m`%cDRAUOe{}0nK6E(O6b1(u6un?E)ma%Y@Vi^Ujz)Bb%$Fs=cb-c-2 zw&5)d!A|VLFzm-6)_36u>R@p6;v5WSHS~k&D)bz~`8|?%*J2bJ=o)b~dt8HQxB|_X zk0#vC-gDS{8Rn^D3m&D~)ORVK!}H8uz*=VO@iv}f_xJG%_TV$TiUT-=PIRFgJKzX* znGFYQq>ZK@V`EVUMLB>(^b diff --git a/curiosrc/web/static/ux/fonts/Metropolis-Medium.woff b/curiosrc/web/static/ux/fonts/Metropolis-Medium.woff deleted file mode 100644 index 2091ae6772092f095f0fd9cae991f930ff023901..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 17524 zcmZr%V|XS_uuV3}#v9xA#e)s* zJ5@VdM<*a4h>IT%(hu;zLxGTifaHLHseyppfDQGG^niBXN}gKwUah+s=oowSfcz(N zvgrm+zNfx*Y7-f9KpoOBQu*?MQiuL5|8(F695u%;hOCc1eEaa8b)XOcZhwqg^=Ek&D4a>Aa(=*aEg~G8WRm{S@le&%B0sHG`n3 z-&lTwg=;b`(o8P9seozx)}u}OqP<=~%# zounV9r`|ov`AY29DO#De3s3Tx*yfY92vTL412tb*QMO*Zz?uANlN=X_X#L&bzjQTs z7lVRPPAM9|DmE{YTs%9xqO6ak4Vhj+T61Er$Yd51nWdkznGDHc9gfQ>yYx3sKFc}g z)MYw=xg`x)wdQq;(p3>+m8)v@jf3;>pQb@9^Y4M3#I~j`#L03?cXR_U^#uVeF#|p##wW{fh$r 
zvw{)JSL)h|7XT9A9}nN~;A{B%w~2Xh0~8EQ0>6V)rPR4!;x7;+pydh3k^g+kIu>9v z&j&x^E5mDM&adek#79_2dWhbB!)s`wXn&FlluzFQW^ zHC8n1i;{cPK7hnKBK;Y3Wc@-ld(SO|=$bp)_4F2HB*smvyTg=$_myull)~`sAZ#gN zj~GHkfo$FnuZ$@)UJKQnU|u}0eurzRExojkdeEIKalVm^sOqk*b<&{XL0EKpC=K^- zW#s(ip)zVo#6^GoS&05hJ4aWy*=GZ%lMAsK5)zaQgj8g#8DcTaO_*GvNR-%U9C9JD zn8JGAC0Fd!ztEPuJsiRpf+<|np^W!wV9K~^ZQDk8F|gjeu!dO3;k{TBV^+Kd9O2;66q>B zs~SQuBy28j+$17!+p2l)%ewL*iZ?kRUH2JFbY#SNZOPT9hP7T1`U-vlqHKAwd!UvPW&TKoCveU0qtvj8K!8iqk8dRHN{rjii~G^~L2Fm|4)-3lrPk zD*p>SxW0n(Kfl*w<-srkgOLKSDOh!eIq6zwv|=-w7x9b3EoXx0=^E zx0*(DuHCmqqlNVkMEwNishO697y(61vofCB3(BtBx7ywxE3%)Hw4ZCXu+L}(ADcZm z&v{6*s*)YoLvX^W~UKm(H@xN ziiIuR!RdPO7D&?ct4@pnm$HdT=IJP#`ccFQnx;vp4C~v}5dusR!(I=34N-PX3?O)E zucR^y?B?xNuSo2+-*0hAaY8~a3lW*6c!1govwQ?oBpM^CWODg(0dO4Uw{E=LobA^x z{Uf1_6J(UQ@a9?ixmg|#p|p!i@zOb6Zuj;y{k)-Mb>phFQarF(%JF<_r)b<+syCq? zbYwvTx5(&QdZN=LD@mQ{axs8Jm87*4jucw4q8VUJzKefP-MiJVuWVGvJB0X)M7D9;h#e2xuMg!22}mX(_#$ENJl#Z zWNh9&80Yu1rzPu}bw$>@sC|K?vr*^Z(P;NNdahRk`mQ zq;qKo{w502bgS}HlZ-S9^X#^?_`i*#4wBSO{q<(W!>AdI+}yTJ+Gp=I)Zeqc-%QTg zwrsk75yijB{#oF9uWMBEig>f>66x;l}>dy-_0Q2zxE|NA@!3iz&qA%yt}5=cIP zGk&wR*p3`lsAraL`N#3@>lZHnWLF7V5go223h5Ts~1N+=tj1$xhfuqz6 zCgBRhM#*(zdtIhkTix#49IW5hU3i-fPivM{+?h?wI+mB-ZZajiZrLr5f5OAUQ*jY6 zu{Y;9%@l4T=}^|C%BCow-Hqc!%Bl>Xl$+Ko&Vp+f+S@UFV^GdiJTEcFpP- z+T6#r58CjoPBiQcuIa~m(dY!Zo@6~>@?qM`P(em;)5xHnIK&a zpan1hFaRh4r~n!1@}p2vK}SVL-FvNj`FrUJ%dqC;C4S2}4ogi7PK(bb{VRQ7KY4`R zE2IK{gMba9Vxd6rpoc#YnFvS=oOD1`T$MfEl)iBc_`?P91BMDhQha)Pa&D86H#^O^ zT^y=B9-d0$CuryhsHk${Gt-nC=ieBel;WL~V6ZQ6hxnEgAXPm9UjcYr&LS^~|4K3R z^MDx*eyiTWe#(BzQH{u1(uSz->ifMU3kre+Yuc~0uvl}pN50ggA|x4zD7lyjDcQXw z`l;;kFKx^F^~w7&_LGME24EW}u~|qc@mbB3iudu$^UwCp`g||NG%vlYpMz8y;(UG^ z!j@W*u8@?*|6E0gtES<~SBc+|>@VNv>#dFF$agvE za+XwiDrvhaY@LW-aMFXmC4~9)L#5vmHA2Fgs|CJNCX>FT4cbb^)8EPgc%NlPnYX&H zj0rOl?wY`tV#AuX&K6|@3f0nRErnBCH2aDW>SYA8hO4HRl-zFC< z8l@LGD3uolC`uhBU-J^eXn-uY!hYA3#B_|CHj5(^9Ay6J)|S2A!F?Mm9<3YROhiogOk~Vx%&bhV zcJ#}A#v9`2)t4nWg*e5Ji!yT}W~SyT<_S&jO{R^P%!mcW=GOld>kjf#f1OEu9_DQ0XMxyu! 
znhONd38vBvLPrED_jBk+#1dbiK0%KJ!R(gbNRXp;LmU#)LK)D^QnMgugwhN(8_3qh zoRIuM42b|Xs6{6biI5yb*(0SF|8d!fv^|j9q8AlvRLLRULEi`Ehu_{f-iF@L-kjd5 zT^ZdmKBYc^K9xTCJW{-ZyrOtDx`n!Vx;5M7+eOegS_T$UBxDw`0pv_3%()zv8wu&q&YNNUxNE*wMy`&E9I2@!Ghs9o9nVt< zjU<)8^Nev#CYQavpl0M_uw`W0^X1w_Zr&f!sWt^*9W)Sv0MVoK6SI^NS(<$~9M_ z=j;rM`%i4`n8Lt2OaPd-Q7ZXQoZy%z%h~ehxQYO&fDPhTRtp0%N{^I20Xu{|PEGQE zQKKFMw*7dXSKOZyeGKRqMkgeH^D0xw$84wmoHlPw+Jw`x9BX;(kuw)S3yeh*m;6&^iw92Jq`uFxv)?C-&uDfftV}KarcYW|4^7mJrU~LKx zMd}p~>@BxpcLv$Plj+8-NwgK{xkDHq+7hHlB9QBzN;Jk(?SRLrJlbya3Ore17rA!H z@WSuGlWiCJUo@fE;##9>cSy{IMHjqYbY9RAV-$I!lL~c3^q>@4bwMDyV~moi zhzu^n=p(4yI*Hys5BY3F+%9K6j#sd|7iQC+SCXL%83IUUpV_g4hJQq3F4=N?^j%Ka z=gZ#vclAGAJUAGnU9V9m09n;w0Zag5=Ox{&1LxKdqKfrG1j|v(WDdbi(bbS@9M+E0 zb&qHx4cqI&M*MkVSAv_1gd5f98N?TZT2@KpeETzai;bC#2eL}~1taD{^9MPlEOVh> z^YzbkZRo?D$NZ|rjjw{xy5pV`R=}Kxvv+?cumx4sw&2;a!1%g*VM-M@%L+HH zMTlRsAz=A!nzhLdqz^kR`c}L;@axKF3f(iAxOjll93^(raLb)Mr-Twfp@l+!p()kW z(IvO>d7~O9C$>C>B_azK_Q&lir3;qGV+p?6;ZbUHCfHx5e`Da~(0}nLG#W>4oY7Id z*#$P!Vf}sxPusbs^RnG3BcgbR!}+t#I@DZbe-5%y2SzHnK;al{_RqgKn|Y zGdzbrjv8wG0+nbir;8`Bgk)#`8>hlDTWsQZqDIKj504)8@q`c^+HPS8K2e}(X#QA4 zL{wa}l-?VK9`&Ip_~vBO+=LB07;O^hF{AE{n}gdx^&wu@Yj?CrNJA952*zIUsrmUB z=SlSH4|GH%%;V;g#{f{m0#BHjAqDLa8l^zqLNGe76Ui{5xTcxvZ^NPK?H#mal7I3> zDaat${#d^fgjIUKL2tLYBMPQO~rwNFOpxlJ#yFN(UMxl}Xr2 zG{w;JyCT6#T*G}ckwxpv8WaDPAlhC3OE_3upr)UiSH7d}H6XwAavx0?5M{aAh}abD zo*A8YdiJ3(XO5Fdit;cgUKJ^t+nCkv@QAiCNah*oQT+-{FxubE8g%1~#>9u`nZUch z zlZ<%h6_~=MNk3}SZ$U#H2sYs9ky_(p%~jaR9Uu2(jWtzA%Cr^F#CQN+bS!sX;( zCt-$33FsTxeGizN>lT9=`R?7Six&-6H$8ErLdxAKph+IVAJL6J5iA&zSd(n0vuK~)b6jaZym-Q<6Cy3d&erN=M<(Q zfq$!{P~2H3g-}#95uQXcgy<`w?uX-2pVW`EMgG2Zv#LvgM5fj%9iew&DMlXea=qQ7 zY2cXnUNR~zwk$EZzuvJpsPd-7(^_Um=a8A0EZnF7j{L$B-4is~i^0C1qaYx#(T@?0 zxN_enC)(b1(_Ceg@x?zbBRK*|C*JHTLA)wVt6u08f zC&!646)j62B;SAA{aL$fdP=UzoFiGsEqCj!+pW+W*IJr3RLA{I`w1^NA1>A0JqaD7 zWR5s5+8;L6M6O{I2Y($^XOh+@N^ag~F3%_N4isnWI0v2kAnquAQ#*$74%=@Rg+rYJWd?Th0e&3|D`F)^FW%P{e?yEQkwDx^C^F;LY zaUXcK{)$wug3|#I@ab1!J0aDn111kv=+T-dF9*?yJCO# 
z_Z&mHdqvOKI>vJ-VAK6V(Cy_t%F*NEO7|S%-PgI7ZKMDAc^Q5C`;PStLp-5;C;JB2 zUvPGZ(jHkjG?(Huv86TaTR1wic~1EywBlwGwU0Ki!Ls_z+b_@)T?rgC6 z+F&ndIUbhV1-=JG?7(Qzp^dZ|T1`0uXZHFYkUAu^mT%zx4O*W{V`CI;yUiR043}z8 zM;&_Hk!*0;0JkJ+P69^UKFK22e2)-YNlt=C4863+TjaVP1UDePoL$>mGW#REth=#z z6Jw6xESS2mb)9uqw)5!r2fa~7Jn=gLMXZ zFD+Xr-rK~Vo}Xx+ye!sMZ|g91w=H2!>}HNzq}xZ=lwL{Y%4aI}WqTUNdP(-mEGq08 zBt7E|lz5@pOaj+rfkJVwM$F6Y@I_sv zZ@JHF%wf!8OlwQ2D*BRPTV|Feu?qX*;VMku#X@G&C8^R^OF&{es|AB*lr^(vf;H>9 zPcxBK$(j$-62l+*VN72}9fmEsE!O14c+17A#nxocsz8m`rf?(CRmG>ckx~6S2KO#6 z@opl`i?h|Xb@#QXTIdE%gO=f|;!W+2V;AG5rY6{?OH0<8OASx6OZ%n)i*F@Q@5_m% z>m?t>k?LsCEO+Pn^Uelao1QJ6>AbuFhY;iumFV-cry9;~8>!8lbJ9gXH8fND@Vq9e zs3MgjT0#xQLZaR{D=Ymo(|h`I#{H!2WVj-8NwEyB%zl;$JHVN;tfhEM_pxI+N7L8y zv+?a3Q^{Pe#!obW0v45dWjnW0KmDHX*nWza)7DwWWy)^K$!AaNSMcLjlzu4|9dPSGQ zT473f%6bE$}M^(ykgZyGg{Dk z5)+;y+zm^oWI@B%kiUZ-HzA53NfuSV0g4#=05M{*tW* z!?^7JU&{|f>0ODTQxT=SiSX&}?iBxiQ}5X?oTAR82pV>ehy+YoWrQ3-5MV@gki!;S zC8O1g4j6tpf?BAq-IID&cufc&TPxk9zRemN-6-Gg`haINv6cdp0*8G$jW_O)Y3*x< zt}t%Yi4}Q_k6vBhcdOo+yaKW(W=7v+B0=iFlPF(Qt-I$t%-w4Xmdp}~`4^i9uAl}T zqdr$f)^wJb%vCXlFFI8ITO}y%o^^u1X}De`7v%MT7mz325qtH-Vp(J6dnd}_}cYR~DO`rnW>7o>`CZy>ITYH`DeY-~Y|Uou8b zSD^Axi7iFN%l&(BEUfU;G zdig%krXwG?@@CgV2EaGNafpaj^+ITm9MM&6S(ccX0<{+T?{5R-ubNa^yF3Qchh;3D z!4gGW?w%`1|<-cX&)9q>soQebBPCnHdkh?9^@ZhnJ$$q#82ogP7Fl75P zt4f6oRq=Vl7GyMtnO21&%PA;f+9KPEwktn-*{UK!o^1~Vlkd$Z)%qMB9%0;Mb5mF{ z=pLk|4+K*d#MU8W$-i@50J=y$0F}x|%^r{4Wa4wQVjUfzeM%({OKVm!laZ6a;!S+d$a&DcwD&FOmSMr? 
z8&b-Iq&CjDv-@+D|4Ps+S}T&_@s!Ff+vEJZe>N6d9D2f_l;E{h*-9UyaUwkL=KY!e z&dtM5;Mu^v<&EndX;o`Cu=Wh&>w|`8>S+nBG&{(f_MhT7RvQC$znkkH<3$=?L;Ttd znQ|vW{+IlqSOW~gM>$Sa^0|h!>|fY+Cex3bq)=54veD315sS9LxnkiKxSGN3y2X=7 z`gO*j)G8tn$?leA+4^Dnr^wuVizEgrV{Zrp-zhs>-NDD(*2YwKw;~3@`|+=ALA5H| zVd3Bb&bgz&-RgSf$iO2DyZI~r6Dwa|M)kVh6nIJU31LAMsTFrah+0pN5Il`Kh3Bq{ z+zb(!z$J&EEHGqbJ?mwM7g~+Ka*k(Zb(`#}FwC{;R>$=MYUcMj<@MFnd2Er5y?d6G zPa7O!y~`dqT}s`QV))))@~d?dTWPf;tXRva=$kr&?xcJ^h0UljKS!bvqhW8wTwJmF zH1V!tnR_qxsJ&35G-Y&=Np-rAK6~vE>9G{IWqnQ)tQl%8 zk~R7d9xzmGIeICZ)t_)0jBRrd!?J8}F&$LJyob&D~4kx3K!YnIqkS<)VA z)HGtqUA~00DPm@Sho~)Ub?Zpe)1iIf0B5O#pzEvLDHCn1*DIMMu6k8Vq3-f(Ehp;= z4b%$~Z_5B=Uqg3b(g{mHv6vf>%shL>YJojYQ{)C1w0wdM#6EzO7f>LCb(<(-cEP7T zOAGE`f+?ra>qACkL#>M13@P{J0yu&K43lb>Af;$^|ByIP-Bs#suTm%2oA@0scIvQ# z8NQvKd*X7r=>1EW36D;_#G>!pIC+fq$>D&?L-IC_wqvT~6iW0>>}_6vk``F^@GB}S zD#Y_UkvF(Zzs*n(WNs`nGwUe}Zf1f3uE_J&8p|D4tsrNb^5@WPN@B<>E2;x$7vJR< z;C!S9MEsl~UWyq{x_A)PiCC95hbclhef>3*X5fA}6XjaIegn{IkidzI%5@Eyi@>F> z&0t@fCP<*Jj=)%pvKB$jH~|t;a-HcW#m5P_tOkPW0M%BsC{>{ap!BWEmuV6a_Tox= z3Fb0?>W0Ew&qai1*Di~aTUyUa!sffyMp}W$ zC!7BQHAUJCB-HQUpU7+7P%MGvGLSRd1E+Q)Q8@-W13!G}pqzVOu9N&bi0mi@2?e4_Ip9X3jPR=r96b3`pHNqE3ZEifH?G8$T95T1s0E@+;Y@ zNhn|MiLX`&y+Iv|QtEOc4M_d&JE)V<1 zh7MqXmeV#0lOsfrC&zAaBzaLg73MI^(HiTnJA2AEA<J19cdzZ4#nww*{a(WJaZq7x3SE&2d17y_=#yiSXs^1i`Uf!*lgTZAa@|X z)DvE(*X!Brx|%1C;p_*JNs?aq+PRQNkO}eJBFQkcO>7=lN)#I6uTl(29t^|kv|6Tg zJ$?F!L7(A{DtTyWJEl`j@wbo{BL)Q=z%6lV7ZUj4QZRm`dIwC2<pgN#F;!LS;Hl~yu$ilxP9*p$u9)wfUt|A+Q0}egBm~+zcJqfmzIA$?<>ZdppOo`%(kAv=F3S=%({qmtOD~D zIPr9W?2+Aj`H~Ham*6EPAS@-Tz#E>;E-d4{)L+i`OPf*vAxVmMgt9y6qimY)tzAXk zTh*m#au+Um{XxpHaWVCvy0e@2v(ejF({zk%U)@;aRWFU?oL-f;g~lYRmbfxvOnd^=J9 z$rMDS=)u|O4*-k7bP17dm|xQD77hO7w+l~B8m!&XRI*ZA(DCMUj?Ly@%C6m|a(mwx z6_blOfbGNr4}TQ(K7n!i9;smt;Q|lyqG06aZ?BY+dUHIB$kDuE!B(NV1kL@CS!mS| zAo-Dg4B2E=8rE?Tq5ZsbHeWLEhAEHzJc8D=CZ!Ryt*3-Lax8l4?fQJk;a$6ZeeQIT zL>;7)DO4vszd4M9Oq5Y#4NFJWiFk6iPDX9;%!tVB-b*lb>8~@9*S!;`ldoN!(-8#!LX-RPs3czLj^*& zwI6y&&E; 
zGDk#@)I&_^n06;vrukV94PH7TieuUxa_9Czy*1TQrLzF@+J7qH{^qta_uutD~rugKCcfanTXWmNtY6zSsH9|NJ{%6vP_tsTPpE>mrx)8~sybG_7Y(h2)PBkIv++_qXWQ3x|0^VtWugbo?x9VbPV zeAleNZqJ|3dDd^B>~AF_U?7!}8lrfuXlq+EL4k-Lwp^Mv<3!w@zrcku8U}vjI66LH z(gRrMth_vG!i)_mxP3GzzRIq@?&P{_a{D*_(BWjO%w3ND;hXvsL zPySU0I_6Rg7*&{eST;AD)$v|hBoO><5bFK@_r4PIhq!cyKG zoD!R-$q05>;<74%8VIW|<+4enfC9}$)lZy$dHE}rX0JL19wA|RWvSYJ?nKWM2-wn4 zg;l%Rf>f4qstU3NO!?cOE3li-I8OqJ00m{0@_KO${9_(O4MRG3dg&F;KG%s*lEUjP z;1ZFHF0G(vbm$9ie4$J>-s5;f_#riX+cyBD>7p8+LH6D82rUoxN{`-?4N}HfGc@DV zG_S)3um`;@JF(MdvKZrJV3mM^+TH&tc9xV%5~d^~vSuT~uq?LB z%w}LUD1C&O@5KF^QW=&m1YibjFso2Wv)hT)eA&=T*H>}tr26!4H23LxNy&94^E1!8 z(}$)r>wRG|aL?#w*j}00b1aEG(^h*J$^~D@r}R7&oL2YSW9uo(2U{ScbfH*b^+kq| z<>o16ssv+@hC3CkJIC60lv|+I%)eZov)RYznTE&YK8Fa<@ca$=wE2CCXv!!7yrO+t z8nhBRC=`4pUA}%SrpdF`nL-8C)ii(oN2p7;80ovKx_?uMoIw60z z9D%y6ffLd;){<^XLDs`;W6R$8Y~sAa{^s)1(cUTV1Fa#xFhg5+@m_tLRcjb*D(4)@ zv6KtMqRmja_md=%q?uF~tQPLHqdrBZpoM45yHrW&^jJ4^CE+Kw=V01X^&vA>VlW3H z4oR=B{2XgK{|XY}`7}?(l(8BjfjJ(5hj@S_(=Q+E5;2`Dm_7T&25D$VpS87&G~HtQ zDHGddK^=t2Q;u9pj%UL?Y7-8=N;fp+u@gBo+^-Ftl%cpU#k$yk4y$jLzbKM;i)C(B zW4GrTQCiZlO#YF`T8+FPrM1|{74U~NzG2y8fB$Gf+SaQ*6`v{?ec8-`Q2H;o=qzvG z{rJ5fITi553fINLM&P#iHFM&2-o<>sLI)v2e^Qix!l)K?lp1&uf`2gB3D(LZ33RAI z1SjA%yv#wDK~cFU@$Past=5#-_zDH78Ntwlm!n~>VVec+)2MYjwst+aMa{7pPxwg# z!N_6A?!YF9=1L{mGKt$|60X@Ft`a#_#OSG#wgSYPehH&rg`T%r7$^$jQJ;LlP!Mdu zHwnR9pMr5IN_p(4FGs*@$clzwg*GG7iO7H}N(U5D# zkrD6iG=+B4@kQ_)9{z)%`h>j_<T(6NdVcw! 
zh5y}-e>oByvhZS9KoZApJ}ZwI-`ZK!Yed*BuqULxbglOSOrdYA7+ezO{NH}Y3InU2 zP(|)*4q=!ly$|6FV1Jre?3V1vX{-eC#I^NJ@mjE!Y!Gxo{K#bI1|%t?1CIR6SldXX zJAxJe%qilOZc1jlH>!w~JTmlLdeA=NVA-XFwg!tz`iwz;pqw3>zjO53t~+@sjUF)A zCw%Xr_LV4>ivhP%mMpbhamc^*3VC|}*cLb;q@h*#}~TuU_p4ARycj1<(rI5>(yi=cLQa_*r0jXvS3YIUAFq%7J-Qj z*s<>$tZ|Q!Esy*;;Uq8ZTD5MQVZlS1)lJdO*ci#Xmh(-y$_}B8VSao@?)=+*8$=l$ zte%=|?+y~f7jq<0rfotkXJI%sTJ#`|WQ<=ySuZeg^Qa9*j=H+X%J5`F&xN=`)TQtB zfwgiAwT5H360K$RcOWc)+`;8^zl~8%sGzlDb>N=I;H&EIQRQ+I9NKt zh;6)po05G%!aP|PldcuLlevFK5B>c z*%lrE!?6v3<8kIzxZ1X)d*DqEh)G2H+K7@~qqYGA3!#e<9#_La;}@>C)UY4G8(81g zYTD~m={NWUhY!6^kR@AtaIP=VT!f#xw!%#O_NwFK2O~Iqnj84G8LY}sg7T@oy^>Gr z*h`b-;6HvG9CYAW*i7kR#G*2rneZ!fSDgC#IXvEOdK4mugl{T}*ZYz8>EaJ~_mX@8{ z)B!sZD(J`%tp3V{#q72jh9e?)k3bL%_Ha zHGI=}N9gJB_fgzlA8xae0u3Aa9fU<$$+~cT=_L*w7TG^P`PI~g2=wYf)463i@NXjR z4+yCF%k~Q1j-O3B@Hl{NJwz_V{A>3EiEZC{hA%dg>`EaYR7IGnyHkN)O+_4qtX38B z`}Md5v^qYSF(0rd6D3|ot#CyqqMpCN$TIL(OYiEHjtS3W=+=Ckj5%RdcPqWdhN&bs zWkwo%+5>XZipHsi`esjx71+%l=88o|C3OJGb(bEo4O*`mHCqXlX^$nb2!&Cv^q&Wt zl8Fgbq|)z9q;s!9!yI)GY#jnOfdfuWvlu~So^EBvqECFeuTTZPl#hc`%@AHrE<*=H zSKpf5VyrEJ;W|h27HsILmp{+;I0G+1Ci^)U$-h<1E5A8IL8k>WLke`S3b;N+7t;bk z)+rF{Allm66VCEGoUPC0dTOCZbMP|TlFaYA2_hjMGWHHn&U|_9HTZwuSk7ceD|7Us z^1wOO_!8{cqVdC`9!o6H4#o;6d8~}%SQY%~o;>ASe72NIvs@*8Cgg3@C$$)ms$c-3 z9FRNm%J~+inEUF_G%h;nE)rwXB9ME=So2*{)=40uvzK&I@EdT*?T;!I?ADY%Zoc4g z8rW5(gM@P>_5+~K3lLkdnil3H3^R3oF>0-Gh9)il8xk`i3IdL1&VR|4vmi_lVTvb1qM>xvIu(Ro?Xue z%H$z!ykpD{`3<*Mx9Km=`PD}$Bl+eH^FT5D2eTS+Qq1ar$2#_TzY2Sl_-GLGrXKgw zie`#ee9|-kwtfe*C@_y`iX%+9#>hn!AZ`kZcyadmQ&ifvl#Jor;6d zRTKx3%%Ao1sA94Q5i(lt3fxtHb2S=pQeUH$G;$cI_pZ?;)0*!3>jMg`f*xTIp1TA` z*SmP>i-_*+zdh@+I54|cGjoU1Ju6vtheUhvdsYoS$({c!bY~3Bv*p}@oRy|i9CDCktM#d^4x zc6!|pl&Djw`w-j$VAz)}ppO= zL5X;5<8T&mnNiWQJOsI+NnWK%4HD4dv2p``oYMU)z{PD22;0Lo+@DE)8e`82=6~>S zzBzp??*QrALjqt6i`{$%GdSbhO0e-jHy1s)q2#wN;1Ikhil|!u!;{(}{vZj$Z$69~ zZ#ipQ4tNyb^z30fP8?h(2Xoz5SgJV~-(^ zuO391UUv-deaI8c8Z9B18U3pC`XF>Tte! 
zg|)N+(!}eIT{p4j{dL)xJSrH&%{(G#BgR#^544G2y-{ubMoDCd&0||gvRfXm$72(o zWnW#W>FZxi8wEt!S0lWNGCa-PALg%~cHXwslr?p-CLA+(939^hRx)kmIpVOGj1FfH(S1idijIG999rF&9y`qw4~gH_ynp9-*d5-CX@UdD zM6>XoYB}pcWzdlh&&_J>&a$7Ey)SjI?mq7-x@K~lX^$OdgLq3;u*fnM*T%sJ*Lo@8 zA(Yi}Z^VqO^<(}yp&(!08kN{KN19+H(4C136R~G!6R>)R`Uz7`U*EG5C_>LC9y^$r z%-MH#%EFkEJ3$Vfp^YVm$fp*dmmZPFGY0h;gjzC&KeCD{fIus69mQv$->8^}Cbwrb zC5q;S^D4kYMB)ddj&qxhZ!39N*p{<%Iq&->1;8E|zAj#?`FKR$u--TBOl=%4c%Qdi z(uHWF{;qIke!Hray5VpumYwYLJ}S@*nCt1R{IQq3Q{^jzNuA;5#PpCVB20ZoUW^8xpn>`$!?HtL1McE}Z6+ zD<>-_>BJ(Q4vi-%S0q=e=`>+dS1ZdL&Wl{tdPvrjnjU4cvrYwIO}#o8wwh}(u_kaD zeZJgcx@LF|d=9C*adzWwr@0=#-vqqcd$Hk7&>pJZk9~;q#7rOemBX#{uAUD15oupUI`oFHFAf0fG)${#t8g!LFLIB;V`QKS$zf2aYG_Vd5?+^H z7v?od??VA52QbMu{S`*OD ztDDg{A#R200;TQKME@I!LJGw%Pzz5f1Q`){*sr0VP%F4XgN+;wRT6kFP;4hSii8UR z)~}c+=SJm)z*h>-Oo(7StHTubI5cdaS+^czTjVtum*0bqFN0bVw)+SfStq#~q&qHj z`Nvj>*d5Van2y0#I3RQt-!(t91A}cxj8#-+IqftR+VVN*H1{guf@pPQL2_S42vaC) zU2zEh4+KBrL(lzac0ia;ajbkE zUNx!+?7X}Y{wxgMcomUd3nGKtkU!K=YjXV2{s@CR$lY@e6}O*`a+aYLGLHUqF;jxh z-&x3!F`qkb!BFEdu^F-}-Q#SG-QFUrmaeP#LnjLat5@p z*=XcaV!@H+n!ckE5->+BO1F(~cnA;C0U(Pm+BrxO{J5KsVKU33`3sQ|q#ms45?&{! 
z(&e^=cP|BSi_(w8LR24#d>=w52qeP&wZ9^{V7HJM+unZc_hdgmco$c7V3WYuLuEVX zDMVb+jxSib&hE=ZEh@20rC^mYfz0963I=XUhyu54b|pvq^Zy;?w)@IlO;Yh`0Eq$w z%cBL2o+cbiIcIXX-(Q^`$rA;Ik^LPq;gGmJ5S0HFSATuYHu&d94c&;yQU?VZd*1rLNmM0KA4stR+Z)(nSDM)Y36^jn{Twpw|B2TZ`3}snn zTwz8}r5PkbC=-sP7ahTk3UheRi4?}H&y3;oO}blWq0P3CCUGrPTDIVi>6QcDr!y{+ z1h7!7JQ^n6Eqqnrw|H;@GPBu|ZgUb+xGEqfsan~irhH6%>^GdKP7d$dQkbsSp{q^K z8b##7NE$z`z`F*Odj#E}h9$bSpDVDhtK9=e&lYd^40}My4D8Ma&2okw6R=1g$8wi9 zSEoz5I+D8Q)P4o1uJ23ZkocVnQFty5wHadn0x5yRIB2+Eu}kp9e$|%tn z!ldhP9R>PHb*)QRKNVPN4iLpAHUnS#-9kRkqIWr&+BsBd+kWsSbwVa#QT^nazkw$t zhyCT{QQ?#D#29CAYmr*09KDxUoX(;VB(7*&`uH-V^Zfx<9YO~jI_gg#9ZM>&`REZ^ zv-Yg7)OAn9|4!|%n_*UzWCW75_w{Rx1)6v`MWg0LS&?S~{ON+b*7t2Ic4stnCUz46 z95BilBevy4`_a1jHd!A~+{&P#=J0HKv|W-Ywskz1Jb zBCSLen(;r;)NjBQ6g2 zZ;`#lb-p>wa@<0R+cqT$4dF#e6R}VwNTzHt@w&r+eYev!5kmEA<`Ipa_^>1sT29f% zcvkv`cYesU6Bd-c{T_VJtE`9q&(Bgq-HPC$b zb{DYbxH9iv{x#0HyG8e+!+#Q2pi*5W%VMPKy(@>a9C|oA2akQ@{6D7Wpm7^X6e{nU)h@R@A=z{kTqfqefuTz~-GF}b?*A{(ABYg>7ZAvgz5@gd z3<-?&Be8$-?$(K=?)%?a4|K~ zExe!r74>-UA(VZ(i<&rpQ&VohqCcHV-BBIC0Wkw{dKCJAK!|!J7>nZ{DiGC~^+%oU ziRNJ*+N*8PuIEpl=TC0dKY#deO$<;r!owGnM)`q&^daDS`*!{(+5;v0s~+86jlyjY zZLdP{wyJGa{2DM-8L{&S*k=k#Fk2aqS=ivJ<(-ux&~qu0j?Y9lT(V5L1PXBzrt{Q1 zIP3}=veJ@;(TP-PY6sHD2f~4{Eo=_E!yl35U-*00O4pd#Ur_YF_g}*|vv%mqTDg$5 z(jB&CsWgW@;fYA2PcS+njsB&KOJPH_mqa-NAbB|1ixdg~cIjZ{uQ8^m65T6nOe+uiuy3v(*lTBgM(A#e#DwlM|1xFD8Fx-G5vOjzQgIEQJK{^*S z2K(vrF_DC;$0=9u-f-_qA290WK5{MYDAnSQxfAXb()SUSIq!NH7hS*mL%Pnm z=>D~Uk!Ry=3Kg-~W?Ct=!0sT68H>qfRzWSZ)pjqn+U~Ol8IRc3SWhydx1IKk?WJC} z274=3qwSYIv4g{k-&*asourQ2SJq)&LpmE#nG4p-=%WVg8ufQX51(Y@Tc4lki>Qbt zepalxjOcBlU*yZEMZVmxq{{soUm0s7BYNBHxA@1Y?fxnMT&x%UtC2K&SbBpCcUuC^ z?Y1N20lzi!;`)fztNimBD!)Sh0g_&smGorBhsVf$%#cwAwUI|_#)})oX68xxbNYU7 zhU)XSe0#==)#90m_JgYHGE{y6Jt@D0o|NB3Ps)F$vs0stt<8AluZfk+lkyexr2MUx z5ZmCaQNA=LGkAyOTxLl5`P#re%#iv<8^*zm7uSi0?8}ILC;46E-Cg{WRG%gEq!nTxuZ}p$z3% zi8ZK<|IfsChx(&S>=tXpZ`6OXe2>^7{!AL54`T~Jm!{97E5HOkCsE_DfEv%Kw=uUE zOHqhroPHzj#wrwJE!JZ?HeoYvR+l+2IU4spp 
zxE0S*)tdVR>hKzKuVWu`@8d9D;`A@@9!}zGe2CNh4K3(I501d#-BdRXa5LO&`cgLs zxyfimsHv=gtL1lmbLJ4t9KN$n(sO;nD4U9L8h2Sb7GmC}uzP9}MBd3?rn z#_LR=GYM3o206M{u5Og4O{TTU^w6%IYECs%&9r5Yb}G<46WVnWhH;wXG{-c@$8(;h ta<#va(*ku2)GIlnR|@CQ!!HM@H2$RW{{zX=v_Jp>0000000000003@ZF>(L^ diff --git a/curiosrc/web/static/ux/fonts/Metropolis-Regular.woff b/curiosrc/web/static/ux/fonts/Metropolis-Regular.woff deleted file mode 100644 index 1b74c6d546880695a0f5ba05516f3ff25d757001..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 17376 zcmZsCb8sim7j3e^#Ce68Km6lQpMku2 zQ|w^x?DF%h|Ghi%4+sF^Kqx>!3P8X#KtLYA#s($^K>Mr8x=x30wmnP?%)JIcfs=W; z3GMU1{Xzz3ahpP-9_g)yyEbS9hZ zb%K~Sc92oCti(dnMScQYj>s^$O9IMJg>b=Mp)hn#WY*rn-cf!~eo~O!LJ4?(pd1=+ zFLz>)b*Rbf9pL7s8yj!-^Q83Cs2&}$nZA6S%xw(cYK2b@tVa1}{qK7$?yE!wzU_^C zOU;An_3tOu)8e7Cq_VAlq#YRTqFb3oW{^2ZYuh4)Yxlv8qu!~e6NJ#kLU;@~7EG%t z?xE)4x1@EPbeA|+^+W6N&e=C1PkNhu3BU`4uuMwk3%8X{5-$^H>l4IR0wGAdV-=Vs zTcsOHw@c>7?@fusQyy!DYi1+i+R`Z902dF5&MXO;%|6TV^id8%ZkVb+*{7A7Bs*lN zk-9PS_Q#y9X(SlQR3Ny)G7?4N^cf5fkCG9J87IM8%By2mGGYU0t8Y zkr7IXsMo+atkl>fpD$gHRfJ7q0c2qIIXCMGksq@(Nh*}iX_Z*4&@vUtEfQ+?CbGJ| zh51rs8ynszpIt?lCX3sto6#TcO%q_dhGs#YnT1&F_UVmA(IRS?4L8gr*4Zp!?Z%lV zeKncVM^m9%itB25NrzEtz2kn@)G5lMPpN-d92~wu)|0Od{@g!{=^WOuY zrHbT3Kp8B71Nkn2$^18G%`L`uqC!U<)u0F+?z?|M=vi;M6k{1Ss4( z@Pq#b2O}jkI|q!wz)UT`n4@A~kOdv3Yhilm$*~U-XEefEB#bvcN zH@g5sRHVdC{$F?Ex3A|ACG^-DBr4 zTzK}wr%!=+Z40aiyyW!HYJb5gEL`B6bIfA?iD%MFO4IO5jCyVx{MISB2TFpX@zN6s^I|>_d>JBRPC=>JgRKE(dq$) ze~}uEXoCSln1$gD7BHlN)F&hmbQ02rFm&Mz53(n)D;KRu%QG39GLE&7qpvJ3J;uqP!`hi5n^UFM=3O#12I?qDT%LUqtzX zXi_lGYf*crb(4FiWmNChb5}f8)bL0=KvN;_#)j&??7xsNB%ydB<4BH z(_`4cpvOK{HSR{TExdA3#Vu5BeqrbSTy`VotxD0H&^gxEizYdcCU1uNZ$t3EZ!J&| zpazBr=4bII{RGbb;at0ZYoLrj0C6>SF+=lqQaJ}fJ=a1KpF|fD5E%%F_5}kYpn?!l zrT6=|Ut|o^fJdh@=jVrGDj*=JEIr6QKEk9~wd?&=Z}I@_cE9-ED!X-7RaI7bcT&5; z&tkgZ%s$-rg=;b{7M01zWUo^Z7#&eN{`g0N$*JBZB;`3`bA!nT*{P}hQ-&OGhEI|FYy*wL_uWKGka z^c=4^Wp&_g>D?f>r%oQ(IfCgxzGQ!7e58B?ePprUlj-f}c%b*t?q=Nfdp~@=5q?te 
zCFc#vpPD~BybXU!`N8Im-JT@9p?uTi^_xGuy#4A)_Mh%O^*>boj*30Ax}54H>wNC4 z?ab}u>-_E{>}2eO*f!B^rju(EX%lNxY?E!1ch_)NbeDBkei2Zwq+b0hZMukg&-5bi z*1RpWueq&;pBMKG9Tpi9^N@pvjmyA0M#bXuRtr}5ph)E{lh4LOWh{xSS@^9jZ0@-^ zv2$293k3@w7oN$=P1Ts=Zcl$pHC3slj*l&TOBKquzgQ*+{&mj%`aJuJi-&GjGxsMp7O0B5#== z_>XYSuY|GHKI3+Zs1=B+AV5@RJ6_%Rr=Y+mjm`>0JlCebq9m3ZfI%x< zuBXeb4XKUUzW~`z*v*xH^dQszFwfYsNoOM<-5I9Wh&KbZL{PeE(QI&3pKEli^PfU!OkZzA5rzy#ef7 z4_vysTMx9ZK`z9u$}ZlnK{=e$oKkHSyN zkJ3-O7w%{BXR{YgPoD3;zL>wbzsP+P8$(7WSmjCp?HVq8r5?s!61`I zC>lk#3&BOKDt-#94AYBw8@m=6xiGOYwlcf1 zJ~KWuzhi^M7{?sOC`U6#H^n%|NXJmeT*hcecSV21gvC%!OE_C4BvTf{HlSc^%vCHI zRm#guzz;11gCj*Hv+vyMsGFXj96x*`l&!kbcu@N94^?yo9PO2jiq%A)rYt&oWnUcSzbwM?3bscT~9w=^KhfK;b zAFbVHS!HVLjjspH-4oA{UDk#!Rkit#>h-hdWfk-V@iH@gkn0NiBzr%aTW@B^Hw^L=g{wUpwYDw%*Tv89Mh7tnuHUvlARU7g-J{Q zBFqDMlXJf{>82RV!tof)cGDiio4c%zVy;^Ji@eBqKI&+YYZPF$cq{(ohK0dWEI7$` z+8CBD{w}>5r$@14gV`;J(cR-&V-1n&B6eVenn5tf9ea(Ta1VALawk_bn2E6Ru>hmkF1`c3FZ&dtdb z&2(n{NcG)s^m?92PT!a6lBkA=7Xz{<{PtUycF%hQLY@|qiw-(Mah`^=jX*SULl?C7SwKTO0<3=Km0ltfv)(v&Pp-sAF_$H9Li=MeCH-c0>zqqMc|s`5m^(2hDzg#E)#`)BUy1jl!6gm4FtfNBK2tH%dr|Gbsf%QjvW%K z0Ew$nVxY9kMwBS-1CfybTS5WEOM8h-Y2e^Z+Bk=N=!Tb&a{a)9NaA8g$QoVoI=>Xs zy;_@RIaSQs+Zt=3>!fLuXiLQ!n!P6GLm)K4RNET5bog#DzC_ga_tJ*k7G$xck_|_SQGqR? 
zN{==wc?;6I%yZ)OpEMz8#YzjtKT>n+^`W&Rw%t^j5{%wPCB<(lN@BqCc$@O4H z5zCe3rv)vJNMP{gGoUL0t|l9cgGP}sJx1ey$+m9kd84~n-OCrp=9PVZf!}sBtnu(E z{6c3Cl(nGRFI{<7U>+}#-!B7=YR(eyMrWw^N{uKsO1M`@&Wk0z|G>)ueY2#2GNpjC zS}avZVAd#p)oC%UWeL_PSxE8TgfHQcY_QU(Wy#_|Ay2W~ zEknCjrCk#EingiNfIw5DDn^1ZgLQO7WH3LO_pK^B^XDJrxzv9_U3prBun0H znc~1z@kM$`D`ag zd^IXuZvU_%a*1-duiO_ksg&9Ylomq=sBlTiZ{cHC$xXY) z|KXAVV<9P~QS2=ByfhQJHtF80c%NhkO13fSi6`>M0jXOcroGD4 z-+t*CkP1-T1L%=aH4NxtSCNxqQg5YHTqm|}T1D^Fz4tnbWQQ@NFui$3Khm@T+*7%K zyZ2S?Nn4aR-nEFU>3~>3F86eY!{*!aCsr+_dnf>yxZ3$s%MZjocQ<%vfh&3<^A7uM zTF1E-%srrMO!uJ8A??lfQ>~Wtbb{Qe=Jom$?y*hF+4fcUO!xXksiRc~D{V%1zxLky zmBbDqBR2gw4HlP_Xf{DfPZ%T)T!My8+|pb zcT8WXcQ%WAsO&ziqellXZ-jRbuW;5qWb(Am=qhde&7tSh^&9JS^yzTLnB^^;`Y4it z>~H_mX;VA!r>q@;e^&3X?cQgH;3w`koqrtl3Al3{;|~|8 z!Z#t{&+Or^H=i5Kyr_9A1VA|bUSJ1Pw6eYf{hLrvIQ@Q9hvAmPH{(c-=_j)tD0|{Z zg8xAGL7r35>*<#KC-t}ZH~BXyAP|s2Kf3k^3rGcoR1I@FFpZ2sosKPw(YMiwO(J=9 z&gi&4y0~s}%=9hqT{u5*^0G%%jV>SYv?JKTtOi|YD-9(Gu53EnWYg)uv$#Txykb$VccV|Oa*(EIeZE1-SWo1r~Ob&_re+k&DC&>p8f zJa~iutovJa;+D5Ja~MT^uyY5lcarrl=&j-t{5!U`^9DOdbX5>NCg6c-+fElLblTiS zw3FS7xO4IPiT5e!$??7BUHTpSo#`tNJgT6gpu3>Wkc4Xz8uk)~9L6+s6`brW1WO3s zFf*Rw3V8<7Jva}H>|*GG2$j2_h+6^%6?$lOBvnk_MTs7RIi_R8|0??mYprABsU^$2 zZNjd1+qR3ROQ}o$iu1$jS?;;^IpJCQDsDZmVYc4i3c!vi>AsNln#9l;?jz5ZBuo-> zlwfq9_({B+%q5MtfM6MwOs=Yet!=B#j&0H@atz zVV-8BWvgL7&fHnDP#x?Y$;N84;;Fmq8PR8b%WSb?t6Og!hz@f z_uzHzuEMy(SoGSe<#N@nX1n{jeWS^me3eiA!_3wica^KaRc@t2yG?96xD!1GC;Nok z*0J{VZ*{IT?~51k6VWAYjeDgYqph^}-0eB>d!`>7wV8TTm}k-(IdhYbF=Typ!&Q?` z{Z>=*>gsCQQ`eGvm49o1cMu5d4y(=jTD$riHdEEkJ~FjnINcqJv}=l$K9v~Q^&g~rNub%lAhJ-+UnUVYlHOAdj)F`w{{aIy^x$q8QM}a6@HW> z@G^>jory9wZ%48vOz($K?uz)0JlKcLdQphmVU;qO+71=iN7DynppetCAt<-`OXYG= zGfY3nczOe)72F#aSdNO)nN%`(hR<+`d{*8O@Jpd~C=1WIwf6VU%PhE+k))aaBx zC4ne=)lxZh1^ewV*NAj02wkz>VS!D%b%-pGIpo( z$B;?Lu^ujjM49n5ef{o?d3G4Cp*@J8EIgCMwV<6$ zOQ&P<25aP1iX;FqWE3-bmw@4J3hte+e3lJT~tSpLIe@W5ro8OsRQ1{8LtmK5# zEK*T32&aHm*|6a1r@&+16LRhD+d}%#;Hpsp?p6?Ui{l^SviUksnY%NNBS^0bf6X&Y 
zYxvl@0KNr_yfxQY)C&8+)2*ipOS?pD(RPD&NF9uUBEAlmg7CEElwJ~}mEK@|Vu|KX zqM{jCCg7L7!Qe1U9EcC`YU=+ge(&y&YpR<^Cw}H5T7&30)AG4&-sP?E@_JvY=^}?$ z-7i19@o3Eyl)SgaB3IzQL3PR43cT^^WXoy$Fj8r4q89QgP5SUdPITMp?771Rf&rq-^T|4d9+iHws#r*%h(?^z^XXUNGw# z6s%)*-EfPA%m>Kyi3AhikXy~mT9Q=HWd06g5Gy7ZqPe1l+k7%f-%TscVn<&e+%83P zYwBZG>j;q!6cLil1XJPx^|tl}XsH65fgx{b$#}`>mq-8Y|Mz0P6k? z+}|KCYh$%0K1wu4HY1FmVX~A*KCn;Gr7w4@6nTo3p>N9CZ!5mqu*O9yw%GM6E5h6L zA)Y;c7Q(^Z84A03wM}qpUI{)io`fbN1rHT$;+a{fdQfy_C-D)r`jBpm*~iH$9+1+v$%`9=Lsb{A#B-FhaEnIe08OPdEt;xGZ-r5Rpu+Ftxuc&$o zI~}0ppUClGYYcip_(c4(1qxjYl?8u$XNapHQ7>DO>Z|^?L}+ZDC`2Q%P(%;fhN8Ym zu^V4~f-RPrA&gkkanz{|t_|ohjF%Ayd@&O%J7S4SN{V$(Y@ntim zLmoX2|3Jk!oPd-zZrql@}}c;OUC8sk;p|o?ymCe*EL&n<1W|iBj-9S-N^Z1TDT@5$2nki~NXVCDZd3?J5o0)o78NtQAr{Ew z_BOrZlKaPys*A9On8gokcn7-r8j7UzN_qi@$fjjJAm&O6YK-ZUvgjMQHLs3e;YCBJE8O{k5U{H-gbO#j6oT(TQhZeOap9qY^k8J=6oWZJA%e2QDv8zM0egUlIAp% z%i!>dbZ`z*wvbUpFO_rbp4wb2i2foRCs(ZxtYv{QN9L5pvm@)l^q4o`8BizpkbHRQ z9Fa1AjBW@*bTbj379f)tqcD*;%-%7gy%xG5%*09DoL;k}WXeJw-Ma?r9$1i16MRwk zS_>@O57=hy$*Ytjwq8HZSxm~fmE)NilGa0sMWI7*hZ?raamuOH1|rDQ#yHEs*-|XT z`L*1IEnWS9>UrUqD9?IFr&=BSB89E={Cc*$Y34bDd`rE~>kRg$v~fK+Je$3C%hAW? 
zB5<`gh5)_z#bh+5UlRgZ1Y-8VowcHqPgRbrjjU}+LNx;8CR0V~8$(6ENTv9#1STu( zCqVm_R!YjBpZYmjN_wk~m>>9R%N-)uZX)%h%2UwPLP-@-1&T-JQv4c@Ct&7Rv6&zz zHKy)-Pk&1aFTLMUTIMcWw)Z{^tj&`2$Cjrxdcq?xyU`&2p4HRLPX6{aE!w}gxfXug zPw0;scBKCIzDZ`5iijsiwD)Pf@i-*a22dKM(A-+FNP>HNZPlK*r5vJbUuX(uBa}a9 zZvIMS&B%Gra5?REMfFV&O?s;0GG7m*rr7>H5YqE-1*E3DH1Hx&{ZMOqL+pFFzh+{V zq*KEzacfC6VmTC@=_tJ$+d&9`&U8rpIQU2rKzB@bw5s#Y$FLj-fH|P9qqt?u1{?}WBf8bU*!=b;|0Mb)1f(78PP7Sp3$|zo zWf_}`f}aH*Z$}YJzKvdh`J1*1s7O5Vm?8!?)JVYLt<*egPNYY8O!^Mg=*--%)x{ygl@ z-}}uY1u{(y66fsf{{HY25jlB_s~{0*BsV1*_T|nubHWT`V}C%WSRMFHNwssY-x3JA zBt(ZS0i(5E!lw-HW#yP@Sh>$J538|P8P()B2qSwx*gZ{rdUuS|y4FCX;gJO_QtZyT zi|iTiBis14Fg(gQCZ2<)Z^am92I5d)%b&?2`}77hv4+OzVT+=>wF=edJDr)G^ll;S z+c(g3=imuTNL-54t|S{XfvNQkgqcHxJw+e5$MGB&bPGFxx40x%LKL+ZUfDL!;73^c ztt@u9+!p-W?l@VqVgdgWm4E8@zrbPU>-BW}oHXUhGZ#TRJIU&t>RPE$!?Ih=qbMyu zW@_j2LKWv=MLM_3>&0;!EV%;n>_Ot}{z1@KPmE)*^loqa_mQ6BiZ#tw)OqLpyLgqPBI_#n=0e|0*h!1OfydIZ|8-RW82chXqFEh78 zwsi)kf{d26wjwvlE7c5FvRIBPqlDpTbj)2zYu_V+)X<8`ilvm%;lq*@=VuMlD{MnGeBjPXg5&pvXX*GfTr*!1g5Sid`><= zezT9Ge>Urdb&O1$Pkvb%!%!)j@P5T7q*421pzM@7kaG!pu371pCji&6z?iwbE5G;j z%jw3yrCQ+~$NCJIqnMX3LlO>sA_ajjcmmKK@3z7`!9u0ZMt0Ay9HH}Ot8KNwz}N=v zI#HljfY%s0_q4(cYsv6H`sr_p{k z@s-x>?XKc7@RN*^i^(yu>9oEtHN4}rb(ZNQ z(Du6TLXum_4SxbU%8|zJz2_s}apmV_%z6L*$8ouR?^1l`pscKs3)f%wR8niQ4K8xj z+a$tCW|q=fqbSvUSp#%3iT3$=OTj@qt;u8dMAh{Xxul}8sukx=#WdjzB~G7Yo$`xo zqN4BXMQ3d(MGdu(CnmmCh`s5;`EsUzLIdr%6N4-3=27i3kU zypY&&7cF{crD$#or|qdAge|N}>WKKWq>mGo*P2{oDD*R01j8fqFgf{x>U%m+-J~Ty zCWs?H&CnD0c?%&XNUV~8{DKx zheh9wMztTU??i`RwwoP>CFcb=Ajci?BkKh>jpU#q*XxmAo8teGI)A=G)cn2lwm_{;W&C64ZV@b`+FL(oRU?WJ= z`NW04IWQ&n7JoT0k|4s$W{d#qAI#C*w?lB5p`nlWni$Ur1OX00DjgzF9bP50odPYI zUkq63J`UhCF65vxfHQCw<8KL*OhHp@g#+V)36RtvdQxae$v6k5! 
zEUm~oqMU+W)F9yBRMKpfAta1LQ`-ehHAyp5xobrOa zIrjwbH|;D_($}?8siru3_KYf#VGIwGA(N1jN;M5=i=jTq zZQ*gRO}K+uvlb|Jm|5a|5rufHf4!w*1nYJsvs_Sqj5eEp$L9U~%E0ySmY1qK z%An4XePeu7AAW8M8Is7-`wnq_Gg^;y(V?(B1})`@)D?{?4o&RM(Rkt+4U-Y!-wK@D zKr$IMCDXkKuD>N|f4@%goZfYNz}~|kC{w0kf^Az(awE}|!{#rh3|EySX>o;I(WRkQ z^MuC2TjpbSiO_$e5dKZ!6a1R$7+wicph$;ZN|T}mgJ;&^EN*+eY87WUYx zVpjDqk@X{(qmc{&s1Pxzn{O>0>e6Qr_Yd%w(Dq+fCij&SJhYSU z9(upDZ8OSFF?Q_Gplfw$*(R;-S`c@>f6laH^$!6W;p1! zmnq_wXf|AMU^lg4w&$?68ID%?UOwCIba&UnTB4nM`>o}Wo@4+!y0EGKF5d*VoP|S% zaKVojwGC|@d!|Tu-g_#B zN34PVRnVD0dO7P%fKT){6DqDmvC(X_lO^?qX>wjI=~#hJT&Ry7Ja1gC(XZkP`rpV7 zhEWQQufev>50&aYV>kJoda$6tcw5(BDZ*J=D zD!9X<>tF=fXrC3o)4BBPZX8UC49HsmOu;2jy_w)GuJ5x?Q!1Y7?QL4ic(2vcMk_(s*cW_IjW zTx+lg9;3L9ffC(<-uBy>Bu}puv3uTERqQrJEYCkbNj`Rrv}#BnC)I|lK%qFw2}n}R zz|!DU!-x~i?6fRUXP8(g_z(e~zzJ@Fq7&W9wXfrH62NFiOA6p=xYS&%H7Z=TC#*2i zVaJqkS<^ROSz^ecZKh6p1`UKiM9rqSP_qlFxHpHlnkNi80?Ni)}zW!Gs4i-M?<+e z{}O4+ln7QOfodGjjc&~AlDA8(yq+um#Htf;CzwMaGT<2`znt-62$VMDi30Kwg8#cW zROv2+!|vb>`K*9j*=z7T71QA=Hgti8EFDF@7j@TFM|?v5JA5h!GY-BVCffijdc8e5 z?C*torM$aJ5!AXlqECxW$M)!Ch<$dx*rE~{W9_L!8_>fO<3RTkHhL}xzZXYilzh8> zR4p0{LlW3x->Z)X0RA+o?`!ZYq3@WxoUi1oXXSlJ37JsWT>URB# zF*Scba5n`%%(5-Qh{1S~7k}@%lH+$u>R6m=BxmVO?=ibt&_={njqB2h-!Fr)Zf1Q! 
zK5k%8aIwX)*D!}`U>KUC-S0tI5z99*NSv8FRg|WFdwyZw^40d>WgG+~Z07z(;1g&C zH49rpIy3*BbUK_a2iEh_hpxYZxLoh=9nTl;T-<9+JOXEVPbO1&6pB7{TN5FQZ=W?v zuc)3qBK~7FvmU()lVrp$ABZ@DY0j2fXJ8K@n?mpZ1Sxg1HMPx*ax^HQ>5ORm@v(eg zA()n|H3qCS(KI6#oGj*V9a9f-IxzXU()gh)mM43?jiW4+Q-L}w#LoJ!UYj7!QQbVQ z$p(~Lm@|d$SV>O$BYk*`|IvqVmw-qg?-EF3h6$J%a;3GfS*oT?((LBnDp*p3#mS#mR7Ea(lSH zzGNe@lhEm~y?c+4G}Iosx z6py3bHSb~{yTuaU=-Plota_KA-ne?uxISq#0Jo}YOj2*CneUsA_BR}W*vSV!Fth=B z7@s6QUceXiQ~`X8AAqyanrr`yoF51TlrrN0&oyFBELmTt5~tW=%!+*vCl`3N6e=c( zx&MtIB`Hy}9EoN`GY#UnkP@>kW!4s=3%1Ac6BAY5;ad?T`MM3vEKgPg`ja^l`V$Oc z*wDf*-JlABowB))pU5g)WD3ZJh(^Yc*2dJr5WCT~{f+|K`cGhQm|#8EF7Q@>wJy|h zOY<2}au-SuV*j_m`eI%-pS$nG5t$zGSKzdI81yQgSvIwkpPy^Ee{ZeelwX7B_hEHQ zZVdKiS{>dD&*qXq98J5f{I`Nhi4c*zv(kv!NrXU56CdPsi5 z-~dl{7tCyrZ|~1(ki8gbhs0Cx?c3+QIyNNL1cy;$Yh9qtkkRs`A~#T2UAQq$PpL(D zQ8(Y)KOgJJ1JRzI+qc)AQ|mhHo}Qb6YC}3e&bt@@*Sfsi-l@i3WD(>S?9Jsq`blZ^ z?9lo)YST-_qLl^7RtzVfQ5vVO`Tbp&+^(>3fy*vmyjw;ZvC^d{VG$N*!@F27rlQ`7 zfbn;~g+8>kfsKzRNpptziU(Xz-K;?BoZ0E|<8F3-0OQMEzt4kf(Q5P{CtA7vCof4% z%Z{ZU9?c&X_uXndx!tCbfj>uMkDe!p9xa}K7Ofszw~Wk~KRU+Z_@?LiO~t^F?CNt- znSWBcQwfMLN>9%gZU05)X$k>7R%efUMUuH8HhiDl^2`tVttfLwowDWl-z#F}@Jkr685ExU-{T*&DM{ktvqbYy zZHtXgt_Hdw3`3s9EZKSpr2Dt(NZNNA2?eUwBXv7OSzj9l)&X#3;`ch zqF;)XWPC;u5IFg8;IXs6yG{+t!pu-}k|j{y5WcN!U+S;#R@u0>VDv!I&5V3F% z4QhpZTFq^fos^1eNP$j_pv^a@CE2(EnB9Ilx)``yOaPV+os*xw!dOzG|pDKE07maV*{*|r)|0$ zZddY=i|FGX>ymI45bmh@XX=h}5wYyO9uL^bt*t54YRgJbNi)}BicWXxg;#S3#;}U} zj1FfREf>K#U?Cx`x~6Qn(DeUMF2!}s)&7|k5X{_GO7GF!KUIL{nJ}#zzHRmEwar@0 zW%XXUVH>Q{ag(R%eLVTbpK)d_6So|{fNNrT@}=_QKJ9AfC=01Vuhlbgd+{Z146WGP z0@C8NM)yw&r-X`g0Y4Tp`v$~f{q9fKVFS;OxovvLIpKM{;9D-Q7l61o5Tuzy{-tSv2r87X5t3LH zyL>u;N}7^+QRRK`YHsD)&83!84Kr!-)aCNaw@-dJZABpBIG!DaRBvNZ~L+&lNx~H_dkj!6qU81f<$|nXsqG*|nw5I8y%9v|NlG--?pq13_MG z&aCH=)6k#P)iGBN_2ym(aMbZrs;fj)vcT;KYH@qq9`&}}{k1=5hKnOhxE}s0Vq>md z`m;QL|81O?ycDsSadtPT=eZ5K$MGSXB+RlxVD8j)BUe4IAj{C-%WV&V#Dnp-p{_y6 zc_1}}M<%X4l2b@-Jv3#Nrnb>n9#f2-T0Z#IpHEq}A#?bZ;rw*kNtOb+R{+6%S@<5J(o?7P=-m2L%k8 
zY|7IbA%xY(MgXVTgk&`#cQBPBPlwJQucsF-s<{HwIEvLUaw8h&=xxe>j<*^2~kSsey z@Qfhb^71c-{oBfzCwE5dFp;FC)zya4E)D-~RK(n4g>So5Jf8G2r>lP_+aEL@8E$k> zySgdvuJ_$2e(={zAWz45B3r^wd!;=x0%F8VfyhTaw(EI;(5mZA{q0xs`;Ge9ksYMI zm?s+OUTre zmJ5cDtVpj3(~b+`!i#K>)pYBTmcvd5yKH>)KG?&7c1ax+^ikY{8HaNnw3l@6Nli~& z`upX#Ods4H%-xiiz3w-`PYFLV{O|wne9fqI1v$`u1ALsZS?>zTR?_5QD3+l`2%k9hXOY)1>x3)(Gpb1F(#t-VBTbP67XS-*5#Q?_$#S+{58b1zI^$2rn zGR)gg-;dny+>a$3RWR@{@-Tc||2LnHMFl2UPR9Wu7bt2$)DCqW>^Iap8IuIw+@lWtoS3$H}L-yE;N`%}v8jFf8B!_ILr?~ldC2CjHk}-eLRFkRMpBxHm55G6 zsZ_yYLPjB|!-cFPbY`h|sZ5EvGdq*HOoK3&FVK^QrL&gr7QMI;BmIv|o>&U7w)=Tcfu z&&O2{|&*4>*6;e2vKjwk4V=L0TE(xrq`^iLwjYcj^AV zo$F}=qiL!;1xZDgs#XKBhNU)WZtqkuFgBoTMk7~5Gnh8{T9PR!8hbZn1d75HQI5zY zt>mrI9FQ}bMylamrs>yBrb2TiR&wXL#gfrBbn;|SZ4|Yjh#@dkX*@^G>S2`jCygml zQblWKG!rCEBM132>+N&(OeyIbOrx<~YeI8APnjyE)hRvcww|b4D-uDl0o(@Gbs<>1 zpU#x#L^e9`+@%1>9jX*Q#B(k&klB~c3xE<9p+x`dRG?i83M~>IxH1PJHfVukCX8m9 zK!#YKW@EmPiYI1Cxjf@(r_R{vw_gFm}ZisAnXlP_4 zdTrk95P?|=L!_jz&YiKuXdzbW-ARTfB%&_>VsnjnKWWUa>e&I$DP)A`!=#3?oOL^9 zV=WZ%Ius$5%wm@p1>wo3LN79(M=ceZQlN`IfsRmrtJIOe&!PRJz&L?c164%tA|ubFKt2I9=u*<+L7y{YEmOh*Nv2 zNQveKQ-2k+$KD+fywJo`ufYsetVE=nJ0ksdd5-jB;=m2~fUe?!J3@8e_9YB8^En>T zen22TFM(n7&%0Y%W9m_;Wq)u`Ke~d4#D0jJ?FmTqQ*^m*J?3)tGZ@%(zDfM24 zygRgWWIr+$(9@U&+h6J>bW0?GkA z+5dK`lj%!JkS(#EN*DMlbF!t{rHn|!j8XZJSU|t51z2qo(l22q)N`ICsK=y;X#0K6 z$Au^q$)y zP8*<;Pv>)Tq{7x8b#B0rdOAEyq4~uh8>hZ5D4p}+B}$u$dK775$-}3YtX4VvdnGM+ z1OPeUJN^{B|6KpK#vX_W=ob*kkG}^53=9d3{bO-{YSe$29}E1S?x%h&$va%t6lHjI zXSd^JW{W$iPLgKiZr-9O9oAHcE!<*T#zY1YI);H7;E-7nD%ONV4H<0{oOniPbX?Tj z7+f5tAsHQK#n6N#9Uq{_fXNF=d)7SD`I|Mv8~ww5W&PvIKCj8u0O%eg0<-=yiuttg z7b4IQ<1e5f;^wXGozTn`?hS2)&5ebPfihtAl!VT{9v<)_xS|xu@=+e^BzAj&;!6_~ z*m@as8g8`?7%KZ-M+>ZVZQHp%1aQ~Q>5$(gGJICa=;oh_wpB3}g z->iTx0HI&&6(oO3O|pMp-68yhO6v&k-HizPQ5+K-$%VDp*8d4e1GoH{>?PBP2*|F_ z9zqV)08cr<%Beo4sG@f-SLsdEN9S)S^V8FMQ~rA#k5|R3F04S?ir2^M`>gz;9*Z_0 z0^qm^L&#p{&Y%dx8C6$Do%B^0NW#`&udT6X?0IPeV~yM<+h%W5+iaKZw!Ms9Bqi-n zs7$vVVH~p)_M_CxIA(v?X~w@Ed7dJz+xzp$BR<$`lMcF(%r*FUs=+7tBx-_B@gifU 
z&q;MFBe}cX@A4(oeZJHmN%gp|mO6diIi+_m`bOVEZS*&Mr@wbrdlD-1sUKtKH#o?Y* z?cu>hnte{XgT*U71GDA6gWMjLBwn18(7#gt(Gry}%HJXuOPTm1avd{dlEKo%_74+=rQ|{Rc6D7_ zqE2g~C*@Zp^wT=6CnYoZho00km(!E-cZ#nlbEM?O<>F}ON%;zTQm37^2hROHq7DT{ zJ;qXvn207!#dI`dZu&nY-2lE6yTmWVR`C;^pOZf(ZV`J(hhY&G19S!YJb-Ep;B^qy z596u+oO%^=lQ0=In8N9o;6}_qEoNahhGPL1;xe5w2964JP{1-g1j9-^i3~R5b)K>v zZ{lL?#Jd=RefXUH-8hVqFgSW~1_rZQ`ayIBy~i+KNAT=AjKVm&MqJGq*I*j1KoRrN zgxffKHfMKWo+d8A3aU+W@5NJinz^-jj=7ic7M|er_wfSupbIZ!Ki^>+x^V(K;0QL< zh5udV+c)^<>b34piv7D)mKqZL*+E7S8V4R5R6FHP@B{_EC|(GoxK+;pnG1 zra7)TA)WIuRjK`*KP}RUkONfq!XRrMd_YpQ7g0RR91000000001) C1H}^n diff --git a/curiosrc/web/static/ux/fonts/Metropolis-Thin.woff b/curiosrc/web/static/ux/fonts/Metropolis-Thin.woff deleted file mode 100644 index 7b2dc6cace8436edff5751468abc4f6ea6f65458..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 17128 zcmZsCV~{9KtoGXWS=+Xav$k#9wr!oYZQHhO+ur^5z2E(LyXxt#WS&lHYBE!)OvhDL zR8&?;NfrP=@DuRI_Y#X9007^^T8Bxan6QW_008f=3_$rG zfX#qu#N=cZe|1p+03g41AyJZh8u`Q&l?8u&Z2oef{s({n0J4g7i~s<@X1}uZFZ4{$ zOJxnM_3Z!vKnH*IB)@CWNJ*CCra@iRMH zM<)OPi2v>#`4{*w1 ziiV}KHng1U@hyd+pz?#o>9{sfs^5Y0rTzEtW-Tlbky{Z0&=6@n1OjwiK%0;{zpjnj z>8A}$?)F|L?QYzs+fEpKTa8rBt%Du}-{SJM36zh>L`u@D$wzK6Qxb0q2`Ai=gbI?r zoZrehE4;`m%3W8jFPl_bG1jMQVL~`0C&Mj$@hmuS3eVpwza!lMzhcfuVZtd}&cZwQ zwY1+uq^Aa(>m?gEIxfrYvkaBn*%u*B3LIhrwq)wZvCTcx5AGmYZfLl zC1E7|GopCQIgIQ~uKzp=|ACo{Is)n(OqNeFDY-revZH@APb)1zt-sFr+y zb4@nw#U({uJTA}9d|`7N!05TN(rA+rD84l0oc-anj2xUIEKx`1DRK2#gY;dXo}+>#TX@yOd|#vyw@G<&YA2Vy`xuv=qe_ zvR$C@$a2rR;RRni3~`sm{LD8u9OX{-oYx)`b4yCHt|y&23Tw@O3h*qxg@@WpOm@gNoh3MEOoG-puwLg$Il&vj6bO*x) zdD3@_pmm1`BP-x1X3m^p`nh|X0n~A~y#w%L`*gUxgW0hUrYwnBVc%W0PyRZZz}apX!PIeve!b-3cxFE=Ie_xqb&C#p8M99ox*5LX z+@ZX|w*7SM5#QQy?i>;@-ne6ht+DGA%DIWjd2l%U2$0Tx6`TwMABGu4HY6P?yNOK_ z#})i@wiuhBg=(HRFP_KrL&(~eeOgC3=$v2Q9T7Ecx=$H7B*Lu(JjcRhZxh1u2SniG z`0}~_OVvS1LL#zlz$tH0{a;G^d<00KzF;2(FE0^FND#OnF=7TvPFN7Oh=7DYVH1>e 
z$P6e$2=)}n)T#_*wYZ!S4o~`#MH0px`ME$9Nq_oj(wVSryv)29qW7y7&_r?RjsYH2g;m)Lo2=IcBstt^QyXx)V6$Z|!Y1LIi zsHRe;1`kolKWJ1OT+aw3blRk6F$lT%v?+&62*{vBAwK~BdMAHDXwddKv~?$c+D5tc`FfUF zAQ5>)bURhKMf+47W@!XiiIuR zAsKoJ7Rb^JYfem4E@cywEHlwI^`l4=G))3xeY)zjx#1=Ip$6=^qPc zo}!?}M>Nkd%+K+13a4LIikHso^0>FB>*o!ns2f+Um*RoWQI6+ZJH_D6QN0WIV4w&Y zxJAX>F%X?4TS@B7l>eQQsFJjn!jVERmYlCzKs~!Y%eencB#U3(@-juUjv1CmDI<>^ zKgHxJaD9vhWMD5h5RQsGSB%t5T#=!fmRc%iErctCo*z1!Z$Q(JIxGHT8RckafP&4p z599oC{=95mv!Td#AAKO0d_L+NG8*Gvhi_qETp7b6k9A=vWEFu^$K*2N&n}#hgOZR$ ziYh)*$ar?&bgu6^d^F%dTz_P8ZbmFyMmQv)lQVoL5wo)L(B_JdBpf#KU9Tq<#KTL;W+?`@`&BK0;)^f2Yv*VCz+o37@;5{SQMLoOo<#Wng za?Us!=(9(_*Rre^`62rM$KqoBe32`PGN~eMmvXeUPI=zm7qNb)sz8g=C$7 z>7S)M%we?xQnfW+mzLwdnWZ1laJK>urz)E zNo?hkgWWy|u%i6K{@qC@M}K}IOG0fss;ybels4dEYRFNj&4@NRbDCy1#on?tdQ-)Q&NX1-z{c*S zbz~Fzn(f-wYPMg~J-Fpgz{|OtcPGep$o1Isw(Awchn_D1Z$kF4k9>I^#N1C+(Zw9Yp>X*(yK?0?7i3K(LX%@hSK4)&8z3bYze; z78?;0D{y9+NSjD(5FR!<9EXdvI}3dg&C+nV@#%PDC>k3X84HWgU~&~P+SM5D3!Kf68&sSYCRxtZDjrAM-*Lg zkfylQpZ$h=*kdE=f!DB0=}u%x#BqiF`=|7vi(O>5`A(!#>*r6!#D<0eqVxU_4gXd9 z6DHgj{5m~drv7%ZC<@PyB3HjASAE#n=+Cr#eGBOuU+b98`JBiM-GT;l;?pq7)6>M? 
zsC4X)BB3AS^!px2$vX$}yJ^Gmp2W=eo_ENfOkp~csx*B=gQyV@TZSb`j0Zk1+pn}i zeS48Kk!a6`AI2B7PQ~M?Ki-cfo&P%9^0%?pTnrP%Fo`Nu*@#V;T?lD;IcUEnNu3T$ z7Y~J3b{UQeDh5SWX>`#?bbaJ3-e&v8!V4zENa65UFw;L}camPtGqr?*QBIVqxm8mw zhn@F4?07iP*&?t;q<5LGQC#Ca2YF7mA7r{I@n%Zy%|DraIBM?NN&&L(@Lqe|hJOt2 zS&~_l8NGuw`8F3vm=~GH8$sAUL>E*RY!=KFyImW4n7=IEC3iva^4M*j&IPqMFROxF z`uPcD{vr)SAqTbf1L+slN{^z&K$!+h_VeWNyAgRIq@^>Z(z7wMF}1L^Fu5|k(7!Og zu#IGZXNqNrWtwK3WT0oHrmLp2rLm>Ero(5%X3AzhOtDSEPsvW%PWEK-rT?)05WR=_ zeT3ib(=&);6UiWi|T`J;KASz2=)UY6PPRTF)R_J6F#-xZ&HTmmU2s)2s zp4_OuPGBs_F>im?>7?CGh5C10SL z$Qi<4&jgeYZ81fQs3}}IOCU>GobNnI*`&NFq_KE%t`)O^y{f$fD0vD09#tPTJsnXc zOBoPP1ql1Tr)t!y-qZr&3A>DcG4mnza?=CV_?1F|C7#hJ>8g*ah3JVd{$nY-ven@wi1`ZK7)r z>GXPv28F;^=2{iY-{(>qdk1GA8iF`wlPcpU@N+$m;5mKsS3LFBMgBdNt$we6tirQTQogUq4Lvlu$pxUjQ!)@lz(f*4CNwb_%g_3; z1pRfN8dtVdl~re9a2`JtM^=&13B9;`o_o_|`@q%f?#PM`TV=cH3)6J5L{oMFMJoCc zma33{@ zmRPnW?86mrTTd&M76%ozuQGa4vbio(oNJ4ZpPP%1Z<{Om=Rj578J*iyde0dk7=#Oj zm@PL4)fDY75T)M(!QG*%pglQmr*>mT-tfE`u^2n<)8r-U$G7Uaq%3Pb@DQPy}c)#z4s<# zoHD%a&*KppouRWs&@|4JxNv?hedvRvd5X9ysNE_i|2LdFEdZad?dIHm9ph_g`ssPO zcEVhPHGb(E1cHicJP<-Mu~D$3oaxlkK0c-i`*IDCuqKx}uNK3xEwUncys{uziR1Zh-z7& zWTs;|jsz>>aYT34uk2A!x_XC~2@mAdl5i1|O({xQH3s9Z%|zT06=P3iO4n=G_r9;d z5?U>8l(k0lp3!NUH(Nk5d(NX+%X%^^+W3WTA_t{VNFl;Q)Imba1V$?m<>g`|s>Tdg zwHhyXod~VC;hqq7tWu+ibEpK#MN(~p4$?p4 zlslkr30kJ4P1mGrO5c{-NR&C4+|x8Uf<8Ob8C)-#C7W`ARI7whvqlUu2_3WEm4s0G zBXs5$-fSOTwShlmb#NIS77;K$ z%GnGhm%gUp$(9P1Vuox_MfKTbGXEIe`wNp3S1L8zES}aWDipHrnLVP%P0s+6lE^7* zG}|1m)*F)#>LlTrv4KtFw`5fHOSe@T)b@ZUn_vo-?cbH8k8dpNc@II(sN4hAWGzm7 zO0Kz#zi)LYb8-VVeK7BOIs*5hDks#0pSm;fa|ngX{zW^3vzlI~rt4n4N3mCCa4DH+ zB6RR8R*z?FKti*~HbbLHEJK5gva93ex`X|_uXiI}*0gZIq8y{(i&#~)>-Gfm;gc(= z#0zjar`?Zxc)unY2VYxoz)lV&C5A~JI!zbaf1fO>Urc>MX($?)RUB05cZ9U80t{HA zaL3q#H=TGo(LbR0M7=y4v@a+oT{DK|-KDoB+U@IGAm>4p#$o*MS7(0Vj85eNR`;C3 z`9}y{qE|;bU=Ynmg155CC?g44i1;DSIHG*$a@){KZ9~=)nKLzW#7vu+Iib7{^ydDF zZNr8>SS9+i&sELX5}FgsJI?C^*b_Ighkh6NMsm8MB(P&hp_*JVe02h62mY44N#Fz8 
z^RHrhX;;;r<*n%x(}q_y@*_fbs^(zDu}#bRn%*^tYaB-?ha~VLhHKh;W@&FZFJ3YB z$A4$g_VD~6vklpcbj5FH*!D=UHE~nc?E2qv%PrqM*Xxy*P&Hx?a#xmi=P=80hMno_ z)}}gdrsep}-uW-r}8(WZ}7#K?g8%I z%G^%mzU@6*YfT%_<72j+>|4ht%{QEH=EabApw9-+H^C2p_aN^k(>LM^(@)gJ1m98i z1L!C6H^WcF&mW!+wokfG=x>&ve^lYq#Q{3SX%C8VUAU#PvR^>M3wieds=I_gmtBs3|Q;l1VBTkD>YmbAEh%|EG z>p4@i_y1@=@y!IgZx^*DFHM`%D>5=OI?_EeL(@z%;nQLZVBL2<6^IQK?Ljm`6^EkD z;C+yI5=1B?n{^$_3KnJCiP~tUsClRts848CXkuyFWhwAFWQ}u*c_DcVyiGp--Htu9 zT|Z2YXisWY3fDyp?MD=$ZfIDk)(s5Vpnk|z#w`*=|J9t&j3P!JQ@ZC_6NXDbCeA^L zd#TrkOAtCVau4z<-@(O*ezU9&A)W4G-YC+pk{^Y-F7_vqC85-A|(4f+| z&_J$ws9dSU`}*fI=X3CR{7Q4LdC@X(Q@X>^<4JOe)JKAuWTu!e8~%!PAX!OaUtTCX zhD2gbf|Y!!eE*R!Az_j_nzpHGu|nZS@~i5-yw5_rse@B4tKwR2rb2Z|j1uG(oE<11=pB?8ObSj7yOtK8 zNl&-;%&!A*2Kbx$O0iw*?765wqC=vF*1OtCKYiMGf8tYZxyH(D?TK{_&1j-AwehAd zrv-i0m+nNf+M^Bbh;jUi`lNWujQuK*^d6`^Jb-9&ZYO?!)ZWYP^MLxA`UJ+_#d(+` zG0QNEZ33R$w@8pbcpH^eIZb!ZDEI`G8+Ag}O1n;WXbp8nB?IX%uqC)C7#tKfE<2N* z`R;V@fd8AnE08ao7y2v5D4G&cskhuqu74$aWLrjZ<`eMueLZW|dh0`I=(fc}%A-sh8M*V^gA36yq zA55$#wVrT^K#1lle`qI~wa~K7po4mhGrj!_*kg7~DJNYMT2fe{T{! zCWo1BWazp8nmmb8YblGjhhLTMTRo2uuWd#u0#} z|8&M;A0Ap2`G|p)k1i_Uxs8DJal6#hC)v;547JzCwVN}G`~^!d54QL=j~=hiCp`Do zDjQ4&He!af6SoM`H`Sg?d5URqzs!%+C*~1Q+H196@JWY^V&cw8UO#?Y zm6zOQNl0nXnU8$SL$ ze@_WIsE1AfGb{dSj0(7ut~lL?`mC zl{A*CD27yz-|b4H6VGUMTUtVg>ee&&$D&K1_k)p;8oZBJ0x52sW!7gDfgNCM3iV~U z;l!p}d6XS{;Xzo{`7VTOiCPwQHU%%h%Zb*HqGIEtraMoMm35(Tv-&%;pgA<#`Leit z`%vk+(e!0@3+Q1#M@)}b7*5d&u(PZlRN;O*rhTPfK@Cd-T^4|V8g5QJPmRn{we61N zJz5)ikh8-bp__Tt{p&*EETfjT53Ur2g=KP|V_4(|i=iFd12j@*E7a`Fm%VT7PR>Jc z9Igd;YgG%RajNTP23ami`N7jY^Z3NR%>IBm1DXjFkFwHR;&h4_IWneb&wbpOp6?%> zjY&8Z->QupZ{r%L`L^3)Ns*^SV(uxW63a52ZKL=g%*VM|#r?d{i=M>S4YeN8BCS^j zH8jcjg90ghvJsb7@$T5^3^Jx{$3+;KS7F82UdXxPawC`)6W>ksi7PYk{CGUEKP-WCFir+1t`XuY<-Zi(l z!o>PDzgJhc(^a-qPZY>Z8`k&r#m6`u)h`{dr*A`_h4+<~yKx@#7eqo6jyML;%Cp9% zsKkL~IJuvA3I=gak#Rq*0xK?y*NcUYzsXB@qJYR>dC z?q%>UvycoIx-w-r(P^vxI@$9Po*v0}u|C;!RH51GA3qqjP%ixLJ{IEGE`iSiyn_gx1~qkUG`=h_<&! 
z?y_grix>gAc)h((aA+*(F1G99VAs}>bR2sdosoYLYbPJUJ4dS*sX4Eun~Z*#YIK{CvbtnFXJRyG^-!S( z8w;h~7Rmi#ZAOwCZm88B@revJ-1CQOu&=RKPLrhR>|hf|;WNhr{&*`xA$LbiWIc|2 zB<(?;gq(MrLBD@-t{}t^54xFpLak$y!S$@(UGgfvRKpYt4KTKF+yV&dd9T28n_al%suK z9R&P5=gYOwL5}VN*7(4l@Ff?>LaG*wrNKr_@+52<Jl$N|G?@hZ;YW z26@!xt33B7SlST59IRu~r1!`ZWx2M+4EQr*4gd?|hHp=Lsdsqfs zE)wtbczD(fa%@R(PxYPo&dQFOqDt`#kHP^&APz1Fhk!1(G>X`#%DvLs7`#Hbs2lz< zl^ON$Ka29H`mrDKp$?r3a*beX%4W~Wb2Cd|M;}Wd@X-fk_+q}8q6=%pR$Nzuj1jqd zmnUb^hovbMeJ0Xl9Ds&C#N&I&un@-^afy@g_9K|B9AOpE<`ZZ4`P;pa^et zJm>HZ+*rsiq^mK2Pby~ijbux>&EU|tpeNf_j?3FGjwi0qO6T0c&!)!vXM5sG6^33L zWnSMI>`2rSfJ|(bsY{|Kd`ls#H4V%eQjf;6JLj4@x4GznuHPqH3y}7KDGrvIF|=P_D-srt!mH>mD{e}g)!QK%IA&>WvUKF=hv&1?*4zKzp*N9ZUH z!Nokf%o_Vc0V3Pq!5b+U7b4ENkaw&~7EDCG&w5QgrMen@Jek_yH8m7ow#PWO_s~Sz z34mN!!8l>x&*kXylXIQ7VF)`yd^VN=jS+afeLE>SbH*Lo>P9^Rd{Br;OpihWP{@%d@6$6akHe7iJP(L^nYc7XThA3HW6kW zxHk`(MMM=9z}mDl@+jSe=Kmz0$bcpco}2X&jlE>(Q=L8g9Z)&4*?uv&OQz>FLTPt- ze7jcQERfA)q;dZHu66qwN1bkeUxXb6BYC%b=76QYf3f705(y7^1Os`n9vu?QfeJFt z<6%(=EMAX8@g)x?JF)!h+M?jQ$?V+^R^s3~pd#Y!G7NaDv-DhN z|K;x|M);2HuPN)Cygv$)hI&fuVtNg1tJ->JekN3)n%TbJf8IG0F^U^LakeCE!7=U* zs~#QD$0JbT-eY8#7SCBYThZ0!R%bMYgE(H?Sz4I%Dunp+oc6sy^#wbdT;Q<4tI(q0&Wv|7hBl#2h^_ z7?i^^iD&f}l;3-xqS+*5Ch^rkE~t4rqqd2w-L-+dm*`bGF*nukieb$aWYW#*l78j{dND4)C;~&HK<6VCF{I+YGG=xWWW}?q&fP?)=>y(8$;JLBDdK z`f!WlXOQ+2ZHJSR1A_g4~*+5^ivB zjs7mZ7bfx8xPw;pLlEye6Ntg_5YLxhLO3B~4?cUt6;A%w7YO$Ipwtg4g;SsM%+j&!w62@py?7*e9EU73$zTv%pJFE=S{m?=Lw){=C80`1uC`_-1VxYZG?Ww+FGf+Q@QVW6}E$Ht0UfP|9ZNa$&cq17;$8vi1 zfhDSCtaEu>HM5ib^Wz)xW=Q90Uso%GI#L%8ePU;M3<$Y+A#*wG(h;QNm;92!nv@-( zxVU^PLRtzPLE7F>KA-+0gz=-?F-|+uv1-AVre`b3|9W*C+BH4CRdsG44r%39=aceSv)0 zhE!j(Q&2fqe}M@0H+{WQI@;yD)M9L^rzsKlXQkG4w?-%**AD8DDIRy6fpOIINb8Mg zG!dQ8gT7K_&;MiWazpXa8^-`hI*3?{PKv3=Jn3qJT##Sjf~xF9MxKYZwZVxQf54C` zrcGTxt9pgBL*1I`I$$CK{^y>Vz4VQ5c{hp8?h#`t{_*v^2`Dt5uYJ8+{-B!YoNGX< zh4|>;!*UCA(RQu4xgq$X>hb!nfzMWS=d>2K>cKFxQBu|VMi3Vql*J6=vbJ8?P4H#G 
zjXW4)@Z4^uFctdK_Zks5viQ1}EhLB=okP0O{N`zzP+zA1=y{Mr6!t{dUjoNCAu?+dCe0i?-T=PK3F{!z0E1ka=g+_WzD$^ zJ?e+z={8P7$Xh3*E3*|2zn;&F5pclxeN0MMRBo^@c0c6;)zZap5r_asd~Zn&N*RU| zDmG3e5ddF~)!%PSQKd>e>CRaNA$5o|!D}9-*dw#a$vn_#{u@HzC`?K}Sf3p2bOToC z4g)iy)K?uaL;bI3<&|7O(kTf(p+@Kl^8opl#cJ?u*c5AfTz6b^mgFnVI}G45m{pQq zdA9|q@m;nVT8&su?(!Q77?=~uLzxcDOzdXAfQ2gK1Ur9cYBIRKAzc9Sx-5^G8-$aJ zJhfBe9zpNexG2V4wDKvz>|2PTZ~>Gj5iqHKCfB2#0N$V8_kNAAAd!85a28WTHE4`L z_0uu5I8$d`4bazj-h-c(_=_(-{h zuxaOEnMu0MGZM~ZVNWkwoj_cYma#Fv?QN9)_}}_B8rqyVfA1puJP>%0agA?W23c)|S&)g#vi zti_Wr;72fqF~*xcIF08lJ(@}TahX$kfpUWgaSt*`t*O3_H0Ys?>;o#ko@44aXUL)T z6JQrO>H%+Hse}YK+-6i#H*i!@n*#a!j8^FK)8Y`+$xPP!urGc~HxBEWRjtZ=kILVg z^~VG7$UH2va8F>|&76$j@ilu}eNs`awO;40YvX8+M7+puZMjZ--w6(c=mTqlvf0(c zQY$w$OE2tJf{C&Cc4e~42;sD$c1UgafLx0|z7=2m8xOVo!(n=};ZyULlrFRmQ@Ju% zXWaO@yBN&Fc4dp(2wR4=9I5_KTkC|RkjA}2b&E4vL%D3n=VqWLOMJ{=Yr+-0yHQh2 zud%VBm~Ct30tJ-}b~cylFJ|In&^D6iQMhgre!pjU)`Ajy2^%VpG|jUAYC{i8TIJo3 zo%q?mXL!6ASwI=RA1}U7Kp%W1->E`cZAB!*tVsPDh#s@-{dwkR8A;TJ*newoli$_3 z?p3*Qal4|?4G*52VzEdYCtXqA)-7;?(i9f-A_osHXT~gm8&*{+)p%76GF}@ikK+JHfRDJHQ#`c*dHIg;fbW?3knnyA!x*;(yvV;sKcosCXdk>yk5 zR7*t%%bVRXNr(nDiEJUdO+rnwa=;3lRpnD@)a#u-9xv5Bv(yV(NNlrP4>wSu(WuoMotE#q;@ecS_Y=F3}U>0y*%D((r{K(g9Ai7T z6rWD5!{uRrLiCY%afQWVz8H=w)bVlP5xX`K={)aba_U-YAJR{WJ!5&}rluZ_Drvr$ z-%RtWK8iPEd)4i^9p9*KwbG$zVCeaGqXHBjnpUu`sPwk@Y!0DMcP4QEf!qlOL!>f0 zL?qXJQG@x#Ln-^`qx05_J6cr0+)zKsT0DD!MtqJ@X3+SbStTJ#)jmYA*(df_6MZN} z`M1VHXG-M?kO8)5Q6Let6Ax^bZrpbfv zzy|4Y$F&dHUR0G4~-8@Ckt zpq6&pdOB>FGAxOKv z$b$H*U;@_oLCJm!F^9y65ez#OrQl`JbI)mLhV=tb6*h`5_nNN@P^ps3fGuUXzRNtlBAv#%+w zTXFc<8t*#;uqQ=wj0jkLI#$4_=-+jyLdFRCYJ-_dJaY~f*Wrg-_UV}uk9>>Hnj>$# z?$y<3CYhhg?r2tq$~wv58%AQLSDi=mpAQ>wxDwl1q_o!;hof!0R@fMruh&!vs$AT6 zXr$FPcTk_!%ZpCdcFi3@$FXqyj<7974u{NHz$1lNOf_&^bCw6oj9W5nJ4*%x&5K+8 z5i?e+N$FP6GtYod<@AFow&tC!>1gg2m!pht+Bw5LY1akVXO%~qz0X^j9lgJ&c60f^ z-p+$xpmC^2`n?+7FUJ}LqD=oml~D5|`=0NNFXx)8nq*(pW{XVm=mgGV4w`RE?o(ze zsraM%LKhikwHLa77Oq_!SL)hMeEK}(OT%d|bN>VB4$WPikz2+U`?HRgtoGJDol!k9 
z-%?;oMz%KW%l7@DB!;{)gH(|M3`Z*9{Xxff(_z=w7Ng;rhsUHC$jAI0ND8o|lm;1& zDH&veM($Fz`!z6OkJHS>YKvmqp@CCrY&s-V&KR1gd~w1#Xrl366jAoBH)WvB3!l-> zjKJK5(M~?XY~1l`Oz%&kN0`^6s?k7xkjG4HMhyr;bQTf=pta<- z>M!&S$Xh-M8mMhWj}=8BG)-$KE<&FA1lVufT8>+=3?&Euy^GhE6R<6VS3_!TqI_3_ zsU$5CVo{kPzy1$o=@vdOlSy51h>{L%E(WortjVDmJDt0Gl}Xn(pR;v z9qe)L?qtXatT?&J4_bAMMS9Hd%MMWch0M&>M2bwMv_63xyF`kc0UmY(xr1W8V1?p1 zV~;Bxj{1~@N5&Z{y{623vAjlGTuw{O986pnPF%wAFQMZvE8;IBuNT3bH&8J`>=6DS zwZeeJaRc82LEGn3Q-pMF)79-UYz}kjNt8BwTS2R@)y2I!2>D79W+PyAs7ND3D{L%?Jcw{-xL`<&3msD^evkW$ z(2~#%uvpjSzgI{K3pQIx#nPCm9Mjs84?87Cmh`}}%ir%G#uk^`tKN5>uDILIv>(%P zOU7U=IG7BLz8f1jwqA)R51!LsT{G6Q8=F=r^UcbCf1c{dHaIw=DqmTUt;an`J8R$% zpM$erXxWp{HbSo*uEHPmGq;)+CHb6hke1!ehWw{%=!z^ltW#yP`#P4MAcK7@mPFZd zcziQF9XlS(Ryu3ZS-b^_2t%BkHF*N;MFDFdds|dkUedMt%R8<--x6=t*~?bs=gx#+ z;gh5VeqxtGX$+!_Xb$NRvXw{C)i_f8->>J4fuw^zX&1-APGuY3yx7V4JHM4^JbpTO z`b*B)T8O;b0HzMYlDD>H+~_+K&_V)xAmK%CuJHETG*R}BUIdv_ucr@gu%?N0|IBRA z7o_Eo>BTL)7s;Qd1#My(e22&NAT?m%FU1LT$Hm6e3wMMGDDdKGLg`m6aSVprX5bPR zFip#I8{O)dua9B4Ks+?~j_j#@g1och`$Dt3Ny~c|~@>r#d`X#Ps7ZZr?LG zqE(})hE$9+?Qz=cIs&({)5c>BRP8&oB5%T8!@k5Dc8;bV2EM*~VdM^gRo_gcSuE%pbeE`A@Niws-Rfb zwWxefAR_KvgmJS%R#1dup1>^qu987Slep~t_HC4tM3t2?ExuCdso?)8;X&3`szu~{if}&2m~0Dc;aD1z^`=S> zluP}JEQED1G$qvX%yIPh+~ThqL|PiC1y`j|S+-fV_))$lpIGNfN6l#vMIxDu7KeM) zW<(=dy{955rKc`IDU3BIvilT>{ak`FwCXBbO%}6_(|p>Vp(vd;(>8~BSX%qpNSWrP zn4=3`l4Ve^a>d-CiuamwK=3#7Hy3+8IhK=HW_Nsk)VMMAu> zbx-56g(kI29vWle`CP=*_6_4*KZD8=f#8rF`uMvkPnA4T`UWP?=oh&@20$^>br!u9 z`;EB=4mR1tXgrt2R}Ya$n2psboeFn1vkxNRKDzC@YJxS-a2(`}v+6JYoe)h$h}DJ@SCmMX zRkE6Fm>Bk($w`!ljg?Jg=HTyd6WecPGOw9t4I?GRSUwh#`t_hnTv8h@FOw;Iu2-B& z?H!7W9}T#rv7eW&K_deis72psWET=&&1-@ajrs6;4_^i*C0C6f*q8>rTcJIO1Ya35 zhthJK8k6+3u+UF`GoK!m!|%?Gv9REDELOup#Rr;>4=#n!h`P2J(aq+-gDWiZi=;&A zKRo>^Vyo(7qSk>&E>48;zHJUiS9Ga{<_jNEPNRvX1#~Vw6v=rk7z&FLA?qKIuP4al z!W}_Hby%?4v1^F}g|p@+-g^*HsmGRtlAy5f60{DpucN{HY~<`IAP#B?42A4Id5C*< zz3Ioo%Uj7|n*Zvs6HBBp+AFv6apBf@ipUs9 zZRZ6kJE3OrMFSXH2|ZVDhM|$%+vzStpbd1#4sII}pIB)lJH@{uM(>jND#!MmN;tBl 
zNMzJAd1Q20Q!rc^9rCR1U|c5hB@;&Eb!4&_jRUv?8rf$bKBu**7@OG-Nf#;jcw7$# zK5j2yF@svFv^Z+6kW09e+l>olFg#K&M^hSH59jhA9flPMZ#I_iDz$ZlNYs_<@4)sf z7@M96_;$UPvt31d9Kg^8fu$2$Y<0VB3jX;U24~-PygzO}!Q*w(1c8B`GsWX0(K8M4t<-+$V8C+1hNiW0oBgN1g88~S?Onj z4B;{je*lL^v1*}M*D^`W)!mshUR}0IucA_bN!muDD)KD#^up=R|0;WMhnxpPPs5376@~4aP|4 zP9wu{%!y`D%dkmyj`&~(;zlDEhV{*ecuZ}A38@oI%6VfEc?y~d45vD)F-m9cOTqN0 zAck35>H(AyL7YsfI5;DTA`eA7#K7M-{PV(h7ALhPxleXD;4831=_za~FfWYCSB zp0(2V39H*oB{wZt~(Yyr$DXsE~XryVIuv%>zmDRU~QM*~zwwkj& zdU5Nf6hf~WO$*VjUNDTepEY8;B%jo3ysdI#&MV>~up97NLrQZlnVm&HHv*}Sp+8ah zvY^uM9Y49tH&x>)sxfsh&=s4yx7p;Le1dJ0mS3BUUYCz=p5Xsq089e2{ZWtlExt3W zUA`-ndY_Zt;DYTIg>$#sK5~a|3cXkv(q2-0ONNTq$=@l~DRLz}DPJD+k~B<-(DQC{ zdjiS4>~x<|3cb0-j zdqy?u7&8;q$ElC0kI(kGpUT(##&(Nn$B6cdjoB-X?=i?PPg6= uint64(len(parts)) { - return nil, xerrors.Errorf("invalid partIdx %d (deadline has %d partitions)", partIdx, len(parts)) - } - - partition := parts[partIdx] - - params := miner2.SubmitWindowedPoStParams{ - Deadline: di.Index, - Partitions: make([]miner2.PoStPartition, 0, 1), - Proofs: nil, - } - - var partitions []miner2.PoStPartition - var xsinfos []proof7.ExtendedSectorInfo - - { - toProve, err := bitfield.SubtractBitField(partition.LiveSectors, partition.FaultySectors) - if err != nil { - return nil, xerrors.Errorf("removing faults from set of sectors to prove: %w", err) - } - /*if manual { - // this is a check run, we want to prove faulty sectors, even - // if they are not declared as recovering. 
- toProve = partition.LiveSectors - }*/ - toProve, err = bitfield.MergeBitFields(toProve, partition.RecoveringSectors) - if err != nil { - return nil, xerrors.Errorf("adding recoveries to set of sectors to prove: %w", err) - } - - good, err := toProve.Copy() - if err != nil { - return nil, xerrors.Errorf("copy toProve: %w", err) - } - if !disablePreChecks { - good, err = checkSectors(ctx, t.api, t.faultTracker, maddr, toProve, ts.Key()) - if err != nil { - return nil, xerrors.Errorf("checking sectors to skip: %w", err) - } - } - - /*good, err = bitfield.SubtractBitField(good, postSkipped) - if err != nil { - return nil, xerrors.Errorf("toProve - postSkipped: %w", err) - } - - post skipped is legacy retry mechanism, shouldn't be needed anymore - */ - - skipped, err := bitfield.SubtractBitField(toProve, good) - if err != nil { - return nil, xerrors.Errorf("toProve - good: %w", err) - } - - sc, err := skipped.Count() - if err != nil { - return nil, xerrors.Errorf("getting skipped sector count: %w", err) - } - - skipCount := sc - - ssi, err := t.sectorsForProof(ctx, maddr, good, partition.AllSectors, ts) - if err != nil { - return nil, xerrors.Errorf("getting sorted sector info: %w", err) - } - - if len(ssi) == 0 { - return nil, xerrors.Errorf("no sectors to prove") - } - - xsinfos = append(xsinfos, ssi...) 
- partitions = append(partitions, miner2.PoStPartition{ - Index: partIdx, - Skipped: skipped, - }) - - log.Infow("running window post", - "chain-random", rand, - "deadline", di, - "height", ts.Height(), - "skipped", skipCount) - - tsStart := build.Clock.Now() - - mid, err := address.IDFromAddress(maddr) - if err != nil { - return nil, err - } - - nv, err := t.api.StateNetworkVersion(ctx, ts.Key()) - if err != nil { - return nil, xerrors.Errorf("getting network version: %w", err) - } - - ppt, err := xsinfos[0].SealProof.RegisteredWindowPoStProofByNetworkVersion(nv) - if err != nil { - return nil, xerrors.Errorf("failed to get window post type: %w", err) - } - - postOut, ps, err := t.generateWindowPoSt(ctx, ppt, abi.ActorID(mid), xsinfos, append(abi.PoStRandomness{}, rand...)) - elapsed := time.Since(tsStart) - log.Infow("computing window post", "partition", partIdx, "elapsed", elapsed, "skip", len(ps), "err", err) - if err != nil { - log.Errorf("error generating window post: %s", err) - } - - if err == nil { - // If we proved nothing, something is very wrong. - if len(postOut) == 0 { - log.Errorf("len(postOut) == 0") - return nil, xerrors.Errorf("received no proofs back from generate window post") - } - - headTs, err := t.api.ChainHead(ctx) - if err != nil { - return nil, xerrors.Errorf("getting current head: %w", err) - } - - checkRand, err := t.api.StateGetRandomnessFromBeacon(ctx, crypto.DomainSeparationTag_WindowedPoStChallengeSeed, di.Challenge, buf.Bytes(), headTs.Key()) - if err != nil { - return nil, xerrors.Errorf("failed to get chain randomness from beacon for window post (ts=%d; deadline=%d): %w", ts.Height(), di, err) - } - - if !bytes.Equal(checkRand, rand) { - // this is a check from legacy code, there it would retry with new randomness. - // here we don't retry because the current network version uses beacon randomness - // which should never change. We do keep this check tho to detect potential issues. 
- return nil, xerrors.Errorf("post generation randomness was different from random beacon") - } - - sinfos := make([]proof7.SectorInfo, len(xsinfos)) - for i, xsi := range xsinfos { - sinfos[i] = proof7.SectorInfo{ - SealProof: xsi.SealProof, - SectorNumber: xsi.SectorNumber, - SealedCID: xsi.SealedCID, - } - } - if correct, err := t.verifier.VerifyWindowPoSt(ctx, proof.WindowPoStVerifyInfo{ - Randomness: abi.PoStRandomness(checkRand), - Proofs: postOut, - ChallengedSectors: sinfos, - Prover: abi.ActorID(mid), - }); err != nil { // revive:disable-line:empty-block - /*log.Errorw("window post verification failed", "post", postOut, "error", err) - time.Sleep(5 * time.Second) - continue todo retry loop */ - } else if !correct { - _ = correct - /*log.Errorw("generated incorrect window post proof", "post", postOut, "error", err) - continue todo retry loop*/ - } - - // Proof generation successful, stop retrying - //somethingToProve = true - params.Partitions = partitions - params.Proofs = postOut - //break - - return ¶ms, nil - } - } - - return nil, xerrors.Errorf("failed to generate window post") -} - -type CheckSectorsAPI interface { - StateMinerSectors(ctx context.Context, addr address.Address, bf *bitfield.BitField, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) -} - -func checkSectors(ctx context.Context, api CheckSectorsAPI, ft sealer.FaultTracker, - maddr address.Address, check bitfield.BitField, tsk types.TipSetKey) (bitfield.BitField, error) { - mid, err := address.IDFromAddress(maddr) - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("failed to convert to ID addr: %w", err) - } - - sectorInfos, err := api.StateMinerSectors(ctx, maddr, &check, tsk) - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("failed to get sector infos: %w", err) - } - - type checkSector struct { - sealed cid.Cid - update bool - } - - sectors := make(map[abi.SectorNumber]checkSector) - var tocheck []storiface.SectorRef - for _, info := range sectorInfos { 
- sectors[info.SectorNumber] = checkSector{ - sealed: info.SealedCID, - update: info.SectorKeyCID != nil, - } - tocheck = append(tocheck, storiface.SectorRef{ - ProofType: info.SealProof, - ID: abi.SectorID{ - Miner: abi.ActorID(mid), - Number: info.SectorNumber, - }, - }) - } - - if len(tocheck) == 0 { - return bitfield.BitField{}, nil - } - - pp, err := tocheck[0].ProofType.RegisteredWindowPoStProof() - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("failed to get window PoSt proof: %w", err) - } - pp, err = pp.ToV1_1PostProof() - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("failed to convert to v1_1 post proof: %w", err) - } - - bad, err := ft.CheckProvable(ctx, pp, tocheck, func(ctx context.Context, id abi.SectorID) (cid.Cid, bool, error) { - s, ok := sectors[id.Number] - if !ok { - return cid.Undef, false, xerrors.Errorf("sealed CID not found") - } - return s.sealed, s.update, nil - }) - if err != nil { - return bitfield.BitField{}, xerrors.Errorf("checking provable sectors: %w", err) - } - for id := range bad { - delete(sectors, id.Number) - } - - log.Warnw("Checked sectors", "checked", len(tocheck), "good", len(sectors)) - - sbf := bitfield.New() - for s := range sectors { - sbf.Set(uint64(s)) - } - - return sbf, nil -} - -func (t *WdPostTask) sectorsForProof(ctx context.Context, maddr address.Address, goodSectors, allSectors bitfield.BitField, ts *types.TipSet) ([]proof7.ExtendedSectorInfo, error) { - sset, err := t.api.StateMinerSectors(ctx, maddr, &goodSectors, ts.Key()) - if err != nil { - return nil, err - } - - if len(sset) == 0 { - return nil, nil - } - - sectorByID := make(map[uint64]proof7.ExtendedSectorInfo, len(sset)) - for _, sector := range sset { - sectorByID[uint64(sector.SectorNumber)] = proof7.ExtendedSectorInfo{ - SectorNumber: sector.SectorNumber, - SealedCID: sector.SealedCID, - SealProof: sector.SealProof, - SectorKey: sector.SectorKeyCID, - } - } - - proofSectors := make([]proof7.ExtendedSectorInfo, 0, 
len(sset)) - if err := allSectors.ForEach(func(sectorNo uint64) error { - if info, found := sectorByID[sectorNo]; found { - proofSectors = append(proofSectors, info) - } //else { - //skip - // todo: testing: old logic used to put 'substitute' sectors here - // that probably isn't needed post nv19, but we do need to check that - //} - return nil - }); err != nil { - return nil, xerrors.Errorf("iterating partition sector bitmap: %w", err) - } - - return proofSectors, nil -} - -func (t *WdPostTask) generateWindowPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) { - var retErr error - randomness[31] &= 0x3f - - out := make([]proof.PoStProof, 0) - - if len(sectorInfo) == 0 { - return nil, nil, xerrors.New("generate window post len(sectorInfo)=0") - } - - maxPartitionSize, err := builtin.PoStProofWindowPoStPartitionSectors(ppt) // todo proxy through chain/actors - if err != nil { - return nil, nil, xerrors.Errorf("get sectors count of partition failed:%+v", err) - } - - // The partitions number of this batch - // ceil(sectorInfos / maxPartitionSize) - partitionCount := uint64((len(sectorInfo) + int(maxPartitionSize) - 1) / int(maxPartitionSize)) - if partitionCount > 1 { - return nil, nil, xerrors.Errorf("generateWindowPoSt partitionCount:%d, only support 1", partitionCount) - } - - log.Infof("generateWindowPoSt maxPartitionSize:%d partitionCount:%d", maxPartitionSize, partitionCount) - - var skipped []abi.SectorID - var flk sync.Mutex - cctx, cancel := context.WithCancel(ctx) - defer cancel() - - sort.Slice(sectorInfo, func(i, j int) bool { - return sectorInfo[i].SectorNumber < sectorInfo[j].SectorNumber - }) - - sectorNums := make([]abi.SectorNumber, len(sectorInfo)) - sectorMap := make(map[abi.SectorNumber]proof.ExtendedSectorInfo) - for i, s := range sectorInfo { - sectorNums[i] = s.SectorNumber - sectorMap[s.SectorNumber] = s - } 
- - postChallenges, err := ffi.GeneratePoStFallbackSectorChallenges(ppt, minerID, randomness, sectorNums) - if err != nil { - return nil, nil, xerrors.Errorf("generating fallback challenges: %v", err) - } - - proofList := make([]ffi.PartitionProof, partitionCount) - var wg sync.WaitGroup - wg.Add(int(partitionCount)) - - for partIdx := uint64(0); partIdx < partitionCount; partIdx++ { - go func(partIdx uint64) { - defer wg.Done() - - sectors := make([]storiface.PostSectorChallenge, 0) - for i := uint64(0); i < maxPartitionSize; i++ { - si := i + partIdx*maxPartitionSize - if si >= uint64(len(postChallenges.Sectors)) { - break - } - - snum := postChallenges.Sectors[si] - sinfo := sectorMap[snum] - - sectors = append(sectors, storiface.PostSectorChallenge{ - SealProof: sinfo.SealProof, - SectorNumber: snum, - SealedCID: sinfo.SealedCID, - Challenge: postChallenges.Challenges[snum], - Update: sinfo.SectorKey != nil, - }) - } - - pr, err := t.GenerateWindowPoStAdv(cctx, ppt, minerID, sectors, int(partIdx), randomness, true) - sk := pr.Skipped - - if err != nil || len(sk) > 0 { - log.Errorf("generateWindowPost part:%d, skipped:%d, sectors: %d, err: %+v", partIdx, len(sk), len(sectors), err) - flk.Lock() - skipped = append(skipped, sk...) 
- - if err != nil { - retErr = multierr.Append(retErr, xerrors.Errorf("partitionIndex:%d err:%+v", partIdx, err)) - } - flk.Unlock() - } - - proofList[partIdx] = ffi.PartitionProof(pr.PoStProofs) - }(partIdx) - } - - wg.Wait() - - if len(skipped) > 0 { - log.Warnw("generateWindowPoSt skipped sectors", "skipped", len(skipped)) - } - - postProofs, err := ffi.MergeWindowPoStPartitionProofs(ppt, proofList) - if err != nil { - return nil, skipped, xerrors.Errorf("merge windowPoSt partition proofs: %v", err) - } - - out = append(out, *postProofs) - return out, skipped, retErr -} - -func (t *WdPostTask) GenerateWindowPoStAdv(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, partitionIdx int, randomness abi.PoStRandomness, allowSkip bool) (storiface.WindowPoStResult, error) { - - var slk sync.Mutex - var skipped []abi.SectorID - - var wg sync.WaitGroup - wg.Add(len(sectors)) - - vproofs := make([][]byte, len(sectors)) - - for i, s := range sectors { - if t.parallel != nil { - select { - case t.parallel <- struct{}{}: - case <-ctx.Done(): - return storiface.WindowPoStResult{}, xerrors.Errorf("context error waiting on challengeThrottle") - } - } - - go func(i int, s storiface.PostSectorChallenge) { - defer wg.Done() - if t.parallel != nil { - defer func() { - <-t.parallel - }() - } - - if t.challengeReadTimeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, t.challengeReadTimeout) - defer cancel() - } - - vanilla, err := t.storage.GenerateSingleVanillaProof(ctx, mid, s, ppt) - slk.Lock() - defer slk.Unlock() - - if err != nil || vanilla == nil { - skipped = append(skipped, abi.SectorID{ - Miner: mid, - Number: s.SectorNumber, - }) - log.Errorf("reading PoSt challenge for sector %d, vlen:%d, err: %s", s.SectorNumber, len(vanilla), err) - return - } - - vproofs[i] = vanilla - }(i, s) - } - wg.Wait() - - if len(skipped) > 0 && !allowSkip { - // This should happen rarely because before 
entering GenerateWindowPoSt we check all sectors by reading challenges. - // When it does happen, window post runner logic will just re-check sectors, and retry with newly-discovered-bad sectors skipped - log.Errorf("couldn't read some challenges (skipped %d)", len(skipped)) - - // note: can't return an error as this in an jsonrpc call - return storiface.WindowPoStResult{Skipped: skipped}, nil - } - - // compact skipped sectors - var skippedSoFar int - for i := range vproofs { - if len(vproofs[i]) == 0 { - skippedSoFar++ - continue - } - - if skippedSoFar > 0 { - vproofs[i-skippedSoFar] = vproofs[i] - } - } - - vproofs = vproofs[:len(vproofs)-skippedSoFar] - - // compute the PoSt! - res, err := t.GenerateWindowPoStWithVanilla(ctx, ppt, mid, randomness, vproofs, partitionIdx) - r := storiface.WindowPoStResult{ - PoStProofs: res, - Skipped: skipped, - } - if err != nil { - log.Errorw("generating window PoSt failed", "error", err) - return r, xerrors.Errorf("generate window PoSt with vanilla proofs: %w", err) - } - return r, nil -} - -func (t *WdPostTask) GenerateWindowPoStWithVanilla(ctx context.Context, proofType abi.RegisteredPoStProof, minerID abi.ActorID, randomness abi.PoStRandomness, proofs [][]byte, partitionIdx int) (proof.PoStProof, error) { - pp, err := ffiselect.FFISelect{}.GenerateSinglePartitionWindowPoStWithVanilla(proofType, minerID, randomness, proofs, uint(partitionIdx)) - if err != nil { - return proof.PoStProof{}, err - } - if pp == nil { - // should be impossible, but just in case do not panic - return proof.PoStProof{}, xerrors.New("postproof was nil") - } - - return proof.PoStProof{ - PoStProof: pp.PoStProof, - ProofBytes: pp.ProofBytes, - }, nil -} diff --git a/curiosrc/window/compute_task.go b/curiosrc/window/compute_task.go deleted file mode 100644 index b3f4974a606..00000000000 --- a/curiosrc/window/compute_task.go +++ /dev/null @@ -1,453 +0,0 @@ -package window - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "sort" - "strings" 
- "time" - - logging "github.com/ipfs/go-log/v2" - "github.com/samber/lo" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/dline" - "github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/chainsched" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/harmony/taskhelp" - "github.com/filecoin-project/lotus/lib/promise" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer" - "github.com/filecoin-project/lotus/storage/sealer/sealtasks" - "github.com/filecoin-project/lotus/storage/sealer/storiface" - "github.com/filecoin-project/lotus/storage/wdpost" -) - -var log = logging.Logger("curio/window") - -var EpochsPerDeadline = miner.WPoStProvingPeriod() / abi.ChainEpoch(miner.WPoStPeriodDeadlines) - -type WdPostTaskDetails struct { - Ts *types.TipSet - Deadline *dline.Info -} - -type WDPoStAPI interface { - ChainHead(context.Context) (*types.TipSet, error) - ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) - StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) - ChainGetTipSetAfterHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) - StateMinerPartitions(context.Context, address.Address, uint64, 
types.TipSetKey) ([]api.Partition, error) - StateGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) - StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) - StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) -} - -type ProverPoSt interface { - GenerateWindowPoStAdv(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, partitionIdx int, randomness abi.PoStRandomness, allowSkip bool) (storiface.WindowPoStResult, error) -} - -type WdPostTask struct { - api WDPoStAPI - db *harmonydb.DB - - faultTracker sealer.FaultTracker - storage paths.Store - verifier storiface.Verifier - - windowPoStTF promise.Promise[harmonytask.AddTaskFunc] - - actors map[dtypes.MinerAddress]bool - max int - parallel chan struct{} - challengeReadTimeout time.Duration -} - -type wdTaskIdentity struct { - SpID uint64 `db:"sp_id"` - ProvingPeriodStart abi.ChainEpoch `db:"proving_period_start"` - DeadlineIndex uint64 `db:"deadline_index"` - PartitionIndex uint64 `db:"partition_index"` -} - -func NewWdPostTask(db *harmonydb.DB, - api WDPoStAPI, - faultTracker sealer.FaultTracker, - storage paths.Store, - verifier storiface.Verifier, - pcs *chainsched.CurioChainSched, - actors map[dtypes.MinerAddress]bool, - max int, - parallel int, - challengeReadTimeout time.Duration, -) (*WdPostTask, error) { - t := &WdPostTask{ - db: db, - api: api, - - faultTracker: faultTracker, - storage: storage, - verifier: verifier, - - actors: actors, - max: max, - challengeReadTimeout: challengeReadTimeout, - } - if parallel > 0 { - t.parallel = make(chan struct{}, parallel) - } - - if pcs != nil { - if err := pcs.AddHandler(t.processHeadChange); err != nil { - return nil, err - } - } - - return t, nil -} - -func (t *WdPostTask) Do(taskID 
harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - log.Debugw("WdPostTask.Do()", "taskID", taskID) - - var spID, pps, dlIdx, partIdx uint64 - - err = t.db.QueryRow(context.Background(), - `Select sp_id, proving_period_start, deadline_index, partition_index - from wdpost_partition_tasks - where task_id = $1`, taskID).Scan( - &spID, &pps, &dlIdx, &partIdx, - ) - if err != nil { - log.Errorf("WdPostTask.Do() failed to queryRow: %v", err) - return false, err - } - - head, err := t.api.ChainHead(context.Background()) - if err != nil { - log.Errorf("WdPostTask.Do() failed to get chain head: %v", err) - return false, err - } - - deadline := wdpost.NewDeadlineInfo(abi.ChainEpoch(pps), dlIdx, head.Height()) - - var testTask *int - isTestTask := func() bool { - if testTask != nil { - return *testTask > 0 - } - - testTask = new(int) - err := t.db.QueryRow(context.Background(), `SELECT COUNT(*) FROM harmony_test WHERE task_id = $1`, taskID).Scan(testTask) - if err != nil { - log.Errorf("WdPostTask.Do() failed to queryRow: %v", err) - return false - } - - return *testTask > 0 - } - - if deadline.PeriodElapsed() && !isTestTask() { - log.Errorf("WdPost removed stale task: %v %v", taskID, deadline) - return true, nil - } - - if deadline.Challenge > head.Height() { - if isTestTask() { - deadline = wdpost.NewDeadlineInfo(abi.ChainEpoch(pps)-deadline.WPoStProvingPeriod, dlIdx, head.Height()-deadline.WPoStProvingPeriod) - log.Warnw("Test task is in the future, adjusting to past", "taskID", taskID, "deadline", deadline) - } - } - - maddr, err := address.NewIDAddress(spID) - if err != nil { - log.Errorf("WdPostTask.Do() failed to NewIDAddress: %v", err) - return false, err - } - - ts, err := t.api.ChainGetTipSetAfterHeight(context.Background(), deadline.Challenge, head.Key()) - if err != nil { - log.Errorf("WdPostTask.Do() failed to ChainGetTipSetAfterHeight: %v", err) - return false, err - } - - postOut, err := t.DoPartition(context.Background(), ts, maddr, 
deadline, partIdx) - if err != nil { - log.Errorf("WdPostTask.Do() failed to doPartition: %v", err) - return false, err - } - - var msgbuf bytes.Buffer - if err := postOut.MarshalCBOR(&msgbuf); err != nil { - return false, xerrors.Errorf("marshaling PoSt: %w", err) - } - - if isTestTask() { - // Do not send test tasks to the chain but to harmony_test & stdout. - - data, err := json.MarshalIndent(map[string]any{ - "sp_id": spID, - "proving_period_start": pps, - "deadline": deadline.Index, - "partition": partIdx, - "submit_at_epoch": deadline.Open, - "submit_by_epoch": deadline.Close, - "proof_params": msgbuf.Bytes(), - }, "", " ") - if err != nil { - return false, xerrors.Errorf("marshaling message: %w", err) - } - ctx := context.Background() - _, err = t.db.Exec(ctx, `UPDATE harmony_test SET result=$1 WHERE task_id=$2`, string(data), taskID) - if err != nil { - return false, xerrors.Errorf("updating harmony_test: %w", err) - } - log.Infof("SKIPPED sending test message to chain. SELECT * FROM harmony_test WHERE task_id= %v", taskID) - return true, nil // nothing committed - } - // Insert into wdpost_proofs table - n, err := t.db.Exec(context.Background(), - `INSERT INTO wdpost_proofs ( - sp_id, - proving_period_start, - deadline, - partition, - submit_at_epoch, - submit_by_epoch, - proof_params) - VALUES ($1, $2, $3, $4, $5, $6, $7)`, - spID, - pps, - deadline.Index, - partIdx, - deadline.Open, - deadline.Close, - msgbuf.Bytes(), - ) - - if err != nil { - log.Errorf("WdPostTask.Do() failed to insert into wdpost_proofs: %v", err) - return false, err - } - if n != 1 { - log.Errorf("WdPostTask.Do() failed to insert into wdpost_proofs: %v", err) - return false, err - } - - return true, nil -} - -func entToStr[T any](t T, i int) string { - return fmt.Sprint(t) -} - -func (t *WdPostTask) CanAccept(ids []harmonytask.TaskID, te *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - // GetEpoch - ts, err := t.api.ChainHead(context.Background()) - - if err != nil { - 
return nil, err - } - - // GetData for tasks - type wdTaskDef struct { - TaskID harmonytask.TaskID - SpID uint64 - ProvingPeriodStart abi.ChainEpoch - DeadlineIndex uint64 - PartitionIndex uint64 - - dlInfo *dline.Info `pgx:"-"` - } - var tasks []wdTaskDef - - err = t.db.Select(context.Background(), &tasks, - `Select - task_id, - sp_id, - proving_period_start, - deadline_index, - partition_index - from wdpost_partition_tasks - where task_id IN (SELECT unnest(string_to_array($1, ','))::bigint)`, strings.Join(lo.Map(ids, entToStr[harmonytask.TaskID]), ",")) - if err != nil { - return nil, err - } - - // Accept those past deadline, then delete them in Do(). - for i := range tasks { - tasks[i].dlInfo = wdpost.NewDeadlineInfo(tasks[i].ProvingPeriodStart, tasks[i].DeadlineIndex, ts.Height()) - - if tasks[i].dlInfo.PeriodElapsed() { - // note: Those may be test tasks - return &tasks[i].TaskID, nil - } - } - - // todo fix the block below - // workAdderMutex is held by taskTypeHandler.considerWork, which calls this CanAccept - // te.ResourcesAvailable will try to get that lock again, which will deadlock - - // Discard those too big for our free RAM - /*freeRAM := te.ResourcesAvailable().Ram - tasks = lo.Filter(tasks, func(d wdTaskDef, _ int) bool { - maddr, err := address.NewIDAddress(tasks[0].Sp_id) - if err != nil { - log.Errorf("WdPostTask.CanAccept() failed to NewIDAddress: %v", err) - return false - } - - mi, err := t.api.StateMinerInfo(context.Background(), maddr, ts.Key()) - if err != nil { - log.Errorf("WdPostTask.CanAccept() failed to StateMinerInfo: %v", err) - return false - } - - spt, err := policy.GetSealProofFromPoStProof(mi.WindowPoStProofType) - if err != nil { - log.Errorf("WdPostTask.CanAccept() failed to GetSealProofFromPoStProof: %v", err) - return false - } - - return res[spt].MaxMemory <= freeRAM - })*/ - if len(tasks) == 0 { - log.Infof("RAM too small for any WDPost task") - return nil, nil - } - - // Ignore those with too many failures unless they 
are the only ones left. - tasks, _ = taskhelp.SliceIfFound(tasks, func(d wdTaskDef) bool { - var r int - err := t.db.QueryRow(context.Background(), `SELECT COUNT(*) - FROM harmony_task_history - WHERE task_id = $1 AND result = false`, d.TaskID).Scan(&r) - if err != nil { - log.Errorf("WdPostTask.CanAccept() failed to queryRow: %v", err) - } - return r < 2 - }) - - // Select the one closest to the deadline - sort.Slice(tasks, func(i, j int) bool { - return tasks[i].dlInfo.Open < tasks[j].dlInfo.Open - }) - - return &tasks[0].TaskID, nil -} - -var res = storiface.ResourceTable[sealtasks.TTGenerateWindowPoSt] - -func (t *WdPostTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Name: "WdPost", - Max: t.max, - MaxFailures: 3, - Follows: nil, - Cost: resources.Resources{ - Cpu: 1, - - // todo set to something for 32/64G sector sizes? Technically windowPoSt is happy on a CPU - // but it will use a GPU if available - Gpu: 0, - - // RAM of smallest proof's max is listed here - Ram: lo.Reduce(lo.Keys(res), func(i uint64, k abi.RegisteredSealProof, _ int) uint64 { - if res[k].MaxMemory < i { - return res[k].MaxMemory - } - return i - }, 1<<63), - }, - } -} - -func (t *WdPostTask) Adder(taskFunc harmonytask.AddTaskFunc) { - t.windowPoStTF.Set(taskFunc) -} - -func (t *WdPostTask) processHeadChange(ctx context.Context, revert, apply *types.TipSet) error { - for act := range t.actors { - maddr := address.Address(act) - - aid, err := address.IDFromAddress(maddr) - if err != nil { - return xerrors.Errorf("getting miner ID: %w", err) - } - - di, err := t.api.StateMinerProvingDeadline(ctx, maddr, apply.Key()) - if err != nil { - return err - } - - if !di.PeriodStarted() { - return nil // not proving anything yet - } - - partitions, err := t.api.StateMinerPartitions(ctx, maddr, di.Index, apply.Key()) - if err != nil { - return xerrors.Errorf("getting partitions: %w", err) - } - - // TODO: Batch Partitions?? 
- - for pidx := range partitions { - tid := wdTaskIdentity{ - SpID: aid, - ProvingPeriodStart: di.PeriodStart, - DeadlineIndex: di.Index, - PartitionIndex: uint64(pidx), - } - - tf := t.windowPoStTF.Val(ctx) - if tf == nil { - return xerrors.Errorf("no task func") - } - - tf(func(id harmonytask.TaskID, tx *harmonydb.Tx) (bool, error) { - return t.addTaskToDB(id, tid, tx) - }) - } - } - - return nil -} - -func (t *WdPostTask) addTaskToDB(taskId harmonytask.TaskID, taskIdent wdTaskIdentity, tx *harmonydb.Tx) (bool, error) { - - _, err := tx.Exec( - `INSERT INTO wdpost_partition_tasks ( - task_id, - sp_id, - proving_period_start, - deadline_index, - partition_index - ) VALUES ($1, $2, $3, $4, $5)`, - taskId, - taskIdent.SpID, - taskIdent.ProvingPeriodStart, - taskIdent.DeadlineIndex, - taskIdent.PartitionIndex, - ) - if err != nil { - return false, xerrors.Errorf("insert partition task: %w", err) - } - - return true, nil -} - -var _ harmonytask.TaskInterface = &WdPostTask{} diff --git a/curiosrc/window/faults_simple.go b/curiosrc/window/faults_simple.go deleted file mode 100644 index 64f5e86506c..00000000000 --- a/curiosrc/window/faults_simple.go +++ /dev/null @@ -1,152 +0,0 @@ -package window - -import ( - "context" - "crypto/rand" - "fmt" - "sync" - "time" - - "golang.org/x/xerrors" - - ffi "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -type SimpleFaultTracker struct { - storage paths.Store - index paths.SectorIndex - - parallelCheckLimit int // todo live config? 
- singleCheckTimeout time.Duration - partitionCheckTimeout time.Duration -} - -func NewSimpleFaultTracker(storage paths.Store, index paths.SectorIndex, - parallelCheckLimit int, singleCheckTimeout time.Duration, partitionCheckTimeout time.Duration) *SimpleFaultTracker { - return &SimpleFaultTracker{ - storage: storage, - index: index, - - parallelCheckLimit: parallelCheckLimit, - singleCheckTimeout: singleCheckTimeout, - partitionCheckTimeout: partitionCheckTimeout, - } -} - -func (m *SimpleFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, rg storiface.RGetter) (map[abi.SectorID]string, error) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - if rg == nil { - return nil, xerrors.Errorf("rg is nil") - } - - var bad = make(map[abi.SectorID]string) - var badLk sync.Mutex - - var postRand abi.PoStRandomness = make([]byte, abi.RandomnessLength) - _, _ = rand.Read(postRand) - postRand[31] &= 0x3f - - limit := m.parallelCheckLimit - if limit <= 0 { - limit = len(sectors) - } - throttle := make(chan struct{}, limit) - - addBad := func(s abi.SectorID, reason string) { - badLk.Lock() - bad[s] = reason - badLk.Unlock() - } - - if m.partitionCheckTimeout > 0 { - var cancel2 context.CancelFunc - ctx, cancel2 = context.WithTimeout(ctx, m.partitionCheckTimeout) - defer cancel2() - } - - var wg sync.WaitGroup - wg.Add(len(sectors)) - - for _, sector := range sectors { - select { - case throttle <- struct{}{}: - case <-ctx.Done(): - addBad(sector.ID, fmt.Sprintf("waiting for check worker: %s", ctx.Err())) - wg.Done() - continue - } - - go func(sector storiface.SectorRef) { - defer wg.Done() - defer func() { - <-throttle - }() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - commr, update, err := rg(ctx, sector.ID) - if err != nil { - log.Warnw("CheckProvable Sector FAULT: getting commR", "sector", sector, "sealed", "err", err) - addBad(sector.ID, fmt.Sprintf("getting commR: %s", err)) - return - } 
- - toLock := storiface.FTSealed | storiface.FTCache - if update { - toLock = storiface.FTUpdate | storiface.FTUpdateCache - } - - locked, err := m.index.StorageTryLock(ctx, sector.ID, toLock, storiface.FTNone) - if err != nil { - addBad(sector.ID, fmt.Sprintf("tryLock error: %s", err)) - return - } - - if !locked { - log.Warnw("CheckProvable Sector FAULT: can't acquire read lock", "sector", sector) - addBad(sector.ID, "can't acquire read lock") - return - } - - ch, err := ffi.GeneratePoStFallbackSectorChallenges(pp, sector.ID.Miner, postRand, []abi.SectorNumber{ - sector.ID.Number, - }) - if err != nil { - log.Warnw("CheckProvable Sector FAULT: generating challenges", "sector", sector, "err", err) - addBad(sector.ID, fmt.Sprintf("generating fallback challenges: %s", err)) - return - } - - vctx := ctx - - if m.singleCheckTimeout > 0 { - var cancel2 context.CancelFunc - vctx, cancel2 = context.WithTimeout(ctx, m.singleCheckTimeout) - defer cancel2() - } - - _, err = m.storage.GenerateSingleVanillaProof(vctx, sector.ID.Miner, storiface.PostSectorChallenge{ - SealProof: sector.ProofType, - SectorNumber: sector.ID.Number, - SealedCID: commr, - Challenge: ch.Challenges[sector.ID.Number], - Update: update, - }, pp) - if err != nil { - log.Warnw("CheckProvable Sector FAULT: generating vanilla proof", "sector", sector, "err", err) - addBad(sector.ID, fmt.Sprintf("generating vanilla proof: %s", err)) - return - } - }(sector) - } - - wg.Wait() - - return bad, nil -} diff --git a/curiosrc/window/recover_task.go b/curiosrc/window/recover_task.go deleted file mode 100644 index 1ed110978c1..00000000000 --- a/curiosrc/window/recover_task.go +++ /dev/null @@ -1,324 +0,0 @@ -package window - -import ( - "context" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin" - 
"github.com/filecoin-project/go-state-types/dline" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/chainsched" - "github.com/filecoin-project/lotus/curiosrc/message" - "github.com/filecoin-project/lotus/curiosrc/multictladdr" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/promise" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage/sealer" - "github.com/filecoin-project/lotus/storage/wdpost" -) - -type WdPostRecoverDeclareTask struct { - sender *message.Sender - db *harmonydb.DB - api WdPostRecoverDeclareTaskApi - faultTracker sealer.FaultTracker - - maxDeclareRecoveriesGasFee types.FIL - as *multictladdr.MultiAddressSelector - actors map[dtypes.MinerAddress]bool - - startCheckTF promise.Promise[harmonytask.AddTaskFunc] -} - -type WdPostRecoverDeclareTaskApi interface { - ChainHead(context.Context) (*types.TipSet, error) - StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) - StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]api.Partition, error) - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) - StateMinerSectors(ctx context.Context, addr address.Address, bf *bitfield.BitField, tsk types.TipSetKey) ([]*miner.SectorOnChainInfo, error) - - GasEstimateMessageGas(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) - GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) - GasEstimateGasPremium(_ context.Context, nblocksincl uint64, sender 
address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) - - WalletBalance(context.Context, address.Address) (types.BigInt, error) - WalletHas(context.Context, address.Address) (bool, error) - StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) - StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) -} - -func NewWdPostRecoverDeclareTask(sender *message.Sender, - db *harmonydb.DB, - api WdPostRecoverDeclareTaskApi, - faultTracker sealer.FaultTracker, - as *multictladdr.MultiAddressSelector, - pcs *chainsched.CurioChainSched, - - maxDeclareRecoveriesGasFee types.FIL, - actors map[dtypes.MinerAddress]bool) (*WdPostRecoverDeclareTask, error) { - t := &WdPostRecoverDeclareTask{ - sender: sender, - db: db, - api: api, - faultTracker: faultTracker, - - maxDeclareRecoveriesGasFee: maxDeclareRecoveriesGasFee, - as: as, - actors: actors, - } - - if pcs != nil { - if err := pcs.AddHandler(t.processHeadChange); err != nil { - return nil, err - } - } - - return t, nil -} - -func (w *WdPostRecoverDeclareTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - log.Debugw("WdPostRecoverDeclareTask.Do()", "taskID", taskID) - ctx := context.Background() - - var spID, pps, dlIdx, partIdx uint64 - - err = w.db.QueryRow(context.Background(), - `Select sp_id, proving_period_start, deadline_index, partition_index - from wdpost_recovery_tasks - where task_id = $1`, taskID).Scan( - &spID, &pps, &dlIdx, &partIdx, - ) - if err != nil { - log.Errorf("WdPostRecoverDeclareTask.Do() failed to queryRow: %v", err) - return false, err - } - - head, err := w.api.ChainHead(context.Background()) - if err != nil { - log.Errorf("WdPostRecoverDeclareTask.Do() failed to get chain head: %v", err) - return false, err - } - - deadline := wdpost.NewDeadlineInfo(abi.ChainEpoch(pps), dlIdx, head.Height()) - - if deadline.FaultCutoffPassed() { - log.Errorf("WdPostRecover removed stale 
task: %v %v", taskID, deadline) - return true, nil - } - - maddr, err := address.NewIDAddress(spID) - if err != nil { - log.Errorf("WdPostTask.Do() failed to NewIDAddress: %v", err) - return false, err - } - - partitions, err := w.api.StateMinerPartitions(context.Background(), maddr, dlIdx, head.Key()) - if err != nil { - log.Errorf("WdPostRecoverDeclareTask.Do() failed to get partitions: %v", err) - return false, err - } - - if partIdx >= uint64(len(partitions)) { - log.Errorf("WdPostRecoverDeclareTask.Do() failed to get partitions: partIdx >= len(partitions)") - return false, err - } - - partition := partitions[partIdx] - - unrecovered, err := bitfield.SubtractBitField(partition.FaultySectors, partition.RecoveringSectors) - if err != nil { - return false, xerrors.Errorf("subtracting recovered set from fault set: %w", err) - } - - uc, err := unrecovered.Count() - if err != nil { - return false, xerrors.Errorf("counting unrecovered sectors: %w", err) - } - - if uc == 0 { - log.Warnw("nothing to declare recovered", "maddr", maddr, "deadline", deadline, "partition", partIdx) - return true, nil - } - - recovered, err := checkSectors(ctx, w.api, w.faultTracker, maddr, unrecovered, head.Key()) - if err != nil { - return false, xerrors.Errorf("checking unrecovered sectors: %w", err) - } - - // if all sectors failed to recover, don't declare recoveries - recoveredCount, err := recovered.Count() - if err != nil { - return false, xerrors.Errorf("counting recovered sectors: %w", err) - } - - if recoveredCount == 0 { - log.Warnw("no sectors recovered", "maddr", maddr, "deadline", deadline, "partition", partIdx) - return true, nil - } - - recDecl := miner.RecoveryDeclaration{ - Deadline: dlIdx, - Partition: partIdx, - Sectors: recovered, - } - - params := &miner.DeclareFaultsRecoveredParams{ - Recoveries: []miner.RecoveryDeclaration{recDecl}, - } - - enc, aerr := actors.SerializeParams(params) - if aerr != nil { - return false, xerrors.Errorf("could not serialize declare 
recoveries parameters: %w", aerr) - } - - msg := &types.Message{ - To: maddr, - Method: builtin.MethodsMiner.DeclareFaultsRecovered, - Params: enc, - Value: types.NewInt(0), - } - - msg, mss, err := preparePoStMessage(w.api, w.as, maddr, msg, abi.TokenAmount(w.maxDeclareRecoveriesGasFee)) - if err != nil { - return false, xerrors.Errorf("sending declare recoveries message: %w", err) - } - - mc, err := w.sender.Send(ctx, msg, mss, "declare-recoveries") - if err != nil { - return false, xerrors.Errorf("sending declare recoveries message: %w", err) - } - - log.Debugw("WdPostRecoverDeclareTask.Do() sent declare recoveries message", "maddr", maddr, "deadline", deadline, "partition", partIdx, "mc", mc) - return true, nil -} - -func (w *WdPostRecoverDeclareTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - if len(ids) == 0 { - // probably can't happen, but panicking is bad - return nil, nil - } - - if w.sender == nil { - // we can't send messages - return nil, nil - } - - return &ids[0], nil -} - -func (w *WdPostRecoverDeclareTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: 128, - Name: "WdPostRecover", - Cost: resources.Resources{ - Cpu: 1, - Gpu: 0, - Ram: 128 << 20, - }, - MaxFailures: 10, - Follows: nil, - } -} - -func (w *WdPostRecoverDeclareTask) Adder(taskFunc harmonytask.AddTaskFunc) { - w.startCheckTF.Set(taskFunc) -} - -func (w *WdPostRecoverDeclareTask) processHeadChange(ctx context.Context, revert, apply *types.TipSet) error { - tf := w.startCheckTF.Val(ctx) - - for act := range w.actors { - maddr := address.Address(act) - - aid, err := address.IDFromAddress(maddr) - if err != nil { - return xerrors.Errorf("getting miner ID: %w", err) - } - - di, err := w.api.StateMinerProvingDeadline(ctx, maddr, apply.Key()) - if err != nil { - return err - } - - if !di.PeriodStarted() { - return nil // not proving anything yet - } - - // declaring two deadlines ahead - 
declDeadline := (di.Index + 2) % di.WPoStPeriodDeadlines - - pps := di.PeriodStart - if declDeadline != di.Index+2 { - pps = di.NextPeriodStart() - } - - partitions, err := w.api.StateMinerPartitions(ctx, maddr, declDeadline, apply.Key()) - if err != nil { - return xerrors.Errorf("getting partitions: %w", err) - } - - for pidx, partition := range partitions { - unrecovered, err := bitfield.SubtractBitField(partition.FaultySectors, partition.RecoveringSectors) - if err != nil { - return xerrors.Errorf("subtracting recovered set from fault set: %w", err) - } - - uc, err := unrecovered.Count() - if err != nil { - return xerrors.Errorf("counting unrecovered sectors: %w", err) - } - - if uc == 0 { - log.Debugw("WdPostRecoverDeclareTask.processHeadChange() uc == 0, skipping", "maddr", maddr, "declDeadline", declDeadline, "pidx", pidx) - continue - } - - tid := wdTaskIdentity{ - SpID: aid, - ProvingPeriodStart: pps, - DeadlineIndex: declDeadline, - PartitionIndex: uint64(pidx), - } - - tf(func(id harmonytask.TaskID, tx *harmonydb.Tx) (bool, error) { - return w.addTaskToDB(id, tid, tx) - }) - } - } - - return nil -} - -func (w *WdPostRecoverDeclareTask) addTaskToDB(taskId harmonytask.TaskID, taskIdent wdTaskIdentity, tx *harmonydb.Tx) (bool, error) { - _, err := tx.Exec( - `INSERT INTO wdpost_recovery_tasks ( - task_id, - sp_id, - proving_period_start, - deadline_index, - partition_index - ) VALUES ($1, $2, $3, $4, $5)`, - taskId, - taskIdent.SpID, - taskIdent.ProvingPeriodStart, - taskIdent.DeadlineIndex, - taskIdent.PartitionIndex, - ) - if err != nil { - return false, xerrors.Errorf("insert partition task: %w", err) - } - - return true, nil -} - -var _ harmonytask.TaskInterface = &WdPostRecoverDeclareTask{} diff --git a/curiosrc/window/submit_task.go b/curiosrc/window/submit_task.go deleted file mode 100644 index 330fd050902..00000000000 --- a/curiosrc/window/submit_task.go +++ /dev/null @@ -1,307 +0,0 @@ -package window - -import ( - "bytes" - "context" - - 
"golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v9/miner" - "github.com/filecoin-project/go-state-types/crypto" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/curiosrc/chainsched" - "github.com/filecoin-project/lotus/curiosrc/message" - "github.com/filecoin-project/lotus/curiosrc/multictladdr" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/promise" - "github.com/filecoin-project/lotus/storage/wdpost" -) - -type WdPoStSubmitTaskApi interface { - ChainHead(context.Context) (*types.TipSet, error) - - WalletBalance(context.Context, address.Address) (types.BigInt, error) - WalletHas(context.Context, address.Address) (bool, error) - - StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) - StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) - StateGetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) - - GasEstimateMessageGas(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) - GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) - GasEstimateGasPremium(_ context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) -} - -type WdPostSubmitTask struct { - sender 
*message.Sender - db *harmonydb.DB - api WdPoStSubmitTaskApi - - maxWindowPoStGasFee types.FIL - as *multictladdr.MultiAddressSelector - - submitPoStTF promise.Promise[harmonytask.AddTaskFunc] -} - -func NewWdPostSubmitTask(pcs *chainsched.CurioChainSched, send *message.Sender, db *harmonydb.DB, api WdPoStSubmitTaskApi, maxWindowPoStGasFee types.FIL, as *multictladdr.MultiAddressSelector) (*WdPostSubmitTask, error) { - res := &WdPostSubmitTask{ - sender: send, - db: db, - api: api, - - maxWindowPoStGasFee: maxWindowPoStGasFee, - as: as, - } - - if pcs != nil { - if err := pcs.AddHandler(res.processHeadChange); err != nil { - return nil, err - } - } - - return res, nil -} - -func (w *WdPostSubmitTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - log.Debugw("WdPostSubmitTask.Do", "taskID", taskID) - - var spID uint64 - var deadline uint64 - var partition uint64 - var pps, submitAtEpoch, submitByEpoch abi.ChainEpoch - var earlyParamBytes []byte - var dbTask uint64 - - err = w.db.QueryRow( - context.Background(), `SELECT sp_id, proving_period_start, deadline, partition, submit_at_epoch, submit_by_epoch, proof_params, submit_task_id - FROM wdpost_proofs WHERE submit_task_id = $1`, taskID, - ).Scan(&spID, &pps, &deadline, &partition, &submitAtEpoch, &submitByEpoch, &earlyParamBytes, &dbTask) - if err != nil { - return false, xerrors.Errorf("query post proof: %w", err) - } - - if dbTask != uint64(taskID) { - return false, xerrors.Errorf("taskID mismatch: %d != %d", dbTask, taskID) - } - - head, err := w.api.ChainHead(context.Background()) - if err != nil { - return false, xerrors.Errorf("getting chain head: %w", err) - } - - if head.Height() > submitByEpoch { - // we missed the deadline, no point in submitting - log.Errorw("missed submit deadline", "spID", spID, "deadline", deadline, "partition", partition, "submitByEpoch", submitByEpoch, "headHeight", head.Height()) - return true, nil - } - - if head.Height() < submitAtEpoch { - 
log.Errorw("submit epoch not reached", "spID", spID, "deadline", deadline, "partition", partition, "submitAtEpoch", submitAtEpoch, "headHeight", head.Height()) - return false, xerrors.Errorf("submit epoch not reached: %d < %d", head.Height(), submitAtEpoch) - } - - dlInfo := wdpost.NewDeadlineInfo(pps, deadline, head.Height()) - - var params miner.SubmitWindowedPoStParams - if err := params.UnmarshalCBOR(bytes.NewReader(earlyParamBytes)); err != nil { - return false, xerrors.Errorf("unmarshaling proof message: %w", err) - } - - commEpoch := dlInfo.Challenge - - commRand, err := w.api.StateGetRandomnessFromTickets(context.Background(), crypto.DomainSeparationTag_PoStChainCommit, commEpoch, nil, head.Key()) - if err != nil { - err = xerrors.Errorf("failed to get chain randomness from tickets for windowPost (epoch=%d): %w", commEpoch, err) - log.Errorf("submitPoStMessage failed: %+v", err) - - return false, xerrors.Errorf("getting post commit randomness: %w", err) - } - - params.ChainCommitEpoch = commEpoch - params.ChainCommitRand = commRand - - var pbuf bytes.Buffer - if err := params.MarshalCBOR(&pbuf); err != nil { - return false, xerrors.Errorf("marshaling proof message: %w", err) - } - - maddr, err := address.NewIDAddress(spID) - if err != nil { - return false, xerrors.Errorf("invalid miner address: %w", err) - } - - msg := &types.Message{ - To: maddr, - Method: builtin.MethodsMiner.SubmitWindowedPoSt, - Params: pbuf.Bytes(), - Value: big.Zero(), - } - - msg, mss, err := preparePoStMessage(w.api, w.as, maddr, msg, abi.TokenAmount(w.maxWindowPoStGasFee)) - if err != nil { - return false, xerrors.Errorf("preparing proof message: %w", err) - } - - ctx := context.Background() - smsg, err := w.sender.Send(ctx, msg, mss, "wdpost") - if err != nil { - return false, xerrors.Errorf("sending proof message: %w", err) - } - - // set message_cid in the wdpost_proofs entry - - _, err = w.db.Exec(ctx, `UPDATE wdpost_proofs SET message_cid = $1 WHERE sp_id = $2 AND 
proving_period_start = $3 AND deadline = $4 AND partition = $5`, smsg.String(), spID, pps, deadline, partition) - if err != nil { - return true, xerrors.Errorf("updating wdpost_proofs: %w", err) - } - - return true, nil -} - -func (w *WdPostSubmitTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - if len(ids) == 0 { - // probably can't happen, but panicking is bad - return nil, nil - } - - if w.sender == nil { - // we can't send messages - return nil, nil - } - - return &ids[0], nil -} - -func (w *WdPostSubmitTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Max: 128, - Name: "WdPostSubmit", - Cost: resources.Resources{ - Cpu: 0, - Gpu: 0, - Ram: 10 << 20, - }, - MaxFailures: 10, - Follows: nil, // ?? - } -} - -func (w *WdPostSubmitTask) Adder(taskFunc harmonytask.AddTaskFunc) { - w.submitPoStTF.Set(taskFunc) -} - -func (w *WdPostSubmitTask) processHeadChange(ctx context.Context, revert, apply *types.TipSet) error { - tf := w.submitPoStTF.Val(ctx) - - qry, err := w.db.Query(ctx, `SELECT sp_id, proving_period_start, deadline, partition, submit_at_epoch FROM wdpost_proofs WHERE submit_task_id IS NULL AND submit_at_epoch <= $1`, apply.Height()) - if err != nil { - return err - } - defer qry.Close() - - for qry.Next() { - var spID int64 - var pps int64 - var deadline uint64 - var partition uint64 - var submitAtEpoch uint64 - if err := qry.Scan(&spID, &pps, &deadline, &partition, &submitAtEpoch); err != nil { - return xerrors.Errorf("scan submittable posts: %w", err) - } - - tf(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, err error) { - // update in transaction iff submit_task_id is still null - res, err := tx.Exec(`UPDATE wdpost_proofs SET submit_task_id = $1 WHERE sp_id = $2 AND proving_period_start = $3 AND deadline = $4 AND partition = $5 AND submit_task_id IS NULL`, id, spID, pps, deadline, partition) - if err != nil { - return false, 
xerrors.Errorf("query ready proof: %w", err) - } - if res != 1 { - return false, nil - } - - return true, nil - }) - } - if err := qry.Err(); err != nil { - return err - } - - return nil -} - -type MsgPrepAPI interface { - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) - GasEstimateMessageGas(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) - GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) - GasEstimateGasPremium(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) - - WalletBalance(context.Context, address.Address) (types.BigInt, error) - WalletHas(context.Context, address.Address) (bool, error) - StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) - StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) -} - -func preparePoStMessage(w MsgPrepAPI, as *multictladdr.MultiAddressSelector, maddr address.Address, msg *types.Message, maxFee abi.TokenAmount) (*types.Message, *api.MessageSendSpec, error) { - mi, err := w.StateMinerInfo(context.Background(), maddr, types.EmptyTSK) - if err != nil { - return nil, nil, xerrors.Errorf("error getting miner info: %w", err) - } - - // set the worker as a fallback - msg.From = mi.Worker - - mss := &api.MessageSendSpec{ - MaxFee: maxFee, - } - - // (optimal) initial estimation with some overestimation that guarantees - // block inclusion within the next 20 tipsets. 
- gm, err := w.GasEstimateMessageGas(context.Background(), msg, mss, types.EmptyTSK) - if err != nil { - log.Errorw("estimating gas", "error", err) - return nil, nil, xerrors.Errorf("estimating gas: %w", err) - } - *msg = *gm - - // calculate a more frugal estimation; premium is estimated to guarantee - // inclusion within 5 tipsets, and fee cap is estimated for inclusion - // within 4 tipsets. - minGasFeeMsg := *msg - - minGasFeeMsg.GasPremium, err = w.GasEstimateGasPremium(context.Background(), 5, msg.From, msg.GasLimit, types.EmptyTSK) - if err != nil { - log.Errorf("failed to estimate minimum gas premium: %+v", err) - minGasFeeMsg.GasPremium = msg.GasPremium - } - - minGasFeeMsg.GasFeeCap, err = w.GasEstimateFeeCap(context.Background(), &minGasFeeMsg, 4, types.EmptyTSK) - if err != nil { - log.Errorf("failed to estimate minimum gas fee cap: %+v", err) - minGasFeeMsg.GasFeeCap = msg.GasFeeCap - } - - // goodFunds = funds needed for optimal inclusion probability. - // minFunds = funds needed for more speculative inclusion probability. 
- goodFunds := big.Add(minGasFeeMsg.RequiredFunds(), minGasFeeMsg.Value) - minFunds := big.Min(big.Add(minGasFeeMsg.RequiredFunds(), minGasFeeMsg.Value), goodFunds) - - from, _, err := as.AddressFor(context.Background(), w, maddr, mi, api.PoStAddr, goodFunds, minFunds) - if err != nil { - return nil, nil, xerrors.Errorf("error getting address: %w", err) - } - - msg.From = from - - return msg, mss, nil -} - -var _ harmonytask.TaskInterface = &WdPostSubmitTask{} diff --git a/curiosrc/winning/winning_task.go b/curiosrc/winning/winning_task.go deleted file mode 100644 index 5fc40282168..00000000000 --- a/curiosrc/winning/winning_task.go +++ /dev/null @@ -1,725 +0,0 @@ -package winning - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/binary" - "encoding/json" - "time" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/sync/errgroup" - "golang.org/x/xerrors" - - ffi "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/go-state-types/proof" - prooftypes "github.com/filecoin-project/go-state-types/proof" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/gen" - lrand "github.com/filecoin-project/lotus/chain/rand" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/ffiselect" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/lib/harmony/resources" - "github.com/filecoin-project/lotus/lib/promise" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var log = 
logging.Logger("curio/winning") - -type WinPostTask struct { - max int - db *harmonydb.DB - - paths *paths.Local - verifier storiface.Verifier - - api WinPostAPI - actors map[dtypes.MinerAddress]bool - - mineTF promise.Promise[harmonytask.AddTaskFunc] -} - -type WinPostAPI interface { - ChainHead(context.Context) (*types.TipSet, error) - ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error) - ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) - - StateGetBeaconEntry(context.Context, abi.ChainEpoch) (*types.BeaconEntry, error) - SyncSubmitBlock(context.Context, *types.BlockMsg) error - StateGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) - StateGetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) - StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) - - MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) - MinerCreateBlock(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) - MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) - - WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) -} - -func NewWinPostTask(max int, db *harmonydb.DB, pl *paths.Local, verifier storiface.Verifier, api WinPostAPI, actors map[dtypes.MinerAddress]bool) *WinPostTask { - t := &WinPostTask{ - max: max, - db: db, - paths: pl, - verifier: verifier, - api: api, - actors: actors, - } - // TODO: run warmup - - go t.mineBasic(context.TODO()) - - return t -} - -func (t *WinPostTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done bool, err error) { - 
log.Debugw("WinPostTask.Do()", "taskID", taskID) - - ctx := context.TODO() - - type BlockCID struct { - CID string - } - - type MiningTaskDetails struct { - SpID uint64 - Epoch uint64 - BlockCIDs []BlockCID - CompTime time.Time - } - - var details MiningTaskDetails - - // First query to fetch from mining_tasks - err = t.db.QueryRow(ctx, `SELECT sp_id, epoch, base_compute_time FROM mining_tasks WHERE task_id = $1`, taskID).Scan(&details.SpID, &details.Epoch, &details.CompTime) - if err != nil { - return false, xerrors.Errorf("query mining base info fail: %w", err) - } - - // Second query to fetch from mining_base_block - rows, err := t.db.Query(ctx, `SELECT block_cid FROM mining_base_block WHERE task_id = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("query mining base blocks fail: %w", err) - } - defer rows.Close() - - for rows.Next() { - var cid BlockCID - if err := rows.Scan(&cid.CID); err != nil { - return false, err - } - details.BlockCIDs = append(details.BlockCIDs, cid) - } - - if err := rows.Err(); err != nil { - return false, xerrors.Errorf("query mining base blocks fail (rows.Err): %w", err) - } - - // construct base - maddr, err := address.NewIDAddress(details.SpID) - if err != nil { - return false, err - } - - var bcids []cid.Cid - for _, c := range details.BlockCIDs { - bcid, err := cid.Parse(c.CID) - if err != nil { - return false, err - } - bcids = append(bcids, bcid) - } - - tsk := types.NewTipSetKey(bcids...) 
- baseTs, err := t.api.ChainGetTipSet(ctx, tsk) - if err != nil { - return false, xerrors.Errorf("loading base tipset: %w", err) - } - - base := MiningBase{ - TipSet: baseTs, - AddRounds: abi.ChainEpoch(details.Epoch) - baseTs.Height() - 1, - ComputeTime: details.CompTime, - } - - persistNoWin := func() (bool, error) { - n, err := t.db.Exec(ctx, `UPDATE mining_base_block SET no_win = true WHERE task_id = $1`, taskID) - if err != nil { - return false, xerrors.Errorf("marking base as not-won: %w", err) - } - log.Debugw("persisted no-win", "rows", n) - - if n == 0 { - return false, xerrors.Errorf("persist no win: no rows updated") - } - - return true, nil - } - - // ensure we have a beacon entry for the epoch we're mining on - round := base.epoch() - - _ = retry1(func() (*types.BeaconEntry, error) { - return t.api.StateGetBeaconEntry(ctx, round) - }) - - // MAKE A MINING ATTEMPT!! - log.Debugw("attempting to mine a block", "tipset", types.LogCids(base.TipSet.Cids()), "null-rounds", base.AddRounds) - - mbi, err := t.api.MinerGetBaseInfo(ctx, maddr, round, base.TipSet.Key()) - if err != nil { - return false, xerrors.Errorf("failed to get mining base info: %w", err) - } - if mbi == nil { - // not eligible to mine on this base, we're done here - log.Debugw("WinPoSt not eligible to mine on this base", "tipset", types.LogCids(base.TipSet.Cids())) - return persistNoWin() - } - - if !mbi.EligibleForMining { - // slashed or just have no power yet, we're done here - log.Debugw("WinPoSt not eligible for mining", "tipset", types.LogCids(base.TipSet.Cids())) - return persistNoWin() - } - - if len(mbi.Sectors) == 0 { - log.Warnw("WinPoSt no sectors to mine", "tipset", types.LogCids(base.TipSet.Cids())) - return false, xerrors.Errorf("no sectors selected for winning PoSt") - } - - var rbase types.BeaconEntry - var bvals []types.BeaconEntry - var eproof *types.ElectionProof - - // winner check - { - bvals = mbi.BeaconEntries - rbase = mbi.PrevBeaconEntry - if len(bvals) > 0 { - rbase 
= bvals[len(bvals)-1] - } - - eproof, err = gen.IsRoundWinner(ctx, round, maddr, rbase, mbi, t.api) - if err != nil { - log.Warnw("WinPoSt failed to check if we win next round", "error", err) - return false, xerrors.Errorf("failed to check if we win next round: %w", err) - } - - if eproof == nil { - // not a winner, we're done here - log.Debugw("WinPoSt not a winner", "tipset", types.LogCids(base.TipSet.Cids())) - return persistNoWin() - } - } - - log.Infow("WinPostTask won election", "tipset", types.LogCids(base.TipSet.Cids()), "miner", maddr, "round", round, "eproof", eproof) - - // winning PoSt - var wpostProof []prooftypes.PoStProof - { - buf := new(bytes.Buffer) - if err := maddr.MarshalCBOR(buf); err != nil { - err = xerrors.Errorf("failed to marshal miner address: %w", err) - return false, err - } - - brand, err := lrand.DrawRandomnessFromBase(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, round, buf.Bytes()) - if err != nil { - err = xerrors.Errorf("failed to get randomness for winning post: %w", err) - return false, err - } - - prand := abi.PoStRandomness(brand) - prand[31] &= 0x3f // make into fr - - sectorNums := make([]abi.SectorNumber, len(mbi.Sectors)) - for i, s := range mbi.Sectors { - sectorNums[i] = s.SectorNumber - } - - ppt, err := mbi.Sectors[0].SealProof.RegisteredWinningPoStProof() - if err != nil { - return false, xerrors.Errorf("mapping sector seal proof type to post proof type: %w", err) - } - - postChallenges, err := ffi.GeneratePoStFallbackSectorChallenges(ppt, abi.ActorID(details.SpID), prand, sectorNums) - if err != nil { - return false, xerrors.Errorf("generating election challenges: %v", err) - } - - sectorChallenges := make([]storiface.PostSectorChallenge, len(mbi.Sectors)) - for i, s := range mbi.Sectors { - sectorChallenges[i] = storiface.PostSectorChallenge{ - SealProof: s.SealProof, - SectorNumber: s.SectorNumber, - SealedCID: s.SealedCID, - Challenge: postChallenges.Challenges[s.SectorNumber], - Update: 
s.SectorKey != nil, - } - } - - _, err = t.generateWinningPost(ctx, ppt, abi.ActorID(details.SpID), sectorChallenges, prand) - //wpostProof, err = t.prover.GenerateWinningPoSt(ctx, ppt, abi.ActorID(details.SpID), sectorChallenges, prand) - if err != nil { - err = xerrors.Errorf("failed to compute winning post proof: %w", err) - return false, err - } - } - - log.Infow("WinPostTask winning PoSt computed", "tipset", types.LogCids(base.TipSet.Cids()), "miner", maddr, "round", round, "proofs", wpostProof) - - ticket, err := t.computeTicket(ctx, maddr, &rbase, round, base.TipSet.MinTicket(), mbi) - if err != nil { - return false, xerrors.Errorf("scratching ticket failed: %w", err) - } - - // get pending messages early, - msgs, err := t.api.MpoolSelect(ctx, base.TipSet.Key(), ticket.Quality()) - if err != nil { - return false, xerrors.Errorf("failed to select messages for block: %w", err) - } - - log.Infow("WinPostTask selected messages", "tipset", types.LogCids(base.TipSet.Cids()), "miner", maddr, "round", round, "messages", len(msgs)) - - // equivocation handling - { - // This next block exists to "catch" equivocating miners, - // who submit 2 blocks at the same height at different times in order to split the network. - // To safeguard against this, we make sure it's been EquivocationDelaySecs since our base was calculated, - // then re-calculate it. - // If the daemon detected equivocated blocks, those blocks will no longer be in the new base. 
- time.Sleep(time.Until(base.ComputeTime.Add(time.Duration(build.EquivocationDelaySecs) * time.Second))) - - bestTs, err := t.api.ChainHead(ctx) - if err != nil { - return false, xerrors.Errorf("failed to get chain head: %w", err) - } - - headWeight, err := t.api.ChainTipSetWeight(ctx, bestTs.Key()) - if err != nil { - return false, xerrors.Errorf("failed to get chain head weight: %w", err) - } - - baseWeight, err := t.api.ChainTipSetWeight(ctx, base.TipSet.Key()) - if err != nil { - return false, xerrors.Errorf("failed to get base weight: %w", err) - } - if types.BigCmp(headWeight, baseWeight) <= 0 { - bestTs = base.TipSet - } - - // If the base has changed, we take the _intersection_ of our old base and new base, - // thus ejecting blocks from any equivocating miners, without taking any new blocks. - if bestTs.Height() == base.TipSet.Height() && !bestTs.Equals(base.TipSet) { - log.Warnf("base changed from %s to %s, taking intersection", base.TipSet.Key(), bestTs.Key()) - newBaseMap := map[cid.Cid]struct{}{} - for _, newBaseBlk := range bestTs.Cids() { - newBaseMap[newBaseBlk] = struct{}{} - } - - refreshedBaseBlocks := make([]*types.BlockHeader, 0, len(base.TipSet.Cids())) - for _, baseBlk := range base.TipSet.Blocks() { - if _, ok := newBaseMap[baseBlk.Cid()]; ok { - refreshedBaseBlocks = append(refreshedBaseBlocks, baseBlk) - } - } - - if len(refreshedBaseBlocks) != 0 && len(refreshedBaseBlocks) != len(base.TipSet.Blocks()) { - refreshedBase, err := types.NewTipSet(refreshedBaseBlocks) - if err != nil { - return false, xerrors.Errorf("failed to create new tipset when refreshing: %w", err) - } - - if !base.TipSet.MinTicket().Equals(refreshedBase.MinTicket()) { - log.Warn("recomputing ticket due to base refresh") - - ticket, err = t.computeTicket(ctx, maddr, &rbase, round, refreshedBase.MinTicket(), mbi) - if err != nil { - return false, xerrors.Errorf("failed to refresh ticket: %w", err) - } - } - - log.Warn("re-selecting messages due to base refresh") - // 
refresh messages, as the selected messages may no longer be valid - msgs, err = t.api.MpoolSelect(ctx, refreshedBase.Key(), ticket.Quality()) - if err != nil { - return false, xerrors.Errorf("failed to re-select messages for block: %w", err) - } - - base.TipSet = refreshedBase - } - } - } - - log.Infow("WinPostTask base ready", "tipset", types.LogCids(base.TipSet.Cids()), "miner", maddr, "round", round, "ticket", ticket) - - // block construction - var blockMsg *types.BlockMsg - { - uts := base.TipSet.MinTimestamp() + build.BlockDelaySecs*(uint64(base.AddRounds)+1) - - blockMsg, err = t.api.MinerCreateBlock(context.TODO(), &api.BlockTemplate{ - Miner: maddr, - Parents: base.TipSet.Key(), - Ticket: ticket, - Eproof: eproof, - BeaconValues: bvals, - Messages: msgs, - Epoch: round, - Timestamp: uts, - WinningPoStProof: wpostProof, - }) - if err != nil { - return false, xerrors.Errorf("failed to create block: %w", err) - } - } - - log.Infow("WinPostTask block ready", "tipset", types.LogCids(base.TipSet.Cids()), "miner", maddr, "round", round, "block", blockMsg.Header.Cid(), "timestamp", blockMsg.Header.Timestamp) - - // persist in db - { - bhjson, err := json.Marshal(blockMsg.Header) - if err != nil { - return false, xerrors.Errorf("failed to marshal block header: %w", err) - } - - _, err = t.db.Exec(ctx, `UPDATE mining_tasks - SET won = true, mined_cid = $2, mined_header = $3, mined_at = $4 - WHERE task_id = $1`, taskID, blockMsg.Header.Cid(), string(bhjson), time.Now().UTC()) - if err != nil { - return false, xerrors.Errorf("failed to update mining task: %w", err) - } - } - - // wait until block timestamp - { - log.Infow("WinPostTask waiting for block timestamp", "tipset", types.LogCids(base.TipSet.Cids()), "miner", maddr, "round", round, "block", blockMsg.Header.Cid(), "until", time.Unix(int64(blockMsg.Header.Timestamp), 0)) - time.Sleep(time.Until(time.Unix(int64(blockMsg.Header.Timestamp), 0))) - } - - // submit block!! 
- { - log.Infow("WinPostTask submitting block", "tipset", types.LogCids(base.TipSet.Cids()), "miner", maddr, "round", round, "block", blockMsg.Header.Cid()) - if err := t.api.SyncSubmitBlock(ctx, blockMsg); err != nil { - return false, xerrors.Errorf("failed to submit block: %w", err) - } - } - - log.Infow("mined a block", "tipset", types.LogCids(blockMsg.Header.Parents), "height", blockMsg.Header.Height, "miner", maddr, "cid", blockMsg.Header.Cid()) - - // persist that we've submitted the block - { - _, err = t.db.Exec(ctx, `UPDATE mining_tasks - SET submitted_at = $2 - WHERE task_id = $1`, taskID, time.Now().UTC()) - if err != nil { - return false, xerrors.Errorf("failed to update mining task: %w", err) - } - } - - return true, nil -} - -func (t *WinPostTask) generateWinningPost( - ctx context.Context, - ppt abi.RegisteredPoStProof, - mid abi.ActorID, - sectors []storiface.PostSectorChallenge, - randomness abi.PoStRandomness) ([]proof.PoStProof, error) { - - // don't throttle winningPoSt - // * Always want it done asap - // * It's usually just one sector - - vproofs := make([][]byte, len(sectors)) - eg := errgroup.Group{} - - for i, s := range sectors { - i, s := i, s - eg.Go(func() error { - vanilla, err := t.paths.GenerateSingleVanillaProof(ctx, mid, s, ppt) - if err != nil { - return xerrors.Errorf("get winning sector:%d,vanila failed: %w", s.SectorNumber, err) - } - if vanilla == nil { - return xerrors.Errorf("get winning sector:%d,vanila is nil", s.SectorNumber) - } - vproofs[i] = vanilla - return nil - }) - } - if err := eg.Wait(); err != nil { - return nil, err - } - - return ffiselect.FFISelect{}.GenerateWinningPoStWithVanilla(ppt, mid, randomness, vproofs) - -} - -func (t *WinPostTask) CanAccept(ids []harmonytask.TaskID, engine *harmonytask.TaskEngine) (*harmonytask.TaskID, error) { - if len(ids) == 0 { - // probably can't happen, but panicking is bad - return nil, nil - } - - // select lowest epoch - var lowestEpoch abi.ChainEpoch - var lowestEpochID = 
ids[0] - for _, id := range ids { - var epoch uint64 - err := t.db.QueryRow(context.Background(), `SELECT epoch FROM mining_tasks WHERE task_id = $1`, id).Scan(&epoch) - if err != nil { - return nil, err - } - - if lowestEpoch == 0 || abi.ChainEpoch(epoch) < lowestEpoch { - lowestEpoch = abi.ChainEpoch(epoch) - lowestEpochID = id - } - } - - return &lowestEpochID, nil -} - -func (t *WinPostTask) TypeDetails() harmonytask.TaskTypeDetails { - return harmonytask.TaskTypeDetails{ - Name: "WinPost", - Max: t.max, - MaxFailures: 3, - Follows: nil, - Cost: resources.Resources{ - Cpu: 1, - - // todo set to something for 32/64G sector sizes? Technically windowPoSt is happy on a CPU - // but it will use a GPU if available - Gpu: 0, - - Ram: 1 << 30, // todo arbitrary number - }, - } -} - -func (t *WinPostTask) Adder(taskFunc harmonytask.AddTaskFunc) { - t.mineTF.Set(taskFunc) -} - -// MiningBase is the tipset on top of which we plan to construct our next block. -// Refer to godocs on GetBestMiningCandidate. 
-type MiningBase struct { - TipSet *types.TipSet - ComputeTime time.Time - AddRounds abi.ChainEpoch -} - -func (mb MiningBase) epoch() abi.ChainEpoch { - // return the epoch that will result from mining on this base - return mb.TipSet.Height() + mb.AddRounds + 1 -} - -func (mb MiningBase) baseTime() time.Time { - tsTime := time.Unix(int64(mb.TipSet.MinTimestamp()), 0) - roundDelay := build.BlockDelaySecs * uint64(mb.AddRounds+1) - tsTime = tsTime.Add(time.Duration(roundDelay) * time.Second) - return tsTime -} - -func (mb MiningBase) afterPropDelay() time.Time { - return mb.baseTime().Add(time.Duration(build.PropagationDelaySecs) * time.Second).Add(randTimeOffset(time.Second)) -} - -func (t *WinPostTask) mineBasic(ctx context.Context) { - var workBase MiningBase - - taskFn := t.mineTF.Val(ctx) - - // initialize workbase - { - head := retry1(func() (*types.TipSet, error) { - return t.api.ChainHead(ctx) - }) - - workBase = MiningBase{ - TipSet: head, - AddRounds: 0, - ComputeTime: time.Now(), - } - } - - /* - - /- T+0 == workBase.baseTime - | - >--------*------*--------[wait until next round]-----> - | - |- T+PD == workBase.afterPropDelay+(~1s) - |- Here we acquire the new workBase, and start a new round task - \- Then we loop around, and wait for the next head - - time --> - */ - - for { - // limit the rate at which we mine blocks to at least EquivocationDelaySecs - // this is to prevent races on devnets in catch up mode. Acts as a minimum - // delay for the sleep below. 
- time.Sleep(time.Duration(build.EquivocationDelaySecs)*time.Second + time.Second) - - // wait for *NEXT* propagation delay - time.Sleep(time.Until(workBase.afterPropDelay())) - - // check current best candidate - maybeBase := retry1(func() (*types.TipSet, error) { - return t.api.ChainHead(ctx) - }) - - if workBase.TipSet.Equals(maybeBase) { - // workbase didn't change in the new round so we have a null round here - workBase.AddRounds++ - log.Debugw("workbase update", "tipset", workBase.TipSet.Cids(), "nulls", workBase.AddRounds, "lastUpdate", time.Since(workBase.ComputeTime), "type", "same-tipset") - } else { - btsw := retry1(func() (types.BigInt, error) { - return t.api.ChainTipSetWeight(ctx, maybeBase.Key()) - }) - - ltsw := retry1(func() (types.BigInt, error) { - return t.api.ChainTipSetWeight(ctx, workBase.TipSet.Key()) - }) - - if types.BigCmp(btsw, ltsw) <= 0 { - // new tipset for some reason has less weight than the old one, assume null round here - // NOTE: the backing node may have reorged, or manually changed head - workBase.AddRounds++ - log.Debugw("workbase update", "tipset", workBase.TipSet.Cids(), "nulls", workBase.AddRounds, "lastUpdate", time.Since(workBase.ComputeTime), "type", "prefer-local-weight") - } else { - // new tipset has more weight, so we should mine on it, no null round here - log.Debugw("workbase update", "tipset", workBase.TipSet.Cids(), "nulls", workBase.AddRounds, "lastUpdate", time.Since(workBase.ComputeTime), "type", "prefer-new-tipset") - - workBase = MiningBase{ - TipSet: maybeBase, - AddRounds: 0, - ComputeTime: time.Now(), - } - } - } - - // dispatch mining task - // (note equivocation prevention is handled by the mining code) - - baseEpoch := workBase.TipSet.Height() - - for act := range t.actors { - spID, err := address.IDFromAddress(address.Address(act)) - if err != nil { - log.Errorf("failed to get spID from address %s: %s", act, err) - continue - } - - taskFn(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit 
bool, seriousError error) { - // First we check if the mining base includes blocks we may have mined previously to avoid getting slashed - // select mining_tasks where epoch==base_epoch if win=true to maybe get base block cid which has to be included in our tipset - var baseBlockCids []string - err := tx.Select(&baseBlockCids, `SELECT mined_cid FROM mining_tasks WHERE epoch = $1 AND sp_id = $2 AND won = true`, baseEpoch, spID) - if err != nil { - return false, xerrors.Errorf("querying mining_tasks: %w", err) - } - if len(baseBlockCids) >= 1 { - baseBlockCid := baseBlockCids[0] - c, err := cid.Parse(baseBlockCid) - if err != nil { - return false, xerrors.Errorf("parsing mined_cid: %w", err) - } - - // we have mined in the previous round, make sure that our block is included in the tipset - // if it's not we risk getting slashed - - var foundOurs bool - for _, c2 := range workBase.TipSet.Cids() { - if c == c2 { - foundOurs = true - break - } - } - if !foundOurs { - log.Errorw("our block was not included in the tipset, aborting", "tipset", workBase.TipSet.Cids(), "ourBlock", c) - return false, xerrors.Errorf("our block was not included in the tipset, aborting") - } - } - - _, err = tx.Exec(`INSERT INTO mining_tasks (task_id, sp_id, epoch, base_compute_time) VALUES ($1, $2, $3, $4)`, id, spID, workBase.epoch(), workBase.ComputeTime.UTC()) - if err != nil { - return false, xerrors.Errorf("inserting mining_tasks: %w", err) - } - - for _, c := range workBase.TipSet.Cids() { - _, err = tx.Exec(`INSERT INTO mining_base_block (task_id, sp_id, block_cid) VALUES ($1, $2, $3)`, id, spID, c) - if err != nil { - return false, xerrors.Errorf("inserting mining base blocks: %w", err) - } - } - - return true, nil // no errors, commit the transaction - }) - } - } -} - -func (t *WinPostTask) computeTicket(ctx context.Context, maddr address.Address, brand *types.BeaconEntry, round abi.ChainEpoch, chainRand *types.Ticket, mbi *api.MiningBaseInfo) (*types.Ticket, error) { - buf := 
new(bytes.Buffer) - if err := maddr.MarshalCBOR(buf); err != nil { - return nil, xerrors.Errorf("failed to marshal address to cbor: %w", err) - } - - if round > build.UpgradeSmokeHeight { - buf.Write(chainRand.VRFProof) - } - - input, err := lrand.DrawRandomnessFromBase(brand.Data, crypto.DomainSeparationTag_TicketProduction, round-build.TicketRandomnessLookback, buf.Bytes()) - if err != nil { - return nil, err - } - - vrfOut, err := gen.ComputeVRF(ctx, t.api.WalletSign, mbi.WorkerKey, input) - if err != nil { - return nil, err - } - - return &types.Ticket{ - VRFProof: vrfOut, - }, nil -} - -func randTimeOffset(width time.Duration) time.Duration { - buf := make([]byte, 8) - _, _ = rand.Reader.Read(buf) - val := time.Duration(binary.BigEndian.Uint64(buf) % uint64(width)) - - return val - (width / 2) -} - -func retry1[R any](f func() (R, error)) R { - for { - r, err := f() - if err == nil { - return r - } - - log.Errorw("error in mining loop, retrying", "error", err) - time.Sleep(time.Second) - } -} - -var _ harmonytask.TaskInterface = &WinPostTask{} diff --git a/go.mod b/go.mod index 4341fa321a6..05332c87e33 100644 --- a/go.mod +++ b/go.mod @@ -12,12 +12,10 @@ require ( github.com/DataDog/zstd v1.4.5 github.com/GeertJohan/go.rice v1.0.3 github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee - github.com/KarpelesLab/reflink v1.0.1 github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921 github.com/buger/goterm v1.0.3 - github.com/charmbracelet/lipgloss v0.10.0 github.com/chzyer/readline v1.5.1 github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe github.com/containerd/cgroups v1.1.0 @@ -123,7 +121,6 @@ require ( github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-sqlite3 v1.14.16 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 - github.com/minio/sha256-simd v1.0.1 github.com/mitchellh/go-homedir 
v1.1.0 github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.12.3 @@ -133,7 +130,6 @@ require ( github.com/multiformats/go-multihash v0.2.3 github.com/multiformats/go-varint v0.0.7 github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 - github.com/pkg/errors v0.9.1 github.com/polydawn/refmt v0.89.0 github.com/prometheus/client_golang v1.18.0 github.com/puzpuzpuz/xsync/v2 v2.4.0 @@ -162,12 +158,10 @@ require ( go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 golang.org/x/crypto v0.21.0 - golang.org/x/exp v0.0.0-20240213143201-ec583247a57a golang.org/x/net v0.23.0 golang.org/x/sync v0.6.0 golang.org/x/sys v0.18.0 golang.org/x/term v0.18.0 - golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 golang.org/x/tools v0.18.0 golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 @@ -183,7 +177,6 @@ require ( github.com/StackExchange/wmi v1.2.1 // indirect github.com/akavel/rsrc v0.8.0 // indirect github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect - github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -284,9 +277,8 @@ require ( github.com/miekg/dns v1.1.58 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect + github.com/minio/sha256-simd v1.0.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect - github.com/muesli/reflow v0.3.0 // indirect - github.com/muesli/termenv v0.15.2 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multistream v0.5.0 // indirect @@ -297,6 +289,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/petar/GoLLRB 
v0.0.0-20210522233825-ae3b015fd3e9 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.6.0 // indirect github.com/prometheus/common v0.47.0 // indirect @@ -329,7 +322,9 @@ require ( go.uber.org/dig v1.17.1 // indirect go.uber.org/mock v0.4.0 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect + golang.org/x/exp v0.0.0-20240213143201-ec583247a57a // indirect golang.org/x/mod v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect gonum.org/v1/gonum v0.14.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 // indirect google.golang.org/grpc v1.60.1 // indirect diff --git a/go.sum b/go.sum index 5d4f99ce679..18a2670ccf8 100644 --- a/go.sum +++ b/go.sum @@ -109,8 +109,6 @@ github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6l github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= -github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -168,8 +166,6 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charmbracelet/lipgloss v0.10.0 h1:KWeXFSexGcfahHX+54URiZGkBFazf70JNMtwg/AFW3s= -github.com/charmbracelet/lipgloss v0.10.0/go.mod h1:Wig9DSfvANsxqkRsqj6x87irdy123SR4dOXlKa91ciE= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= @@ -1203,8 +1199,6 @@ github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0Q github.com/magefile/mage v1.9.0 h1:t3AU2wNwehMCW97vuqQLtw6puppWXHO+O2MHo5a50XE= github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magik6k/reflink v1.0.2-patch1 h1:NXSgQugcESI8Z/jBtuAI83YsZuRauY9i9WOyOnJ7Vns= -github.com/magik6k/reflink v1.0.2-patch1/go.mod h1:WGkTOKNjd1FsJKBw3mu4JvrPEDJyJJ+JPtxBkbPoCok= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1238,7 +1232,6 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= -github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth 
v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= @@ -1288,10 +1281,6 @@ github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjW github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= -github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= -github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= -github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= diff --git a/itests/curio_test.go b/itests/curio_test.go index bea14e63d5a..d8462494c13 100644 --- a/itests/curio_test.go +++ b/itests/curio_test.go @@ -1,50 +1,7 @@ package itests -import ( - "context" - "encoding/base64" - "flag" - "fmt" - "net" - "os" - "path" - "testing" - "time" - - "github.com/docker/go-units" - "github.com/gbrlsnchs/jwt/v3" - "github.com/google/uuid" - logging "github.com/ipfs/go-log/v2" - manet "github.com/multiformats/go-multiaddr/net" - "github.com/stretchr/testify/require" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/v1api" - miner2 
"github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/cli/spcli/createminer" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/cmd/curio/rpc" - "github.com/filecoin-project/lotus/cmd/curio/tasks" - "github.com/filecoin-project/lotus/curiosrc/market/lmrpc" - "github.com/filecoin-project/lotus/curiosrc/seal" - "github.com/filecoin-project/lotus/itests/kit" - "github.com/filecoin-project/lotus/lib/ffiselect" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/impl" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -func TestCurioNewActor(t *testing.T) { +/* +func SKIPTestCurioNewActor(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -93,7 +50,7 @@ func TestCurioNewActor(t *testing.T) { require.Contains(t, baseCfg.Addresses[0].MinerAddresses, maddr.String()) } -func TestCurioHappyPath(t *testing.T) { +func SKIPTestCurioHappyPath(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -397,3 +354,4 @@ func ConstructCurioTest(ctx context.Context, t *testing.T, dir string, db *harmo return capi, taskEngine.GracefullyTerminate, ccloser, finishCh } +*/ diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go index 97a222b178a..91b2d0df84b 100644 --- a/itests/kit/ensemble.go +++ b/itests/kit/ensemble.go @@ -20,7 +20,6 @@ import ( "github.com/libp2p/go-libp2p/core/peer" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/stretchr/testify/require" - "github.com/urfave/cli/v2" "github.com/filecoin-project/go-address" cborutil "github.com/filecoin-project/go-cbor-util" @@ -47,9 +46,6 @@ import ( "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" 
"github.com/filecoin-project/lotus/chain/wallet/key" - "github.com/filecoin-project/lotus/cmd/curio/deps" - "github.com/filecoin-project/lotus/cmd/curio/rpc" - "github.com/filecoin-project/lotus/cmd/curio/tasks" "github.com/filecoin-project/lotus/cmd/lotus-seed/seed" "github.com/filecoin-project/lotus/cmd/lotus-worker/sealworker" "github.com/filecoin-project/lotus/gateway" @@ -125,17 +121,15 @@ type Ensemble struct { options *ensembleOpts inactive struct { - fullnodes []*TestFullNode - providernodes []*TestCurioNode - miners []*TestMiner - workers []*TestWorker + fullnodes []*TestFullNode + miners []*TestMiner + workers []*TestWorker } active struct { - fullnodes []*TestFullNode - providernodes []*TestCurioNode - miners []*TestMiner - workers []*TestWorker - bms map[*TestMiner]*BlockMiner + fullnodes []*TestFullNode + miners []*TestMiner + workers []*TestWorker + bms map[*TestMiner]*BlockMiner } genesis struct { version network.Version @@ -229,20 +223,6 @@ func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble { return n } -// FullNode enrolls a new Curio node. -func (n *Ensemble) Curio(cu *TestCurioNode, opts ...NodeOpt) *Ensemble { - options := DefaultNodeOpts - for _, o := range opts { - err := o(&options) - require.NoError(n.t, err) - } - - *cu = TestCurioNode{t: n.t, options: options, Deps: &deps.Deps{}} - - n.inactive.providernodes = append(n.inactive.providernodes, cu) - return n -} - // Miner enrolls a new miner, using the provided full node for chain // interactions. func (n *Ensemble) MinerEnroll(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble { @@ -904,28 +884,6 @@ func (n *Ensemble) Start() *Ensemble { // to active, so clear the slice. 
n.inactive.workers = n.inactive.workers[:0] - for _, p := range n.inactive.providernodes { - - // TODO setup config with options - err := p.Deps.PopulateRemainingDeps(context.Background(), &cli.Context{}, false) - require.NoError(n.t, err) - - shutdownChan := make(chan struct{}) - taskEngine, err := tasks.StartTasks(ctx, p.Deps) - if err != nil { - return nil - } - defer taskEngine.GracefullyTerminate() - - err = rpc.ListenAndServe(ctx, p.Deps, shutdownChan) // Monitor for shutdown. - require.NoError(n.t, err) - finishCh := node.MonitorShutdown(shutdownChan) //node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper}, - //node.ShutdownHandler{Component: "provider", StopFunc: stop}, - - <-finishCh - - n.active.providernodes = append(n.active.providernodes, p) - } // --------------------- // MISC // --------------------- diff --git a/itests/kit/ensemble_presets.go b/itests/kit/ensemble_presets.go index 10a83be7dd5..3ec39cf9095 100644 --- a/itests/kit/ensemble_presets.go +++ b/itests/kit/ensemble_presets.go @@ -101,21 +101,6 @@ func EnsembleOneTwo(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMine return &full, &one, &two, ens } -// EnsembleProvider creates and starts an Ensemble with a single full node and a single Curio. -// It does not interconnect nodes nor does it begin mining. 
-func EnsembleProvider(t *testing.T, opts ...interface{}) (*TestFullNode, *TestCurioNode, *Ensemble) { - opts = append(opts, WithAllSubsystems()) - - eopts, nopts := siftOptions(t, opts) - - var ( - full TestFullNode - provider TestCurioNode - ) - ens := NewEnsemble(t, eopts...).FullNode(&full, nopts...).Curio(&provider, nopts...).Start() - return &full, &provider, ens -} - func siftOptions(t *testing.T, opts []interface{}) (eopts []EnsembleOpt, nopts []NodeOpt) { for _, v := range opts { switch o := v.(type) { diff --git a/itests/kit/node_full.go b/itests/kit/node_full.go index d44d691dd35..3e80ed68869 100644 --- a/itests/kit/node_full.go +++ b/itests/kit/node_full.go @@ -22,7 +22,6 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet/key" cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/cmd/curio/deps" "github.com/filecoin-project/lotus/gateway" "github.com/filecoin-project/lotus/node" ) @@ -55,17 +54,6 @@ type TestFullNode struct { options nodeOpts } -// TestCurioNode represents a Curio node enrolled in an Ensemble. -type TestCurioNode struct { - v1api.CurioStruct - - t *testing.T - - *deps.Deps - - options nodeOpts -} - func MergeFullNodes(fullNodes []*TestFullNode) *TestFullNode { var wrappedFullNode TestFullNode var fns api.FullNodeStruct diff --git a/lib/ffiselect/ffidirect/ffi-direct.go b/lib/ffiselect/ffidirect/ffi-direct.go deleted file mode 100644 index 23d6d28b5a6..00000000000 --- a/lib/ffiselect/ffidirect/ffi-direct.go +++ /dev/null @@ -1,71 +0,0 @@ -// This is a wrapper around the FFI functions that allows them to be called by reflection. -// For the Curio GPU selector, see lib/ffiselect/ffiselect.go. 
-package ffidirect - -import ( - "github.com/ipfs/go-cid" - - ffi "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/proof" -) - -// This allow reflection access to the FFI functions. -type FFI struct{} - -type ErrorString = string - -func untypeError1[R any](r R, err error) (R, string) { - if err == nil { - return r, "" - } - - return r, err.Error() -} - -func untypeError2[R1, R2 any](r1 R1, r2 R2, err error) (R1, R2, string) { - if err == nil { - return r1, r2, "" - } - - return r1, r2, err.Error() -} - -func (FFI) GenerateSinglePartitionWindowPoStWithVanilla( - proofType abi.RegisteredPoStProof, - minerID abi.ActorID, - randomness abi.PoStRandomness, - proofs [][]byte, - partitionIndex uint, -) (*ffi.PartitionProof, ErrorString) { - return untypeError1(ffi.GenerateSinglePartitionWindowPoStWithVanilla(proofType, minerID, randomness, proofs, partitionIndex)) -} - -func (FFI) SealPreCommitPhase2( - phase1Output []byte, - cacheDirPath string, - sealedSectorPath string, -) (sealedCID cid.Cid, unsealedCID cid.Cid, err ErrorString) { - return untypeError2(ffi.SealPreCommitPhase2(phase1Output, cacheDirPath, sealedSectorPath)) -} - -func (FFI) SealCommitPhase2( - phase1Output []byte, - sectorNum abi.SectorNumber, - minerID abi.ActorID, -) ([]byte, ErrorString) { - return untypeError1(ffi.SealCommitPhase2(phase1Output, sectorNum, minerID)) -} - -func (FFI) GenerateWinningPoStWithVanilla( - proofType abi.RegisteredPoStProof, - minerID abi.ActorID, - randomness abi.PoStRandomness, - proofs [][]byte, -) ([]proof.PoStProof, ErrorString) { - return untypeError1(ffi.GenerateWinningPoStWithVanilla(proofType, minerID, randomness, proofs)) -} - -func (FFI) SelfTest(val1 int, val2 cid.Cid) (int, cid.Cid, ErrorString) { - return untypeError2(val1, val2, nil) -} diff --git a/lib/ffiselect/ffiselect.go b/lib/ffiselect/ffiselect.go deleted file mode 100644 index d485d7fc640..00000000000 --- 
a/lib/ffiselect/ffiselect.go +++ /dev/null @@ -1,262 +0,0 @@ -package ffiselect - -import ( - "bytes" - "encoding/gob" - "io" - "os" - "os/exec" - "reflect" - "strconv" - "strings" - - "github.com/ipfs/go-cid" - "github.com/samber/lo" - "golang.org/x/xerrors" - - ffi "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/proof" - - "github.com/filecoin-project/lotus/curiosrc/build" - "github.com/filecoin-project/lotus/lib/ffiselect/ffidirect" -) - -var IsTest = false -var IsCuda = build.IsOpencl != "1" - -// Get all devices from ffi -var ch chan string - -func init() { - devices, err := ffi.GetGPUDevices() - if err != nil { - panic(err) - } - ch = make(chan string, len(devices)) - for i := 0; i < len(devices); i++ { - ch <- strconv.Itoa(i) - } -} - -type ValErr struct { - Val []interface{} - Err string -} - -// This is not the one you're looking for. -type FFICall struct { - Fn string - Args []interface{} -} - -func subStrInSet(set []string, sub string) bool { - return lo.Reduce(set, func(agg bool, item string, _ int) bool { return agg || strings.Contains(item, sub) }, false) -} - -func call(logctx []any, fn string, args ...interface{}) ([]interface{}, error) { - if IsTest { - return callTest(logctx, fn, args...) - } - - // get dOrdinal - dOrdinal := <-ch - defer func() { - ch <- dOrdinal - }() - - p, err := os.Executable() - if err != nil { - return nil, err - } - - commandAry := []string{"ffi"} - cmd := exec.Command(p, commandAry...) 
- - // Set Visible Devices for CUDA and OpenCL - cmd.Env = append(os.Environ(), - func(isCuda bool) string { - if isCuda { - return "CUDA_VISIBLE_DEVICES=" + dOrdinal - } - return "GPU_DEVICE_ORDINAL=" + dOrdinal - }(IsCuda)) - tmpDir, err := os.MkdirTemp("", "rust-fil-proofs") - if err != nil { - return nil, err - } - cmd.Env = append(cmd.Env, "TMPDIR="+tmpDir) - - if !subStrInSet(cmd.Env, "RUST_LOG") { - cmd.Env = append(cmd.Env, "RUST_LOG=debug") - } - if !subStrInSet(cmd.Env, "FIL_PROOFS_USE_GPU_COLUMN_BUILDER") { - cmd.Env = append(cmd.Env, "FIL_PROOFS_USE_GPU_COLUMN_BUILDER=1") - } - if !subStrInSet(cmd.Env, "FIL_PROOFS_USE_GPU_TREE_BUILDER") { - cmd.Env = append(cmd.Env, "FIL_PROOFS_USE_GPU_TREE_BUILDER=1") - } - - defer func() { _ = os.RemoveAll(tmpDir) }() - - lw := NewLogWriter(logctx, os.Stderr) - - cmd.Stderr = lw - cmd.Stdout = os.Stdout - outFile, err := os.CreateTemp("", "out") - if err != nil { - return nil, err - } - cmd.ExtraFiles = []*os.File{outFile} - var encArgs bytes.Buffer - err = gob.NewEncoder(&encArgs).Encode(FFICall{ - Fn: fn, - Args: args, - }) - if err != nil { - return nil, xerrors.Errorf("subprocess caller cannot encode: %w", err) - } - - cmd.Stdin = &encArgs - err = cmd.Run() - if err != nil { - return nil, err - } - - // seek to start - if _, err := outFile.Seek(0, io.SeekStart); err != nil { - return nil, xerrors.Errorf("failed to seek to beginning of output file: %w", err) - } - - var ve ValErr - err = gob.NewDecoder(outFile).Decode(&ve) - if err != nil { - return nil, xerrors.Errorf("subprocess caller cannot decode: %w", err) - } - if ve.Err != "" { - return nil, xerrors.Errorf("subprocess failure: %s", ve.Err) - } - if ve.Val[len(ve.Val)-1].(ffidirect.ErrorString) != "" { - return nil, xerrors.Errorf("subprocess call error: %s", ve.Val[len(ve.Val)-1].(ffidirect.ErrorString)) - } - return ve.Val, nil -} - -///////////Funcs reachable by the GPU selector./////////// -// NOTE: Changes here MUST also change ffi-direct.go - -type 
FFISelect struct{} - -func (FFISelect) GenerateSinglePartitionWindowPoStWithVanilla( - proofType abi.RegisteredPoStProof, - minerID abi.ActorID, - randomness abi.PoStRandomness, - proofs [][]byte, - partitionIndex uint, -) (*ffi.PartitionProof, error) { - logctx := []any{"spid", minerID, "proof_count", len(proofs), "partition_index", partitionIndex} - - val, err := call(logctx, "GenerateSinglePartitionWindowPoStWithVanilla", proofType, minerID, randomness, proofs, partitionIndex) - if err != nil { - return nil, err - } - return val[0].(*ffi.PartitionProof), nil -} -func (FFISelect) SealPreCommitPhase2( - sid abi.SectorID, - phase1Output []byte, - cacheDirPath string, - sealedSectorPath string, -) (sealedCID cid.Cid, unsealedCID cid.Cid, err error) { - logctx := []any{"sector", sid} - - val, err := call(logctx, "SealPreCommitPhase2", phase1Output, cacheDirPath, sealedSectorPath) - if err != nil { - return cid.Undef, cid.Undef, err - } - return val[0].(cid.Cid), val[1].(cid.Cid), nil -} - -func (FFISelect) SealCommitPhase2( - phase1Output []byte, - sectorNum abi.SectorNumber, - minerID abi.ActorID, -) ([]byte, error) { - logctx := []any{"sector", abi.SectorID{Miner: minerID, Number: sectorNum}} - - val, err := call(logctx, "SealCommitPhase2", phase1Output, sectorNum, minerID) - if err != nil { - return nil, err - } - - return val[0].([]byte), nil -} - -func (FFISelect) GenerateWinningPoStWithVanilla( - proofType abi.RegisteredPoStProof, - minerID abi.ActorID, - randomness abi.PoStRandomness, - proofs [][]byte, -) ([]proof.PoStProof, error) { - logctx := []any{"proof_type", proofType, "miner_id", minerID} - - val, err := call(logctx, "GenerateWinningPoStWithVanilla", proofType, minerID, randomness, proofs) - if err != nil { - return nil, err - } - return val[0].([]proof.PoStProof), nil -} - -func (FFISelect) SelfTest(val1 int, val2 cid.Cid) (int, cid.Cid, error) { - val, err := call([]any{"selftest", "true"}, "SelfTest", val1, val2) - if err != nil { - return 0, 
cid.Undef, err - } - return val[0].(int), val[1].(cid.Cid), nil -} - -// ////////////////////////// - -func init() { - registeredTypes := []any{ - ValErr{}, - FFICall{}, - cid.Cid{}, - abi.RegisteredPoStProof(0), - abi.ActorID(0), - abi.PoStRandomness{}, - abi.SectorNumber(0), - ffi.PartitionProof{}, - proof.PoStProof{}, - abi.RegisteredPoStProof(0), - } - var registeredTypeNames = make(map[string]struct{}) - - //Ensure all methods are implemented: - // This is designed to fail for happy-path runs - // and should never actually impact curio users. - for _, t := range registeredTypes { - gob.Register(t) - registeredTypeNames[reflect.TypeOf(t).PkgPath()+"."+reflect.TypeOf(t).Name()] = struct{}{} - } - - to := reflect.TypeOf(ffidirect.FFI{}) - for m := 0; m < to.NumMethod(); m++ { - tm := to.Method(m) - tf := tm.Func - for i := 1; i < tf.Type().NumIn(); i++ { // skipping first arg (struct type) - in := tf.Type().In(i) - nm := in.PkgPath() + "." + in.Name() - if _, ok := registeredTypeNames[nm]; in.PkgPath() != "" && !ok { // built-ins ok - panic("ffiSelect: unregistered type: " + nm + " from " + tm.Name + " arg: " + strconv.Itoa(i)) - } - } - for i := 0; i < tf.Type().NumOut(); i++ { - out := tf.Type().Out(i) - nm := out.PkgPath() + "." 
+ out.Name() - if _, ok := registeredTypeNames[nm]; out.PkgPath() != "" && !ok { // built-ins ok - panic("ffiSelect: unregistered type: " + nm + " from " + tm.Name + " arg: " + strconv.Itoa(i)) - } - } - } -} diff --git a/lib/ffiselect/logparse.go b/lib/ffiselect/logparse.go deleted file mode 100644 index 3508a1f89b6..00000000000 --- a/lib/ffiselect/logparse.go +++ /dev/null @@ -1,88 +0,0 @@ -package ffiselect - -import ( - "bufio" - "bytes" - "io" - "regexp" - "strings" - "time" - - logging "github.com/ipfs/go-log/v2" - "go.uber.org/zap" -) - -var log = logging.Logger("ffiselect") - -type LogWriter struct { - ctx []any - errOut io.Writer - re *regexp.Regexp -} - -func NewLogWriter(logctx []any, errOut io.Writer) *LogWriter { - re := regexp.MustCompile(`^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3})\s+(\w+)\s+(.*)$`) - return &LogWriter{ - ctx: logctx, - errOut: errOut, - re: re, - } -} - -func (lw *LogWriter) Write(p []byte) (n int, err error) { - reader := bufio.NewReader(bytes.NewReader(p)) - for { - line, err := reader.ReadBytes('\n') - if err == io.EOF { - break - } - if err != nil { - return 0, err - } - - lineStr := string(line) - // trim trailing \n - lineStr = strings.TrimSpace(lineStr) - - matches := lw.re.FindStringSubmatch(lineStr) - if matches == nil { - // Line didn't match the expected format, write it to stderr as-is - _, err := lw.errOut.Write(line) - if err != nil { - return 0, err - } - continue - } - - timestamp, logLevel, message := matches[1], matches[2], matches[3] - logTime, err := time.Parse("2006-01-02T15:04:05.000", timestamp) - if err != nil { - _, err := lw.errOut.Write(line) - if err != nil { - return 0, err - } - continue - } - - var zapLevel zap.AtomicLevel - switch logLevel { - case "DEBUG": - zapLevel = zap.NewAtomicLevelAt(zap.DebugLevel) - case "INFO": - zapLevel = zap.NewAtomicLevelAt(zap.InfoLevel) - case "WARN": - zapLevel = zap.NewAtomicLevelAt(zap.WarnLevel) - case "ERROR": - zapLevel = zap.NewAtomicLevelAt(zap.ErrorLevel) - 
default: - _, err := lw.errOut.Write(line) - if err != nil { - return 0, err - } - continue - } - - log.With(zap.Time("timestamp", logTime)).Logw(zapLevel.Level(), message, lw.ctx...) - } - return len(p), nil -} diff --git a/lib/ffiselect/testffi.go b/lib/ffiselect/testffi.go deleted file mode 100644 index 50d7fb3dc31..00000000000 --- a/lib/ffiselect/testffi.go +++ /dev/null @@ -1,27 +0,0 @@ -package ffiselect - -import ( - "reflect" - - "github.com/samber/lo" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/lib/ffiselect/ffidirect" -) - -func callTest(logctx []any, fn string, rawargs ...interface{}) ([]interface{}, error) { - args := lo.Map(rawargs, func(arg any, i int) reflect.Value { - return reflect.ValueOf(arg) - }) - - resAry := reflect.ValueOf(ffidirect.FFI{}).MethodByName(fn).Call(args) - res := lo.Map(resAry, func(res reflect.Value, i int) any { - return res.Interface() - }) - - if res[len(res)-1].(ffidirect.ErrorString) != "" { - return nil, xerrors.Errorf("callTest error: %s", res[len(res)-1].(ffidirect.ErrorString)) - } - - return res, nil -} From 856ea4bba1b48f7dcb5de78552a18676c13a7bcb Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Thu, 20 Jun 2024 17:24:46 -0500 Subject: [PATCH 09/27] rm policy and drand from buildconstants --- build/buildconstants/drand.go | 67 -------------------- build/buildconstants/params_2k.go | 7 --- build/buildconstants/params_butterfly.go | 7 --- build/buildconstants/params_calibnet.go | 7 --- build/buildconstants/params_interop.go | 7 --- build/drand.go | 80 +++++++++++++++++++----- build/params_testground_vals.go | 12 +++- 7 files changed, 75 insertions(+), 112 deletions(-) diff --git a/build/buildconstants/drand.go b/build/buildconstants/drand.go index 0cff7e92ef6..7925acdbaa9 100644 --- a/build/buildconstants/drand.go +++ b/build/buildconstants/drand.go @@ -1,9 +1,5 @@ package buildconstants -import ( - "github.com/filecoin-project/lotus/node/modules/dtypes" -) - type DrandEnum int const ( @@ 
-14,66 +10,3 @@ const ( DrandIncentinet DrandQuicknet ) - -var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ - DrandMainnet: { - Servers: []string{ - "https://api.drand.sh", - "https://api2.drand.sh", - "https://api3.drand.sh", - "https://drand.cloudflare.com", - "https://api.drand.secureweb3.com:6875", // Storswift - }, - Relays: []string{ - "/dnsaddr/api.drand.sh/", - "/dnsaddr/api2.drand.sh/", - "/dnsaddr/api3.drand.sh/", - }, - IsChained: true, - ChainInfoJSON: `{"public_key":"868f005eb8e6e4ca0a47c8a77ceaa5309a47978a7c71bc5cce96366b5d7a569937c529eeda66c7293784a9402801af31","period":30,"genesis_time":1595431050,"hash":"8990e7a9aaed2ffed73dbd7092123d6f289930540d7651336225dc172e51b2ce","groupHash":"176f93498eac9ca337150b46d21dd58673ea4e3581185f869672e59fa4cb390a"}`, - }, - DrandQuicknet: { - Servers: []string{ - "https://api.drand.sh", - "https://api2.drand.sh", - "https://api3.drand.sh", - "https://drand.cloudflare.com", - "https://api.drand.secureweb3.com:6875", // Storswift - }, - Relays: []string{ - "/dnsaddr/api.drand.sh/", - "/dnsaddr/api2.drand.sh/", - "/dnsaddr/api3.drand.sh/", - }, - IsChained: false, - ChainInfoJSON: `{"public_key":"83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a","period":3,"genesis_time":1692803367,"hash":"52db9ba70e0cc0f6eaf7803dd07447a1f5477735fd3f661792ba94600c84e971","groupHash":"f477d5c89f21a17c863a7f937c6a6d15859414d2be09cd448d4279af331c5d3e","schemeID":"bls-unchained-g1-rfc9380","metadata":{"beaconID":"quicknet"}}`, - }, - DrandTestnet: { - Servers: []string{ - "https://pl-eu.testnet.drand.sh", - "https://pl-us.testnet.drand.sh", - }, - Relays: []string{ - "/dnsaddr/pl-eu.testnet.drand.sh/", - "/dnsaddr/pl-us.testnet.drand.sh/", - }, - IsChained: true, - ChainInfoJSON: 
`{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`, - }, - DrandDevnet: { - Servers: []string{ - "https://dev1.drand.sh", - "https://dev2.drand.sh", - }, - Relays: []string{ - "/dnsaddr/dev1.drand.sh/", - "/dnsaddr/dev2.drand.sh/", - }, - IsChained: true, - ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`, - }, - DrandIncentinet: { - IsChained: true, - ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`, - }, -} diff --git a/build/buildconstants/params_2k.go b/build/buildconstants/params_2k.go index 1eb8a167dcc..f7e27f133a4 100644 --- a/build/buildconstants/params_2k.go +++ b/build/buildconstants/params_2k.go @@ -11,8 +11,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" - - "github.com/filecoin-project/lotus/chain/actors/policy" ) const BootstrappersFile = "" @@ -94,11 +92,6 @@ var MinVerifiedDealSize = abi.NewStoragePower(256) var PreCommitChallengeDelay = abi.ChainEpoch(10) func init() { - policy.SetSupportedProofTypes(SupportedProofTypes...) 
- policy.SetConsensusMinerMinPower(ConsensusMinerMinPower) - policy.SetMinVerifiedDealSize(MinVerifiedDealSize) - policy.SetPreCommitChallengeDelay(PreCommitChallengeDelay) - getGenesisNetworkVersion := func(ev string, def network.Version) network.Version { hs, found := os.LookupEnv(ev) if found { diff --git a/build/buildconstants/params_butterfly.go b/build/buildconstants/params_butterfly.go index 3db522117f9..926376e2b32 100644 --- a/build/buildconstants/params_butterfly.go +++ b/build/buildconstants/params_butterfly.go @@ -10,8 +10,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - - "github.com/filecoin-project/lotus/chain/actors/policy" ) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ @@ -78,11 +76,6 @@ var MinVerifiedDealSize = abi.NewStoragePower(1 << 20) var PreCommitChallengeDelay = abi.ChainEpoch(150) func init() { - policy.SetSupportedProofTypes(SupportedProofTypes...) - policy.SetConsensusMinerMinPower(ConsensusMinerMinPower) - policy.SetMinVerifiedDealSize(MinVerifiedDealSize) - policy.SetPreCommitChallengeDelay(PreCommitChallengeDelay) - SetAddressNetwork(address.Testnet) Devnet = true diff --git a/build/buildconstants/params_calibnet.go b/build/buildconstants/params_calibnet.go index c27230545d0..edea92515bb 100644 --- a/build/buildconstants/params_calibnet.go +++ b/build/buildconstants/params_calibnet.go @@ -13,8 +13,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - - "github.com/filecoin-project/lotus/chain/actors/policy" ) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ @@ -108,11 +106,6 @@ var MinVerifiedDealSize = abi.NewStoragePower(1 << 20) var PreCommitChallengeDelay = abi.ChainEpoch(150) func init() { - policy.SetSupportedProofTypes(SupportedProofTypes...) 
- policy.SetConsensusMinerMinPower(ConsensusMinerMinPower) - policy.SetMinVerifiedDealSize(MinVerifiedDealSize) - policy.SetPreCommitChallengeDelay(PreCommitChallengeDelay) - SetAddressNetwork(address.Testnet) Devnet = true diff --git a/build/buildconstants/params_interop.go b/build/buildconstants/params_interop.go index 577238f7128..6fb20049726 100644 --- a/build/buildconstants/params_interop.go +++ b/build/buildconstants/params_interop.go @@ -14,8 +14,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - - "github.com/filecoin-project/lotus/chain/actors/policy" ) var NetworkBundle = "caterpillarnet" @@ -83,11 +81,6 @@ var MinVerifiedDealSize = abi.NewStoragePower(256) var PreCommitChallengeDelay = abi.ChainEpoch(10) func init() { - policy.SetSupportedProofTypes(SupportedProofTypes...) - policy.SetConsensusMinerMinPower(ConsensusMinerMinPower) - policy.SetMinVerifiedDealSize(MinVerifiedDealSize) - policy.SetPreCommitChallengeDelay(PreCommitChallengeDelay) - getUpgradeHeight := func(ev string, def abi.ChainEpoch) abi.ChainEpoch { hs, found := os.LookupEnv(ev) if found { diff --git a/build/drand.go b/build/drand.go index 35e7e2d9c27..18d2204deb0 100644 --- a/build/drand.go +++ b/build/drand.go @@ -7,12 +7,73 @@ import ( "github.com/filecoin-project/lotus/node/modules/dtypes" ) -var DrandSchedule = buildconstants.DrandSchedule +var DrandConfigs = map[buildconstants.DrandEnum]dtypes.DrandConfig{ + buildconstants.DrandMainnet: { + Servers: []string{ + "https://api.drand.sh", + "https://api2.drand.sh", + "https://api3.drand.sh", + "https://drand.cloudflare.com", + "https://api.drand.secureweb3.com:6875", // Storswift + }, + Relays: []string{ + "/dnsaddr/api.drand.sh/", + "/dnsaddr/api2.drand.sh/", + "/dnsaddr/api3.drand.sh/", + }, + IsChained: true, + ChainInfoJSON: 
`{"public_key":"868f005eb8e6e4ca0a47c8a77ceaa5309a47978a7c71bc5cce96366b5d7a569937c529eeda66c7293784a9402801af31","period":30,"genesis_time":1595431050,"hash":"8990e7a9aaed2ffed73dbd7092123d6f289930540d7651336225dc172e51b2ce","groupHash":"176f93498eac9ca337150b46d21dd58673ea4e3581185f869672e59fa4cb390a"}`, + }, + buildconstants.DrandQuicknet: { + Servers: []string{ + "https://api.drand.sh", + "https://api2.drand.sh", + "https://api3.drand.sh", + "https://drand.cloudflare.com", + "https://api.drand.secureweb3.com:6875", // Storswift + }, + Relays: []string{ + "/dnsaddr/api.drand.sh/", + "/dnsaddr/api2.drand.sh/", + "/dnsaddr/api3.drand.sh/", + }, + IsChained: false, + ChainInfoJSON: `{"public_key":"83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a","period":3,"genesis_time":1692803367,"hash":"52db9ba70e0cc0f6eaf7803dd07447a1f5477735fd3f661792ba94600c84e971","groupHash":"f477d5c89f21a17c863a7f937c6a6d15859414d2be09cd448d4279af331c5d3e","schemeID":"bls-unchained-g1-rfc9380","metadata":{"beaconID":"quicknet"}}`, + }, + buildconstants.DrandTestnet: { + Servers: []string{ + "https://pl-eu.testnet.drand.sh", + "https://pl-us.testnet.drand.sh", + }, + Relays: []string{ + "/dnsaddr/pl-eu.testnet.drand.sh/", + "/dnsaddr/pl-us.testnet.drand.sh/", + }, + IsChained: true, + ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`, + }, + buildconstants.DrandDevnet: { + Servers: []string{ + "https://dev1.drand.sh", + "https://dev2.drand.sh", + }, + Relays: []string{ + "/dnsaddr/dev1.drand.sh/", + "/dnsaddr/dev2.drand.sh/", + }, + IsChained: true, + ChainInfoJSON: 
`{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`, + }, + buildconstants.DrandIncentinet: { + IsChained: true, + ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`, + }, +} func DrandConfigSchedule() dtypes.DrandSchedule { out := dtypes.DrandSchedule{} - for start, network := range DrandSchedule { - out = append(out, dtypes.DrandPoint{Start: start, Config: buildconstants.DrandConfigs[network]}) + for start, network := range buildconstants.DrandSchedule { + out = append(out, dtypes.DrandPoint{Start: start, Config: DrandConfigs[network]}) } sort.Slice(out, func(i, j int) bool { @@ -21,16 +82,3 @@ func DrandConfigSchedule() dtypes.DrandSchedule { return out } - -type DrandEnum = buildconstants.DrandEnum - -const ( - DrandMainnet = buildconstants.DrandMainnet - DrandTestnet = buildconstants.DrandTestnet - DrandDevnet = buildconstants.DrandDevnet - DrandLocalnet = buildconstants.DrandLocalnet - DrandIncentinet = buildconstants.DrandIncentinet - DrandQuicknet = buildconstants.DrandQuicknet -) - -var DrandConfigs = buildconstants.DrandConfigs diff --git a/build/params_testground_vals.go b/build/params_testground_vals.go index 8b5c140a3a1..a032e5fad48 100644 --- a/build/params_testground_vals.go +++ b/build/params_testground_vals.go @@ -3,8 +3,18 @@ package build -import "github.com/filecoin-project/lotus/chain/actors/policy" +import ( + "github.com/filecoin-project/lotus/build/buildconstants" + "github.com/filecoin-project/lotus/chain/actors/policy" +) 
// Actor consts // TODO: pieceSize unused from actors var MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0) + +func init() { + policy.SetSupportedProofTypes(buildconstants.SupportedProofTypes...) + policy.SetConsensusMinerMinPower(buildconstants.ConsensusMinerMinPower) + policy.SetMinVerifiedDealSize(buildconstants.MinVerifiedDealSize) + policy.SetPreCommitChallengeDelay(buildconstants.PreCommitChallengeDelay) +} From 8d8894aa7d3debe0f31ea91d7c09fa1a45785485 Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Thu, 20 Jun 2024 17:54:21 -0500 Subject: [PATCH 10/27] clean up curio further --- .github/workflows/test.yml | 3 +- Dockerfile | 4 - Dockerfile.curio | 95 ---- api/docgen/docgen.go | 4 - api/version.go | 2 - build/buildconstants/shared_funcs.go | 3 +- cli/spcli/createminer/create_miner.go | 7 +- documentation/en/api-v0-methods-curio.md | 369 --------------- documentation/en/cli-curio.md | 576 ----------------------- scripts/generate-lotus-cli.py | 1 - 10 files changed, 6 insertions(+), 1058 deletions(-) delete mode 100644 Dockerfile.curio delete mode 100644 documentation/en/api-v0-methods-curio.md diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b199fd2201f..ada811f955e 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -63,7 +63,6 @@ jobs: "itest-deals_concurrent": ["self-hosted", "linux", "x64", "4xlarge"], "itest-sector_pledge": ["self-hosted", "linux", "x64", "4xlarge"], "itest-worker": ["self-hosted", "linux", "x64", "4xlarge"], - "itest-curio": ["self-hosted", "linux", "x64", "4xlarge"], "itest-gateway": ["self-hosted", "linux", "x64", "2xlarge"], "itest-sector_import_full": ["self-hosted", "linux", "x64", "2xlarge"], @@ -116,7 +115,7 @@ jobs: } # A list of test groups that require YugabyteDB to be running yugabytedb: | - ["itest-harmonydb", "itest-harmonytask", "itest-curio"] + ["itest-harmonydb", "itest-harmonytask"] # A list of test groups that require Proof Parameters to be 
fetched parameters: | [ diff --git a/Dockerfile b/Dockerfile index 5b77b134afb..51a39ed0395 100644 --- a/Dockerfile +++ b/Dockerfile @@ -109,7 +109,6 @@ COPY --from=lotus-builder /opt/filecoin/lotus-wallet /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-gateway /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-miner /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-worker /usr/local/bin/ -COPY --from=lotus-builder /opt/filecoin/curio /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-stats /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-fountain /usr/local/bin/ @@ -118,13 +117,11 @@ RUN mkdir /var/lib/lotus RUN mkdir /var/lib/lotus-miner RUN mkdir /var/lib/lotus-worker RUN mkdir /var/lib/lotus-wallet -RUN mkdir /var/lib/curio RUN chown fc: /var/tmp/filecoin-proof-parameters RUN chown fc: /var/lib/lotus RUN chown fc: /var/lib/lotus-miner RUN chown fc: /var/lib/lotus-worker RUN chown fc: /var/lib/lotus-wallet -RUN chown fc: /var/lib/curio VOLUME /var/tmp/filecoin-proof-parameters @@ -132,7 +129,6 @@ VOLUME /var/lib/lotus VOLUME /var/lib/lotus-miner VOLUME /var/lib/lotus-worker VOLUME /var/lib/lotus-wallet -VOLUME /var/lib/curio EXPOSE 1234 EXPOSE 2345 diff --git a/Dockerfile.curio b/Dockerfile.curio deleted file mode 100644 index ee4ab2f42a9..00000000000 --- a/Dockerfile.curio +++ /dev/null @@ -1,95 +0,0 @@ -##################################### -FROM golang:1.21.7-bullseye AS curio-builder -MAINTAINER Curio Development Team - -RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev - -ENV XDG_CACHE_HOME="/tmp" - -### taken from https://github.com/rust-lang/docker-rust/blob/master/1.63.0/buster/Dockerfile -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH \ - RUST_VERSION=1.63.0 - -RUN set -eux; \ - dpkgArch="$(dpkg --print-architecture)"; \ - case "${dpkgArch##*-}" in \ - 
amd64) rustArch='x86_64-unknown-linux-gnu'; rustupSha256='5cc9ffd1026e82e7fb2eec2121ad71f4b0f044e88bca39207b3f6b769aaa799c' ;; \ - arm64) rustArch='aarch64-unknown-linux-gnu'; rustupSha256='e189948e396d47254103a49c987e7fb0e5dd8e34b200aa4481ecc4b8e41fb929' ;; \ - *) echo >&2 "unsupported architecture: ${dpkgArch}"; exit 1 ;; \ - esac; \ - url="https://static.rust-lang.org/rustup/archive/1.25.1/${rustArch}/rustup-init"; \ - wget "$url"; \ - echo "${rustupSha256} *rustup-init" | sha256sum -c -; \ - chmod +x rustup-init; \ - ./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION --default-host ${rustArch}; \ - rm rustup-init; \ - chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \ - rustup --version; \ - cargo --version; \ - rustc --version; - -COPY ./ /opt/curio -WORKDIR /opt/curio - -### make configurable filecoin-ffi build -ARG FFI_BUILD_FROM_SOURCE=0 -ENV FFI_BUILD_FROM_SOURCE=${FFI_BUILD_FROM_SOURCE} - -RUN make clean deps - -ARG RUSTFLAGS="" -ARG GOFLAGS="" - -RUN make curio-devnet - -##################################### -FROM ubuntu:22.04 AS curio-all-in-one - -RUN apt-get update && apt-get install -y dnsutils vim curl - -# Copy libraries and binaries from curio-builder -COPY --from=curio-builder /etc/ssl/certs /etc/ssl/certs -COPY --from=curio-builder /lib/*/libdl.so.2 /lib/ -COPY --from=curio-builder /lib/*/librt.so.1 /lib/ -COPY --from=curio-builder /lib/*/libgcc_s.so.1 /lib/ -COPY --from=curio-builder /lib/*/libutil.so.1 /lib/ -COPY --from=curio-builder /usr/lib/*/libltdl.so.7 /lib/ -COPY --from=curio-builder /usr/lib/*/libnuma.so.1 /lib/ -COPY --from=curio-builder /usr/lib/*/libhwloc.so.* /lib/ -COPY --from=curio-builder /usr/lib/*/libOpenCL.so.1 /lib/ - -# Setup user and OpenCL configuration -RUN useradd -r -u 532 -U fc && \ - mkdir -p /etc/OpenCL/vendors && \ - echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd - -# Environment setup -ENV FILECOIN_PARAMETER_CACHE=/var/tmp/filecoin-proof-parameters \ - 
LOTUS_MINER_PATH=/var/lib/lotus-miner \ - LOTUS_PATH=/var/lib/lotus \ - CURIO_REPO_PATH=/var/lib/curio - -# Copy binaries and scripts -COPY --from=curio-builder /opt/curio/lotus /usr/local/bin/ -COPY --from=curio-builder /opt/curio/lotus-seed /usr/local/bin/ -COPY --from=curio-builder /opt/curio/lotus-shed /usr/local/bin/ -COPY --from=curio-builder /opt/curio/lotus-miner /usr/local/bin/ -COPY --from=curio-builder /opt/curio/curio /usr/local/bin/ -COPY --from=curio-builder /opt/curio/sptool /usr/local/bin/ - -# Set up directories and permissions -RUN mkdir /var/tmp/filecoin-proof-parameters \ - /var/lib/lotus \ - /var/lib/lotus-miner \ - /var/lib/curio && \ - chown fc: /var/tmp/filecoin-proof-parameters /var/lib/lotus /var/lib/lotus-miner /var/lib/curio - -# Define volumes -VOLUME ["/var/tmp/filecoin-proof-parameters", "/var/lib/lotus", "/var/lib/lotus-miner", "/var/lib/curio"] - -# Expose necessary ports -EXPOSE 1234 2345 12300 4701 32100 - -CMD ["/bin/bash"] \ No newline at end of file diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 2d34a0903cc..5fd70562b50 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -456,10 +456,6 @@ func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []r i = &api.GatewayStruct{} t = reflect.TypeOf(new(struct{ api.Gateway })).Elem() permStruct = append(permStruct, reflect.TypeOf(api.GatewayStruct{}.Internal)) - case "Curio": - i = &api.CurioStruct{} - t = reflect.TypeOf(new(struct{ api.Curio })).Elem() - permStruct = append(permStruct, reflect.TypeOf(api.CurioStruct{}.Internal)) default: panic("unknown type") } diff --git a/api/version.go b/api/version.go index 124f53dabfb..9c2113578f1 100644 --- a/api/version.go +++ b/api/version.go @@ -59,8 +59,6 @@ var ( MinerAPIVersion0 = newVer(1, 5, 0) WorkerAPIVersion0 = newVer(1, 7, 0) - - CurioAPIVersion0 = newVer(1, 0, 0) ) //nolint:varcheck,deadcode diff --git a/build/buildconstants/shared_funcs.go b/build/buildconstants/shared_funcs.go index 
13682879e93..c440e78e6e9 100644 --- a/build/buildconstants/shared_funcs.go +++ b/build/buildconstants/shared_funcs.go @@ -1,9 +1,10 @@ package buildconstants import ( - "github.com/filecoin-project/go-address" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/go-address" ) // moved from now-defunct build/paramfetch.go diff --git a/cli/spcli/createminer/create_miner.go b/cli/spcli/createminer/create_miner.go index 0743527ebf6..f92369a832e 100644 --- a/cli/spcli/createminer/create_miner.go +++ b/cli/spcli/createminer/create_miner.go @@ -5,18 +5,17 @@ import ( "context" "fmt" + "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - "golang.org/x/xerrors" - power2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/power" power6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/power" - lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/chain/actors" + lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/types" ) diff --git a/documentation/en/api-v0-methods-curio.md b/documentation/en/api-v0-methods-curio.md deleted file mode 100644 index 0bfe09af5cb..00000000000 --- a/documentation/en/api-v0-methods-curio.md +++ /dev/null @@ -1,369 +0,0 @@ -# Groups -* [](#) - * [Shutdown](#Shutdown) - * [Version](#Version) -* [Allocate](#Allocate) - * [AllocatePieceToSector](#AllocatePieceToSector) -* [Log](#Log) - * [LogList](#LogList) - * [LogSetLevel](#LogSetLevel) -* [Storage](#Storage) - * [StorageAddLocal](#StorageAddLocal) - * [StorageDetachLocal](#StorageDetachLocal) - * [StorageFindSector](#StorageFindSector) - * [StorageInfo](#StorageInfo) - * [StorageInit](#StorageInit) - * 
[StorageList](#StorageList) - * [StorageLocal](#StorageLocal) - * [StorageStat](#StorageStat) -## - - -### Shutdown -Trigger shutdown - - -Perms: admin - -Inputs: `null` - -Response: `{}` - -### Version - - -Perms: admin - -Inputs: `null` - -Response: `131840` - -## Allocate - - -### AllocatePieceToSector - - -Perms: write - -Inputs: -```json -[ - "f01234", - { - "PublishCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "DealID": 5432, - "DealProposal": { - "PieceCID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "PieceSize": 1032, - "VerifiedDeal": true, - "Client": "f01234", - "Provider": "f01234", - "Label": "", - "StartEpoch": 10101, - "EndEpoch": 10101, - "StoragePricePerEpoch": "0", - "ProviderCollateral": "0", - "ClientCollateral": "0" - }, - "DealSchedule": { - "StartEpoch": 10101, - "EndEpoch": 10101 - }, - "PieceActivationManifest": { - "CID": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Size": 2032, - "VerifiedAllocationKey": null, - "Notify": null - }, - "KeepUnsealed": true - }, - 9, - { - "Scheme": "string value", - "Opaque": "string value", - "User": {}, - "Host": "string value", - "Path": "string value", - "RawPath": "string value", - "OmitHost": true, - "ForceQuery": true, - "RawQuery": "string value", - "Fragment": "string value", - "RawFragment": "string value" - }, - { - "Authorization": [ - "Bearer ey.." 
- ] - } -] -``` - -Response: -```json -{ - "Sector": 9, - "Offset": 1032 -} -``` - -## Log - - -### LogList - - -Perms: read - -Inputs: `null` - -Response: -```json -[ - "string value" -] -``` - -### LogSetLevel - - -Perms: admin - -Inputs: -```json -[ - "string value", - "string value" -] -``` - -Response: `{}` - -## Storage - - -### StorageAddLocal - - -Perms: admin - -Inputs: -```json -[ - "string value" -] -``` - -Response: `{}` - -### StorageDetachLocal - - -Perms: admin - -Inputs: -```json -[ - "string value" -] -``` - -Response: `{}` - -### StorageFindSector - - -Perms: admin - -Inputs: -```json -[ - { - "Miner": 1000, - "Number": 9 - }, - 1, - 34359738368, - true -] -``` - -Response: -```json -[ - { - "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", - "URLs": [ - "string value" - ], - "BaseURLs": [ - "string value" - ], - "Weight": 42, - "CanSeal": true, - "CanStore": true, - "Primary": true, - "AllowTypes": [ - "string value" - ], - "DenyTypes": [ - "string value" - ], - "AllowMiners": [ - "string value" - ], - "DenyMiners": [ - "string value" - ] - } -] -``` - -### StorageInfo - - -Perms: admin - -Inputs: -```json -[ - "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8" -] -``` - -Response: -```json -{ - "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", - "URLs": [ - "string value" - ], - "Weight": 42, - "MaxStorage": 42, - "CanSeal": true, - "CanStore": true, - "Groups": [ - "string value" - ], - "AllowTo": [ - "string value" - ], - "AllowTypes": [ - "string value" - ], - "DenyTypes": [ - "string value" - ], - "AllowMiners": [ - "string value" - ], - "DenyMiners": [ - "string value" - ] -} -``` - -### StorageInit - - -Perms: admin - -Inputs: -```json -[ - "string value", - { - "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", - "Weight": 42, - "CanSeal": true, - "CanStore": true, - "MaxStorage": 42, - "Groups": [ - "string value" - ], - "AllowTo": [ - "string value" - ], - "AllowTypes": [ - "string value" - ], - "DenyTypes": [ - "string value" - ], - "AllowMiners": [ - "string 
value" - ], - "DenyMiners": [ - "string value" - ] - } -] -``` - -Response: `{}` - -### StorageList - - -Perms: admin - -Inputs: `null` - -Response: -```json -{ - "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": [ - { - "Miner": 1000, - "Number": 100, - "SectorFileType": 2 - } - ] -} -``` - -### StorageLocal - - -Perms: admin - -Inputs: `null` - -Response: -```json -{ - "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path" -} -``` - -### StorageStat - - -Perms: admin - -Inputs: -```json -[ - "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8" -] -``` - -Response: -```json -{ - "Capacity": 9, - "Available": 9, - "FSAvailable": 9, - "Reserved": 9, - "Max": 9, - "Used": 9 -} -``` - diff --git a/documentation/en/cli-curio.md b/documentation/en/cli-curio.md index a24ef4d292c..9942febc61b 100644 --- a/documentation/en/cli-curio.md +++ b/documentation/en/cli-curio.md @@ -1,579 +1,3 @@ # curio ``` -NAME: - curio - Filecoin decentralized storage network provider - -USAGE: - curio [global options] command [command options] [arguments...] - -VERSION: - 1.27.1-dev - -COMMANDS: - cli Execute cli commands - run Start a Curio process - stop Stop a running Curio process - config Manage node config by layers. The layer 'base' will always be applied at Curio start-up. 
- test Utility functions for testing - web Start Curio web interface - guided-setup Run the guided setup for migrating from lotus-miner to Curio or Creating a new Curio miner - seal Manage the sealing pipeline - market - fetch-params Fetch proving parameters - help, h Shows a list of commands or help for one command - -GLOBAL OPTIONS: - --color use color in display output (default: depends on output being a TTY) - --db-host value Command separated list of hostnames for yugabyte cluster (default: "127.0.0.1") [$CURIO_DB_HOST, $CURIO_HARMONYDB_HOSTS] - --db-name value (default: "yugabyte") [$CURIO_DB_NAME, $CURIO_HARMONYDB_NAME] - --db-user value (default: "yugabyte") [$CURIO_DB_USER, $CURIO_HARMONYDB_USERNAME] - --db-password value (default: "yugabyte") [$CURIO_DB_PASSWORD, $CURIO_HARMONYDB_PASSWORD] - --db-port value (default: "5433") [$CURIO_DB_PORT, $CURIO_HARMONYDB_PORT] - --repo-path value (default: "~/.curio") [$CURIO_REPO_PATH] - --vv enables very verbose mode, useful for debugging the CLI (default: false) - --help, -h show help - --version, -v print the version -``` - -## curio cli -``` -NAME: - curio cli - Execute cli commands - -USAGE: - curio cli command [command options] [arguments...] - -COMMANDS: - storage manage sector storage - log Manage logging - wait-api Wait for Curio api to come online - help, h Shows a list of commands or help for one command - -OPTIONS: - --machine value machine host:port (curio run --listen address) - --help, -h show help -``` - -### curio cli storage -``` -NAME: - curio cli storage - manage sector storage - -USAGE: - curio cli storage command [command options] [arguments...] - -DESCRIPTION: - Sectors can be stored across many filesystem paths. These - commands provide ways to manage the storage the miner will used to store sectors - long term for proving (references as 'store') as well as how sectors will be - stored while moving through the sealing pipeline (references as 'seal'). 
- -COMMANDS: - attach attach local storage path - detach detach local storage path - list list local storage paths - find find sector in the storage system - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -#### curio cli storage attach -``` -NAME: - curio cli storage attach - attach local storage path - -USAGE: - curio cli storage attach [command options] [path] - -DESCRIPTION: - Storage can be attached to the miner using this command. The storage volume - list is stored local to the miner in storage.json set in curio run. We do not - recommend manually modifying this value without further understanding of the - storage system. - - Each storage volume contains a configuration file which describes the - capabilities of the volume. When the '--init' flag is provided, this file will - be created using the additional flags. - - Weight - A high weight value means data will be more likely to be stored in this path - - Seal - Data for the sealing process will be stored here - - Store - Finalized sectors that will be moved here for long term storage and be proven - over time - - -OPTIONS: - --init initialize the path first (default: false) - --weight value (for init) path weight (default: 10) - --seal (for init) use path for sealing (default: false) - --store (for init) use path for long-term storage (default: false) - --max-storage value (for init) limit storage space for sectors (expensive for very large paths!) 
- --groups value [ --groups value ] path group names - --allow-to value [ --allow-to value ] path groups allowed to pull data from this path (allow all if not specified) - --help, -h show help -``` - -#### curio cli storage detach -``` -NAME: - curio cli storage detach - detach local storage path - -USAGE: - curio cli storage detach [command options] [path] - -OPTIONS: - --really-do-it (default: false) - --help, -h show help -``` - -#### curio cli storage list -``` -NAME: - curio cli storage list - list local storage paths - -USAGE: - curio cli storage list [command options] [arguments...] - -OPTIONS: - --local only list local storage paths (default: false) - --help, -h show help -``` - -#### curio cli storage find -``` -NAME: - curio cli storage find - find sector in the storage system - -USAGE: - curio cli storage find [command options] [miner address] [sector number] - -OPTIONS: - --help, -h show help -``` - -### curio cli log -``` -NAME: - curio cli log - Manage logging - -USAGE: - curio cli log command [command options] [arguments...] - -COMMANDS: - list List log systems - set-level Set log level - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -#### curio cli log list -``` -NAME: - curio cli log list - List log systems - -USAGE: - curio cli log list [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - -#### curio cli log set-level -``` -NAME: - curio cli log set-level - Set log level - -USAGE: - curio cli log set-level [command options] [level] - -DESCRIPTION: - Set the log level for logging systems: - - The system flag can be specified multiple times. 
- - eg) log set-level --system chain --system chainxchg debug - - Available Levels: - debug - info - warn - error - - Environment Variables: - GOLOG_LOG_LEVEL - Default log level for all log systems - GOLOG_LOG_FMT - Change output log format (json, nocolor) - GOLOG_FILE - Write logs to file - GOLOG_OUTPUT - Specify whether to output to file, stderr, stdout or a combination, i.e. file+stderr - - -OPTIONS: - --system value [ --system value ] limit to log system - --help, -h show help -``` - -### curio cli wait-api -``` -NAME: - curio cli wait-api - Wait for Curio api to come online - -USAGE: - curio cli wait-api [command options] [arguments...] - -OPTIONS: - --timeout value duration to wait till fail (default: 30s) - --help, -h show help -``` - -## curio run -``` -NAME: - curio run - Start a Curio process - -USAGE: - curio run [command options] [arguments...] - -OPTIONS: - --listen value host address and port the worker api will listen on (default: "0.0.0.0:12300") [$CURIO_LISTEN] - --nosync don't check full-node sync status (default: false) - --manage-fdlimit manage open file limit (default: true) - --storage-json value path to json file containing storage config (default: "~/.curio/storage.json") - --journal value path to journal files (default: "~/.curio/") - --layers value, -l value, --layer value [ --layers value, -l value, --layer value ] list of layers to be interpreted (atop defaults). Default: base [$CURIO_LAYERS] - --help, -h show help -``` - -## curio stop -``` -NAME: - curio stop - Stop a running Curio process - -USAGE: - curio stop [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - -## curio config -``` -NAME: - curio config - Manage node config by layers. The layer 'base' will always be applied at Curio start-up. - -USAGE: - curio config command [command options] [arguments...] - -COMMANDS: - default, defaults Print default node config - set, add, update, create Set a config layer or the base by providing a filename or stdin. 
- get, cat, show Get a config layer by name. You may want to pipe the output to a file, or use 'less' - list, ls List config layers present in the DB. - interpret, view, stacked, stack Interpret stacked config layers by this version of curio, with system-generated comments. - remove, rm, del, delete Remove a named config layer. - edit edit a config layer - new-cluster Create new configuration for a new cluster - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### curio config default -``` -NAME: - curio config default - Print default node config - -USAGE: - curio config default [command options] [arguments...] - -OPTIONS: - --no-comment don't comment default values (default: false) - --help, -h show help -``` - -### curio config set -``` -NAME: - curio config set - Set a config layer or the base by providing a filename or stdin. - -USAGE: - curio config set [command options] a layer's file name - -OPTIONS: - --title value title of the config layer (req'd for stdin) - --help, -h show help -``` - -### curio config get -``` -NAME: - curio config get - Get a config layer by name. You may want to pipe the output to a file, or use 'less' - -USAGE: - curio config get [command options] layer name - -OPTIONS: - --help, -h show help -``` - -### curio config list -``` -NAME: - curio config list - List config layers present in the DB. - -USAGE: - curio config list [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - -### curio config interpret -``` -NAME: - curio config interpret - Interpret stacked config layers by this version of curio, with system-generated comments. 
- -USAGE: - curio config interpret [command options] a list of layers to be interpreted as the final config - -OPTIONS: - --layers value [ --layers value ] comma or space separated list of layers to be interpreted (base is always applied) - --help, -h show help -``` - -### curio config remove -``` -NAME: - curio config remove - Remove a named config layer. - -USAGE: - curio config remove [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - -### curio config edit -``` -NAME: - curio config edit - edit a config layer - -USAGE: - curio config edit [command options] [layer name] - -OPTIONS: - --editor value editor to use (default: "vim") [$EDITOR] - --source value source config layer (default: ) - --allow-overwrite allow overwrite of existing layer if source is a different layer (default: false) - --no-source-diff save the whole config into the layer, not just the diff (default: false) - --no-interpret-source do not interpret source layer (default: true if --source is set) - --help, -h show help -``` - -### curio config new-cluster -``` -NAME: - curio config new-cluster - Create new configuration for a new cluster - -USAGE: - curio config new-cluster [command options] [SP actor address...] - -OPTIONS: - --help, -h show help -``` - -## curio test -``` -NAME: - curio test - Utility functions for testing - -USAGE: - curio test command [command options] [arguments...] - -COMMANDS: - window-post, wd, windowpost, wdpost Compute a proof-of-spacetime for a sector (requires the sector to be pre-sealed). These will not send to the chain. - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### curio test window-post -``` -NAME: - curio test window-post - Compute a proof-of-spacetime for a sector (requires the sector to be pre-sealed). These will not send to the chain. - -USAGE: - curio test window-post command [command options] [arguments...] 
- -COMMANDS: - here, cli Compute WindowPoSt for performance and configuration testing. - task, scheduled, schedule, async, asynchronous Test the windowpost scheduler by running it on the next available curio. - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -#### curio test window-post here -``` -NAME: - curio test window-post here - Compute WindowPoSt for performance and configuration testing. - -USAGE: - curio test window-post here [command options] [deadline index] - -DESCRIPTION: - Note: This command is intended to be used to verify PoSt compute performance. - It will not send any messages to the chain. Since it can compute any deadline, output may be incorrectly timed for the chain. - -OPTIONS: - --deadline value deadline to compute WindowPoSt for (default: 0) - --layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base - --storage-json value path to json file containing storage config (default: "~/.curio/storage.json") - --partition value partition to compute WindowPoSt for (default: 0) - --help, -h show help -``` - -#### curio test window-post task -``` -NAME: - curio test window-post task - Test the windowpost scheduler by running it on the next available curio. - -USAGE: - curio test window-post task [command options] [arguments...] - -OPTIONS: - --deadline value deadline to compute WindowPoSt for (default: 0) - --layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base - --help, -h show help -``` - -## curio web -``` -NAME: - curio web - Start Curio web interface - -USAGE: - curio web [command options] [arguments...] - -DESCRIPTION: - Start an instance of Curio web interface. - This creates the 'web' layer if it does not exist, then calls run with that layer. 
- -OPTIONS: - --gui-listen value Address to listen for the GUI on (default: "0.0.0.0:4701") - --nosync don't check full-node sync status (default: false) - --layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base - --help, -h show help -``` - -## curio guided-setup -``` -NAME: - curio guided-setup - Run the guided setup for migrating from lotus-miner to Curio or Creating a new Curio miner - -USAGE: - curio guided-setup [command options] [arguments...] - -OPTIONS: - --help, -h show help -``` - -## curio seal -``` -NAME: - curio seal - Manage the sealing pipeline - -USAGE: - curio seal command [command options] [arguments...] - -COMMANDS: - start Start new sealing operations manually - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### curio seal start -``` -NAME: - curio seal start - Start new sealing operations manually - -USAGE: - curio seal start [command options] [arguments...] - -OPTIONS: - --actor value Specify actor address to start sealing sectors for - --now Start sealing sectors for all actors now (not on schedule) (default: false) - --cc Start sealing new CC sectors (default: false) - --count value Number of sectors to start (default: 1) - --synthetic Use synthetic PoRep (default: false) - --layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base - --help, -h show help -``` - -## curio market -``` -NAME: - curio market - -USAGE: - curio market command [command options] [arguments...] - -COMMANDS: - rpc-info - seal start sealing a deal sector early - help, h Shows a list of commands or help for one command - -OPTIONS: - --help, -h show help -``` - -### curio market rpc-info -``` -NAME: - curio market rpc-info - -USAGE: - curio market rpc-info [command options] [arguments...] - -OPTIONS: - --layers value [ --layers value ] list of layers to be interpreted (atop defaults). 
Default: base - --help, -h show help -``` - -### curio market seal -``` -NAME: - curio market seal - start sealing a deal sector early - -USAGE: - curio market seal [command options] [arguments...] - -OPTIONS: - --actor value Specify actor address to start sealing sectors for - --layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base - --synthetic Use synthetic PoRep (default: false) - --help, -h show help -``` - -## curio fetch-params -``` -NAME: - curio fetch-params - Fetch proving parameters - -USAGE: - curio fetch-params [command options] [sectorSize] - -OPTIONS: - --help, -h show help ``` diff --git a/scripts/generate-lotus-cli.py b/scripts/generate-lotus-cli.py index 14e85cf9f03..69126fa1ac3 100644 --- a/scripts/generate-lotus-cli.py +++ b/scripts/generate-lotus-cli.py @@ -58,5 +58,4 @@ def get_cmd_recursively(cur_cmd): generate_lotus_cli('lotus') generate_lotus_cli('lotus-miner') generate_lotus_cli('lotus-worker') - generate_lotus_cli('curio') generate_lotus_cli('sptool') From ee8226ec8866611f47143199c6b6633b0ede0d2b Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Thu, 20 Jun 2024 18:13:56 -0500 Subject: [PATCH 11/27] aussie waffle --- build/parameters.go | 2 +- chain/types/ethtypes/eth_transactions.go | 100 +---------------------- 2 files changed, 2 insertions(+), 100 deletions(-) diff --git a/build/parameters.go b/build/parameters.go index 20baaa3d2df..627b0442c57 100644 --- a/build/parameters.go +++ b/build/parameters.go @@ -54,7 +54,7 @@ var UpgradeThunderHeight abi.ChainEpoch = buildconstants.UpgradeThunderHeight var UpgradeWatermelonHeight abi.ChainEpoch = buildconstants.UpgradeWatermelonHeight // Deprecated: Use buildconstants.UpgradeWatermelonHeight instead var UpgradeDragonHeight abi.ChainEpoch = buildconstants.UpgradeDragonHeight // Deprecated: Use buildconstants.UpgradeDragonHeight instead var UpgradePhoenixHeight abi.ChainEpoch = buildconstants.UpgradePhoenixHeight // Deprecated: Use 
buildconstants.UpgradePhoenixHeight instead -var UpgradeAussieHeight abi.ChainEpoch = buildconstants.UpgradeAussieHeight // Deprecated: Use buildconstants.UpgradeAussieHeight instead +var UpgradeWaffleHeight abi.ChainEpoch = buildconstants.UpgradeWaffleHeight // Deprecated: Use buildconstants.UpgradeWaffleHeight instead // This fix upgrade only ran on calibrationnet var UpgradeWatermelonFixHeight abi.ChainEpoch = buildconstants.UpgradeWatermelonFixHeight // Deprecated: Use buildconstants.UpgradeWatermelonFixHeight instead diff --git a/chain/types/ethtypes/eth_transactions.go b/chain/types/ethtypes/eth_transactions.go index a1ef2b3757e..d1713248a6d 100644 --- a/chain/types/ethtypes/eth_transactions.go +++ b/chain/types/ethtypes/eth_transactions.go @@ -9,7 +9,6 @@ import ( cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/crypto/sha3" - "golang.org/x/xerrors" "github.com/filecoin-project/go-address" gocrypto "github.com/filecoin-project/go-crypto" @@ -19,7 +18,6 @@ import ( typescrypto "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/types" ) @@ -189,103 +187,7 @@ func EthTransactionFromSignedFilecoinMessage(smsg *types.SignedMessage) (EthTran } } -func EthTxArgsFromUnsignedEthMessage(msg *types.Message) (EthTxArgs, error) { - var ( - to *EthAddress - params []byte - err error - ) - - if msg.Version != 0 { - return EthTxArgs{}, xerrors.Errorf("unsupported msg version: %d", msg.Version) - } - - if len(msg.Params) > 0 { - paramsReader := bytes.NewReader(msg.Params) - params, err = cbg.ReadByteArray(paramsReader, uint64(len(msg.Params))) - if err != nil { - return EthTxArgs{}, xerrors.Errorf("failed to read params byte array: %w", err) - } - if paramsReader.Len() != 0 { - return EthTxArgs{}, xerrors.Errorf("extra data found in params") - } - if len(params) == 0 { - return EthTxArgs{}, xerrors.Errorf("non-empty params encode 
empty byte array") - } - } - - if msg.To == builtintypes.EthereumAddressManagerActorAddr { - if msg.Method != builtintypes.MethodsEAM.CreateExternal { - return EthTxArgs{}, fmt.Errorf("unsupported EAM method") - } - } else if msg.Method == builtintypes.MethodsEVM.InvokeContract { - addr, err := EthAddressFromFilecoinAddress(msg.To) - if err != nil { - return EthTxArgs{}, err - } - to = &addr - } else { - return EthTxArgs{}, - xerrors.Errorf("invalid methodnum %d: only allowed method is InvokeContract(%d)", - msg.Method, builtintypes.MethodsEVM.InvokeContract) - } - - return EthTxArgs{ - ChainID: buildconstants.Eip155ChainId, - Nonce: int(msg.Nonce), - To: to, - Value: msg.Value, - Input: params, - MaxFeePerGas: msg.GasFeeCap, - MaxPriorityFeePerGas: msg.GasPremium, - GasLimit: int(msg.GasLimit), - }, nil -} - -func (tx *EthTxArgs) ToUnsignedMessage(from address.Address) (*types.Message, error) { - if tx.ChainID != buildconstants.Eip155ChainId { - return nil, xerrors.Errorf("unsupported chain id: %d", tx.ChainID) - } - - var err error - var params []byte - if len(tx.Input) > 0 { - buf := new(bytes.Buffer) - if err = cbg.WriteByteArray(buf, tx.Input); err != nil { - return nil, xerrors.Errorf("failed to write input args: %w", err) - } - params = buf.Bytes() - } - - var to address.Address - var method abi.MethodNum - // nil indicates the EAM, only CreateExternal is allowed - if tx.To == nil { - method = builtintypes.MethodsEAM.CreateExternal - to = builtintypes.EthereumAddressManagerActorAddr - } else { - method = builtintypes.MethodsEVM.InvokeContract - to, err = tx.To.ToFilecoinAddress() - if err != nil { - return nil, xerrors.Errorf("failed to convert To into filecoin addr: %w", err) - } - } - - return &types.Message{ - Version: 0, - To: to, - From: from, - Nonce: uint64(tx.Nonce), - Value: tx.Value, - GasLimit: int64(tx.GasLimit), - GasFeeCap: tx.MaxFeePerGas, - GasPremium: tx.MaxPriorityFeePerGas, - Method: method, - Params: params, - }, nil -} - -func (tx 
*EthTxArgs) ToSignedMessage() (*types.SignedMessage, error) { +func ToSignedFilecoinMessage(tx EthTransaction) (*types.SignedMessage, error) { from, err := tx.Sender() if err != nil { return nil, fmt.Errorf("failed to calculate sender: %w", err) From f1a6572ac1f928ca42ab20475fc676e67bc1c8a0 Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Thu, 20 Jun 2024 22:47:37 -0500 Subject: [PATCH 12/27] pr fixes --- build/drand.go | 7 +++++++ go.mod | 2 -- go.sum | 8 -------- itests/kit/ensemble.go | 16 +++++++++------- 4 files changed, 16 insertions(+), 17 deletions(-) diff --git a/build/drand.go b/build/drand.go index 18d2204deb0..9b54be057e7 100644 --- a/build/drand.go +++ b/build/drand.go @@ -7,6 +7,13 @@ import ( "github.com/filecoin-project/lotus/node/modules/dtypes" ) +var DrandMainnet = buildconstants.DrandMainnet +var DrandTestnet = buildconstants.DrandTestnet +var DrandDevnet = buildconstants.DrandDevnet +var DrandLocalnet = buildconstants.DrandLocalnet +var DrandIncentinet = buildconstants.DrandIncentinet +var DrandQuicknet = buildconstants.DrandQuicknet + var DrandConfigs = map[buildconstants.DrandEnum]dtypes.DrandConfig{ buildconstants.DrandMainnet: { Servers: []string{ diff --git a/go.mod b/go.mod index d9664ee07b6..08d462377d7 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,6 @@ require ( github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921 - github.com/buger/goterm v1.0.3 github.com/chzyer/readline v1.5.1 github.com/containerd/cgroups v1.1.0 github.com/coreos/go-systemd/v22 v22.5.0 @@ -168,7 +167,6 @@ require ( github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/akavel/rsrc v0.8.0 // indirect - github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect 
github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect diff --git a/go.sum b/go.sum index 433c99e834f..d1d53543052 100644 --- a/go.sum +++ b/go.sum @@ -93,12 +93,6 @@ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYU github.com/ardanlabs/darwin/v2 v2.0.0 h1:XCisQMgQ5EG+ZvSEcADEo+pyfIMKyWAGnn5o2TgriYE= github.com/ardanlabs/darwin/v2 v2.0.0/go.mod h1:MubZ2e9DAYGaym0mClSOi183NYahrrfKxvSy1HMhoes= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -141,7 +135,6 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/cespare/xxhash/v2 v2.3.0 
h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -901,7 +894,6 @@ github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm github.com/magefile/mage v1.9.0 h1:t3AU2wNwehMCW97vuqQLtw6puppWXHO+O2MHo5a50XE= github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go index d61121821fd..862953a97e5 100644 --- a/itests/kit/ensemble.go +++ b/itests/kit/ensemble.go @@ -119,15 +119,17 @@ type Ensemble struct { options *ensembleOpts inactive struct { - fullnodes []*TestFullNode - miners []*TestMiner - workers []*TestWorker + fullnodes []*TestFullNode + miners []*TestMiner + workers []*TestWorker + unmanagedMiners []*TestUnmanagedMiner } active struct { - fullnodes []*TestFullNode - miners []*TestMiner - workers []*TestWorker - bms map[*TestMiner]*BlockMiner + fullnodes []*TestFullNode + miners []*TestMiner + workers []*TestWorker + bms map[*TestMiner]*BlockMiner + unmanagedMiners []*TestUnmanagedMiner } genesis struct { version network.Version From fc36dcc5daffdbe89db0e809cb63f285d43a21c6 Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Fri, 21 Jun 2024 16:03:39 -0500 Subject: [PATCH 13/27] fix lints --- chain/types_test.go 
| 6 +++--- cmd/tvx/codenames_test.go | 4 ++-- itests/eth_hash_lookup_test.go | 3 ++- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/chain/types_test.go b/chain/types_test.go index 0fb3992146e..692c932a575 100644 --- a/chain/types_test.go +++ b/chain/types_test.go @@ -8,7 +8,7 @@ import ( "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/types" ) @@ -43,7 +43,7 @@ func TestSignedMessageJsonRoundtrip(t *testing.T) { func TestAddressType(t *testing.T) { //stm: @CHAIN_TYPES_ADDRESS_PREFIX_001 - build.SetAddressNetwork(address.Testnet) + buildconstants.SetAddressNetwork(address.Testnet) addr, err := makeRandomAddress() if err != nil { t.Fatal(err) @@ -53,7 +53,7 @@ func TestAddressType(t *testing.T) { t.Fatalf("address should start with %s", address.TestnetPrefix) } - build.SetAddressNetwork(address.Mainnet) + buildconstants.SetAddressNetwork(address.Mainnet) addr, err = makeRandomAddress() if err != nil { t.Fatal(err) diff --git a/cmd/tvx/codenames_test.go b/cmd/tvx/codenames_test.go index 46d8466ecb2..e7476b1243a 100644 --- a/cmd/tvx/codenames_test.go +++ b/cmd/tvx/codenames_test.go @@ -16,11 +16,11 @@ func TestProtocolCodenames(t *testing.T) { t.Fatal("expected genesis codename") } - if height := abi.ChainEpoch(build.UpgradeBreezeHeight + 1); GetProtocolCodename(height) != "breeze" { + if height := build.UpgradeBreezeHeight + 1; GetProtocolCodename(height) != "breeze" { t.Fatal("expected breeze codename") } - if height := build.UpgradeAssemblyHeight + 1; GetProtocolCodename(abi.ChainEpoch(height)) != "actorsv2" { + if height := build.UpgradeAssemblyHeight + 1; GetProtocolCodename(height) != "actorsv2" { t.Fatal("expected actorsv2 codename") } diff --git a/itests/eth_hash_lookup_test.go b/itests/eth_hash_lookup_test.go index 1610e245826..4f56e05320b 100644 --- a/itests/eth_hash_lookup_test.go +++ 
b/itests/eth_hash_lookup_test.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/itests/kit" @@ -311,7 +312,7 @@ func TestTransactionHashLookupNonexistentMessage(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() - cid := build.MustParseCid("bafk2bzacecapjnxnyw4talwqv5ajbtbkzmzqiosztj5cb3sortyp73ndjl76e") + cid := buildconstants.MustParseCid("bafk2bzacecapjnxnyw4talwqv5ajbtbkzmzqiosztj5cb3sortyp73ndjl76e") // We shouldn't be able to return a hash for this fake cid chainHash, err := client.EthGetTransactionHashByCid(ctx, cid) From 7a1cc4d670b7542dd4efdd2ce4007dd10dad191a Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Sun, 23 Jun 2024 22:24:20 -0500 Subject: [PATCH 14/27] little fixes --- chain/vm/gas.go | 2 +- node/builder.go | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/chain/vm/gas.go b/chain/vm/gas.go index cb0c5def94d..4bd63bf9c08 100644 --- a/chain/vm/gas.go +++ b/chain/vm/gas.go @@ -135,7 +135,7 @@ var Prices = map[abi.ChainEpoch]Pricelist{ verifyPostDiscount: true, verifyConsensusFault: 495422, }, - abi.ChainEpoch(build.UpgradeCalicoHeight): &pricelistV0{ + build.UpgradeCalicoHeight: &pricelistV0{ computeGasMulti: 1, storageGasMulti: 1300, diff --git a/node/builder.go b/node/builder.go index 2ea9dcac55c..71c6e51f94a 100644 --- a/node/builder.go +++ b/node/builder.go @@ -43,6 +43,7 @@ import ( "github.com/filecoin-project/lotus/node/modules/testing" "github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/storage/paths" + "github.com/filecoin-project/lotus/storage/paths/alertinginterface" "github.com/filecoin-project/lotus/system" ) @@ -159,6 +160,7 @@ func defaults() []Option { 
Override(new(journal.DisabledEvents), journal.EnvDisabledEvents), Override(new(journal.Journal), modules.OpenFilesystemJournal), Override(new(*alerting.Alerting), alerting.NewAlertingSystem), + Override(new(alertinginterface.AlertingInterface), alerting.NewAlertingSystem), Override(new(dtypes.NodeStartTime), FromVal(dtypes.NodeStartTime(time.Now()))), Override(CheckFDLimit, modules.CheckFdLimit(build.DefaultFDLimit)), From 3b0fecd6bfdc2e039edb1257a9f5519c05389974 Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Thu, 11 Jul 2024 17:05:47 -0500 Subject: [PATCH 15/27] oops this got updated --- extern/filecoin-ffi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 081367cae7c..e467d2992e3 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 081367cae7cdfe87d8b7240a9c3767ce86a40b05 +Subproject commit e467d2992e3f9bd09beb71ecf84323b45d2a3511 From 12ba6f2961e136287627c13958f3163c15b3b60f Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Thu, 11 Jul 2024 17:12:16 -0500 Subject: [PATCH 16/27] unbreak test builds --- chain/beacon/drand/drand_test.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/chain/beacon/drand/drand_test.go b/chain/beacon/drand/drand_test.go index c35c0da18f5..06ae8ae2c35 100644 --- a/chain/beacon/drand/drand_test.go +++ b/chain/beacon/drand/drand_test.go @@ -15,11 +15,12 @@ import ( "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" ) func TestPrintGroupInfo(t *testing.T) { - server := build.DrandConfigs[build.DrandTestnet].Servers[0] - chainInfo := build.DrandConfigs[build.DrandTestnet].ChainInfoJSON + server := build.DrandConfigs[buildconstants.DrandTestnet].Servers[0] + chainInfo := build.DrandConfigs[buildconstants.DrandTestnet].ChainInfoJSON drandChain, err := 
dchain.InfoFromJSON(bytes.NewReader([]byte(chainInfo))) assert.NoError(t, err) @@ -37,7 +38,7 @@ func TestPrintGroupInfo(t *testing.T) { func TestMaxBeaconRoundForEpoch(t *testing.T) { todayTs := uint64(1652222222) - db, err := NewDrandBeacon(todayTs, build.BlockDelaySecs, nil, build.DrandConfigs[build.DrandTestnet]) + db, err := NewDrandBeacon(todayTs, build.BlockDelaySecs, nil, build.DrandConfigs[buildconstants.DrandTestnet]) assert.NoError(t, err) assert.True(t, db.IsChained()) mbr15 := db.MaxBeaconRoundForEpoch(network.Version15, 100) @@ -47,7 +48,7 @@ func TestMaxBeaconRoundForEpoch(t *testing.T) { func TestQuicknetIsChained(t *testing.T) { todayTs := uint64(1652222222) - db, err := NewDrandBeacon(todayTs, build.BlockDelaySecs, nil, build.DrandConfigs[build.DrandQuicknet]) + db, err := NewDrandBeacon(todayTs, build.BlockDelaySecs, nil, build.DrandConfigs[buildconstants.DrandQuicknet]) assert.NoError(t, err) assert.False(t, db.IsChained()) } From 2217b862836c875c1745b233484a87aac318894c Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Thu, 11 Jul 2024 17:40:13 -0500 Subject: [PATCH 17/27] test fixes --- api/api_full.go | 2 +- build/openrpc/full.json | 2 +- documentation/en/api-v1-unstable-methods.md | 2 +- node/builder.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/api/api_full.go b/api/api_full.go index 1a52334df9f..599e0040fe8 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -870,7 +870,7 @@ type FullNode interface { // newLeaseExpiration cannot be further than 5 minutes in the future. // It is recommended to call F3Participate every 60 seconds // with newLeaseExpiration set 2min into the future. - // The oldLeaseExpiration has to be set to newLeaseExpiration of the last successfull call. + // The oldLeaseExpiration has to be set to newLeaseExpiration of the last successful call. // For the first call to F3Participate, set the oldLeaseExpiration to zero value/time in the past. 
// F3Participate will return true if the lease was accepted. // The minerID has to be the ID address of the miner. diff --git a/build/openrpc/full.json b/build/openrpc/full.json index 01791a98bea..80c82ca75dc 100644 --- a/build/openrpc/full.json +++ b/build/openrpc/full.json @@ -6588,7 +6588,7 @@ { "name": "Filecoin.F3Participate", "description": "```go\nfunc (s *FullNodeStruct) F3Participate(p0 context.Context, p1 address.Address, p2 time.Time, p3 time.Time) (bool, error) {\n\tif s.Internal.F3Participate == nil {\n\t\treturn false, ErrNotSupported\n\t}\n\treturn s.Internal.F3Participate(p0, p1, p2, p3)\n}\n```", - "summary": "F3Participate should be called by a storage provider to participate in signing F3 consensus.\nCalling this API gives the lotus node a lease to sign in F3 on behalf of given SP.\nThe lease should be active only on one node. The lease will expire at the newLeaseExpiration.\nTo continue participating in F3 with the given node, call F3Participate again before\nthe newLeaseExpiration time.\nnewLeaseExpiration cannot be further than 5 minutes in the future.\nIt is recommended to call F3Participate every 60 seconds\nwith newLeaseExpiration set 2min into the future.\nThe oldLeaseExpiration has to be set to newLeaseExpiration of the last successfull call.\nFor the first call to F3Participate, set the oldLeaseExpiration to zero value/time in the past.\nF3Participate will return true if the lease was accepted.\nThe minerID has to be the ID address of the miner.\n", + "summary": "F3Participate should be called by a storage provider to participate in signing F3 consensus.\nCalling this API gives the lotus node a lease to sign in F3 on behalf of given SP.\nThe lease should be active only on one node. 
The lease will expire at the newLeaseExpiration.\nTo continue participating in F3 with the given node, call F3Participate again before\nthe newLeaseExpiration time.\nnewLeaseExpiration cannot be further than 5 minutes in the future.\nIt is recommended to call F3Participate every 60 seconds\nwith newLeaseExpiration set 2min into the future.\nThe oldLeaseExpiration has to be set to newLeaseExpiration of the last successful call.\nFor the first call to F3Participate, set the oldLeaseExpiration to zero value/time in the past.\nF3Participate will return true if the lease was accepted.\nThe minerID has to be the ID address of the miner.\n", "paramStructure": "by-position", "params": [ { diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md index 87538e805f2..ecb9053b248 100644 --- a/documentation/en/api-v1-unstable-methods.md +++ b/documentation/en/api-v1-unstable-methods.md @@ -2331,7 +2331,7 @@ the newLeaseExpiration time. newLeaseExpiration cannot be further than 5 minutes in the future. It is recommended to call F3Participate every 60 seconds with newLeaseExpiration set 2min into the future. -The oldLeaseExpiration has to be set to newLeaseExpiration of the last successfull call. +The oldLeaseExpiration has to be set to newLeaseExpiration of the last successful call. For the first call to F3Participate, set the oldLeaseExpiration to zero value/time in the past. F3Participate will return true if the lease was accepted. The minerID has to be the ID address of the miner. diff --git a/node/builder.go b/node/builder.go index 978336bd133..afc46bbdfa2 100644 --- a/node/builder.go +++ b/node/builder.go @@ -160,7 +160,7 @@ func defaults() []Option { // global system journal. 
Override(new(journal.DisabledEvents), journal.EnvDisabledEvents), Override(new(journal.Journal), modules.OpenFilesystemJournal), - Override(new(*alerting.Alerting), alerting.NewAlertingSystem), + Override(new(alerting.Alerting), alerting.NewAlertingSystem), Override(new(alertinginterface.AlertingInterface), alerting.NewAlertingSystem), Override(new(dtypes.NodeStartTime), FromVal(dtypes.NodeStartTime(time.Now()))), From 795a865f86aaca3896814396af2665418022b2ed Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Thu, 11 Jul 2024 22:41:25 -0500 Subject: [PATCH 18/27] comments - cleanups --- build/buildconstants/params_interop.go | 5 ++++- build/buildconstants/params_mainnet.go | 2 ++ build/params_shared_vals.go | 17 +++++++++-------- build/params_testground_vals.go | 8 -------- build/proof-params/parameters.go | 2 +- build/version.go | 16 ++++++++-------- itests/api_test.go | 4 +--- 7 files changed, 25 insertions(+), 29 deletions(-) diff --git a/build/buildconstants/params_interop.go b/build/buildconstants/params_interop.go index ba61840ef94..c00ac8d9fc2 100644 --- a/build/buildconstants/params_interop.go +++ b/build/buildconstants/params_interop.go @@ -4,10 +4,11 @@ package buildconstants import ( - "log" "os" "strconv" + logging "github.com/ipfs/go-log/v2" + "github.com/ipfs/go-cid" "github.com/filecoin-project/go-address" @@ -16,6 +17,8 @@ import ( builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" ) +var log = logging.Logger("buildconstants") + var NetworkBundle = "caterpillarnet" var ActorDebugging = false diff --git a/build/buildconstants/params_mainnet.go b/build/buildconstants/params_mainnet.go index 2633aa5d4d2..1dfe97afe1a 100644 --- a/build/buildconstants/params_mainnet.go +++ b/build/buildconstants/params_mainnet.go @@ -22,6 +22,8 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ var NetworkBundle = "mainnet" +var MinVerifiedDealSize = abi.NewStoragePower(1 << 20) + // NOTE: DO NOT change this unless you REALLY know what 
you're doing. This is consensus critical. const ActorDebugging = false diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index e94a43dfdbc..6dc3935f16e 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -4,6 +4,7 @@ import ( "math/big" "github.com/filecoin-project/lotus/build/buildconstants" + "github.com/filecoin-project/lotus/chain/actors/policy" ) // ///// @@ -18,7 +19,7 @@ var UnixfsLinksPerLevel = buildconstants.UnixfsLinksPerLevel // Deprecated: Use var AllowableClockDriftSecs = buildconstants.AllowableClockDriftSecs // Deprecated: Use buildconstants.AllowableClockDriftSecs instead // Epochs -var ForkLengthThreshold = Finality // Deprecated: Use Finality instead +const ForkLengthThreshold = Finality // Deprecated: Use Finality instead // Blocks (e) var BlocksPerEpoch = buildconstants.BlocksPerEpoch // Deprecated: Use buildconstants.BlocksPerEpoch instead @@ -26,8 +27,6 @@ var BlocksPerEpoch = buildconstants.BlocksPerEpoch // Deprecated: Use buildconst // Epochs var MessageConfidence = buildconstants.MessageConfidence // Deprecated: Use buildconstants.MessageConfidence instead -// constants for Weight calculation -// The ratio of weight contributed by short-term vs long-term factors in a given round var WRatioNum = buildconstants.WRatioNum // Deprecated: Use buildconstants.WRatioNum instead var WRatioDen = buildconstants.WRatioDen // Deprecated: Use buildconstants.WRatioDen instead @@ -39,7 +38,6 @@ var TicketRandomnessLookback = buildconstants.TicketRandomnessLookback // Deprec // the 'f' prefix doesn't matter var ZeroAddress = buildconstants.ZeroAddress // Deprecated: Use buildconstants.ZeroAddress instead - // ///// // Devnet settings @@ -67,12 +65,8 @@ func init() { // Sync var BadBlockCacheSize = buildconstants.BadBlockCacheSize // Deprecated: Use buildconstants.BadBlockCacheSize instead -// assuming 4000 messages per round, this lets us not lose any messages across a -// 10 block reorg. 
var BlsSignatureCacheSize = buildconstants.BlsSignatureCacheSize // Deprecated: Use buildconstants.BlsSignatureCacheSize instead -// Size of signature verification cache -// 32k keeps the cache around 10MB in size, max var VerifSigCacheSize = buildconstants.VerifSigCacheSize // Deprecated: Use buildconstants.VerifSigCacheSize instead // /////// @@ -94,3 +88,10 @@ var MinDealDuration = buildconstants.MinDealDuration // Deprecated: Use buildcon var MaxDealDuration = buildconstants.MaxDealDuration // Deprecated: Use buildconstants.MaxDealDuration instead const TestNetworkVersion = buildconstants.TestNetworkVersion // Deprecated: Use buildconstants.TestNetworkVersion instead + +func init() { + policy.SetSupportedProofTypes(buildconstants.SupportedProofTypes...) + policy.SetConsensusMinerMinPower(buildconstants.ConsensusMinerMinPower) + policy.SetMinVerifiedDealSize(buildconstants.MinVerifiedDealSize) + policy.SetPreCommitChallengeDelay(buildconstants.PreCommitChallengeDelay) +} diff --git a/build/params_testground_vals.go b/build/params_testground_vals.go index a032e5fad48..4503f76dcfb 100644 --- a/build/params_testground_vals.go +++ b/build/params_testground_vals.go @@ -4,17 +4,9 @@ package build import ( - "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/actors/policy" ) // Actor consts // TODO: pieceSize unused from actors var MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0) - -func init() { - policy.SetSupportedProofTypes(buildconstants.SupportedProofTypes...) 
- policy.SetConsensusMinerMinPower(buildconstants.ConsensusMinerMinPower) - policy.SetMinVerifiedDealSize(buildconstants.MinVerifiedDealSize) - policy.SetPreCommitChallengeDelay(buildconstants.PreCommitChallengeDelay) -} diff --git a/build/proof-params/parameters.go b/build/proof-params/parameters.go index 68158391e99..1bef20f60f7 100644 --- a/build/proof-params/parameters.go +++ b/build/proof-params/parameters.go @@ -1,4 +1,4 @@ -package build +package proofparams import ( _ "embed" diff --git a/build/version.go b/build/version.go index ea1f4db8810..39b0e193ff8 100644 --- a/build/version.go +++ b/build/version.go @@ -9,16 +9,16 @@ import ( type BuildVersion string var CurrentCommit string -var BuildType = buildconstants.BuildType +var BuildType = buildconstants.BuildType // Deprecated: Use buildconstants.BuildType instead const ( - BuildDefault = buildconstants.BuildDefault - BuildMainnet = buildconstants.BuildMainnet - Build2k = buildconstants.Build2k - BuildDebug = buildconstants.BuildDebug - BuildCalibnet = buildconstants.BuildCalibnet - BuildInteropnet = buildconstants.BuildInteropnet - BuildButterflynet = buildconstants.BuildButterflynet + BuildDefault = buildconstants.BuildDefault // Deprecated: Use buildconstants.BuildDefault instead + BuildMainnet = buildconstants.BuildMainnet // Deprecated: Use buildconstants.BuildMainnet instead + Build2k = buildconstants.Build2k // Deprecated: Use buildconstants.Build2k instead + BuildDebug = buildconstants.BuildDebug // Deprecated: Use buildconstants.BuildDebug instead + BuildCalibnet = buildconstants.BuildCalibnet // Deprecated: Use buildconstants.BuildCalibnet instead + BuildInteropnet = buildconstants.BuildInteropnet // Deprecated: Use buildconstants.BuildInteropnet instead + BuildButterflynet = buildconstants.BuildButterflynet // Deprecated: Use buildconstants.BuildButterflynet instead ) func BuildTypeString() string { diff --git a/itests/api_test.go b/itests/api_test.go index 8ad1156b35a..afa2aa2bf4d 100644 --- 
a/itests/api_test.go +++ b/itests/api_test.go @@ -167,11 +167,9 @@ func (ts *apiSuite) testOutOfGasError(t *testing.T) { // the gas estimator API executes the message with gasLimit = BlockGasLimit // Lowering it to 2 will cause it to run out of gas, testing the failure case we want - originalLimit := build.BlockGasLimit - build.BlockGasLimit = 2 + originalLimit := buildconstants.BlockGasTarget buildconstants.BlockGasTarget = 2 defer func() { - build.BlockGasLimit = originalLimit buildconstants.BlockGasTarget = originalLimit }() From 81387ca70835944bbe7e938545228a4b06128e73 Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Thu, 11 Jul 2024 22:50:54 -0500 Subject: [PATCH 19/27] itests fix alerting --- build/buildconstants/params_interop.go | 3 +-- node/builder.go | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/build/buildconstants/params_interop.go b/build/buildconstants/params_interop.go index c00ac8d9fc2..784505130f6 100644 --- a/build/buildconstants/params_interop.go +++ b/build/buildconstants/params_interop.go @@ -7,9 +7,8 @@ import ( "os" "strconv" - logging "github.com/ipfs/go-log/v2" - "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" diff --git a/node/builder.go b/node/builder.go index afc46bbdfa2..978336bd133 100644 --- a/node/builder.go +++ b/node/builder.go @@ -160,7 +160,7 @@ func defaults() []Option { // global system journal. 
Override(new(journal.DisabledEvents), journal.EnvDisabledEvents), Override(new(journal.Journal), modules.OpenFilesystemJournal), - Override(new(alerting.Alerting), alerting.NewAlertingSystem), + Override(new(*alerting.Alerting), alerting.NewAlertingSystem), Override(new(alertinginterface.AlertingInterface), alerting.NewAlertingSystem), Override(new(dtypes.NodeStartTime), FromVal(dtypes.NodeStartTime(time.Now()))), From 72721775ad217be3df2d8208414b187a6b4c2c8b Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Thu, 11 Jul 2024 23:10:58 -0500 Subject: [PATCH 20/27] rm obsolete alertinginterface --- node/builder.go | 2 -- storage/paths/alertinginterface/ai.go | 11 ----------- storage/paths/db_index.go | 10 +++++----- 3 files changed, 5 insertions(+), 18 deletions(-) delete mode 100644 storage/paths/alertinginterface/ai.go diff --git a/node/builder.go b/node/builder.go index 978336bd133..8fb29c249fa 100644 --- a/node/builder.go +++ b/node/builder.go @@ -43,7 +43,6 @@ import ( "github.com/filecoin-project/lotus/node/modules/testing" "github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/paths/alertinginterface" "github.com/filecoin-project/lotus/system" ) @@ -161,7 +160,6 @@ func defaults() []Option { Override(new(journal.DisabledEvents), journal.EnvDisabledEvents), Override(new(journal.Journal), modules.OpenFilesystemJournal), Override(new(*alerting.Alerting), alerting.NewAlertingSystem), - Override(new(alertinginterface.AlertingInterface), alerting.NewAlertingSystem), Override(new(dtypes.NodeStartTime), FromVal(dtypes.NodeStartTime(time.Now()))), Override(CheckFDLimit, modules.CheckFdLimit(build.DefaultFDLimit)), diff --git a/storage/paths/alertinginterface/ai.go b/storage/paths/alertinginterface/ai.go deleted file mode 100644 index c8d592efc90..00000000000 --- a/storage/paths/alertinginterface/ai.go +++ /dev/null @@ -1,11 +0,0 @@ -package alertinginterface - -type 
AlertingInterface interface { - AddAlertType(name, id string) AlertType - Raise(alert AlertType, metadata map[string]interface{}) - IsRaised(alert AlertType) bool - Resolve(alert AlertType, metadata map[string]string) -} -type AlertType struct { - System, Subsystem string -} diff --git a/storage/paths/db_index.go b/storage/paths/db_index.go index 67b8c8a7d30..e6def455112 100644 --- a/storage/paths/db_index.go +++ b/storage/paths/db_index.go @@ -17,9 +17,9 @@ import ( "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/journal/alerting" "github.com/filecoin-project/lotus/lib/harmony/harmonydb" "github.com/filecoin-project/lotus/metrics" - "github.com/filecoin-project/lotus/storage/paths/alertinginterface" "github.com/filecoin-project/lotus/storage/sealer/fsutil" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -31,18 +31,18 @@ const URLSeparator = "," var errAlreadyLocked = errors.New("already locked") type DBIndex struct { - alerting alertinginterface.AlertingInterface - pathAlerts map[storiface.ID]alertinginterface.AlertType + alerting *alerting.Alerting + pathAlerts map[storiface.ID]alerting.AlertType harmonyDB *harmonydb.DB } -func NewDBIndex(al alertinginterface.AlertingInterface, db *harmonydb.DB) *DBIndex { +func NewDBIndex(al *alerting.Alerting, db *harmonydb.DB) *DBIndex { return &DBIndex{ harmonyDB: db, alerting: al, - pathAlerts: map[storiface.ID]alertinginterface.AlertType{}, + pathAlerts: map[storiface.ID]alerting.AlertType{}, } } From 7639a070a65137782fc23026c11dc7ef5990dfa0 Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Thu, 11 Jul 2024 23:16:19 -0500 Subject: [PATCH 21/27] spelling oops --- node/modules/storageminer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 11b2420a7a0..04a0a53768c 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -412,7 +412,7 @@ func 
F3Participation(mctx helpers.MetricsCtx, lc fx.Lifecycle, api v1api.FullNod continue } - // we have succeded in giving a lease, reset the backoff + // we have succeeded in giving a lease, reset the backoff b.Reset() oldLease = newLease From 320c31f746879659be4b2d015e34e26ce3b3317d Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Fri, 12 Jul 2024 09:31:58 -0500 Subject: [PATCH 22/27] changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bfb91552674..79f7d55f0c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ - https://github.com/filecoin-project/lotus/pull/12203: Fix slice modification bug in ETH Tx Events Bloom Filter - https://github.com/filecoin-project/lotus/pull/12221: Fix a nil reference panic in the ETH Trace API +- https://github.com/filecoin-project/lotus/pull/12112: Moved consts from build/ to build/buildconstants/ for lighter curio deps. ## ☢️ Upgrade Warnings ☢️ From 86b9797c6c7a38b126657f5153760a3ab804ff40 Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Fri, 12 Jul 2024 11:16:11 -0500 Subject: [PATCH 23/27] tests need buildconstants port --- node/impl/full/eth.go | 17 +++++++++-------- node/impl/full/eth_utils.go | 5 +++-- node/impl/full/gas.go | 13 +++++++------ node/impl/full/gas_test.go | 24 ++++++++++++------------ node/impl/full/state.go | 13 +++++++------ 5 files changed, 38 insertions(+), 34 deletions(-) diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go index a7c1975f5ff..a1a6bec6f9c 100644 --- a/node/impl/full/eth.go +++ b/node/impl/full/eth.go @@ -27,6 +27,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/actors" builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" builtinevm "github.com/filecoin-project/lotus/chain/actors/builtin/evm" @@ -484,7 +485,7 @@ func (a *EthModule)
EthGetCode(ctx context.Context, ethAddr ethtypes.EthAddress, Value: big.Zero(), Method: builtintypes.MethodsEVM.GetBytecode, Params: nil, - GasLimit: build.BlockGasLimit, + GasLimit: buildconstants.BlockGasLimit, GasFeeCap: big.Zero(), GasPremium: big.Zero(), } @@ -582,7 +583,7 @@ func (a *EthModule) EthGetStorageAt(ctx context.Context, ethAddr ethtypes.EthAdd Value: big.Zero(), Method: builtintypes.MethodsEVM.GetStorageAt, Params: params, - GasLimit: build.BlockGasLimit, + GasLimit: buildconstants.BlockGasLimit, GasFeeCap: big.Zero(), GasPremium: big.Zero(), } @@ -650,7 +651,7 @@ func (a *EthModule) EthGetBalance(ctx context.Context, address ethtypes.EthAddre } func (a *EthModule) EthChainId(ctx context.Context) (ethtypes.EthUint64, error) { - return ethtypes.EthUint64(build.Eip155ChainId), nil + return ethtypes.EthUint64(buildconstants.Eip155ChainId), nil } func (a *EthModule) EthSyncing(ctx context.Context) (ethtypes.EthSyncingResult, error) { @@ -749,7 +750,7 @@ func (a *EthModule) EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (eth } rewards, totalGasUsed := calculateRewardsAndGasUsed(rewardPercentiles, txGasRewards) - maxGas := build.BlockGasLimit * int64(len(ts.Blocks())) + maxGas := buildconstants.BlockGasLimit * int64(len(ts.Blocks())) // arrays should be reversed at the end baseFeeArray = append(baseFeeArray, ethtypes.EthBigInt(basefee)) @@ -788,7 +789,7 @@ func (a *EthModule) EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (eth } func (a *EthModule) NetVersion(_ context.Context) (string, error) { - return strconv.FormatInt(build.Eip155ChainId, 10), nil + return strconv.FormatInt(buildconstants.Eip155ChainId, 10), nil } func (a *EthModule) NetListening(ctx context.Context) (bool, error) { @@ -1093,7 +1094,7 @@ func (a *EthModule) EthEstimateGas(ctx context.Context, p jsonrpc.RawParams) (et // So we re-execute the message with EthCall (well, applyMessage which contains the // guts of EthCall). 
This will give us an ethereum specific error with revert // information. - msg.GasLimit = build.BlockGasLimit + msg.GasLimit = buildconstants.BlockGasLimit if _, err2 := a.applyMessage(ctx, msg, ts.Key()); err2 != nil { err = err2 } @@ -1156,8 +1157,8 @@ func gasSearch( low = high high = high * 2 - if high > build.BlockGasLimit { - high = build.BlockGasLimit + if high > buildconstants.BlockGasLimit { + high = buildconstants.BlockGasLimit break } } diff --git a/node/impl/full/eth_utils.go b/node/impl/full/eth_utils.go index 9e7d88dfa7f..d67bb26fb44 100644 --- a/node/impl/full/eth_utils.go +++ b/node/impl/full/eth_utils.go @@ -21,6 +21,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/state" @@ -202,7 +203,7 @@ func ethCallToFilecoinMessage(ctx context.Context, tx ethtypes.EthCall) (*types. 
Value: big.Int(tx.Value), Method: method, Params: params, - GasLimit: build.BlockGasLimit, + GasLimit: buildconstants.BlockGasLimit, GasFeeCap: big.Zero(), GasPremium: big.Zero(), }, nil @@ -558,7 +559,7 @@ func ethTxFromNativeMessage(msg *types.Message, st *state.StateTree) (ethtypes.E From: from, Input: encodeFilecoinParamsAsABI(msg.Method, codec, msg.Params), Nonce: ethtypes.EthUint64(msg.Nonce), - ChainID: ethtypes.EthUint64(build.Eip155ChainId), + ChainID: ethtypes.EthUint64(buildconstants.Eip155ChainId), Value: ethtypes.EthBigInt(msg.Value), Type: ethtypes.EIP1559TxType, Gas: ethtypes.EthUint64(msg.GasLimit), diff --git a/node/impl/full/gas.go b/node/impl/full/gas.go index c5b22354a52..3f105e63753 100644 --- a/node/impl/full/gas.go +++ b/node/impl/full/gas.go @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" lbuiltin "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" @@ -128,7 +129,7 @@ func gasEstimateFeeCap(cstore *store.ChainStore, msg *types.Message, maxqueueblk ts := cstore.GetHeaviestTipSet() parentBaseFee := ts.Blocks()[0].ParentBaseFee - increaseFactor := math.Pow(1.+1./float64(build.BaseFeeMaxChangeDenom), float64(maxqueueblks)) + increaseFactor := math.Pow(1.+1./float64(buildconstants.BaseFeeMaxChangeDenom), float64(maxqueueblks)) feeInFuture := types.BigMul(parentBaseFee, types.NewInt(uint64(increaseFactor*(1<<8)))) out := types.BigDiv(feeInFuture, types.NewInt(1<<8)) @@ -147,8 +148,8 @@ func medianGasPremium(prices []GasMeta, blocks int) abi.TokenAmount { return prices[i].Price.GreaterThan(prices[j].Price) }) - at := build.BlockGasTarget * int64(blocks) / 2 // 50th - at += build.BlockGasTarget * int64(blocks) / (2 * 20) // move 5% further + at := buildconstants.BlockGasTarget * int64(blocks) / 2 // 50th + at += 
buildconstants.BlockGasTarget * int64(blocks) / (2 * 20) // move 5% further prev1, prev2 := big.Zero(), big.Zero() for _, price := range prices { prev1, prev2 = price.Price, prev1 @@ -310,7 +311,7 @@ func gasEstimateGasLimit( currTs *types.TipSet, ) (int64, error) { msg := *msgIn - msg.GasLimit = build.BlockGasLimit + msg.GasLimit = buildconstants.BlockGasLimit msg.GasFeeCap = big.Zero() msg.GasPremium = big.Zero() @@ -390,8 +391,8 @@ func (m *GasModule) GasEstimateMessageGas(ctx context.Context, msg *types.Messag msg.GasLimit = int64(float64(gasLimit) * m.Mpool.GetConfig().GasLimitOverestimation) // Gas overestimation can cause us to exceed the block gas limit, cap it. - if msg.GasLimit > build.BlockGasLimit { - msg.GasLimit = build.BlockGasLimit + if msg.GasLimit > buildconstants.BlockGasLimit { + msg.GasLimit = buildconstants.BlockGasLimit } } diff --git a/node/impl/full/gas_test.go b/node/impl/full/gas_test.go index 8fc585bd544..ee4ca3b9850 100644 --- a/node/impl/full/gas_test.go +++ b/node/impl/full/gas_test.go @@ -8,35 +8,35 @@ import ( "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/types" ) func TestMedian(t *testing.T) { //stm: @MARKET_GAS_GET_MEDIAN_PREMIUM_001 require.Equal(t, types.NewInt(5), medianGasPremium([]GasMeta{ - {big.NewInt(5), build.BlockGasTarget}, + {big.NewInt(5), buildconstants.BlockGasTarget}, }, 1)) require.Equal(t, types.NewInt(10), medianGasPremium([]GasMeta{ - {big.NewInt(5), build.BlockGasTarget}, - {big.NewInt(10), build.BlockGasTarget}, + {big.NewInt(5), buildconstants.BlockGasTarget}, + {big.NewInt(10), buildconstants.BlockGasTarget}, }, 1)) require.Equal(t, types.NewInt(15), medianGasPremium([]GasMeta{ - {big.NewInt(10), build.BlockGasTarget / 2}, - {big.NewInt(20), build.BlockGasTarget / 2}, + {big.NewInt(10), buildconstants.BlockGasTarget / 2}, + {big.NewInt(20), 
buildconstants.BlockGasTarget / 2}, }, 1)) require.Equal(t, types.NewInt(25), medianGasPremium([]GasMeta{ - {big.NewInt(10), build.BlockGasTarget / 2}, - {big.NewInt(20), build.BlockGasTarget / 2}, - {big.NewInt(30), build.BlockGasTarget / 2}, + {big.NewInt(10), buildconstants.BlockGasTarget / 2}, + {big.NewInt(20), buildconstants.BlockGasTarget / 2}, + {big.NewInt(30), buildconstants.BlockGasTarget / 2}, }, 1)) require.Equal(t, types.NewInt(15), medianGasPremium([]GasMeta{ - {big.NewInt(10), build.BlockGasTarget / 2}, - {big.NewInt(20), build.BlockGasTarget / 2}, - {big.NewInt(30), build.BlockGasTarget / 2}, + {big.NewInt(10), buildconstants.BlockGasTarget / 2}, + {big.NewInt(20), buildconstants.BlockGasTarget / 2}, + {big.NewInt(30), buildconstants.BlockGasTarget / 2}, }, 2)) } diff --git a/node/impl/full/state.go b/node/impl/full/state.go index 312d30ee315..332ba00720f 100644 --- a/node/impl/full/state.go +++ b/node/impl/full/state.go @@ -30,6 +30,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/datacap" @@ -1929,11 +1930,11 @@ func (a *StateAPI) StateGetNetworkParams(ctx context.Context) (*api.NetworkParam return &api.NetworkParams{ NetworkName: networkName, - BlockDelaySecs: build.BlockDelaySecs, - ConsensusMinerMinPower: build.ConsensusMinerMinPower, - SupportedProofTypes: build.SupportedProofTypes, - PreCommitChallengeDelay: build.PreCommitChallengeDelay, - Eip155ChainID: build.Eip155ChainId, + BlockDelaySecs: buildconstants.BlockDelaySecs, + ConsensusMinerMinPower: buildconstants.ConsensusMinerMinPower, + SupportedProofTypes: buildconstants.SupportedProofTypes, + PreCommitChallengeDelay: buildconstants.PreCommitChallengeDelay, + Eip155ChainID: buildconstants.Eip155ChainId, ForkUpgradeParams: 
api.ForkUpgradeParams{ UpgradeSmokeHeight: build.UpgradeSmokeHeight, UpgradeBreezeHeight: build.UpgradeBreezeHeight, @@ -1943,7 +1944,7 @@ func (a *StateAPI) StateGetNetworkParams(ctx context.Context) (*api.NetworkParam UpgradeRefuelHeight: build.UpgradeRefuelHeight, UpgradeTapeHeight: build.UpgradeTapeHeight, UpgradeKumquatHeight: build.UpgradeKumquatHeight, - BreezeGasTampingDuration: build.BreezeGasTampingDuration, + BreezeGasTampingDuration: buildconstants.BreezeGasTampingDuration, UpgradeCalicoHeight: build.UpgradeCalicoHeight, UpgradePersianHeight: build.UpgradePersianHeight, UpgradeOrangeHeight: build.UpgradeOrangeHeight, From 20947d232c89ed1871eb3d14b737c1ca2d0d7c1e Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Fri, 12 Jul 2024 11:28:59 -0500 Subject: [PATCH 24/27] Fully migrate BlockGasTarget --- chain/store/basefee.go | 17 +++++++++-------- chain/store/basefee_test.go | 5 +++-- .../simulation/blockbuilder/blockbuilder.go | 6 +++--- tools/stats/points/collect.go | 7 ++++--- 4 files changed, 19 insertions(+), 16 deletions(-) diff --git a/chain/store/basefee.go b/chain/store/basefee.go index 3b6af5c0716..e5c8049efa0 100644 --- a/chain/store/basefee.go +++ b/chain/store/basefee.go @@ -10,11 +10,12 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/types" ) func ComputeNextBaseFee(baseFee types.BigInt, gasLimitUsed int64, noOfBlocks int, epoch abi.ChainEpoch) types.BigInt { - // deta := gasLimitUsed/noOfBlocks - build.BlockGasTarget + // deta := gasLimitUsed/noOfBlocks - buildconstants.BlockGasTarget // change := baseFee * deta / BlockGasTarget // nextBaseFee = baseFee + change // nextBaseFee = max(nextBaseFee, build.MinimumBaseFee) @@ -22,22 +23,22 @@ func ComputeNextBaseFee(baseFee types.BigInt, gasLimitUsed int64, noOfBlocks int var delta int64 if epoch > build.UpgradeSmokeHeight { delta = 
gasLimitUsed / int64(noOfBlocks) - delta -= build.BlockGasTarget + delta -= buildconstants.BlockGasTarget } else { delta = build.PackingEfficiencyDenom * gasLimitUsed / (int64(noOfBlocks) * build.PackingEfficiencyNum) - delta -= build.BlockGasTarget + delta -= buildconstants.BlockGasTarget } // cap change at 12.5% (BaseFeeMaxChangeDenom) by capping delta - if delta > build.BlockGasTarget { - delta = build.BlockGasTarget + if delta > buildconstants.BlockGasTarget { + delta = buildconstants.BlockGasTarget } - if delta < -build.BlockGasTarget { - delta = -build.BlockGasTarget + if delta < -buildconstants.BlockGasTarget { + delta = -buildconstants.BlockGasTarget } change := big.Mul(baseFee, big.NewInt(delta)) - change = big.Div(change, big.NewInt(build.BlockGasTarget)) + change = big.Div(change, big.NewInt(buildconstants.BlockGasTarget)) change = big.Div(change, big.NewInt(build.BaseFeeMaxChangeDenom)) nextBaseFee := big.Add(baseFee, change) diff --git a/chain/store/basefee_test.go b/chain/store/basefee_test.go index 8dd61f709e0..ef80ab122f6 100644 --- a/chain/store/basefee_test.go +++ b/chain/store/basefee_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/types" ) @@ -22,8 +23,8 @@ func TestBaseFee(t *testing.T) { }{ {100e6, 0, 1, 87.5e6, 87.5e6}, {100e6, 0, 5, 87.5e6, 87.5e6}, - {100e6, build.BlockGasTarget, 1, 103.125e6, 100e6}, - {100e6, build.BlockGasTarget * 2, 2, 103.125e6, 100e6}, + {100e6, buildconstants.BlockGasTarget, 1, 103.125e6, 100e6}, + {100e6, buildconstants.BlockGasTarget * 2, 2, 103.125e6, 100e6}, {100e6, build.BlockGasLimit * 2, 2, 112.5e6, 112.5e6}, {100e6, (build.BlockGasLimit * 15) / 10, 2, 110937500, 106.250e6}, } diff --git a/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go b/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go index 273ab337c1b..58468533a57 100644 --- 
a/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go +++ b/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go @@ -12,7 +12,7 @@ import ( actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/account" @@ -36,7 +36,7 @@ const ( // TODO: This will produce invalid blocks but it will accurately model the amount of gas // we're willing to use per-tipset. // A more correct approach would be to produce 5 blocks. We can do that later. -var targetGas = build.BlockGasTarget * expectedBlocks +var targetGas = buildconstants.BlockGasTarget * expectedBlocks type BlockBuilder struct { ctx context.Context @@ -150,7 +150,7 @@ func (bb *BlockBuilder) PushMessage(msg *types.Message) (*types.MessageReceipt, } msg.GasPremium = abi.NewTokenAmount(0) msg.GasFeeCap = abi.NewTokenAmount(0) - msg.GasLimit = build.BlockGasTarget + msg.GasLimit = buildconstants.BlockGasTarget // We manually snapshot so we can revert nonce changes, etc. on failure. 
err = st.Snapshot(bb.ctx) diff --git a/tools/stats/points/collect.go b/tools/stats/points/collect.go index 8b86695742e..24be14a6a96 100644 --- a/tools/stats/points/collect.go +++ b/tools/stats/points/collect.go @@ -18,6 +18,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/power" @@ -203,11 +204,11 @@ func (c *ChainPointCollector) collectBlockheaderPoints(ctx context.Context, pl * } { blks := int64(len(cids)) - p = influx.NewPoint("chain.gas_fill_ratio", float64(totalGasLimit)/float64(blks*build.BlockGasTarget)) + p = influx.NewPoint("chain.gas_fill_ratio", float64(totalGasLimit)/float64(blks*buildconstants.BlockGasTarget)) pl.AddPoint(p) - p = influx.NewPoint("chain.gas_capacity_ratio", float64(totalUniqGasLimit)/float64(blks*build.BlockGasTarget)) + p = influx.NewPoint("chain.gas_capacity_ratio", float64(totalUniqGasLimit)/float64(blks*buildconstants.BlockGasTarget)) pl.AddPoint(p) - p = influx.NewPoint("chain.gas_waste_ratio", float64(totalGasLimit-totalUniqGasLimit)/float64(blks*build.BlockGasTarget)) + p = influx.NewPoint("chain.gas_waste_ratio", float64(totalGasLimit-totalUniqGasLimit)/float64(blks*buildconstants.BlockGasTarget)) pl.AddPoint(p) } From 21ca683a6f5095736cca6c9f1a3b1cfbfd6bab2a Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Fri, 12 Jul 2024 16:01:23 -0500 Subject: [PATCH 25/27] ulimit should not depend on build --- build/buildconstants/limits.go | 6 +++++ build/limits.go | 5 ++-- chain/consensus/common.go | 3 ++- chain/consensus/compute_state.go | 3 ++- chain/messagepool/repub.go | 3 ++- chain/messagepool/selection.go | 11 +++++---- chain/messagepool/selection_test.go | 37 +++++++++++++++-------------- chain/stmgr/call.go | 4 ++-- chain/store/basefee_test.go | 4 
++-- cli/chain.go | 7 +++--- cli/mpool.go | 6 ++--- cmd/lotus-shed/gas-estimation.go | 3 ++- cmd/lotus-shed/mpool.go | 4 ++-- itests/api_test.go | 6 ++--- itests/eth_block_hash_test.go | 4 ++-- itests/fevm_test.go | 3 ++- itests/kit/evm.go | 4 ++-- lib/ulimit/ulimit.go | 4 ++-- lib/ulimit/ulimit_test.go | 4 ++-- node/builder.go | 3 ++- 20 files changed, 70 insertions(+), 54 deletions(-) create mode 100644 build/buildconstants/limits.go diff --git a/build/buildconstants/limits.go b/build/buildconstants/limits.go new file mode 100644 index 00000000000..1a2024479be --- /dev/null +++ b/build/buildconstants/limits.go @@ -0,0 +1,6 @@ +package buildconstants + +var ( + DefaultFDLimit uint64 = 16 << 10 + MinerFDLimit uint64 = 100_000 +) diff --git a/build/limits.go b/build/limits.go index 93d56577c44..5cf3eda88d1 100644 --- a/build/limits.go +++ b/build/limits.go @@ -1,6 +1,7 @@ package build +import "github.com/filecoin-project/lotus/build/buildconstants" + var ( - DefaultFDLimit uint64 = 16 << 10 - MinerFDLimit uint64 = 100_000 + MinerFDLimit uint64 = buildconstants.MinerFDLimit ) diff --git a/chain/consensus/common.go b/chain/consensus/common.go index 49c38fee15a..9f832e69433 100644 --- a/chain/consensus/common.go +++ b/chain/consensus/common.go @@ -24,6 +24,7 @@ import ( "github.com/filecoin-project/lotus/api" bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" @@ -225,7 +226,7 @@ func checkBlockMessages(ctx context.Context, sm *stmgr.StateManager, cs *store.C // ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit // So below is overflow safe sumGasLimit += m.GasLimit - if sumGasLimit > build.BlockGasLimit { + if sumGasLimit > buildconstants.BlockGasLimit { return xerrors.Errorf("block 
gas limit exceeded") } diff --git a/chain/consensus/compute_state.go b/chain/consensus/compute_state.go index a5e82a57ffe..b8fec248aca 100644 --- a/chain/consensus/compute_state.go +++ b/chain/consensus/compute_state.go @@ -27,6 +27,7 @@ import ( "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/cron" "github.com/filecoin-project/lotus/chain/actors/builtin/reward" @@ -129,7 +130,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, Value: types.NewInt(0), GasFeeCap: types.NewInt(0), GasPremium: types.NewInt(0), - GasLimit: build.BlockGasLimit * 10000, // Make super sure this is never too little + GasLimit: buildconstants.BlockGasLimit * 10000, // Make super sure this is never too little Method: cron.Methods.EpochTick, Params: nil, } diff --git a/chain/messagepool/repub.go b/chain/messagepool/repub.go index a87d5e08a84..04f83f42300 100644 --- a/chain/messagepool/repub.go +++ b/chain/messagepool/repub.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/messagepool/gasguess" "github.com/filecoin-project/lotus/chain/types" ) @@ -81,7 +82,7 @@ func (mp *MessagePool) republishPendingMessages(ctx context.Context) error { return chains[i].Before(chains[j]) }) - gasLimit := build.BlockGasLimit + gasLimit := buildconstants.BlockGasLimit minGas := int64(gasguess.MinGas) var msgs []*types.SignedMessage loop: diff --git a/chain/messagepool/selection.go b/chain/messagepool/selection.go index 0d0ed3cbab2..cfe68ed63ba 100644 --- a/chain/messagepool/selection.go +++ b/chain/messagepool/selection.go @@ -15,12 +15,13 @@ import ( "github.com/filecoin-project/go-state-types/crypto" 
"github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/messagepool/gasguess" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" ) -var bigBlockGasLimit = big.NewInt(build.BlockGasLimit) +var bigBlockGasLimit = big.NewInt(buildconstants.BlockGasLimit) const MaxBlocks = 15 @@ -268,7 +269,7 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ nextChain := 0 partitions := make([][]*msgChain, MaxBlocks) for i := 0; i < MaxBlocks && nextChain < len(chains); i++ { - gasLimit := build.BlockGasLimit + gasLimit := buildconstants.BlockGasLimit msgLimit := build.BlockMessageLimit for nextChain < len(chains) { chain := chains[nextChain] @@ -600,7 +601,7 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a mpCfg := mp.getConfig() result := &selectedMessages{ msgs: make([]*types.SignedMessage, 0, mpCfg.SizeLimitLow), - gasLimit: build.BlockGasLimit, + gasLimit: buildconstants.BlockGasLimit, blsLimit: cbg.MaxLength, secpLimit: cbg.MaxLength, } @@ -762,7 +763,7 @@ func (*MessagePool) getGasReward(msg *types.SignedMessage, baseFee types.BigInt) } func (*MessagePool) getGasPerf(gasReward *big.Int, gasLimit int64) float64 { - // gasPerf = gasReward * build.BlockGasLimit / gasLimit + // gasPerf = gasReward * buildconstants.BlockGasLimit / gasLimit a := new(big.Rat).SetInt(new(big.Int).Mul(gasReward, bigBlockGasLimit)) b := big.NewRat(1, gasLimit) c := new(big.Rat).Mul(a, b) @@ -822,7 +823,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6 } gasLimit += m.Message.GasLimit - if gasLimit > build.BlockGasLimit { + if gasLimit > buildconstants.BlockGasLimit { break } diff --git a/chain/messagepool/selection_test.go b/chain/messagepool/selection_test.go index ff98383633e..9379675a672 100644 --- a/chain/messagepool/selection_test.go +++ 
b/chain/messagepool/selection_test.go @@ -26,6 +26,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/messagepool/gasguess" "github.com/filecoin-project/lotus/chain/types" @@ -267,7 +268,7 @@ func TestMessageChains(t *testing.T) { // test6: one more message than what can fit in a block according to gas limit, with increasing // gasPerf; it should create a single chain with the max messages - maxMessages := int(build.BlockGasLimit / gasLimit) + maxMessages := int(buildconstants.BlockGasLimit / gasLimit) nMessages := maxMessages + 1 mset = make(map[uint64]*types.SignedMessage) @@ -571,7 +572,7 @@ func TestMessageSelectionTrimmingGas(t *testing.T) { tma.setBalance(a2, 1) // in FIL // make many small chains for the two actors - nMessages := int((build.BlockGasLimit / gasLimit) + 1) + nMessages := int((buildconstants.BlockGasLimit / gasLimit) + 1) for i := 0; i < nMessages; i++ { bias := (nMessages - i) / 3 m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias)) @@ -585,7 +586,7 @@ func TestMessageSelectionTrimmingGas(t *testing.T) { t.Fatal(err) } - expected := int(build.BlockGasLimit / gasLimit) + expected := int(buildconstants.BlockGasLimit / gasLimit) if len(msgs) != expected { t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) } @@ -594,7 +595,7 @@ func TestMessageSelectionTrimmingGas(t *testing.T) { for _, m := range msgs { mGasLimit += m.Message.GasLimit } - if mGasLimit > build.BlockGasLimit { + if mGasLimit > buildconstants.BlockGasLimit { t.Fatal("selected messages gas limit exceeds block gas limit!") } @@ -641,7 +642,7 @@ func TestMessageSelectionTrimmingMsgsBasic(t *testing.T) { for _, m := range msgs { mGasLimit += m.Message.GasLimit } - if mGasLimit > build.BlockGasLimit { + if mGasLimit > buildconstants.BlockGasLimit { 
t.Fatal("selected messages gas limit exceeds block gas limit!") } @@ -700,7 +701,7 @@ func TestMessageSelectionTrimmingMsgsTwoSendersBasic(t *testing.T) { counts[m.Signature.Type]++ } - if mGasLimit > build.BlockGasLimit { + if mGasLimit > buildconstants.BlockGasLimit { t.Fatal("selected messages gas limit exceeds block gas limit!") } @@ -781,7 +782,7 @@ func TestMessageSelectionTrimmingMsgsTwoSendersAdvanced(t *testing.T) { counts[m.Signature.Type]++ } - if mGasLimit > build.BlockGasLimit { + if mGasLimit > buildconstants.BlockGasLimit { t.Fatal("selected messages gas limit exceeds block gas limit!") } @@ -912,7 +913,7 @@ func TestPriorityMessageSelection2(t *testing.T) { mp.cfg.PriorityAddrs = []address.Address{a1} - nMessages := int(2 * build.BlockGasLimit / gasLimit) + nMessages := int(2 * buildconstants.BlockGasLimit / gasLimit) for i := 0; i < nMessages; i++ { bias := (nMessages - i) / 3 m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias)) @@ -926,7 +927,7 @@ func TestPriorityMessageSelection2(t *testing.T) { t.Fatal(err) } - expectedMsgs := int(build.BlockGasLimit / gasLimit) + expectedMsgs := int(buildconstants.BlockGasLimit / gasLimit) if len(msgs) != expectedMsgs { t.Fatalf("expected %d messages but got %d", expectedMsgs, len(msgs)) } @@ -1077,7 +1078,7 @@ func TestOptimalMessageSelection1(t *testing.T) { tma.setBalance(a1, 1) // in FIL tma.setBalance(a2, 1) // in FIL - nMessages := int(10 * build.BlockGasLimit / gasLimit) + nMessages := int(10 * buildconstants.BlockGasLimit / gasLimit) for i := 0; i < nMessages; i++ { bias := (nMessages - i) / 3 m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(1+i%3+bias)) @@ -1089,7 +1090,7 @@ func TestOptimalMessageSelection1(t *testing.T) { t.Fatal(err) } - expectedMsgs := int(build.BlockGasLimit / gasLimit) + expectedMsgs := int(buildconstants.BlockGasLimit / gasLimit) if len(msgs) != expectedMsgs { t.Fatalf("expected %d messages, but got %d", expectedMsgs, len(msgs)) } @@ -1146,7 
+1147,7 @@ func TestOptimalMessageSelection2(t *testing.T) { tma.setBalance(a1, 1) // in FIL tma.setBalance(a2, 1) // in FIL - nMessages := int(5 * build.BlockGasLimit / gasLimit) + nMessages := int(5 * buildconstants.BlockGasLimit / gasLimit) for i := 0; i < nMessages; i++ { bias := (nMessages - i) / 3 m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(200000+i%3+bias)) @@ -1160,7 +1161,7 @@ func TestOptimalMessageSelection2(t *testing.T) { t.Fatal(err) } - expectedMsgs := int(build.BlockGasLimit / gasLimit) + expectedMsgs := int(buildconstants.BlockGasLimit / gasLimit) if len(msgs) != expectedMsgs { t.Fatalf("expected %d messages, but got %d", expectedMsgs, len(msgs)) } @@ -1227,7 +1228,7 @@ func TestOptimalMessageSelection3(t *testing.T) { tma.setBalance(a, 1) // in FIL } - nMessages := int(build.BlockGasLimit/gasLimit) + 1 + nMessages := int(buildconstants.BlockGasLimit/gasLimit) + 1 for i := 0; i < nMessages; i++ { for j := 0; j < nActors; j++ { premium := 500000 + 10000*(nActors-j) + (nMessages+2-i)/(30*nActors) + i%3 @@ -1241,7 +1242,7 @@ func TestOptimalMessageSelection3(t *testing.T) { t.Fatal(err) } - expectedMsgs := int(build.BlockGasLimit / gasLimit) + expectedMsgs := int(buildconstants.BlockGasLimit / gasLimit) if len(msgs) != expectedMsgs { t.Fatalf("expected %d messages, but got %d", expectedMsgs, len(msgs)) } @@ -1308,7 +1309,7 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu tma.setBalance(a, 1) // in FIL } - nMessages := 10 * int(build.BlockGasLimit/gasLimit) + nMessages := 10 * int(buildconstants.BlockGasLimit/gasLimit) t.Log("nMessages", nMessages) nonces := make([]uint64, nActors) for i := 0; i < nMessages; i++ { @@ -1618,7 +1619,7 @@ readLoop: } // do message selection and check block packing - minGasLimit := int64(0.9 * float64(build.BlockGasLimit)) + minGasLimit := int64(0.9 * float64(buildconstants.BlockGasLimit)) // greedy first selected, err := mp.SelectMessages(context.Background(), ts, 1.0) 
@@ -1794,7 +1795,7 @@ readLoop: } // do message selection and check block packing - minGasLimit := int64(0.9 * float64(build.BlockGasLimit)) + minGasLimit := int64(0.9 * float64(buildconstants.BlockGasLimit)) // greedy first start := time.Now() diff --git a/chain/stmgr/call.go b/chain/stmgr/call.go index 7f2a57a6112..fe88630e55a 100644 --- a/chain/stmgr/call.go +++ b/chain/stmgr/call.go @@ -18,7 +18,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/rand" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/types" @@ -44,7 +44,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. msg = &msgCopy if msg.GasLimit == 0 { - msg.GasLimit = build.BlockGasLimit + msg.GasLimit = buildconstants.BlockGasLimit } if msg.GasFeeCap == types.EmptyInt { msg.GasFeeCap = types.NewInt(0) diff --git a/chain/store/basefee_test.go b/chain/store/basefee_test.go index ef80ab122f6..fa22f7d042c 100644 --- a/chain/store/basefee_test.go +++ b/chain/store/basefee_test.go @@ -25,8 +25,8 @@ func TestBaseFee(t *testing.T) { {100e6, 0, 5, 87.5e6, 87.5e6}, {100e6, buildconstants.BlockGasTarget, 1, 103.125e6, 100e6}, {100e6, buildconstants.BlockGasTarget * 2, 2, 103.125e6, 100e6}, - {100e6, build.BlockGasLimit * 2, 2, 112.5e6, 112.5e6}, - {100e6, (build.BlockGasLimit * 15) / 10, 2, 110937500, 106.250e6}, + {100e6, buildconstants.BlockGasLimit * 2, 2, 112.5e6, 112.5e6}, + {100e6, (buildconstants.BlockGasLimit * 15) / 10, 2, 110937500, 106.250e6}, } for _, test := range tests { diff --git a/cli/chain.go b/cli/chain.go index c0d54fd6382..d9f6dc0bd96 100644 --- a/cli/chain.go +++ b/cli/chain.go @@ -37,6 +37,7 @@ import ( lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v0api" 
"github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/types" @@ -631,7 +632,7 @@ var ChainListCmd = &cli.Command{ tss = otss for i, ts := range tss { pbf := ts.Blocks()[0].ParentBaseFee - afmt.Printf("%d: %d blocks (baseFee: %s -> maxFee: %s)\n", ts.Height(), len(ts.Blocks()), ts.Blocks()[0].ParentBaseFee, types.FIL(types.BigMul(pbf, types.NewInt(uint64(build.BlockGasLimit))))) + afmt.Printf("%d: %d blocks (baseFee: %s -> maxFee: %s)\n", ts.Height(), len(ts.Blocks()), ts.Blocks()[0].ParentBaseFee, types.FIL(types.BigMul(pbf, types.NewInt(uint64(buildconstants.BlockGasLimit))))) for _, b := range ts.Blocks() { msgs, err := api.ChainGetBlockMessages(ctx, b.Cid()) @@ -657,7 +658,7 @@ var ChainListCmd = &cli.Command{ avgpremium = big.Div(psum, big.NewInt(int64(lenmsgs))) } - afmt.Printf("\t%s: \t%d msgs, gasLimit: %d / %d (%0.2f%%), avgPremium: %s\n", b.Miner, len(msgs.BlsMessages)+len(msgs.SecpkMessages), limitSum, build.BlockGasLimit, 100*float64(limitSum)/float64(build.BlockGasLimit), avgpremium) + afmt.Printf("\t%s: \t%d msgs, gasLimit: %d / %d (%0.2f%%), avgPremium: %s\n", b.Miner, len(msgs.BlsMessages)+len(msgs.SecpkMessages), limitSum, buildconstants.BlockGasLimit, 100*float64(limitSum)/float64(buildconstants.BlockGasLimit), avgpremium) } if i < len(tss)-1 { msgs, err := api.ChainGetParentMessages(ctx, tss[i+1].Blocks()[0].Cid()) @@ -680,7 +681,7 @@ var ChainListCmd = &cli.Command{ } gasEfficiency := 100 * float64(gasUsed) / float64(limitSum) - gasCapacity := 100 * float64(limitSum) / float64(build.BlockGasLimit) + gasCapacity := 100 * float64(limitSum) / float64(buildconstants.BlockGasLimit) afmt.Printf("\ttipset: \t%d msgs, %d (%0.2f%%) / %d (%0.2f%%)\n", len(msgs), gasUsed, gasEfficiency, limitSum, gasCapacity) } diff --git a/cli/mpool.go b/cli/mpool.go index 
f38a900fb7b..eb05e3e81e5 100644 --- a/cli/mpool.go +++ b/cli/mpool.go @@ -16,7 +16,7 @@ import ( "github.com/filecoin-project/go-state-types/big" lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/config" @@ -712,7 +712,7 @@ var MpoolGasPerfCmd = &cli.Command{ baseFee := ts.Blocks()[0].ParentBaseFee - bigBlockGasLimit := big.NewInt(build.BlockGasLimit) + bigBlockGasLimit := big.NewInt(buildconstants.BlockGasLimit) getGasReward := func(msg *types.SignedMessage) big.Int { maxPremium := types.BigSub(msg.Message.GasFeeCap, baseFee) @@ -723,7 +723,7 @@ var MpoolGasPerfCmd = &cli.Command{ } getGasPerf := func(gasReward big.Int, gasLimit int64) float64 { - // gasPerf = gasReward * build.BlockGasLimit / gasLimit + // gasPerf = gasReward * buildconstants.BlockGasLimit / gasLimit a := new(stdbig.Rat).SetInt(new(stdbig.Int).Mul(gasReward.Int, bigBlockGasLimit.Int)) b := stdbig.NewRat(1, gasLimit) c := new(stdbig.Rat).Mul(a, b) diff --git a/cmd/lotus-shed/gas-estimation.go b/cmd/lotus-shed/gas-estimation.go index 5dc048f562c..31b5384d5ce 100644 --- a/cmd/lotus-shed/gas-estimation.go +++ b/cmd/lotus-shed/gas-estimation.go @@ -16,6 +16,7 @@ import ( "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/beacon/drand" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" @@ -118,7 +119,7 @@ var gasTraceCmd = &cli.Command{ } // Set to block limit so message will not run out of gas - msg.GasLimit = build.BlockGasLimit + msg.GasLimit = buildconstants.BlockGasLimit err = cs.Load(ctx) if err != nil { diff --git a/cmd/lotus-shed/mpool.go 
b/cmd/lotus-shed/mpool.go index 6b210bbc10e..0e24392ae5d 100644 --- a/cmd/lotus-shed/mpool.go +++ b/cmd/lotus-shed/mpool.go @@ -6,7 +6,7 @@ import ( "github.com/urfave/cli/v2" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" ) @@ -79,7 +79,7 @@ var minerSelectMsgsCmd = &cli.Command{ fmt.Printf("Message selection took %s\n", duration) fmt.Printf("Size of the mempool: %d\n", mpoolSize) fmt.Println("selected messages: ", len(msgs)) - fmt.Printf("total gas limit of selected messages: %d / %d (%0.2f%%)\n", totalGas, build.BlockGasLimit, 100*float64(totalGas)/float64(build.BlockGasLimit)) + fmt.Printf("total gas limit of selected messages: %d / %d (%0.2f%%)\n", totalGas, buildconstants.BlockGasLimit, 100*float64(totalGas)/float64(buildconstants.BlockGasLimit)) return nil }, } diff --git a/itests/api_test.go b/itests/api_test.go index afa2aa2bf4d..5d26a12a2e4 100644 --- a/itests/api_test.go +++ b/itests/api_test.go @@ -167,10 +167,10 @@ func (ts *apiSuite) testOutOfGasError(t *testing.T) { // the gas estimator API executes the message with gasLimit = BlockGasLimit // Lowering it to 2 will cause it to run out of gas, testing the failure case we want - originalLimit := buildconstants.BlockGasTarget - buildconstants.BlockGasTarget = 2 + originalLimit := buildconstants.BlockGasLimit + buildconstants.BlockGasLimit = 2 defer func() { - buildconstants.BlockGasTarget = originalLimit + buildconstants.BlockGasLimit = originalLimit }() msg := &types.Message{ diff --git a/itests/eth_block_hash_test.go b/itests/eth_block_hash_test.go index e7da435bad6..2e30fe0e641 100644 --- a/itests/eth_block_hash_test.go +++ b/itests/eth_block_hash_test.go @@ -11,7 +11,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" 
"github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/itests/kit" ) @@ -79,7 +79,7 @@ func TestEthBlockHashesCorrect_MultiBlockTipset(t *testing.T) { require.Equal(t, ethBlockA, ethBlockB) numBlocks := len(ts.Blocks()) - expGasLimit := ethtypes.EthUint64(int64(numBlocks) * build.BlockGasLimit) + expGasLimit := ethtypes.EthUint64(int64(numBlocks) * buildconstants.BlockGasLimit) require.Equal(t, expGasLimit, ethBlockB.GasLimit) } } diff --git a/itests/fevm_test.go b/itests/fevm_test.go index 68071fff51a..778461effdc 100644 --- a/itests/fevm_test.go +++ b/itests/fevm_test.go @@ -21,6 +21,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/itests/kit" @@ -667,7 +668,7 @@ func TestFEVMRecursiveActorCallEstimate(t *testing.T) { gaslimit, err := client.EthEstimateGas(ctx, gasParams) require.NoError(t, err) - require.LessOrEqual(t, int64(gaslimit), build.BlockGasLimit) + require.LessOrEqual(t, int64(gaslimit), buildconstants.BlockGasLimit) t.Logf("EthEstimateGas GasLimit=%d", gaslimit) diff --git a/itests/kit/evm.go b/itests/kit/evm.go index 84a8ead1d3f..f0db5877ee0 100644 --- a/itests/kit/evm.go +++ b/itests/kit/evm.go @@ -29,7 +29,7 @@ import ( "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" @@ -165,7 +165,7 @@ func (e *EVM) InvokeSolidityWithValue(ctx context.Context, sender address.Addres From: sender, Value: value, Method: builtintypes.MethodsEVM.InvokeContract, - GasLimit: build.BlockGasLimit, 
// note: we hardcode block gas limit due to slightly broken gas estimation - https://github.com/filecoin-project/lotus/issues/10041 + GasLimit: buildconstants.BlockGasLimit, // note: we hardcode block gas limit due to slightly broken gas estimation - https://github.com/filecoin-project/lotus/issues/10041 Params: params, } diff --git a/lib/ulimit/ulimit.go b/lib/ulimit/ulimit.go index e900f12136e..739802dbf5d 100644 --- a/lib/ulimit/ulimit.go +++ b/lib/ulimit/ulimit.go @@ -11,7 +11,7 @@ import ( logging "github.com/ipfs/go-log/v2" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" ) var log = logging.Logger("ulimit") @@ -65,7 +65,7 @@ func ManageFdLimit() (changed bool, newLimit uint64, err error) { return false, 0, nil } - targetLimit := build.DefaultFDLimit + targetLimit := buildconstants.DefaultFDLimit userLimit := userMaxFDs() if userLimit > 0 { targetLimit = userLimit diff --git a/lib/ulimit/ulimit_test.go b/lib/ulimit/ulimit_test.go index ad20feb1de9..4b3cf73dfae 100644 --- a/lib/ulimit/ulimit_test.go +++ b/lib/ulimit/ulimit_test.go @@ -13,7 +13,7 @@ import ( "syscall" "testing" - "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" ) func TestManageFdLimit(t *testing.T) { @@ -22,7 +22,7 @@ func TestManageFdLimit(t *testing.T) { t.Errorf("Cannot manage file descriptors") } - if build.DefaultFDLimit != uint64(16<<10) { + if buildconstants.DefaultFDLimit != uint64(16<<10) { t.Errorf("Maximum file descriptors default value changed") } } diff --git a/node/builder.go b/node/builder.go index 8fb29c249fa..0bdb1fab223 100644 --- a/node/builder.go +++ b/node/builder.go @@ -23,6 +23,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/beacon" "github.com/filecoin-project/lotus/chain/index" 
"github.com/filecoin-project/lotus/chain/types" @@ -162,7 +163,7 @@ func defaults() []Option { Override(new(*alerting.Alerting), alerting.NewAlertingSystem), Override(new(dtypes.NodeStartTime), FromVal(dtypes.NodeStartTime(time.Now()))), - Override(CheckFDLimit, modules.CheckFdLimit(build.DefaultFDLimit)), + Override(CheckFDLimit, modules.CheckFdLimit(buildconstants.DefaultFDLimit)), Override(CheckFvmConcurrency, modules.CheckFvmConcurrency()), Override(CheckUDPBufferSize, modules.CheckUDPBufferSize(2048*1024)), From bec5965917ea21a9c17f0d9114d5cd3c84f80b06 Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Fri, 12 Jul 2024 16:54:29 -0500 Subject: [PATCH 26/27] complete the simplest deprecations --- build/buildconstants/params_shared_vals.go | 3 --- build/parameters.go | 2 -- build/params_shared_vals.go | 3 --- build/version.go | 24 +++++++--------------- itests/kit/ensemble.go | 1 - miner/miner.go | 3 ++- 6 files changed, 9 insertions(+), 27 deletions(-) diff --git a/build/buildconstants/params_shared_vals.go b/build/buildconstants/params_shared_vals.go index d2367aae6f5..0a3798099a9 100644 --- a/build/buildconstants/params_shared_vals.go +++ b/build/buildconstants/params_shared_vals.go @@ -66,8 +66,6 @@ const FilReserved = uint64(300_000_000) var InitialRewardBalance *big.Int var InitialFilReserved *big.Int -// TODO: Move other important consts here - func init() { InitialRewardBalance = big.NewInt(int64(FilAllocStorageMining)) InitialRewardBalance = InitialRewardBalance.Mul(InitialRewardBalance, big.NewInt(int64(FilecoinPrecision))) @@ -94,7 +92,6 @@ const VerifSigCacheSize = 32000 // /////// // Limits -// TODO: If this is gonna stay, it should move to specs-actors const BlockMessageLimit = 10000 var BlockGasLimit = int64(10_000_000_000) diff --git a/build/parameters.go b/build/parameters.go index 627b0442c57..31243e96fcd 100644 --- a/build/parameters.go +++ b/build/parameters.go @@ -73,8 +73,6 @@ var BlockDelaySecs = buildconstants.BlockDelaySecs // 
Deprecated: Use buildconst var PropagationDelaySecs = buildconstants.PropagationDelaySecs // Deprecated: Use buildconstants.PropagationDelaySecs instead -var EquivocationDelaySecs = buildconstants.EquivocationDelaySecs // Deprecated: Use buildconstants.EquivocationDelaySecs instead - const BootstrapPeerThreshold = buildconstants.BootstrapPeerThreshold // Deprecated: Use buildconstants.BootstrapPeerThreshold instead // ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint. diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index 6dc3935f16e..ab1bca3bb59 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -52,8 +52,6 @@ var FilReserved = buildconstants.FilReserved // Deprecated: Use buil var InitialRewardBalance *big.Int var InitialFilReserved *big.Int -// TODO: Move other important consts here - func init() { InitialRewardBalance = big.NewInt(int64(FilAllocStorageMining)) InitialRewardBalance = InitialRewardBalance.Mul(InitialRewardBalance, big.NewInt(int64(FilecoinPrecision))) @@ -72,7 +70,6 @@ var VerifSigCacheSize = buildconstants.VerifSigCacheSize // Deprecated: Use buil // /////// // Limits -// TODO: If this is gonna stay, it should move to specs-actors var BlockMessageLimit = buildconstants.BlockMessageLimit // Deprecated: Use buildconstants.BlockMessageLimit instead var BlockGasLimit = buildconstants.BlockGasLimit // Deprecated: Use buildconstants.BlockGasLimit instead diff --git a/build/version.go b/build/version.go index 59dae7fdb69..fd806fba233 100644 --- a/build/version.go +++ b/build/version.go @@ -11,31 +11,21 @@ type BuildVersion string var CurrentCommit string var BuildType = buildconstants.BuildType // Deprecated: Use buildconstants.BuildType instead -const ( - BuildDefault = buildconstants.BuildDefault // Deprecated: Use buildconstants.BuildDefault instead - BuildMainnet = buildconstants.BuildMainnet // Deprecated: Use buildconstants.BuildMainnet instead - Build2k = buildconstants.Build2k 
// Deprecated: Use buildconstants.Build2k instead - BuildDebug = buildconstants.BuildDebug // Deprecated: Use buildconstants.BuildDebug instead - BuildCalibnet = buildconstants.BuildCalibnet // Deprecated: Use buildconstants.BuildCalibnet instead - BuildInteropnet = buildconstants.BuildInteropnet // Deprecated: Use buildconstants.BuildInteropnet instead - BuildButterflynet = buildconstants.BuildButterflynet // Deprecated: Use buildconstants.BuildButterflynet instead -) - func BuildTypeString() string { switch BuildType { - case BuildDefault: + case buildconstants.BuildDefault: return "" - case BuildMainnet: + case buildconstants.BuildMainnet: return "+mainnet" - case Build2k: + case buildconstants.Build2k: return "+2k" - case BuildDebug: + case buildconstants.BuildDebug: return "+debug" - case BuildCalibnet: + case buildconstants.BuildCalibnet: return "+calibnet" - case BuildInteropnet: + case buildconstants.BuildInteropnet: return "+interopnet" - case BuildButterflynet: + case buildconstants.BuildButterflynet: return "+butterflynet" default: return "+huh?" 
diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go index 2d36f10208f..84036deb623 100644 --- a/itests/kit/ensemble.go +++ b/itests/kit/ensemble.go @@ -172,7 +172,6 @@ func NewEnsemble(t *testing.T, opts ...EnsembleOpt) *Ensemble { require.NoError(t, build.UseNetworkBundle("testing")) } - build.EquivocationDelaySecs = 0 buildconstants.EquivocationDelaySecs = 0 return n diff --git a/miner/miner.go b/miner/miner.go index 4f27c53dbcd..b18e027a28b 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -25,6 +25,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/gen" @@ -588,7 +589,7 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type // To safeguard against this, we make sure it's been EquivocationDelaySecs since our base was calculated, // then re-calculate it. // If the daemon detected equivocated blocks, those blocks will no longer be in the new base. 
- m.niceSleep(time.Until(base.ComputeTime.Add(time.Duration(build.EquivocationDelaySecs) * time.Second))) + m.niceSleep(time.Until(base.ComputeTime.Add(time.Duration(buildconstants.EquivocationDelaySecs) * time.Second))) newBase, err := m.GetBestMiningCandidate(ctx) if err != nil { err = xerrors.Errorf("failed to refresh best mining candidate: %w", err) From 3d830f03960ebe38cd28d33641c2f5dc178244de Mon Sep 17 00:00:00 2001 From: "Andrew Jackson (Ajax)" Date: Fri, 12 Jul 2024 19:52:41 -0500 Subject: [PATCH 27/27] bringing back versions --- build/version.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/build/version.go b/build/version.go index fd806fba233..5ee2bf71d49 100644 --- a/build/version.go +++ b/build/version.go @@ -9,7 +9,13 @@ import ( type BuildVersion string var CurrentCommit string -var BuildType = buildconstants.BuildType // Deprecated: Use buildconstants.BuildType instead +var BuildType = buildconstants.BuildType // Deprecated: Use buildconstants.BuildType instead +var BuildMainnet = buildconstants.BuildMainnet // Deprecated: Use buildconstants.BuildMainnet instead +var Build2k = buildconstants.Build2k // Deprecated: Use buildconstants.Build2k instead +var BuildDebug = buildconstants.BuildDebug // Deprecated: Use buildconstants.BuildDebug instead +var BuildCalibnet = buildconstants.BuildCalibnet // Deprecated: Use buildconstants.BuildCalibnet instead +var BuildInteropnet = buildconstants.BuildInteropnet // Deprecated: Use buildconstants.BuildInteropnet instead +var BuildButterflynet = buildconstants.BuildButterflynet // Deprecated: Use buildconstants.BuildButterflynet instead func BuildTypeString() string { switch BuildType {