diff --git a/core/commands/add.go b/core/commands/add.go index cc6576914..a25773c35 100644 --- a/core/commands/add.go +++ b/core/commands/add.go @@ -8,7 +8,9 @@ import ( "math/big" "os" "path" + "strconv" "strings" + "time" "github.com/bittorrent/go-btfs/chain/abi" chainconfig "github.com/bittorrent/go-btfs/chain/config" @@ -32,11 +34,30 @@ import ( // ErrDepthLimitExceeded indicates that the max depth has been exceeded. var ErrDepthLimitExceeded = fmt.Errorf("depth limit exceeded") +type TimeParts struct { + t *time.Time +} + +func (t TimeParts) MarshalJSON() ([]byte, error) { + return t.t.MarshalJSON() +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// The time is expected to be a quoted string in RFC 3339 format. +func (t *TimeParts) UnmarshalJSON(data []byte) (err error) { + // Fractional seconds are handled implicitly by Parse. + tt, err := time.Parse("\"2006-01-02T15:04:05Z\"", string(data)) + *t = TimeParts{&tt} + return +} + type AddEvent struct { Name string Hash string `json:",omitempty"` Bytes int64 `json:",omitempty"` Size string `json:",omitempty"` + Mode string `json:",omitempty"` + Mtime int64 `json:",omitempty"` } const ( @@ -61,6 +82,10 @@ const ( peerIdName = "peer-id" pinDurationCountOptionName = "pin-duration-count" uploadToBlockchainOptionName = "to-blockchain" + preserveModeOptionName = "preserve-mode" + preserveMtimeOptionName = "preserve-mtime" + modeOptionName = "mode" + mtimeOptionName = "mtime" ) const adderOutChanSize = 8 @@ -168,6 +193,10 @@ only-hash, and progress/status related flags) will change the final hash. cmds.StringOption(peerIdName, "The peer id to encrypt the file."), cmds.IntOption(pinDurationCountOptionName, "d", "Duration for which the object is pinned in days.").WithDefault(0), cmds.BoolOption(uploadToBlockchainOptionName, "add file meta to blockchain").WithDefault(false), + cmds.BoolOption(preserveModeOptionName, "Apply existing POSIX permissions to created UnixFS entries. Disables raw-leaves. (experimental)"), + cmds.BoolOption(preserveMtimeOptionName, "Apply existing POSIX modification time to created UnixFS entries. Disables raw-leaves. (experimental)"), + cmds.UintOption(modeOptionName, "Custom POSIX file mode to store in created UnixFS entries. Disables raw-leaves. (experimental)"), + cmds.Int64Option(mtimeOptionName, "Custom POSIX modification time to store in created UnixFS entries (seconds before or after the Unix Epoch). Disables raw-leaves. (experimental)"), }, PreRun: func(req *cmds.Request, env cmds.Environment) error { quiet, _ := req.Options[quietOptionName].(bool) @@ -214,6 +243,10 @@ only-hash, and progress/status related flags) will change the final hash. peerId, _ := req.Options[peerIdName].(string) pinDuration, _ := req.Options[pinDurationCountOptionName].(int) uploadToBlockchain, _ := req.Options[uploadToBlockchainOptionName].(bool) + preserveMode, _ := req.Options[preserveModeOptionName].(bool) + preserveMtime, _ := req.Options[preserveMtimeOptionName].(bool) + mode, _ := req.Options[modeOptionName].(uint) + mtime, _ := req.Options[mtimeOptionName].(int64) hashFunCode, ok := mh.Names[strings.ToLower(hashFunStr)] if !ok { @@ -250,6 +283,9 @@ only-hash, and progress/status related flags) will change the final hash. 
options.Unixfs.TokenMetadata(tokenMetadata), options.Unixfs.PinDuration(int64(pinDuration)), + + options.Unixfs.PreserveMode(preserveMode), + options.Unixfs.PreserveMtime(preserveMtime), } if cidVerSet { @@ -260,6 +296,19 @@ only-hash, and progress/status related flags) will change the final hash. opts = append(opts, options.Unixfs.RawLeaves(rawblks)) } + // Storing optional mode or mtime (UnixFS 1.5) requires root block + // to always be 'dag-pb' and not 'raw'. Below adjusts raw-leaves setting, if possible. + if preserveMode || preserveMtime || mode != 0 || mtime != 0 { + // Error if --raw-leaves flag was explicitly passed by the user. + // (let user make a decision to manually disable it and retry) + if rbset && rawblks { + return fmt.Errorf("%s can't be used with UnixFS metadata like mode or modification time", rawLeavesOptionName) + } + // No explicit preference from user, disable raw-leaves and continue + rbset = true + rawblks = false + } + if trickle { opts = append(opts, options.Unixfs.Layout(options.TrickleLayout)) } @@ -270,6 +319,13 @@ only-hash, and progress/status related flags) will change the final hash. opts = append(opts, options.Unixfs.PeerId(peerId)) } + if mode != 0 { + opts = append(opts, options.Unixfs.Mode(os.FileMode(mode))) + } + if mtime != 0 { + opts = append(opts, options.Unixfs.Mtime(mtime)) + } + opts = append(opts, nil) // events option placeholder var added int @@ -304,12 +360,19 @@ only-hash, and progress/status related flags) will change the final hash. output.Name = path.Join(addit.Name(), output.Name) } - if err := res.Emit(&AddEvent{ + addEvent := AddEvent{ Name: output.Name, Hash: h, Bytes: output.Bytes, Size: output.Size, - }); err != nil { + Mtime: output.Mtime, + } + + if output.Mode != 0 { + addEvent.Mode = "0" + strconv.FormatUint(uint64(output.Mode), 8) + } + + if err := res.Emit(&addEvent); err != nil { return err } } diff --git a/core/commands/cidstore.go b/core/commands/cidstore.go new file mode 100644 index 000000000..b4a2b853b --- /dev/null +++ b/core/commands/cidstore.go @@ -0,0 +1,235 @@ +package commands + +import ( + "fmt" + "io" + "strings" + + cmds "github.com/bittorrent/go-btfs-cmds" + "github.com/bittorrent/go-btfs/core/commands/cmdenv" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/query" +) + +const ( + SizeOptionName = "size" + batchOptionName = "batch" +) + +const ( + FilterKeyPrefix = "/gateway/filter/cid" +) + +const ( + cidSeparator = "," +) + +var CidStoreCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Manage CIDs stored on this node that should not be served by the gateway API.", + ShortDescription: "Commands to add, delete, get, check and list the filtered CIDs stored on this node.", + }, + Subcommands: map[string]*cmds.Command{ + "add": addCidCmd, + "del": delCidCmd, + "get": getCidCmd, + "has": hasCidCmd, + "list": listCidCmd, + }, + NoLocal: true, +} + +var addCidCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Add cid to store.", + }, + Options: []cmds.Option{ + cmds.BoolOption(batchOptionName, "b", "Batch add CIDs, separated by ','; all existing entries will be deleted first.").WithDefault(false), + }, + Arguments: []cmds.Argument{ + cmds.StringArg("cid", true, false, "cid to add to store"), + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + nd, err := cmdenv.GetNode(env) + if err != nil { + return err + } + + batch, _ := req.Options[batchOptionName].(bool) + if batch { + cids := strings.Split(req.Arguments[0], cidSeparator) + batch, err
:= nd.Repo.Datastore().Batch(req.Context) + if err != nil { + return cmds.EmitOnce(res, err.Error()) + } + + // check that all cids are valid; return early if any is not + err = validateCIDs(cids) + if err != nil { + return cmds.EmitOnce(res, err.Error()) + } + + // delete all existing entries first + results, err := nd.Repo.Datastore().Query(req.Context, query.Query{ + Prefix: FilterKeyPrefix, + }) + if err != nil { + return cmds.EmitOnce(res, err.Error()) + } + for v := range results.Next() { + err = batch.Delete(req.Context, datastore.NewKey(NewGatewayFilterKey(string(v.Value)))) + if err != nil { + return cmds.EmitOnce(res, err.Error()) + } + } + + for _, v := range cids { + if v == "" { + continue + } + err = batch.Put(req.Context, datastore.NewKey(NewGatewayFilterKey(v)), []byte(v)) + if err != nil { + return cmds.EmitOnce(res, err.Error()) + } + } + err = batch.Commit(req.Context) + if err != nil { + return cmds.EmitOnce(res, err.Error()) + } + return cmds.EmitOnce(res, "Add batch ok.") + } + + cid := req.Arguments[0] + err = validateCIDs([]string{cid}) + if err != nil { + return cmds.EmitOnce(res, err.Error()) + } + err = nd.Repo.Datastore().Put(req.Context, datastore.NewKey(NewGatewayFilterKey(cid)), + []byte(req.Arguments[0])) + if err != nil { + return cmds.EmitOnce(res, err.Error()) + } + return cmds.EmitOnce(res, "Add ok.") + }, +} + +var getCidCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Get cid from store.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("cid", true, false, "cid to get from store"), + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + nd, err := cmdenv.GetNode(env) + if err != nil { + return err + } + v, err := nd.Repo.Datastore().Get(req.Context, datastore.NewKey(NewGatewayFilterKey(req.Arguments[0]))) + if err != nil { + return cmds.EmitOnce(res, err.Error()) + } + return cmds.EmitOnce(res, string(v)) + }, +} + +var delCidCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Delete cid from store.", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("cid", true, false, "cid to delete from store"), + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + nd, err := cmdenv.GetNode(env) + if err != nil { + return err + } + err = nd.Repo.Datastore().Delete(req.Context, datastore.NewKey(NewGatewayFilterKey(req.Arguments[0]))) + if err != nil { + return cmds.EmitOnce(res, err.Error()) + } + return cmds.EmitOnce(res, "Del ok.") + }, +} + +var hasCidCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Check if cid exists in store", + }, + Arguments: []cmds.Argument{ + cmds.StringArg("cid", true, false, "cid to check in store"), + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + nd, err := cmdenv.GetNode(env) + if err != nil { + return err + } + exists, err := nd.Repo.Datastore().Has(req.Context, datastore.NewKey(NewGatewayFilterKey(req.Arguments[0]))) + if err != nil { + return err + } + if !exists { + return cmds.EmitOnce(res, "Cid does not exist") + } + return cmds.EmitOnce(res, "Cid exists") + }, +} + +var listCidCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "List all cids in store", + }, + Options: []cmds.Option{ + cmds.IntOption(SizeOptionName, "s", "Number of cids to return."), + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + nd, err := cmdenv.GetNode(env) + if err != nil { + return err + } + size, _ := req.Options[SizeOptionName].(int) + results, err := nd.Repo.Datastore().Query(req.Context,
query.Query{ + Prefix: FilterKeyPrefix, + Limit: size, + }) + if err != nil { + return err + } + var resStr []string + for v := range results.Next() { + resStr = append(resStr, string(v.Value)) + } + return cmds.EmitOnce(res, resStr) + }, + Encoders: cmds.EncoderMap{ + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, cids []string) error { + for _, v := range cids { + _, err := w.Write([]byte(v + "\n")) + if err != nil { + return err + } + } + return nil + }), + }, + Type: []string{}, +} + +func NewGatewayFilterKey(key string) string { + return fmt.Sprintf("%s/%s", FilterKeyPrefix, key) +} + +func validateCIDs(cids []string) error { + for _, c := range cids { + if c == "" { + continue + } + _, err := cid.Decode(c) + if err != nil { + return fmt.Errorf("Invalid CID: %s", c) + } + } + return nil +} diff --git a/core/commands/commands_test.go b/core/commands/commands_test.go index e8a899036..f27b1a3a9 100644 --- a/core/commands/commands_test.go +++ b/core/commands/commands_test.go @@ -215,6 +215,7 @@ func TestCommands(t *testing.T) { "/swarm/filters/add", "/swarm/filters/rm", "/swarm/peers", + "/swarm/resources", "/urlstore", "/urlstore/add", "/version", @@ -364,6 +365,12 @@ func TestCommands(t *testing.T) { "/dashboard/logout", "/dashboard/change", "/dashboard/validate", + "/cidstore", + "/cidstore/add", + "/cidstore/get", + "/cidstore/has", + "/cidstore/del", + "/cidstore/list", } cmdSet := make(map[string]struct{}) diff --git a/core/commands/files.go b/core/commands/files.go index 1b9768bf1..0d4a1dc47 100644 --- a/core/commands/files.go +++ b/core/commands/files.go @@ -2,13 +2,16 @@ package commands import ( "context" + "encoding/json" "errors" "fmt" "io" "os" gopath "path" "sort" + "strconv" "strings" + "time" "github.com/bittorrent/go-btfs/core" "github.com/bittorrent/go-btfs/core/commands/cmdenv" @@ -101,6 +104,42 @@ type statOutput struct { WithLocality bool `json:",omitempty"` Local bool `json:",omitempty"` SizeLocal uint64 `json:",omitempty"` + Mode uint32 `json:",omitempty"` + Mtime int64 `json:",omitempty"` +} + +func (s *statOutput) MarshalJSON() ([]byte, error) { + type so statOutput + out := &struct { + *so + Mode string `json:",omitempty"` + }{so: (*so)(s)} + + if s.Mode != 0 { + out.Mode = fmt.Sprintf("%04o", s.Mode) + } + return json.Marshal(out) +} + +func (s *statOutput) UnmarshalJSON(data []byte) error { + var err error + type so statOutput + tmp := &struct { + *so + Mode string `json:",omitempty"` + }{so: (*so)(s)} + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + if tmp.Mode != "" { + mode, err := strconv.ParseUint(tmp.Mode, 8, 32) + if err == nil { + s.Mode = uint32(mode) + } + } + return err } const ( @@ -108,10 +147,13 @@ <hash> Size: <size> CumulativeSize: <cumulsize> ChildBlocks: <childs> -Type: <type>` +Type: <type> +Mode: <mode> (<mode-octal>) +Mtime: <mtime>` filesFormatOptionName = "format" filesSizeOptionName = "size" filesWithLocalOptionName = "with-local" + filesStatUnspecified = "not set" ) var filesStatCmd = &cmds.Command{ @@ -196,12 +238,24 @@ var filesStatCmd = &cmds.Command{ }, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *statOutput) error { + mode, modeo := filesStatUnspecified, filesStatUnspecified + if out.Mode != 0 { + mode = strings.ToLower(os.FileMode(out.Mode).String()) + modeo = "0" + strconv.FormatInt(int64(out.Mode&0x1FF), 8) + } + mtime := filesStatUnspecified + if out.Mtime > 0 { + mtime = time.Unix(out.Mtime, 0).UTC().Format("2 Jan 2006, 15:04:05 MST") + } s, _ := statGetFormatOptions(req) s =
strings.Replace(s, "<hash>", out.Hash, -1) s = strings.Replace(s, "<size>", fmt.Sprintf("%d", out.Size), -1) s = strings.Replace(s, "<cumulsize>", fmt.Sprintf("%d", out.CumulativeSize), -1) s = strings.Replace(s, "<childs>", fmt.Sprintf("%d", out.Blocks), -1) s = strings.Replace(s, "<type>", out.Type, -1) + s = strings.Replace(s, "<mode>", mode, -1) + s = strings.Replace(s, "<mode-octal>", modeo, -1) + s = strings.Replace(s, "<mtime>", mtime, -1) fmt.Fprintln(w, s) @@ -267,12 +321,21 @@ func statNode(nd ipld.Node, enc cidenc.Encoder) (*statOutput, error) { return nil, fmt.Errorf("unrecognized node type: %s", d.Type()) } + var mode uint32 + if m := d.Mode(); m != 0 { + mode = uint32(m) + } else if d.Type() == ft.TSymlink { + mode = uint32(os.ModeSymlink | 0x1FF) + } + return &statOutput{ Hash: enc.Encode(c), Blocks: len(nd.Links()), Size: d.FileSize(), CumulativeSize: cumulsize, Type: ndtype, + Mode: mode, + Mtime: d.ModTime().Unix(), }, nil case *dag.RawNode: return &statOutput{ diff --git a/core/commands/ls.go b/core/commands/ls.go index 7ad1c675d..6afba970a 100644 --- a/core/commands/ls.go +++ b/core/commands/ls.go @@ -6,6 +6,7 @@ import ( "os" "sort" "text/tabwriter" + "time" cmdenv "github.com/bittorrent/go-btfs/core/commands/cmdenv" @@ -23,6 +24,8 @@ type LsLink struct { Size uint64 Type unixfs_pb.Data_DataType Target string + Mode os.FileMode + Mtime time.Time } // LsObject is an element of LsOutput @@ -43,6 +46,8 @@ const ( lsResolveTypeOptionName = "resolve-type" lsSizeOptionName = "size" lsStreamOptionName = "stream" + lsMTimeOptionName = "mtime" + lsModeOptionName = "mode" ) var LsCmd = &cmds.Command{ @@ -66,6 +71,8 @@ The JSON output contains type information. cmds.BoolOption(lsResolveTypeOptionName, "Resolve linked objects to find out their types.").WithDefault(true), cmds.BoolOption(lsSizeOptionName, "Resolve linked objects to find out their file size.").WithDefault(true), cmds.BoolOption(lsStreamOptionName, "s", "Enable experimental streaming of directory entries as they are traversed."), + cmds.BoolOption(lsMTimeOptionName, "t", "Print modification time."), + cmds.BoolOption(lsModeOptionName, "m", "Print mode."), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { api, err := cmdenv.GetApi(env, req) @@ -158,6 +165,8 @@ The JSON output contains type information.
Size: link.Size, Type: ftype, Target: link.Target, + Mode: link.Mode, + Mtime: link.ModTime, } if err := processLink(paths[i], lsLink); err != nil { return err @@ -202,6 +211,8 @@ func tabularOutput(req *cmds.Request, w io.Writer, out *LsOutput, lastObjectHash headers, _ := req.Options[lsHeadersOptionNameTime].(bool) stream, _ := req.Options[lsStreamOptionName].(bool) size, _ := req.Options[lsSizeOptionName].(bool) + mtime, _ := req.Options[lsMTimeOptionName].(bool) + mode, _ := req.Options[lsModeOptionName].(bool) // in streaming mode we can't automatically align the tabs // so we take a best guess var minTabWidth int @@ -229,6 +240,10 @@ func tabularOutput(req *cmds.Request, w io.Writer, out *LsOutput, lastObjectHash if size { s = "Hash\tSize\tName" } + + s = buildHeader(mode, "Mode", s) + s = buildHeader(mtime, "Mtime", s) + fmt.Fprintln(tw, s) } lastObjectHash = object.Hash @@ -239,21 +254,50 @@ func tabularOutput(req *cmds.Request, w io.Writer, out *LsOutput, lastObjectHash switch link.Type { case unixfs.TDirectory, unixfs.THAMTShard, unixfs.TMetadata: if size { - s = "%[1]s\t-\t%[3]s/\n" + s = "%[1]s\t-\t%[3]s/" } else { - s = "%[1]s\t%[3]s/\n" + s = "%[1]s\t%[3]s/" } + s = buildString(mode, s, 4) + s = buildString(mtime, s, 5) + s = s + "\n" default: if size { - s = "%s\t%v\t%s\n" + s = "%[1]s\t%[2]v\t%[3]s" } else { - s = "%[1]s\t%[3]s\n" + s = "%[1]s\t%[3]s" } + s = buildString(mode, s, 4) + s = buildString(mtime, s, 5) + s = s + "\n" } - fmt.Fprintf(tw, s, link.Hash, link.Size, link.Name) + modeS := "-" + mtimeS := "-" + + if link.Mode != 0 { + modeS = link.Mode.String() + } + if link.Mtime.Unix() != 0 { + mtimeS = link.Mtime.Format("2 Jan 2006, 15:04:05 MST") + } + fmt.Fprintf(tw, s, link.Hash, link.Size, link.Name, modeS, mtimeS) } } tw.Flush() return lastObjectHash } + +func buildString(set bool, s string, index int) string { + if set { + return fmt.Sprintf("%s\t%%[%d]s", s, index) + } + return s +} + +func buildHeader(set bool, name, s string) string { + if set { + return s + "\t" + name + } + return s +} diff --git a/core/commands/root.go b/core/commands/root.go index 40192145c..7170b8ec0 100644 --- a/core/commands/root.go +++ b/core/commands/root.go @@ -183,6 +183,7 @@ var rootSubcommands = map[string]*cmds.Command{ "encrypt": encryptCmd, "decrypt": decryptCmd, "dashboard": dashboardCmd, + "cidstore": CidStoreCmd, } // RootRO is the readonly version of Root diff --git a/core/commands/swarm.go b/core/commands/swarm.go index 2454c4dbf..e18e3334a 100644 --- a/core/commands/swarm.go +++ b/core/commands/swarm.go @@ -2,20 +2,25 @@ package commands import ( "context" + "encoding/json" "errors" "fmt" "io" "path" "sort" + "strconv" "strings" "sync" + "text/tabwriter" "time" "github.com/bittorrent/go-btfs/bindata" commands "github.com/bittorrent/go-btfs/commands" cmdenv "github.com/bittorrent/go-btfs/core/commands/cmdenv" + "github.com/bittorrent/go-btfs/core/node/libp2p" repo "github.com/bittorrent/go-btfs/repo" fsrepo "github.com/bittorrent/go-btfs/repo/fsrepo" + rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" cmds "github.com/bittorrent/go-btfs-cmds" config "github.com/bittorrent/go-btfs-config" @@ -53,6 +58,7 @@ btfs peers in the internet. "disconnect": swarmDisconnectCmd, "filters": swarmFiltersCmd, "peers": swarmPeersCmd, + "resources": swarmResourceCmd, }, } @@ -468,6 +474,90 @@ it will reconnect. 
Type: stringList{}, } +var swarmResourceCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Get a summary of all resources accounted for by the libp2p Resource Manager.", + LongDescription: ` +Get a summary of all resources accounted for by the libp2p Resource Manager. +This includes the limits and the usage against those limits. +This can output a human readable table and JSON encoding. +`, + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + node, err := cmdenv.GetNode(env) + if err != nil { + return err + } + + cfg, err := node.Repo.Config() + + if err != nil { + return err + } + + if node.ResourceManager == nil { + return libp2p.ErrNoResourceMgr + } + + userResourceOverrides, err := node.Repo.UserResourceOverrides() + if err != nil { + return err + } + + // FIXME: we shouldn't recompute limits, either save them or load them from libp2p (https://github.com/libp2p/go-libp2p/issues/2166) + limitConfig, err := libp2p.LimitConfig(cfg.Swarm, userResourceOverrides) + if err != nil { + return err + } + + rapi, ok := node.ResourceManager.(rcmgr.ResourceManagerState) + if !ok { // NullResourceManager + return libp2p.ErrNoResourceMgr + } + + return cmds.EmitOnce(res, libp2p.MergeLimitsAndStatsIntoLimitsConfigAndUsage(limitConfig, rapi.Stat())) + }, + Encoders: cmds.EncoderMap{ + cmds.JSON: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, limitsAndUsage libp2p.LimitsConfigAndUsage) error { + return json.NewEncoder(w).Encode(limitsAndUsage) + }), + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, limitsAndUsage libp2p.LimitsConfigAndUsage) error { + tw := tabwriter.NewWriter(w, 20, 8, 0, '\t', 0) + defer tw.Flush() + + fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\t\n", "Scope", "Limit Name", "Limit Value", "Limit Usage Amount", "Limit Usage Percent") + for _, ri := range libp2p.LimitConfigsToInfo(limitsAndUsage) { + var limit, percentage string + switch ri.LimitValue { + case rcmgr.Unlimited64: + limit = "unlimited" + percentage = "n/a" + case rcmgr.BlockAllLimit64: + limit = "blockAll" + percentage = "n/a" + default: + limit = strconv.FormatInt(int64(ri.LimitValue), 10) + if ri.CurrentUsage == 0 { + percentage = "0%" + } else { + percentage = strconv.FormatFloat(float64(ri.CurrentUsage)/float64(ri.LimitValue)*100, 'f', 1, 64) + "%" + } + } + fmt.Fprintf(tw, "%s\t%s\t%s\t%d\t%s\t\n", + ri.ScopeName, + ri.LimitName, + limit, + ri.CurrentUsage, + percentage, + ) + } + + return nil + }), + }, + Type: libp2p.LimitsConfigAndUsage{}, +} + // parseAddresses is a function that takes in a slice of string peer addresses // (multiaddr + peerid) and returns a slice of properly constructed peers func parseAddresses(ctx context.Context, addrs []string) ([]peer.AddrInfo, error) { diff --git a/core/core.go b/core/core.go index fce6c68c3..6a19dfbed 100644 --- a/core/core.go +++ b/core/core.go @@ -15,6 +15,7 @@ import ( "github.com/bittorrent/go-btfs/peering" irouting "github.com/bittorrent/go-btfs/routing" + "github.com/libp2p/go-libp2p/core/network" "github.com/bittorrent/go-btfs/core/bootstrap" "github.com/bittorrent/go-btfs/core/node" @@ -97,6 +98,8 @@ type IpfsNode struct { IpnsRepub *ipnsrp.Republisher `optional:"true"` GraphExchange graphsync.GraphExchange `optional:"true"` + ResourceManager network.ResourceManager `optional:"true"` + PubSub *pubsub.PubSub `optional:"true"` PSRouter *psrouter.PubsubValueStore `optional:"true"` DHT *ddht.DHT `optional:"true"` diff --git a/core/coreapi/unixfs.go b/core/coreapi/unixfs.go index 
5c201c4df..845f08ab9 100644 --- a/core/coreapi/unixfs.go +++ b/core/coreapi/unixfs.go @@ -53,7 +53,7 @@ func getOrCreateNilNode() (*core.IpfsNode, error) { return } node, err := core.NewNode(context.Background(), &core.BuildCfg{ - //TODO: need this to be true or all files + // TODO: need this to be true or all files // hashed will be stored in memory! NilRepo: true, }) @@ -82,10 +82,10 @@ func (api *UnixfsAPI) Add(ctx context.Context, filesNode files.Node, opts ...opt // check if repo will exceed storage limit if added // TODO: this doesn't handle the case if the hashed file is already in blocks (deduplicated) // TODO: conditional GC is disabled due to it is somehow not possible to pass the size to the daemon - //if err := corerepo.ConditionalGC(req.Context(), n, uint64(size)); err != nil { + // if err := corerepo.ConditionalGC(req.Context(), n, uint64(size)); err != nil { // res.SetError(err, cmds.ErrNormal) // return - //} + // } if settings.NoCopy && !(cfg.Experimental.FilestoreEnabled || cfg.Experimental.UrlstoreEnabled) { return nil, fmt.Errorf(`either the filestore or the urlstore must be enabled to use nocopy @@ -153,6 +153,11 @@ func (api *UnixfsAPI) Add(ctx context.Context, filesNode files.Node, opts ...opt fileAdder.NoCopy = settings.NoCopy fileAdder.CidBuilder = prefix + fileAdder.PreserveMode = settings.PreserveMode + fileAdder.PreserveMtime = settings.PreserveMtime + fileAdder.FileMode = settings.Mode + fileAdder.FileMtime = settings.Mtime + switch settings.Layout { case options.BalancedLayout: // Default @@ -671,6 +676,8 @@ func (api *UnixfsAPI) processLink(ctx context.Context, linkres ft.LinkResult, se lnk.Target = string(d.Data()) } lnk.Size = d.FileSize() + lnk.Mode = d.Mode() + lnk.ModTime = d.ModTime() } } @@ -684,7 +691,7 @@ func (api *UnixfsAPI) lsFromLinksAsync(ctx context.Context, dir uio.Directory, s defer close(out) for l := range dir.EnumLinksAsync(ctx) { select { - case out <- api.processLink(ctx, l, settings): //TODO: perf: processing can be done in background and in parallel + case out <- api.processLink(ctx, l, settings): // TODO: perf: processing can be done in background and in parallel case <-ctx.Done(): return } @@ -699,7 +706,7 @@ func (api *UnixfsAPI) lsFromLinks(ctx context.Context, ndlinks []*ipld.Link, set for _, l := range ndlinks { lr := ft.LinkResult{Link: &ipld.Link{Name: l.Name, Size: l.Size, Cid: l.Cid}} - links <- api.processLink(ctx, lr, settings) //TODO: can be parallel if settings.Async + links <- api.processLink(ctx, lr, settings) // TODO: can be parallel if settings.Async } close(links) return links, nil diff --git a/core/corehttp/corehttp.go b/core/corehttp/corehttp.go index 5b1dae616..9af5c7f82 100644 --- a/core/corehttp/corehttp.go +++ b/core/corehttp/corehttp.go @@ -6,6 +6,7 @@ package corehttp import ( "context" + "errors" "fmt" "net" "net/http" @@ -53,16 +54,17 @@ func makeHandler(n *core.IpfsNode, l net.Listener, options ...ServeOption) (http } err := interceptorBeforeReq(r, n) - if err != nil { - // set allow origin - w.Header().Set("Access-Control-Allow-Origin", "*") - if r.Method == http.MethodOptions { - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Access-Control-Allow-Headers", "Content-Type, X-Stream-Output, X-Chunked-Output, X-Content-Length") + + if errors.Is(err, ErrGatewayCidExits) { + http.Error(w, "", http.StatusNotFound) + return + } + + if errors.Is(err, ErrNotLogin) || errors.Is(err, ErrInvalidToken) || errors.Is(err, ErrTwoStepCheckErr) { + if r.Method != http.MethodOptions { + http.Error(w, 
err.Error(), http.StatusUnauthorized) return } - http.Error(w, err.Error(), http.StatusUnauthorized) - return } topMux.ServeHTTP(w, r) diff --git a/core/corehttp/corehttp_interceptor.go b/core/corehttp/corehttp_interceptor.go index 908f8ffe0..002d07aaa 100644 --- a/core/corehttp/corehttp_interceptor.go +++ b/core/corehttp/corehttp_interceptor.go @@ -17,6 +17,13 @@ const defaultTwoStepDuration = 30 * time.Minute const firstStepUrl = "dashboard/validate" +var ( + ErrNotLogin = errors.New("please login") + ErrInvalidToken = errors.New("invalid token") + ErrTwoStepCheckErr = errors.New("please validate your password first") + ErrGatewayCidExits = errors.New("cid exits") +) + func interceptorBeforeReq(r *http.Request, n *core.IpfsNode) error { config, err := n.Repo.Config() if err != nil { @@ -24,15 +31,24 @@ func interceptorBeforeReq(r *http.Request, n *core.IpfsNode) error { } if config.API.EnableTokenAuth { - err := tokenCheckInterceptor(r, n) + err = tokenCheckInterceptor(r, n) + if err != nil { + return err + } + + err = twoStepCheckInterceptor(r) if err != nil { return err } } - err = twoStepCheckInterceptor(r) - if err != nil { - return err + exits, err := gatewayCidInterceptor(r, n) + if err != nil || !exits { + return nil + } + + if exits { + return ErrGatewayCidExits } return nil @@ -46,7 +62,7 @@ func twoStepCheckInterceptor(r *http.Request) error { return nil } - return errors.New("please validate your password first") + return ErrTwoStepCheckErr } func interceptorAfterResp(r *http.Request, w http.ResponseWriter, n *core.IpfsNode) error { @@ -63,11 +79,11 @@ func tokenCheckInterceptor(r *http.Request, n *core.IpfsNode) error { return err } apiHost := fmt.Sprint(strings.Split(conf.Addresses.API[0], "/")[2], ":", strings.Split(conf.Addresses.API[0], "/")[4]) - if filterNoNeedTokenCheckReq(r, apiHost) { + if filterNoNeedTokenCheckReq(r, apiHost, conf.Identity.PeerID) { return nil } if !commands.IsLogin { - return fmt.Errorf("please login") + return ErrNotLogin } args := r.URL.Query() token := args.Get("token") @@ -77,17 +93,31 @@ func tokenCheckInterceptor(r *http.Request, n *core.IpfsNode) error { } claims, err := utils.VerifyToken(token, string(password)) if err != nil { - return err + return ErrInvalidToken } + if claims.PeerId != n.Identity.String() { - return fmt.Errorf("token is invalid") + return ErrInvalidToken } return nil } -func filterNoNeedTokenCheckReq(r *http.Request, apiHost string) bool { - if filterUrl(r) || filterP2pSchema(r) || filterLocalShellApi(r, apiHost) || filterGatewayUrl(r) { +func gatewayCidInterceptor(r *http.Request, n *core.IpfsNode) (bool, error) { + if filterGatewayUrl(r) { + sPath := strings.Split(r.URL.Path, "/") + if len(sPath) < 3 { + return false, nil + } + key := strings.Split(r.URL.Path, "/")[2] + exits, err := n.Repo.Datastore().Has(r.Context(), ds.NewKey(commands.NewGatewayFilterKey(key))) + return exits, err + } + return false, nil +} + +func filterNoNeedTokenCheckReq(r *http.Request, apiHost string, peerId string) bool { + if filterUrl(r) || filterP2pSchema(r, peerId) || filterLocalShellApi(r, apiHost) || filterGatewayUrl(r) { return true } return false @@ -134,10 +164,13 @@ func filterLocalShellApi(r *http.Request, apiHost string) bool { return false } -func filterP2pSchema(r *http.Request) bool { +func filterP2pSchema(r *http.Request, peerId string) bool { if r.URL.Scheme == "libp2p" { return true } + if r.Host == peerId { + return true + } return false } diff --git a/core/corehttp/webui.go b/core/corehttp/webui.go index 
ac784567d..f12e9b4ee 100644 --- a/core/corehttp/webui.go +++ b/core/corehttp/webui.go @@ -1,10 +1,11 @@ package corehttp -const WebUIPath = "/btfs/QmXVrEfPAHqSSZp1hSMFh933WdxSzvbinSsYgV5bSWg5AE" // v3.1.0 +const WebUIPath = "/btfs/QmNytdpG1FstSmR7eo547A8o9EdF7cFe1tcExunu8uEgDc" // v3.2.0 // this is a list of all past webUI paths. var WebUIPaths = []string{ WebUIPath, + "/btfs/QmXVrEfPAHqSSZp1hSMFh933WdxSzvbinSsYgV5bSWg5AE", // v3.1.0 "/btfs/QmRwt94fr6b3phJThuDMjZUXr6nHxxG61znN3PCukNAxyi", // v3.0.0 "/btfs/QmSTcj1pWk972bdtStQtJeu3yCYo1SDShrbEHaEybciTLR", // v2.3.5 "/btfs/QmPFT7PscyJ1FZ4FeFPFgikszBugQSBFNycPpy5zpK2pZe", // v2.3.3 diff --git a/core/coreunix/add.go b/core/coreunix/add.go index 8cfdb44c1..cf53fb1fc 100644 --- a/core/coreunix/add.go +++ b/core/coreunix/add.go @@ -6,8 +6,10 @@ import ( "errors" "fmt" "io" + "os" gopath "path" "strconv" + "time" chunker "github.com/bittorrent/go-btfs-chunker" files "github.com/bittorrent/go-btfs-files" @@ -94,6 +96,11 @@ type Adder struct { liveNodes uint64 TokenMetadata string PinDuration int64 + + PreserveMtime bool + PreserveMode bool + FileMode os.FileMode + FileMtime time.Time } func (adder *Adder) GcLocker() bstore.GCLocker { @@ -176,6 +183,9 @@ func (adder *Adder) add(reader io.Reader, dirTreeBytes []byte) (ipld.Node, error CidBuilder: adder.CidBuilder, TokenMetadata: metaBytes, ChunkSize: chunkSize, + + FileMode: adder.FileMode, + FileModTime: adder.FileMtime, } db, err := params.New(chnk) @@ -464,6 +474,14 @@ func (adder *Adder) addFileNode(ctx context.Context, path string, file files.Nod return err } + if adder.PreserveMtime { + adder.FileMtime = file.ModTime() + } + + if adder.PreserveMode { + adder.FileMode = file.Mode() + } + if adder.liveNodes >= liveCacheSize { // TODO: A smarter cache that uses some sort of lru cache with an eviction handler mr, err := adder.mfsRoot() diff --git a/core/coreunix/reed_solomon_add.go b/core/coreunix/reed_solomon_add.go index 0fca75ad9..6acb9d7fd 100644 --- a/core/coreunix/reed_solomon_add.go +++ b/core/coreunix/reed_solomon_add.go @@ -6,7 +6,9 @@ import ( "errors" "io" "io/ioutil" + "os" gopath "path" + "time" "container/list" "encoding/json" @@ -134,6 +136,14 @@ func (rsadder *ReedSolomonAdder) addFileNode(ctx context.Context, path string, f return nil, err } + if rsadder.FileMtime.Unix() != 0 { + rsadder.FileMtime = time.Now() + } + + if rsadder.FileMode != os.FileMode(0) { + rsadder.FileMode = file.Mode() + } + if rsadder.liveNodes >= liveCacheSize { // TODO: flush free memory rsadder.mfsRoot()'s FlushMemFree() to flush free memory log.Info("rsadder liveNodes >= liveCacheSize, needs flushing") @@ -162,7 +172,7 @@ func (rsadder *ReedSolomonAdder) addDir(ctx context.Context, path string, dir fi NodePath: path, NodeName: dstName}, // Kept the following line for possible use cases than `btfs get` - //Directory: dir, + // Directory: dir, } it := dir.Entries() @@ -195,7 +205,7 @@ func (rsadder *ReedSolomonAdder) addSymlink(path string, l *files.Symlink) (uio. NodeName: dstName}, Data: l.Target, // Kept the following line for the same reason as DirNode. - //Symlink: *l, + // Symlink: *l, }, nil } @@ -251,7 +261,7 @@ func (rsadder *ReedSolomonAdder) addFile(path string, file files.File) (uio.Node StartOffset: currentOffset, }, // Kept the following line for the same reason as DirNode. 
- //File: file, + // File: file, } return node, nil } diff --git a/core/node/libp2p/rcmgr.go b/core/node/libp2p/rcmgr.go index 721d5ac46..75c1aa966 100644 --- a/core/node/libp2p/rcmgr.go +++ b/core/node/libp2p/rcmgr.go @@ -2,6 +2,7 @@ package libp2p import ( "context" + "encoding/json" "errors" "fmt" "os" @@ -510,3 +511,276 @@ func NetLimit(mgr network.ResourceManager, scope string) (rcmgr.BaseLimit, error // return nil // } + +func LimitConfig(cfg config.SwarmConfig, userResourceOverrides rcmgr.PartialLimitConfig) (limitConfig rcmgr.ConcreteLimitConfig, err error) { + limitConfig, err = createDefaultLimitConfig(cfg) + if err != nil { + return rcmgr.ConcreteLimitConfig{}, err + } + + // The logic for defaults and overriding with specified userResourceOverrides + // is documented in docs/libp2p-resource-management.md. + // Any changes here should be reflected there. + + // This effectively overrides the computed default LimitConfig with any non-"useDefault" values from the userResourceOverrides file. + // Because of how how Build works, any rcmgr.Default value in userResourceOverrides + // will be overridden with a computed default value. + limitConfig = userResourceOverrides.Build(limitConfig) + + return limitConfig, nil +} + +type ResourceLimitsAndUsage struct { + // This is duplicated from rcmgr.ResourceResourceLimits but adding *Usage fields. + Memory rcmgr.LimitVal64 + MemoryUsage int64 + FD rcmgr.LimitVal + FDUsage int + Conns rcmgr.LimitVal + ConnsUsage int + ConnsInbound rcmgr.LimitVal + ConnsInboundUsage int + ConnsOutbound rcmgr.LimitVal + ConnsOutboundUsage int + Streams rcmgr.LimitVal + StreamsUsage int + StreamsInbound rcmgr.LimitVal + StreamsInboundUsage int + StreamsOutbound rcmgr.LimitVal + StreamsOutboundUsage int +} + +func (u ResourceLimitsAndUsage) ToResourceLimits() rcmgr.ResourceLimits { + return rcmgr.ResourceLimits{ + Memory: u.Memory, + FD: u.FD, + Conns: u.Conns, + ConnsInbound: u.ConnsInbound, + ConnsOutbound: u.ConnsOutbound, + Streams: u.Streams, + StreamsInbound: u.StreamsInbound, + StreamsOutbound: u.StreamsOutbound, + } +} + +type LimitsConfigAndUsage struct { + // This is duplicated from rcmgr.ResourceManagerStat but using ResourceLimitsAndUsage + // instead of network.ScopeStat. 
+ System ResourceLimitsAndUsage `json:",omitempty"` + Transient ResourceLimitsAndUsage `json:",omitempty"` + Services map[string]ResourceLimitsAndUsage `json:",omitempty"` + Protocols map[protocol.ID]ResourceLimitsAndUsage `json:",omitempty"` + Peers map[peer.ID]ResourceLimitsAndUsage `json:",omitempty"` +} + +func (u LimitsConfigAndUsage) MarshalJSON() ([]byte, error) { + // we want to marshal the encoded peer id + encodedPeerMap := make(map[string]ResourceLimitsAndUsage, len(u.Peers)) + for p, v := range u.Peers { + encodedPeerMap[p.String()] = v + } + + type Alias LimitsConfigAndUsage + return json.Marshal(&struct { + *Alias + Peers map[string]ResourceLimitsAndUsage `json:",omitempty"` + }{ + Alias: (*Alias)(&u), + Peers: encodedPeerMap, + }) +} + +func (u LimitsConfigAndUsage) ToPartialLimitConfig() (result rcmgr.PartialLimitConfig) { + result.System = u.System.ToResourceLimits() + result.Transient = u.Transient.ToResourceLimits() + + result.Service = make(map[string]rcmgr.ResourceLimits, len(u.Services)) + for s, l := range u.Services { + result.Service[s] = l.ToResourceLimits() + } + result.Protocol = make(map[protocol.ID]rcmgr.ResourceLimits, len(u.Protocols)) + for p, l := range u.Protocols { + result.Protocol[p] = l.ToResourceLimits() + } + result.Peer = make(map[peer.ID]rcmgr.ResourceLimits, len(u.Peers)) + for p, l := range u.Peers { + result.Peer[p] = l.ToResourceLimits() + } + + return +} + +func MergeLimitsAndStatsIntoLimitsConfigAndUsage(l rcmgr.ConcreteLimitConfig, stats rcmgr.ResourceManagerStat) LimitsConfigAndUsage { + limits := l.ToPartialLimitConfig() + + return LimitsConfigAndUsage{ + System: mergeResourceLimitsAndScopeStatToResourceLimitsAndUsage(limits.System, stats.System), + Transient: mergeResourceLimitsAndScopeStatToResourceLimitsAndUsage(limits.Transient, stats.Transient), + Services: mergeLimitsAndStatsMapIntoLimitsConfigAndUsageMap(limits.Service, stats.Services), + Protocols: mergeLimitsAndStatsMapIntoLimitsConfigAndUsageMap(limits.Protocol, stats.Protocols), + Peers: mergeLimitsAndStatsMapIntoLimitsConfigAndUsageMap(limits.Peer, stats.Peers), + } +} + +func mergeResourceLimitsAndScopeStatToResourceLimitsAndUsage(rl rcmgr.ResourceLimits, ss network.ScopeStat) ResourceLimitsAndUsage { + return ResourceLimitsAndUsage{ + Memory: rl.Memory, + MemoryUsage: ss.Memory, + FD: rl.FD, + FDUsage: ss.NumFD, + Conns: rl.Conns, + ConnsUsage: ss.NumConnsOutbound + ss.NumConnsInbound, + ConnsOutbound: rl.ConnsOutbound, + ConnsOutboundUsage: ss.NumConnsOutbound, + ConnsInbound: rl.ConnsInbound, + ConnsInboundUsage: ss.NumConnsInbound, + Streams: rl.Streams, + StreamsUsage: ss.NumStreamsOutbound + ss.NumStreamsInbound, + StreamsOutbound: rl.StreamsOutbound, + StreamsOutboundUsage: ss.NumStreamsOutbound, + StreamsInbound: rl.StreamsInbound, + StreamsInboundUsage: ss.NumStreamsInbound, + } +} + +func mergeLimitsAndStatsMapIntoLimitsConfigAndUsageMap[K comparable](limits map[K]rcmgr.ResourceLimits, stats map[K]network.ScopeStat) map[K]ResourceLimitsAndUsage { + r := make(map[K]ResourceLimitsAndUsage, maxInt(len(limits), len(stats))) + for p, s := range stats { + var l rcmgr.ResourceLimits + if limits != nil { + if rl, ok := limits[p]; ok { + l = rl + } + } + r[p] = mergeResourceLimitsAndScopeStatToResourceLimitsAndUsage(l, s) + } + for p, s := range limits { + if _, ok := stats[p]; ok { + continue // we already processed this element in the loop above + } + + r[p] = mergeResourceLimitsAndScopeStatToResourceLimitsAndUsage(s, network.ScopeStat{}) + } + return r +} + +func 
maxInt(x, y int) int { + if x > y { + return x + } + return y +} + +// LimitConfigsToInfo gets limits and stats and generates a list of scopes and limits to be printed. +func LimitConfigsToInfo(stats LimitsConfigAndUsage) ResourceInfos { + result := ResourceInfos{} + + result = append(result, resourceLimitsAndUsageToResourceInfo(config.ResourceMgrSystemScope, stats.System)...) + result = append(result, resourceLimitsAndUsageToResourceInfo(config.ResourceMgrTransientScope, stats.Transient)...) + + for i, s := range stats.Services { + result = append(result, resourceLimitsAndUsageToResourceInfo( + config.ResourceMgrServiceScopePrefix+i, + s, + )...) + } + + for i, p := range stats.Protocols { + result = append(result, resourceLimitsAndUsageToResourceInfo( + config.ResourceMgrProtocolScopePrefix+string(i), + p, + )...) + } + + for i, p := range stats.Peers { + result = append(result, resourceLimitsAndUsageToResourceInfo( + config.ResourceMgrPeerScopePrefix+i.String(), + p, + )...) + } + + return result +} + +type ResourceInfo struct { + ScopeName string + LimitName string + LimitValue rcmgr.LimitVal64 + CurrentUsage int64 +} + +type ResourceInfos []ResourceInfo + +const ( + limitNameMemory = "Memory" + limitNameFD = "FD" + limitNameConns = "Conns" + limitNameConnsInbound = "ConnsInbound" + limitNameConnsOutbound = "ConnsOutbound" + limitNameStreams = "Streams" + limitNameStreamsInbound = "StreamsInbound" + limitNameStreamsOutbound = "StreamsOutbound" +) + +var limits = []string{ + limitNameMemory, + limitNameFD, + limitNameConns, + limitNameConnsInbound, + limitNameConnsOutbound, + limitNameStreams, + limitNameStreamsInbound, + limitNameStreamsOutbound, +} + +func resourceLimitsAndUsageToResourceInfo(scopeName string, stats ResourceLimitsAndUsage) ResourceInfos { + result := ResourceInfos{} + for _, l := range limits { + ri := ResourceInfo{ + ScopeName: scopeName, + } + switch l { + case limitNameMemory: + ri.LimitName = limitNameMemory + ri.LimitValue = stats.Memory + ri.CurrentUsage = stats.MemoryUsage + case limitNameFD: + ri.LimitName = limitNameFD + ri.LimitValue = rcmgr.LimitVal64(stats.FD) + ri.CurrentUsage = int64(stats.FDUsage) + case limitNameConns: + ri.LimitName = limitNameConns + ri.LimitValue = rcmgr.LimitVal64(stats.Conns) + ri.CurrentUsage = int64(stats.ConnsUsage) + case limitNameConnsInbound: + ri.LimitName = limitNameConnsInbound + ri.LimitValue = rcmgr.LimitVal64(stats.ConnsInbound) + ri.CurrentUsage = int64(stats.ConnsInboundUsage) + case limitNameConnsOutbound: + ri.LimitName = limitNameConnsOutbound + ri.LimitValue = rcmgr.LimitVal64(stats.ConnsOutbound) + ri.CurrentUsage = int64(stats.ConnsOutboundUsage) + case limitNameStreams: + ri.LimitName = limitNameStreams + ri.LimitValue = rcmgr.LimitVal64(stats.Streams) + ri.CurrentUsage = int64(stats.StreamsUsage) + case limitNameStreamsInbound: + ri.LimitName = limitNameStreamsInbound + ri.LimitValue = rcmgr.LimitVal64(stats.StreamsInbound) + ri.CurrentUsage = int64(stats.StreamsInboundUsage) + case limitNameStreamsOutbound: + ri.LimitName = limitNameStreamsOutbound + ri.LimitValue = rcmgr.LimitVal64(stats.StreamsOutbound) + ri.CurrentUsage = int64(stats.StreamsOutboundUsage) + } + + if ri.LimitValue == rcmgr.Unlimited64 || ri.LimitValue == rcmgr.DefaultLimit64 { + // ignore unlimited and unset limits to remove noise from output. 
+ continue + } + + result = append(result, ri) + } + + return result +} diff --git a/go.mod b/go.mod index e57aa9916..6a1da1ec3 100644 --- a/go.mod +++ b/go.mod @@ -10,14 +10,14 @@ require ( github.com/bittorrent/go-btfs-chunker v0.4.0 github.com/bittorrent/go-btfs-cmds v0.3.0 github.com/bittorrent/go-btfs-common v0.9.1-0.20240823025041-824b78e1c643 - github.com/bittorrent/go-btfs-config v0.13.3-0.20240822075319-c9a0e978f0b2 - github.com/bittorrent/go-btfs-files v0.3.1 + github.com/bittorrent/go-btfs-config v0.13.3 + github.com/bittorrent/go-btfs-files v0.3.2 github.com/bittorrent/go-btns v0.2.0 github.com/bittorrent/go-common/v2 v2.4.0 github.com/bittorrent/go-eccrypto v0.1.0 github.com/bittorrent/go-mfs v0.4.0 - github.com/bittorrent/go-unixfs v0.7.0 - github.com/bittorrent/interface-go-btfs-core v0.8.2 + github.com/bittorrent/go-unixfs v0.8.0 + github.com/bittorrent/interface-go-btfs-core v0.9.0 github.com/bittorrent/protobuf v1.4.0 github.com/blang/semver v3.5.1+incompatible github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 diff --git a/go.sum b/go.sum index 67f45ec2d..2099885cd 100644 --- a/go.sum +++ b/go.sum @@ -212,11 +212,11 @@ github.com/bittorrent/go-btfs-cmds v0.3.0 h1:xpCBgk3zIm84Ne6EjeJgi8WLB5YJJUIFMjK github.com/bittorrent/go-btfs-cmds v0.3.0/go.mod h1:Fbac/Rou32G0jpoa6wLrNNDxcGOZbGfk+GiG0r3uEIU= github.com/bittorrent/go-btfs-common v0.9.1-0.20240823025041-824b78e1c643 h1:oUeQ8DRKOsgeRs11gkzPcgAw1wXYt48nSmYtqrkPSnA= github.com/bittorrent/go-btfs-common v0.9.1-0.20240823025041-824b78e1c643/go.mod h1:TO1VUCAZ1ZtCzMByGay8Kl0NyF+0PadIAX6k1Zp5H4w= -github.com/bittorrent/go-btfs-config v0.13.3-0.20240822075319-c9a0e978f0b2 h1:zbmWBI69iBg6uRriAJ496NQfeEWlAwOrhSgsNK1/lLg= -github.com/bittorrent/go-btfs-config v0.13.3-0.20240822075319-c9a0e978f0b2/go.mod h1:ahh4rLSA+sl3FsHJ/ma6OOl6yu4ogCmeumzgBxKSIjg= +github.com/bittorrent/go-btfs-config v0.13.3 h1:2JeWoTiWUPmvsW3NUyuocMhuEbXya3sBaOz6cPFA3xs= +github.com/bittorrent/go-btfs-config v0.13.3/go.mod h1:ahh4rLSA+sl3FsHJ/ma6OOl6yu4ogCmeumzgBxKSIjg= github.com/bittorrent/go-btfs-files v0.3.0/go.mod h1:ylMf73m6oK94hL7VPblY1ZZpePsr6XbPV4BaNUwGZR0= -github.com/bittorrent/go-btfs-files v0.3.1 h1:esq3j+6FtZ+SlaxKjVtiYgvXk/SWUiTcv0Q1MeJoPnQ= -github.com/bittorrent/go-btfs-files v0.3.1/go.mod h1:ylMf73m6oK94hL7VPblY1ZZpePsr6XbPV4BaNUwGZR0= +github.com/bittorrent/go-btfs-files v0.3.2 h1:NqCLqbq5S6hUe1dX5EINDDxbQIO7fDzVS8tS+N6eTcA= +github.com/bittorrent/go-btfs-files v0.3.2/go.mod h1:ylMf73m6oK94hL7VPblY1ZZpePsr6XbPV4BaNUwGZR0= github.com/bittorrent/go-btns v0.2.0 h1:OMpxUiRbtb/PRTK/z/flxcwOfTvNKMsTLOubYFhKy1s= github.com/bittorrent/go-btns v0.2.0/go.mod h1:+Cinr/1Jl7V/Pqgz+vbOdHXkLVFbMqjypmbAv8QiQPs= github.com/bittorrent/go-common/v2 v2.4.0 h1:u0jldKnQteTPQDNKj5GUBOUj2Tswn0+GfWN7yq2QAaY= @@ -227,10 +227,11 @@ github.com/bittorrent/go-mfs v0.4.0 h1:xb7Bxp65LQP8yhflx47ZMuXzIMSSo9ZrasVhroCvR github.com/bittorrent/go-mfs v0.4.0/go.mod h1:w7XQuaSCDsL0MhcMP02ViFJQHYg2tLf+/v0w/m7wMfM= github.com/bittorrent/go-path v0.4.1 h1:9qJe6V2/O3n8Z3tqgN3wgbYcXrcwAv1U3de5xiyYodg= github.com/bittorrent/go-path v0.4.1/go.mod h1:eNLsxJEEMxn/CDzUJ6wuNl+6No6tEUhOZcPKsZsYX0E= -github.com/bittorrent/go-unixfs v0.7.0 h1:2SPuQcAmubJUl+zuKoGWdculoZRn7D0zkDnTZ9pupqo= github.com/bittorrent/go-unixfs v0.7.0/go.mod h1:0UNGV0k5MFsMGOeNjOJFtURcXDFz8bjtyfhcom+vW7A= -github.com/bittorrent/interface-go-btfs-core v0.8.2 h1:iTStlXLoandcKyFruq4U0uVSR3CQU7ey9Lwf8Mu3jw0= -github.com/bittorrent/interface-go-btfs-core v0.8.2/go.mod h1:tQ3d3uI2gH+AO7ikbBwlulRgff0/dzobz9H3SL00yYo= 
+github.com/bittorrent/go-unixfs v0.8.0 h1:uWMDFYB0BfKP7xyffjziSBriCxNoNvQR5bdcRXDUiPo= +github.com/bittorrent/go-unixfs v0.8.0/go.mod h1:0UNGV0k5MFsMGOeNjOJFtURcXDFz8bjtyfhcom+vW7A= +github.com/bittorrent/interface-go-btfs-core v0.9.0 h1:MRt0JKM3Tgstv1Z7OXlB2EJabifaY2uP2AHiWdE/oEM= +github.com/bittorrent/interface-go-btfs-core v0.9.0/go.mod h1:tQ3d3uI2gH+AO7ikbBwlulRgff0/dzobz9H3SL00yYo= github.com/bittorrent/protobuf v1.4.0 h1:3AW4SZUud3/8/orb8O/957CdspwxWjX/qprvF49aQ70= github.com/bittorrent/protobuf v1.4.0/go.mod h1:k2fZczatqZOyvWUezE02Xt5uFcVqdUd1tNeZwXjELCk= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= diff --git a/repo/fsrepo/fsrepo.go b/repo/fsrepo/fsrepo.go index a154304e5..2394620fa 100644 --- a/repo/fsrepo/fsrepo.go +++ b/repo/fsrepo/fsrepo.go @@ -16,6 +16,7 @@ import ( "github.com/bittorrent/go-btfs/repo/common" mfsr "github.com/bittorrent/go-btfs/repo/fsrepo/migrations" dir "github.com/bittorrent/go-btfs/thirdparty/dir" + rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" config "github.com/bittorrent/go-btfs-config" serialize "github.com/bittorrent/go-btfs-config/serialize" @@ -98,11 +99,12 @@ type FSRepo struct { path string // lockfile is the file system lock to prevent others from opening // the same fsrepo path concurrently - lockfile io.Closer - config *config.Config - ds repo.Datastore - keystore keystore.Keystore - filemgr *filestore.FileManager + lockfile io.Closer + config *config.Config + userResourceOverrides rcmgr.PartialLimitConfig + ds repo.Datastore + keystore keystore.Keystore + filemgr *filestore.FileManager } var _ repo.Repo = (*FSRepo)(nil) @@ -143,21 +145,21 @@ func open(repoPath string) (repo.Repo, error) { }() // Check version, and error out if not matching - //ver, err := mfsr.RepoPath(r.path).Version() - //fmt.Println("...3 checkInitialized ", ver, err) - //if err != nil { + // ver, err := mfsr.RepoPath(r.path).Version() + // fmt.Println("...3 checkInitialized ", ver, err) + // if err != nil { // if os.IsNotExist(err) { // return nil, ErrNoVersion // } // return nil, err - //} + // } // - //if RepoVersion > ver { + // if RepoVersion > ver { // return nil, ErrNeedMigration - //} else if ver > RepoVersion { + // } else if ver > RepoVersion { // // program version too low for existing repo // return nil, fmt.Errorf(programTooLowMessage, RepoVersion, ver) - //} + // } // check repo path, then check all constituent parts. if err := dir.Writable(r.path); err != nil { @@ -168,6 +170,10 @@ func open(repoPath string) (repo.Repo, error) { return nil, err } + if err := r.openUserResourceOverrides(); err != nil { + return nil, err + } + if err := r.openDatastore(); err != nil { return nil, err } @@ -702,6 +708,32 @@ func (r *FSRepo) SwarmKey() ([]byte, error) { return ioutil.ReadAll(f) } +// openUserResourceOverrides will remove all overrides if the file is not present. +// It will error if the decoding fails. +func (r *FSRepo) openUserResourceOverrides() error { + // This filepath is documented in docs/libp2p-resource-management.md and be kept in sync. + err := serialize.ReadConfigFile(filepath.Join(r.path, "libp2p-resource-limit-overrides.json"), &r.userResourceOverrides) + if errors.Is(err, serialize.ErrNotInitialized) { + err = nil + } + return err +} + +func (r *FSRepo) UserResourceOverrides() (rcmgr.PartialLimitConfig, error) { + // It is not necessary to hold the package lock since the repo is in an + // opened state. The package lock is _not_ meant to ensure that the repo is + // thread-safe. 
The package lock is only meant to guard against removal and + // coordinate the lockfile. However, we provide thread-safety to keep + // things simple. + packageLock.Lock() + defer packageLock.Unlock() + + if r.closed { + return rcmgr.PartialLimitConfig{}, errors.New("cannot access config, repo not open") + } + return r.userResourceOverrides, nil +} + var _ io.Closer = &FSRepo{} var _ repo.Repo = &FSRepo{} diff --git a/repo/mock.go b/repo/mock.go index b3b06c0cd..951da6831 100644 --- a/repo/mock.go +++ b/repo/mock.go @@ -4,6 +4,7 @@ import ( "errors" keystore "github.com/bittorrent/go-btfs/keystore" + rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" config "github.com/bittorrent/go-btfs-config" filestore "github.com/ipfs/go-filestore" @@ -62,3 +63,7 @@ func (m *Mock) SwarmKey() ([]byte, error) { } func (m *Mock) FileManager() *filestore.FileManager { return m.F } + +func (m *Mock) UserResourceOverrides() (rcmgr.PartialLimitConfig, error) { + return rcmgr.PartialLimitConfig{}, nil +} diff --git a/repo/repo.go b/repo/repo.go index 83b57c77f..3ba186f24 100644 --- a/repo/repo.go +++ b/repo/repo.go @@ -5,6 +5,7 @@ import ( "io" keystore "github.com/bittorrent/go-btfs/keystore" + rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" config "github.com/bittorrent/go-btfs-config" ds "github.com/ipfs/go-datastore" @@ -22,6 +23,8 @@ type Repo interface { // to the returned config are not automatically persisted. Config() (*config.Config, error) + UserResourceOverrides() (rcmgr.PartialLimitConfig, error) + // BackupConfig creates a backup of the current configuration file using // the given prefix for naming. BackupConfig(prefix string) (string, error) diff --git a/version.go b/version.go index 97850285e..541579ca7 100644 --- a/version.go +++ b/version.go @@ -4,7 +4,7 @@ package btfs var CurrentCommit string // CurrentVersionNumber is the current application's version literal -const CurrentVersionNumber = "3.1.0" +const CurrentVersionNumber = "3.2.0" const ApiVersion = "/go-btfs/" + CurrentVersionNumber + "/"
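Note (illustrative, not part of the patch): the UnixFS 1.5 metadata introduced above is rendered in two places — `add` events carry the full mode as an octal string prefixed with "0", while `files stat` emits a zero-padded octal string in JSON and, in text output, a symbolic mode plus the permission bits in octal alongside a formatted mtime. The standalone Go sketch below mirrors those formatting expressions from add.go and files.go; the sample mode and timestamp are invented for illustration only.

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
	"time"
)

func main() {
	// Example values only; a real run would take these from the UnixFS node.
	var mode os.FileMode = 0o644
	var mtimeSec int64 = 1700000000

	// `add` event: full mode in octal, prefixed with "0" (as in add.go above).
	fmt.Println("0" + strconv.FormatUint(uint64(mode), 8)) // 0644

	// `files stat` JSON: zero-padded octal string (statOutput.MarshalJSON above).
	fmt.Printf("%04o\n", uint32(mode)) // 0644

	// `files stat` text: lower-cased symbolic mode plus permission bits in octal
	// (the <mode> and <mode-octal> placeholders above).
	fmt.Println(strings.ToLower(mode.String()), "0"+strconv.FormatInt(int64(mode&0x1FF), 8)) // -rw-r--r-- 0644

	// Mtime travels as Unix seconds and is formatted for display.
	fmt.Println(time.Unix(mtimeSec, 0).UTC().Format("2 Jan 2006, 15:04:05 MST"))
}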
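Note (illustrative, not part of the patch): the text encoder of the new `swarm resources` command reduces each scope to a limit string and a usage percentage. The sketch below reproduces that arithmetic with made-up numbers; the sentinel constants here are stand-ins, since the real code in swarm.go compares against rcmgr.Unlimited64 and rcmgr.BlockAllLimit64 from go-libp2p's resource manager.

package main

import (
	"fmt"
	"strconv"
)

// Stand-in sentinels for illustration; the real command checks the
// go-libp2p resource-manager constants instead.
const (
	unlimited int64 = -1
	blockAll  int64 = -2
)

// render returns the limit column and the usage-percent column for one scope.
func render(limit, usage int64) (string, string) {
	switch limit {
	case unlimited:
		return "unlimited", "n/a"
	case blockAll:
		return "blockAll", "n/a"
	default:
		if usage == 0 {
			return strconv.FormatInt(limit, 10), "0%"
		}
		pct := strconv.FormatFloat(float64(usage)/float64(limit)*100, 'f', 1, 64) + "%"
		return strconv.FormatInt(limit, 10), pct
	}
}

func main() {
	l, p := render(256, 32)
	fmt.Println(l, p) // 256 12.5%

	l, p = render(unlimited, 10)
	fmt.Println(l, p) // unlimited n/a
}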