Commit

Merge branch 'release_2.3.3' into feat/repo-backup

Shawn-Huang-Tron authored Sep 22, 2023
2 parents 470c2d1 + 3395b69 commit 6bf1a4f
Showing 93 changed files with 13,366 additions and 641 deletions.
4 changes: 2 additions & 2 deletions blocks/blockstoreutil/remove.go
@@ -71,8 +71,8 @@ func RmBlocks(ctx context.Context, blocks bs.GCBlockstore, pins pin.Pinner, cids
}

// FilterPinned takes a slice of Cids and returns it with the pinned Cids
// removed. If a Cid is pinned, it will place RemovedBlock objects in the given
// out channel, with an error which indicates that the Cid is pinned.
// removed. If a CID is pinned, it will place RemovedBlock objects in the given
// out channel, with an error which indicates that the CID is pinned.
// This function is used in RmBlocks to filter out any blocks which are not
// to be removed (because they are pinned).
func FilterPinned(ctx context.Context, pins pin.Pinner, out chan<- interface{}, cids []cid.Cid) []cid.Cid {
31 changes: 31 additions & 0 deletions cmd/btfs/daemon.go
@@ -7,6 +7,8 @@ import (
"errors"
_ "expvar"
"fmt"
"github.com/bittorrent/go-btfs/s3"
"github.com/bittorrent/go-btfs/s3/api/services/accesskey"
"io/ioutil"
"math/rand"
"net"
@@ -100,6 +102,7 @@ const (
chainID = "chain-id"
// apiAddrKwd = "address-api"
// swarmAddrKwd = "address-swarm"
enableS3CompatibleAPIKwd = "s3-compatible-api"
)

// BTFS daemon test exit error code
@@ -227,6 +230,7 @@ Headers.
// TODO: add way to override addresses. tricky part: updating the config if also --init.
// cmds.StringOption(apiAddrKwd, "Address for the daemon rpc API (overrides config)"),
// cmds.StringOption(swarmAddrKwd, "Address for the swarm socket (overrides config)"),
cmds.BoolOption(enableS3CompatibleAPIKwd, "Enable s3-compatible-api server"),
},
Subcommands: map[string]*cmds.Command{},
NoRemote: true,
@@ -713,6 +717,33 @@ If the user need to start multiple nodes on the same machine, the configuration
functest(cfg.Services.OnlineServerDomain, cfg.Identity.PeerID, hValue)
}

// Init s3 providers
err = s3.InitProviders(statestore)
if err != nil {
return err
}

// Init access-key
accesskey.InitService(s3.GetProviders())

// Start s3-compatible-api server
s3OptEnable, s3Opt := req.Options[enableS3CompatibleAPIKwd].(bool)
if s3OptEnable || (!s3Opt && cfg.S3CompatibleAPI.Enable) {
s3Server := s3.NewServer(cfg.S3CompatibleAPI)
err = s3Server.Start()
if err != nil {
fmt.Printf("S3-Compatible-API server: %v\n", err)
return
}
fmt.Printf("S3-Compatible-API server started, endpoint-url: http://%s\n", cfg.S3CompatibleAPI.Address)
defer func() {
err = s3Server.Stop()
if err != nil {
fmt.Printf("S3-Compatible-API server: %v\n", err)
}
}()
}

if SimpleMode == false {
// set Analytics flag if specified
if dc, ok := req.Options[enableDataCollection]; ok == true {
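The startup block added above gives the new --s3-compatible-api flag precedence over the S3CompatibleAPI.Enable config value: an explicit flag value wins, and the config is only consulted when the flag is absent. A minimal, standalone sketch of that precedence expression (s3Enabled is a hypothetical name used only for illustration; the real check lives inline in daemon.go as shown in the diff):

package main

import "fmt"

// s3Enabled mirrors the condition in the diff:
//   s3OptEnable || (!s3Opt && cfg.S3CompatibleAPI.Enable)
// optValue is the flag's value; optSet reports whether the flag was supplied
// (the "ok" result of the type assertion on req.Options in the diff).
func s3Enabled(optValue, optSet, cfgEnable bool) bool {
	return optValue || (!optSet && cfgEnable)
}

func main() {
	fmt.Println(s3Enabled(true, true, false))  // flag true: enabled even if config disables it
	fmt.Println(s3Enabled(false, false, true)) // flag absent: config decides, so enabled
	fmt.Println(s3Enabled(false, true, true))  // flag explicitly false: disabled despite config
}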
175 changes: 175 additions & 0 deletions core/commands/accesskey.go
@@ -0,0 +1,175 @@
package commands

import (
"errors"
cmds "github.com/bittorrent/go-btfs-cmds"
"github.com/bittorrent/go-btfs/core/commands/cmdenv"
"github.com/bittorrent/go-btfs/s3/api/services/accesskey"
)

var AccessKeyCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Manage S3-Compatible-API access-keys.",
ShortDescription: "Commands for generate, update, get and list access-keys stored in this node.",
},
Subcommands: map[string]*cmds.Command{
"generate": accessKeyGenerateCmd,
"enable": accessKeyEnableCmd,
"disable": accessKeyDisableCmd,
"reset": accessKeyResetCmd,
"delete": accessKeyDeleteCmd,
"get": accessKeyGetCmd,
"list": accessKeyListCmd,
},
NoLocal: true,
}

func checkDaemon(env cmds.Environment) (err error) {
node, err := cmdenv.GetNode(env)
if err != nil {
return
}
if !node.IsDaemon {
err = errors.New("please start the node first")
}
return
}

var accessKeyGenerateCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Generate a new access-key record.",
ShortDescription: "Outputs the new created access-key record.",
},
Arguments: []cmds.Argument{},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) {
err = checkDaemon(env)
if err != nil {
return
}
ack, err := accesskey.Generate()
if err != nil {
return
}
err = cmds.EmitOnce(res, ack)
return
},
}

var accessKeyEnableCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Set status of the specified access-key to enable.",
ShortDescription: "Outputs empty if the access-key has been set to enable or it was already enabled.",
},
Arguments: []cmds.Argument{
cmds.StringArg("key", true, true, "The key").EnableStdin(),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) {
err = checkDaemon(env)
if err != nil {
return
}
key := req.Arguments[0]
err = accesskey.Enable(key)
return
},
}

var accessKeyDisableCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Set status of the specified access-key to enable.",
ShortDescription: "Outputs empty if the access-key has been set to disable or it was already disabled.",
},
Arguments: []cmds.Argument{
cmds.StringArg("key", true, true, "The key").EnableStdin(),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) {
err = checkDaemon(env)
if err != nil {
return
}
key := req.Arguments[0]
err = accesskey.Disable(key)
return
},
}

var accessKeyResetCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Rest secret of the specified access-key.",
ShortDescription: "Outputs the updated access-key record if it's secret has been reset.",
},
Arguments: []cmds.Argument{
cmds.StringArg("key", true, true, "The key").EnableStdin(),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) {
err = checkDaemon(env)
if err != nil {
return
}
key := req.Arguments[0]
err = accesskey.Reset(key)
return
},
}

var accessKeyDeleteCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Delete the specified access-key",
ShortDescription: "Outputs empty if access-key record has been deleted.",
},
Arguments: []cmds.Argument{
cmds.StringArg("key", true, true, "The key").EnableStdin(),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) {
err = checkDaemon(env)
if err != nil {
return
}
key := req.Arguments[0]
err = accesskey.Delete(key)
return
},
}

var accessKeyGetCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Get an access-key detail info.",
ShortDescription: "Outputs access-key record for the specified key.",
},
Arguments: []cmds.Argument{
cmds.StringArg("key", true, true, "The key").EnableStdin(),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) {
err = checkDaemon(env)
if err != nil {
return
}
key := req.Arguments[0]
ack, err := accesskey.Get(key)
if err != nil {
return
}
err = cmds.EmitOnce(res, ack)
return
},
}

var accessKeyListCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "List all access-keys.",
ShortDescription: "Outputs all non-deleted access-keys stored in current node.",
},
Arguments: []cmds.Argument{},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) {
err = checkDaemon(env)
if err != nil {
return
}
list, err := accesskey.List()
if err != nil {
return
}
err = cmds.EmitOnce(res, list)
return
},
}
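Every handler in this new file follows the same shape: guard with checkDaemon, call into the accesskey service, and emit the result once. Below is a rough sketch of driving the same service calls directly, assuming the process has already run s3.InitProviders and accesskey.InitService as wired up in the daemon.go changes above; only functions that appear in this diff are used, and results are printed generically because their concrete types are not shown here.

package main

import (
	"fmt"

	"github.com/bittorrent/go-btfs/s3/api/services/accesskey"
)

// demo exercises the service calls that the CLI handlers above wrap.
// It is only meaningful inside a process where the access-key service
// has been initialized (see the daemon.go changes in this commit).
func demo(key string) error {
	ack, err := accesskey.Generate() // create a new access-key record
	if err != nil {
		return err
	}
	fmt.Printf("generated: %+v\n", ack)

	if err := accesskey.Enable(key); err != nil { // no output on success, like the CLI
		return err
	}

	list, err := accesskey.List() // all non-deleted access-keys on this node
	if err != nil {
		return err
	}
	fmt.Printf("access-keys: %+v\n", list)
	return nil
}

func main() {
	if err := demo("<existing-access-key-id>"); err != nil { // placeholder key id
		fmt.Println("accesskey demo:", err)
	}
}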
2 changes: 1 addition & 1 deletion core/commands/cid.go
@@ -117,7 +117,7 @@ The optional format string is a printf style format string:
}

type CidFormatRes struct {
CidStr string // Original Cid String passed in
CidStr string // Original CID String passed in
Formatted string // Formatted Result
ErrorMsg string // Error
}
2 changes: 1 addition & 1 deletion core/commands/cmdenv/cidbase.go
@@ -58,7 +58,7 @@ func CidBaseDefined(req *cmds.Request) bool {
}

// CidEncoderFromPath creates a new encoder that is influenced from
// the encoded Cid in a Path. For CidV0 the multibase from the base
// the encoded CID in a Path. For CidV0 the multibase from the base
// encoder is used and automatic upgrades are disabled. For CidV1 the
// multibase from the CID is used and upgrades are enabled.
//
6 changes: 3 additions & 3 deletions core/commands/files.go
@@ -87,8 +87,8 @@ const (
filesHashOptionName = "hash"
)

var cidVersionOption = cmds.IntOption(filesCidVersionOptionName, "cid-ver", "Cid version to use. (experimental)")
var hashOption = cmds.StringOption(filesHashOptionName, "Hash function to use. Will set Cid version to 1 if used. (experimental)")
var cidVersionOption = cmds.IntOption(filesCidVersionOptionName, "cid-ver", "CID version to use. (experimental)")
var hashOption = cmds.StringOption(filesHashOptionName, "Hash function to use. Will set CID version to 1 if used. (experimental)")

var errFormat = errors.New("format was set by multiple options. Only one format option is allowed")

@@ -735,7 +735,7 @@ stat' on the file or any of its ancestors.
},
Arguments: []cmds.Argument{
cmds.StringArg("path", true, false, "Path to write to."),
cmds.FileArg("data", true, false, "Data to write.").EnableStdin(),
cmds.FileArg("data", true, false, "data to write.").EnableStdin(),
},
Options: []cmds.Option{
cmds.Int64Option(filesOffsetOptionName, "o", "Byte offset to begin writing at."),
4 changes: 2 additions & 2 deletions core/commands/filestore.go
@@ -45,7 +45,7 @@ The output is:
`,
},
Arguments: []cmds.Argument{
cmds.StringArg("obj", false, true, "Cid of objects to list."),
cmds.StringArg("obj", false, true, "CID of objects to list."),
},
Options: []cmds.Option{
cmds.BoolOption(fileOrderOptionName, "sort the results based on the path of the backing file"),
@@ -122,7 +122,7 @@ For ERROR entries the error will also be printed to stderr.
`,
},
Arguments: []cmds.Argument{
cmds.StringArg("obj", false, true, "Cid of objects to verify."),
cmds.StringArg("obj", false, true, "CID of objects to verify."),
},
Options: []cmds.Option{
cmds.BoolOption(fileOrderOptionName, "verify the objects based on the order of the backing file"),
10 changes: 5 additions & 5 deletions core/commands/object/object.go
@@ -270,7 +270,7 @@ Supported values are:
Type: Node{},
Encoders: cmds.EncoderMap{
cmds.Protobuf: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *Node) error {
// deserialize the Data field as text as this was the standard behaviour
// deserialize the data field as text as this was the standard behaviour
object, err := deserializeNode(out, "text")
if err != nil {
return nil
@@ -371,20 +371,20 @@ It reads from stdin, and the output is a base58 encoded multihash.
'btfs object put' is a plumbing command for storing DAG nodes.
It reads from stdin, and the output is a base58 encoded multihash.
Data should be in the format specified by the --inputenc flag.
data should be in the format specified by the --inputenc flag.
--inputenc may be one of the following:
* "protobuf"
* "json" (default)
Examples:
$ echo '{ "Data": "abc" }' | btfs object put
$ echo '{ "data": "abc" }' | btfs object put
This creates a node with the data 'abc' and no links. For an object with
links, create a file named 'node.json' with the contents:
{
"Data": "another",
"data": "another",
"Links": [ {
"Name": "some link",
"Hash": "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V",
@@ -399,7 +399,7 @@ And then run:
},

Arguments: []cmds.Argument{
cmds.FileArg("data", true, false, "Data to be stored as a DAG object.").EnableStdin(),
cmds.FileArg("data", true, false, "data to be stored as a DAG object.").EnableStdin(),
},
Options: []cmds.Option{
cmds.StringOption(inputencOptionName, "Encoding type of input data. One of: {\"protobuf\", \"json\"}.").WithDefault("json"),
4 changes: 2 additions & 2 deletions core/commands/object/patch.go
@@ -40,13 +40,13 @@ Example:
$ echo "hello" | btfs object patch $HASH append-data
NOTE: This does not append data to a file - it modifies the actual raw
data within an object. Objects have a max size of 1MB and objects larger than
data within an object. ToDeleteObjects have a max size of 1MB and objects larger than
the limit will not be respected by the network.
`,
},
Arguments: []cmds.Argument{
cmds.StringArg("root", true, false, "The hash of the node to modify."),
cmds.FileArg("data", true, false, "Data to append.").EnableStdin(),
cmds.FileArg("data", true, false, "data to append.").EnableStdin(),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
api, err := cmdenv.GetApi(env, req)
2 changes: 1 addition & 1 deletion core/commands/refs.go
@@ -296,7 +296,7 @@ func (rw *RefWriter) visit(c cid.Cid, depth int) (bool, bool) {
// Unique == true && depth < MaxDepth (or unlimited) from this point

// Branch pruning cases:
// - We saw the Cid before and either:
// - We saw the CID before and either:
// - Depth is unlimited (MaxDepth = -1)
// - We saw it higher (smaller depth) in the DAG (means we must have
// explored deep enough before)
1 change: 1 addition & 0 deletions core/commands/root.go
@@ -182,6 +182,7 @@ var rootSubcommands = map[string]*cmds.Command{
"multibase": MbaseCmd,
"backup": BackupCmd,
"recovery": RecoveryCmd,
"accesskey": AccessKeyCmd,
}

// RootRO is the readonly version of Root
2 changes: 1 addition & 1 deletion core/corehttp/gateway/gateway.go
@@ -92,7 +92,7 @@ type IPFSBackend interface {
// Get returns a GetResponse with UnixFS file, directory or a block in IPLD
// format e.g., (DAG-)CBOR/JSON.
//
// Returned Directories are preferably a minimum info required for enumeration: Name, Size, and Cid.
// Returned Directories are preferably a minimum info required for enumeration: Name, Size, and CID.
//
// Optional ranges follow [HTTP Byte Ranges] notation and can be used for
// pre-fetching specific sections of a file or a block.