Commit

Feat/s3 compatible api (#388)
* feat: access key

* feat: access-key module & access-key cmds

* fix: remove redundant error condition

* opt: handle wrapper error and remove unused bucket type define in access-key module

* feat: add daemon check before execute accesskey commands

* optimize: access-key store prefix

* optimize: not found error

* feat: add s3 signature

* chore:

* chore:

* chore:

* chore:

* chore:

* feat: add store

* chore:

* feat: s3 access-key, server, handlers, statestore, filestore

* chore:

* optimize: code structure

* style: s3 code structure

* style: code structure

* optimize: code structure

* feat: add multiple context lock

* feat: check auth

* chore:

* chore:

* feat: add bucket service

* chore:

* mod: update bucket lock

* chore:

* chore:

* chore:

* del s3d

* chore: s3 req & rsp structure

* chore:

* feat: add pubBucket api

* feat: add more bucket api

* chore:

* chore:

* feat: add request and response

* chore:

* feat: server build

* chore: check acl

* chore

* chore: adjust bucket url

* feat: add auth middleware

* feat: adjust code structure

* optimize: code structure & auth bug

* feat: put object

* chore:

* mod: mod bucket parse req

* optimize: adjust placement of response error

* chore: mig sig 01

* chore: clear sig

* chore: mig sig 02

* chore: mig sig 03

* chore: mig sig 04

* optimize: refactor code

* optimize: rename auth to sign

* optimize: code structure & h.name

* fix: h.name

* feat: put-object

* feat: multipart

* fix: nslock key

* chore: rename s3 constructor file name

* fix: tidy example go-ipfs-as-a-library go mod

* chore: change default s3 server address to local

* mod: add object api

* feat: s3-compatible-api - 1. add start option and configuration; 2. optimize provider interfaces and implementations; 3. rewrite the server constructor function

* merge: object

* chore: add object lock

* chore: of delete objs

* chore:

* fix: list objects bug

* chore: rename ListObjetV1Handler to ListObjectHandler, rename BTFS-Hash to CID

* refactor: bucket service

* refactor: object service

* refactor: object service

* refactor: handlers

* refactor: bucket handler

* refactor: bucket handler

* refactor: response

* refactor: response func

* refactor: response

* refactor: object

* refactor: object

* refactor: objects

* refactor: btfs api add timeout & add cid refs so that a referred cid cannot be deleted

* ref: fix delete object remove body

* ref: format code

* fix: routers

* fix: add cors header

* fix: router option

* feat: add delete objects handler

* refactor: multipart

* ref: multipart

* fix: multipart etag calculation

* chore: add min part size todo

* chore: upgrade 'github.com/anacrolix/torrent' from v1.47.0 to v1.52.5

* opt: comment and amz header

* opt: code

* opt: preflight cache max age

* feat: bucket response add acl header

* opt: change cid-list header to cid

* fix: required check excludes unknown location

* fix: get object acl

* ref: requests

* ref: complete refactor

* fix: args parse

* fix: get object unlock

* fix: object acl writer

* fix: delete objects error

* fix: Sign handler name

* fix: object name escape

* fix: copy source validate

* opt: s3 log

* opt: s3 api log

* fix: allow Cache-Control header in PutObject and CopyObject Action

* opt: add access-key command taglines

* opt: add accesskey command description

* chore: add accesskey commands test path

* chore: add accesskey test path

---------

Co-authored-by: fish <[email protected]>
imstevez and turingczz authored Oct 10, 2023
1 parent 58978cb commit e3cc0be
Showing 94 changed files with 13,374 additions and 641 deletions.
4 changes: 2 additions & 2 deletions blocks/blockstoreutil/remove.go
@@ -71,8 +71,8 @@ func RmBlocks(ctx context.Context, blocks bs.GCBlockstore, pins pin.Pinner, cids
}

// FilterPinned takes a slice of Cids and returns it with the pinned Cids
-// removed. If a Cid is pinned, it will place RemovedBlock objects in the given
-// out channel, with an error which indicates that the Cid is pinned.
+// removed. If a CID is pinned, it will place RemovedBlock objects in the given
+// out channel, with an error which indicates that the CID is pinned.
// This function is used in RmBlocks to filter out any blocks which are not
// to be removed (because they are pinned).
func FilterPinned(ctx context.Context, pins pin.Pinner, out chan<- interface{}, cids []cid.Cid) []cid.Cid {
31 changes: 31 additions & 0 deletions cmd/btfs/daemon.go
@@ -7,6 +7,8 @@ import (
"errors"
_ "expvar"
"fmt"
"github.com/bittorrent/go-btfs/s3"
"github.com/bittorrent/go-btfs/s3/api/services/accesskey"
"io/ioutil"
"math/rand"
"net"
@@ -100,6 +102,7 @@ const (
chainID = "chain-id"
// apiAddrKwd = "address-api"
// swarmAddrKwd = "address-swarm"
enableS3CompatibleAPIKwd = "s3-compatible-api"
)

// BTFS daemon test exit error code
@@ -227,6 +230,7 @@ Headers.
// TODO: add way to override addresses. tricky part: updating the config if also --init.
// cmds.StringOption(apiAddrKwd, "Address for the daemon rpc API (overrides config)"),
// cmds.StringOption(swarmAddrKwd, "Address for the swarm socket (overrides config)"),
cmds.BoolOption(enableS3CompatibleAPIKwd, "Enable s3-compatible-api server"),
},
Subcommands: map[string]*cmds.Command{},
NoRemote: true,
@@ -713,6 +717,33 @@ If the user need to start multiple nodes on the same machine, the configuration
functest(cfg.Services.OnlineServerDomain, cfg.Identity.PeerID, hValue)
}

// Init s3 providers
err = s3.InitProviders(statestore)
if err != nil {
return err
}

// Init access-key
accesskey.InitService(s3.GetProviders())

// Start s3-compatible-api server
s3OptEnable, s3Opt := req.Options[enableS3CompatibleAPIKwd].(bool)
if s3OptEnable || (!s3Opt && cfg.S3CompatibleAPI.Enable) {
s3Server := s3.NewServer(cfg.S3CompatibleAPI)
err = s3Server.Start()
if err != nil {
fmt.Printf("S3-Compatible-API server: %v\n", err)
return
}
fmt.Printf("S3-Compatible-API server started, endpoint-url: http://%s\n", cfg.S3CompatibleAPI.Address)
defer func() {
err = s3Server.Stop()
if err != nil {
fmt.Printf("S3-Compatible-API server: %v\n", err)
}
}()
}

if SimpleMode == false {
// set Analytics flag if specified
if dc, ok := req.Options[enableDataCollection]; ok == true {
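The hunk above wires the S3-compatible server into daemon startup: s3.InitProviders is given the node's statestore, the access-key service is bound to those providers, and the server is started when the --s3-compatible-api flag is passed or cfg.S3CompatibleAPI.Enable is set. As a rough illustration (not part of this commit), a standard S3 client such as the AWS SDK for Go v1 could then be pointed at the printed endpoint. The address, access key, and secret below are placeholders, and path-style addressing is assumed.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Placeholders: use the configured S3CompatibleAPI.Address for the endpoint and a
	// key pair created with `btfs accesskey generate` for the credentials.
	sess, err := session.NewSession(&aws.Config{
		Endpoint:         aws.String("http://127.0.0.1:6001"),
		Region:           aws.String("us-east-1"), // required by the SDK, arbitrary for a local endpoint
		Credentials:      credentials.NewStaticCredentials("EXAMPLE_KEY", "EXAMPLE_SECRET", ""),
		S3ForcePathStyle: aws.Bool(true), // address buckets in the URL path, not as subdomains
	})
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)

	// A simple authenticated call to verify the endpoint and request signing.
	out, err := svc.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.Buckets {
		fmt.Println(aws.StringValue(b.Name))
	}
}

Other SigV4-signing S3 clients should in principle be configurable the same way, given the signature handling added in this change.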
175 changes: 175 additions & 0 deletions core/commands/accesskey.go
@@ -0,0 +1,175 @@
package commands

import (
"errors"
cmds "github.com/bittorrent/go-btfs-cmds"
"github.com/bittorrent/go-btfs/core/commands/cmdenv"
"github.com/bittorrent/go-btfs/s3/api/services/accesskey"
)

var AccessKeyCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Manage S3-Compatible-API access-keys.",
ShortDescription: "Commands for generate, update, get and list access-keys stored in this node.",
},
Subcommands: map[string]*cmds.Command{
"generate": accessKeyGenerateCmd,
"enable": accessKeyEnableCmd,
"disable": accessKeyDisableCmd,
"reset": accessKeyResetCmd,
"delete": accessKeyDeleteCmd,
"get": accessKeyGetCmd,
"list": accessKeyListCmd,
},
NoLocal: true,
}

func checkDaemon(env cmds.Environment) (err error) {
node, err := cmdenv.GetNode(env)
if err != nil {
return
}
if !node.IsDaemon {
err = errors.New("please start the node first")
}
return
}

var accessKeyGenerateCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Generate a new access-key record.",
ShortDescription: "Outputs the new created access-key record.",
},
Arguments: []cmds.Argument{},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) {
err = checkDaemon(env)
if err != nil {
return
}
ack, err := accesskey.Generate()
if err != nil {
return
}
err = cmds.EmitOnce(res, ack)
return
},
}

var accessKeyEnableCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Set status of the specified access-key to enable.",
ShortDescription: "Outputs empty if the access-key has been set to enable or it was already enabled.",
},
Arguments: []cmds.Argument{
cmds.StringArg("key", true, true, "The key").EnableStdin(),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) {
err = checkDaemon(env)
if err != nil {
return
}
key := req.Arguments[0]
err = accesskey.Enable(key)
return
},
}

var accessKeyDisableCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Set status of the specified access-key to enable.",
ShortDescription: "Outputs empty if the access-key has been set to disable or it was already disabled.",
},
Arguments: []cmds.Argument{
cmds.StringArg("key", true, true, "The key").EnableStdin(),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) {
err = checkDaemon(env)
if err != nil {
return
}
key := req.Arguments[0]
err = accesskey.Disable(key)
return
},
}

var accessKeyResetCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Rest secret of the specified access-key.",
ShortDescription: "Outputs the updated access-key record if it's secret has been reset.",
},
Arguments: []cmds.Argument{
cmds.StringArg("key", true, true, "The key").EnableStdin(),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) {
err = checkDaemon(env)
if err != nil {
return
}
key := req.Arguments[0]
err = accesskey.Reset(key)
return
},
}

var accessKeyDeleteCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Delete the specified access-key",
ShortDescription: "Outputs empty if access-key record has been deleted.",
},
Arguments: []cmds.Argument{
cmds.StringArg("key", true, true, "The key").EnableStdin(),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) {
err = checkDaemon(env)
if err != nil {
return
}
key := req.Arguments[0]
err = accesskey.Delete(key)
return
},
}

var accessKeyGetCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Get an access-key detail info.",
ShortDescription: "Outputs access-key record for the specified key.",
},
Arguments: []cmds.Argument{
cmds.StringArg("key", true, true, "The key").EnableStdin(),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) {
err = checkDaemon(env)
if err != nil {
return
}
key := req.Arguments[0]
ack, err := accesskey.Get(key)
if err != nil {
return
}
err = cmds.EmitOnce(res, ack)
return
},
}

var accessKeyListCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "List all access-keys.",
ShortDescription: "Outputs all non-deleted access-keys stored in current node.",
},
Arguments: []cmds.Argument{},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) {
err = checkDaemon(env)
if err != nil {
return
}
list, err := accesskey.List()
if err != nil {
return
}
err = cmds.EmitOnce(res, list)
return
},
}
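The handlers above are thin wrappers around the accesskey service package, so the same operations can also be exercised programmatically. The sketch below is hypothetical: the call shapes (Generate, Enable, Get, List) mirror the handlers in this file, the concrete record type is not shown in this diff, and provider initialization is assumed to have happened as in cmd/btfs/daemon.go.

package main

import (
	"fmt"
	"log"

	"github.com/bittorrent/go-btfs/s3/api/services/accesskey"
)

func main() {
	// Assumes s3.InitProviders(statestore) and accesskey.InitService(s3.GetProviders())
	// have already been called, as in the daemon startup hunk shown earlier.

	// Generate a new key pair; the CLI "generate" subcommand emits this record verbatim.
	ack, err := accesskey.Generate()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("generated: %+v\n", ack)

	// The remaining operations take the access-key string; "EXAMPLE_KEY" is a placeholder
	// for the key value contained in the generated record.
	key := "EXAMPLE_KEY"
	if err := accesskey.Enable(key); err != nil {
		log.Println("enable:", err)
	}

	// Fetch the stored record and the full list of non-deleted records.
	record, err := accesskey.Get(key)
	if err != nil {
		log.Println("get:", err)
	} else {
		fmt.Printf("record: %+v\n", record)
	}

	list, err := accesskey.List()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("all access-keys: %+v\n", list)
}

The CLI subcommands (btfs accesskey generate, enable, disable, reset, delete, get, list) expose exactly these operations.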
2 changes: 1 addition & 1 deletion core/commands/cid.go
@@ -117,7 +117,7 @@ The optional format string is a printf style format string:
}

type CidFormatRes struct {
-CidStr string // Original Cid String passed in
+CidStr string // Original CID String passed in
Formatted string // Formatted Result
ErrorMsg string // Error
}
2 changes: 1 addition & 1 deletion core/commands/cmdenv/cidbase.go
@@ -58,7 +58,7 @@ func CidBaseDefined(req *cmds.Request) bool {
}

// CidEncoderFromPath creates a new encoder that is influenced from
-// the encoded Cid in a Path. For CidV0 the multibase from the base
+// the encoded CID in a Path. For CidV0 the multibase from the base
// encoder is used and automatic upgrades are disabled. For CidV1 the
// multibase from the CID is used and upgrades are enabled.
//
8 changes: 8 additions & 0 deletions core/commands/commands_test.go
@@ -342,6 +342,14 @@ func TestCommands(t *testing.T) {
"/bittorrent/scrape",
"/bittorrent/metainfo",
"/bittorrent/bencode",
"/accesskey",
"/accesskey/generate",
"/accesskey/enable",
"/accesskey/disable",
"/accesskey/reset",
"/accesskey/delete",
"/accesskey/get",
"/accesskey/list",
}

cmdSet := make(map[string]struct{})
6 changes: 3 additions & 3 deletions core/commands/files.go
@@ -87,8 +87,8 @@ const (
filesHashOptionName = "hash"
)

-var cidVersionOption = cmds.IntOption(filesCidVersionOptionName, "cid-ver", "Cid version to use. (experimental)")
-var hashOption = cmds.StringOption(filesHashOptionName, "Hash function to use. Will set Cid version to 1 if used. (experimental)")
+var cidVersionOption = cmds.IntOption(filesCidVersionOptionName, "cid-ver", "CID version to use. (experimental)")
+var hashOption = cmds.StringOption(filesHashOptionName, "Hash function to use. Will set CID version to 1 if used. (experimental)")

var errFormat = errors.New("format was set by multiple options. Only one format option is allowed")

@@ -735,7 +735,7 @@ stat' on the file or any of its ancestors.
},
Arguments: []cmds.Argument{
cmds.StringArg("path", true, false, "Path to write to."),
-cmds.FileArg("data", true, false, "Data to write.").EnableStdin(),
+cmds.FileArg("data", true, false, "data to write.").EnableStdin(),
},
Options: []cmds.Option{
cmds.Int64Option(filesOffsetOptionName, "o", "Byte offset to begin writing at."),
4 changes: 2 additions & 2 deletions core/commands/filestore.go
@@ -45,7 +45,7 @@ The output is:
`,
},
Arguments: []cmds.Argument{
-cmds.StringArg("obj", false, true, "Cid of objects to list."),
+cmds.StringArg("obj", false, true, "CID of objects to list."),
},
Options: []cmds.Option{
cmds.BoolOption(fileOrderOptionName, "sort the results based on the path of the backing file"),
@@ -122,7 +122,7 @@ For ERROR entries the error will also be printed to stderr.
`,
},
Arguments: []cmds.Argument{
-cmds.StringArg("obj", false, true, "Cid of objects to verify."),
+cmds.StringArg("obj", false, true, "CID of objects to verify."),
},
Options: []cmds.Option{
cmds.BoolOption(fileOrderOptionName, "verify the objects based on the order of the backing file"),
10 changes: 5 additions & 5 deletions core/commands/object/object.go
@@ -270,7 +270,7 @@ Supported values are:
Type: Node{},
Encoders: cmds.EncoderMap{
cmds.Protobuf: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *Node) error {
-// deserialize the Data field as text as this was the standard behaviour
+// deserialize the data field as text as this was the standard behaviour
object, err := deserializeNode(out, "text")
if err != nil {
return nil
@@ -371,20 +371,20 @@ It reads from stdin, and the output is a base58 encoded multihash.
'btfs object put' is a plumbing command for storing DAG nodes.
It reads from stdin, and the output is a base58 encoded multihash.
-Data should be in the format specified by the --inputenc flag.
+data should be in the format specified by the --inputenc flag.
--inputenc may be one of the following:
* "protobuf"
* "json" (default)
Examples:
-$ echo '{ "Data": "abc" }' | btfs object put
+$ echo '{ "data": "abc" }' | btfs object put
This creates a node with the data 'abc' and no links. For an object with
links, create a file named 'node.json' with the contents:
{
-"Data": "another",
+"data": "another",
"Links": [ {
"Name": "some link",
"Hash": "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V",
@@ -399,7 +399,7 @@ And then run:
},

Arguments: []cmds.Argument{
-cmds.FileArg("data", true, false, "Data to be stored as a DAG object.").EnableStdin(),
+cmds.FileArg("data", true, false, "data to be stored as a DAG object.").EnableStdin(),
},
Options: []cmds.Option{
cmds.StringOption(inputencOptionName, "Encoding type of input data. One of: {\"protobuf\", \"json\"}.").WithDefault("json"),
4 changes: 2 additions & 2 deletions core/commands/object/patch.go
@@ -40,13 +40,13 @@ Example:
$ echo "hello" | btfs object patch $HASH append-data
NOTE: This does not append data to a file - it modifies the actual raw
-data within an object. Objects have a max size of 1MB and objects larger than
+data within an object. ToDeleteObjects have a max size of 1MB and objects larger than
the limit will not be respected by the network.
`,
},
Arguments: []cmds.Argument{
cmds.StringArg("root", true, false, "The hash of the node to modify."),
cmds.FileArg("data", true, false, "Data to append.").EnableStdin(),
cmds.FileArg("data", true, false, "data to append.").EnableStdin(),
},
Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
api, err := cmdenv.GetApi(env, req)
2 changes: 1 addition & 1 deletion core/commands/refs.go
@@ -296,7 +296,7 @@ func (rw *RefWriter) visit(c cid.Cid, depth int) (bool, bool) {
// Unique == true && depth < MaxDepth (or unlimited) from this point

// Branch pruning cases:
-// - We saw the Cid before and either:
+// - We saw the CID before and either:
// - Depth is unlimited (MaxDepth = -1)
// - We saw it higher (smaller depth) in the DAG (means we must have
// explored deep enough before)
1 change: 1 addition & 0 deletions core/commands/root.go
@@ -179,6 +179,7 @@ var rootSubcommands = map[string]*cmds.Command{
"network": NetworkCmd,
"statuscontract": StatusContractCmd,
"bittorrent": bittorrentCmd,
"accesskey": AccessKeyCmd,
}

// RootRO is the readonly version of Root
(The remaining changed files are not shown.)
