From 7627be38ea931b5ebbb55d2cdaded1c5b574b937 Mon Sep 17 00:00:00 2001 From: Steve Date: Tue, 25 Jul 2023 03:11:04 +0800 Subject: [PATCH 001/139] feat: accesss key --- s3/accesskey/accesskey.go | 58 ++++++++++++++++++++++++++++++++ s3/accesskey/define.go | 20 +++++++++++ s3/accesskey/helper.go | 20 +++++++++++ s3/adaptor/request.go | 4 +++ s3/adaptor/response.go | 4 +++ s3/authentication/acl.go | 9 +++++ s3/authentication/auth _error.go | 8 +++++ s3/authentication/service.go | 7 ++++ s3/service.go | 13 +++++++ 9 files changed, 143 insertions(+) create mode 100644 s3/accesskey/accesskey.go create mode 100644 s3/accesskey/define.go create mode 100644 s3/accesskey/helper.go create mode 100644 s3/adaptor/request.go create mode 100644 s3/adaptor/response.go create mode 100644 s3/authentication/acl.go create mode 100644 s3/authentication/auth _error.go create mode 100644 s3/authentication/service.go create mode 100644 s3/service.go diff --git a/s3/accesskey/accesskey.go b/s3/accesskey/accesskey.go new file mode 100644 index 000000000..64a26499f --- /dev/null +++ b/s3/accesskey/accesskey.go @@ -0,0 +1,58 @@ +package accesskey + +import ( + "github.com/bittorrent/go-btfs/transaction/storage" + "github.com/bittorrent/go-mfs" + "path" + "time" +) + +var _ Service = &service{} + +const ( + KeyLen = 8 + SecretLen = 32 + RootPrefix = "s3_buckets" + StorePrefix = "s3-access-key-" +) + +type service struct { + store storage.StateStorer +} + +func (svc *service) Generate() (ak *AccessKey, err error) { + ak := &AccessKey{ + Key: GetRandStr(KeyLen), + Secret: GetRandStr(SecretLen), + Root: path.Join("/", RootPrefix, GetRandStr(8)), + Enable: true, + CreatedAt: time.Now(), + } + + // create root dir + mfs.Mkdir() + + // store accessKey + err = svc.store.Put(StorePrefix+ak.Key, ak) + return +} + +func (svc *service) Get(key string) (ak *AccessKey, err error) { + return +} + +func (svc *service) Disable(key string) (err error) { + return +} + +func (svc *service) Reset(key string) (err 
error) { + return +} + +func (svc *service) Delete(key string) (err error) { + return +} + +func (svc *service) List() (aks []*AccessKey, err error) { + return +} diff --git a/s3/accesskey/define.go b/s3/accesskey/define.go new file mode 100644 index 000000000..df6a9f477 --- /dev/null +++ b/s3/accesskey/define.go @@ -0,0 +1,20 @@ +package accesskey + +import "time" + +type AccessKey struct { + Key string `json:"key"` + Secret string `json:"secret"` + Root string `json:"root"` + Enable bool `json:"enable"` + CreatedAt time.Time `json:"created_at"` +} + +type Service interface { + Generate() (*AccessKey, error) + Get(key string) (*AccessKey, error) + Disable(key string) error + Reset(key string) error + Delete(key string) error + List() ([]*AccessKey, error) +} diff --git a/s3/accesskey/helper.go b/s3/accesskey/helper.go new file mode 100644 index 000000000..1c71609c9 --- /dev/null +++ b/s3/accesskey/helper.go @@ -0,0 +1,20 @@ +package accesskey + +import ( + "math/rand" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +var letters = []rune("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func GetRandStr(l int) string { + b := make([]rune, l) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) +} diff --git a/s3/adaptor/request.go b/s3/adaptor/request.go new file mode 100644 index 000000000..9dd03dda8 --- /dev/null +++ b/s3/adaptor/request.go @@ -0,0 +1,4 @@ +package adaptor + +type Request struct { +} diff --git a/s3/adaptor/response.go b/s3/adaptor/response.go new file mode 100644 index 000000000..5240345fa --- /dev/null +++ b/s3/adaptor/response.go @@ -0,0 +1,4 @@ +package adaptor + +type Response struct { +} diff --git a/s3/authentication/acl.go b/s3/authentication/acl.go new file mode 100644 index 000000000..de188d77d --- /dev/null +++ b/s3/authentication/acl.go @@ -0,0 +1,9 @@ +package authentication + +type ACLKey string + +const ( + ACLKeyPrivate ACLKey = "private" + ACLKeyPublicRead 
ACLKey = "public-read" + ACLKeyPublicReadWrite ACLKey = "public-read-write" +) diff --git a/s3/authentication/auth _error.go b/s3/authentication/auth _error.go new file mode 100644 index 000000000..132b655f1 --- /dev/null +++ b/s3/authentication/auth _error.go @@ -0,0 +1,8 @@ +package authentication + +type AuthErr struct { +} + +func (err AuthErr) Error() string { + return "" +} diff --git a/s3/authentication/service.go b/s3/authentication/service.go new file mode 100644 index 000000000..0b1ddfc48 --- /dev/null +++ b/s3/authentication/service.go @@ -0,0 +1,7 @@ +package authentication + +import "github.com/bittorrent/go-btfs/s3/adaptor" + +type Service interface { + VerifyRequest(req *adaptor.Request) *AuthErr +} diff --git a/s3/service.go b/s3/service.go new file mode 100644 index 000000000..eb659b752 --- /dev/null +++ b/s3/service.go @@ -0,0 +1,13 @@ +package s3 + +import ( + cfg "github.com/bittorrent/go-btfs-config" +) + +type Request struct { +} + +type Service interface { + Start(config *cfg.S3CompatibleAPI) (err error) + Stop() (err error) +} From a57905b3386b7e72962556466747b393c9cc1e47 Mon Sep 17 00:00:00 2001 From: steve Date: Thu, 27 Jul 2023 22:13:26 +0800 Subject: [PATCH 002/139] feat: access-key module & access-key cmds --- cmd/btfs/daemon.go | 7 + core/commands/accesskey.go | 141 +++++++++++++++ core/commands/root.go | 1 + s3/accesskey/accesskey.go | 58 ------ s3/accesskey/define.go | 20 --- s3/accesskey/instance.go | 170 ++++++++++++++++++ s3/accesskey/interface.go | 43 +++++ s3/accesskey/service.go | 39 ++++ s3/adaptor/request.go | 4 - s3/adaptor/response.go | 4 - s3/authentication/acl.go | 9 - s3/authentication/auth _error.go | 8 - s3/authentication/service.go | 7 - s3/service.go | 13 -- .../helper.go => utils/random/string.go} | 4 +- 15 files changed, 403 insertions(+), 125 deletions(-) create mode 100644 core/commands/accesskey.go delete mode 100644 s3/accesskey/accesskey.go delete mode 100644 s3/accesskey/define.go create mode 100644 
s3/accesskey/instance.go create mode 100644 s3/accesskey/interface.go create mode 100644 s3/accesskey/service.go delete mode 100644 s3/adaptor/request.go delete mode 100644 s3/adaptor/response.go delete mode 100644 s3/authentication/acl.go delete mode 100644 s3/authentication/auth _error.go delete mode 100644 s3/authentication/service.go delete mode 100644 s3/service.go rename s3/{accesskey/helper.go => utils/random/string.go} (84%) diff --git a/cmd/btfs/daemon.go b/cmd/btfs/daemon.go index 889e5d984..6c47abc5d 100644 --- a/cmd/btfs/daemon.go +++ b/cmd/btfs/daemon.go @@ -7,6 +7,7 @@ import ( "errors" _ "expvar" "fmt" + "github.com/bittorrent/go-btfs/s3/accesskey" "io/ioutil" "math/rand" "net" @@ -421,6 +422,12 @@ If the user need to start multiple nodes on the same machine, the configuration statestore.Close() }() + // access-key init + accesskey.InitService(&accesskey.Config{ + SecretLength: 32, + StorePrefix: "access-keys-test-", + }, statestore) + if SimpleMode == false { chainid, stored, err := getChainID(req, cfg, statestore) if err != nil { diff --git a/core/commands/accesskey.go b/core/commands/accesskey.go new file mode 100644 index 000000000..990a96926 --- /dev/null +++ b/core/commands/accesskey.go @@ -0,0 +1,141 @@ +package commands + +import ( + cmds "github.com/bittorrent/go-btfs-cmds" + "github.com/bittorrent/go-btfs/s3/accesskey" +) + +const () + +var AccessKeyCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "", + ShortDescription: ` +`, + }, + + Subcommands: map[string]*cmds.Command{ + "generate": accessKeyGenerateCmd, + "enable": accessKeyEnableCmd, + "disable": accessKeyDisableCmd, + "reset": accessKeyResetCmd, + "delete": accessKeyDeleteCmd, + "get": accessKeyGetCmd, + "list": accessKeyListCmd, + }, +} + +var accessKeyGenerateCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "", + ShortDescription: ` +`, + }, + Arguments: []cmds.Argument{}, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + 
ack, err := accesskey.Generate() + if err != nil { + return err + } + return cmds.EmitOnce(res, ack) + }, +} + +var accessKeyEnableCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "", + ShortDescription: ` +`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("key", true, true, "The key").EnableStdin(), + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + key := req.Arguments[0] + err := accesskey.Enable(key) + return err + }, +} + +var accessKeyDisableCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "", + ShortDescription: ` +`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("key", true, true, "The key").EnableStdin(), + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + key := req.Arguments[0] + err := accesskey.Disable(key) + return err + }, +} + +var accessKeyResetCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "", + ShortDescription: ` +`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("key", true, true, "The key").EnableStdin(), + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + key := req.Arguments[0] + err := accesskey.Reset(key) + return err + }, +} + +var accessKeyDeleteCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "", + ShortDescription: ` +`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("key", true, true, "The key").EnableStdin(), + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + key := req.Arguments[0] + err := accesskey.Delete(key) + return err + }, +} + +var accessKeyGetCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "", + ShortDescription: ` +`, + }, + Arguments: []cmds.Argument{ + cmds.StringArg("key", true, true, "The key").EnableStdin(), + }, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + key := req.Arguments[0] + ack, err := accesskey.Get(key) + if err != nil { + 
return err + } + return cmds.EmitOnce(res, ack) + }, +} + +var accessKeyListCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "", + ShortDescription: ` +`, + }, + Arguments: []cmds.Argument{}, + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + list, err := accesskey.List() + if err != nil { + return err + } + return cmds.EmitOnce(res, list) + }, +} diff --git a/core/commands/root.go b/core/commands/root.go index 7a9c9ed21..049aa661b 100644 --- a/core/commands/root.go +++ b/core/commands/root.go @@ -179,6 +179,7 @@ var rootSubcommands = map[string]*cmds.Command{ "network": NetworkCmd, "statuscontract": StatusContractCmd, "bittorrent": bittorrentCmd, + "accesskey": AccessKeyCmd, } // RootRO is the readonly version of Root diff --git a/s3/accesskey/accesskey.go b/s3/accesskey/accesskey.go deleted file mode 100644 index 64a26499f..000000000 --- a/s3/accesskey/accesskey.go +++ /dev/null @@ -1,58 +0,0 @@ -package accesskey - -import ( - "github.com/bittorrent/go-btfs/transaction/storage" - "github.com/bittorrent/go-mfs" - "path" - "time" -) - -var _ Service = &service{} - -const ( - KeyLen = 8 - SecretLen = 32 - RootPrefix = "s3_buckets" - StorePrefix = "s3-access-key-" -) - -type service struct { - store storage.StateStorer -} - -func (svc *service) Generate() (ak *AccessKey, err error) { - ak := &AccessKey{ - Key: GetRandStr(KeyLen), - Secret: GetRandStr(SecretLen), - Root: path.Join("/", RootPrefix, GetRandStr(8)), - Enable: true, - CreatedAt: time.Now(), - } - - // create root dir - mfs.Mkdir() - - // store accessKey - err = svc.store.Put(StorePrefix+ak.Key, ak) - return -} - -func (svc *service) Get(key string) (ak *AccessKey, err error) { - return -} - -func (svc *service) Disable(key string) (err error) { - return -} - -func (svc *service) Reset(key string) (err error) { - return -} - -func (svc *service) Delete(key string) (err error) { - return -} - -func (svc *service) List() (aks []*AccessKey, err error) { - 
return -} diff --git a/s3/accesskey/define.go b/s3/accesskey/define.go deleted file mode 100644 index df6a9f477..000000000 --- a/s3/accesskey/define.go +++ /dev/null @@ -1,20 +0,0 @@ -package accesskey - -import "time" - -type AccessKey struct { - Key string `json:"key"` - Secret string `json:"secret"` - Root string `json:"root"` - Enable bool `json:"enable"` - CreatedAt time.Time `json:"created_at"` -} - -type Service interface { - Generate() (*AccessKey, error) - Get(key string) (*AccessKey, error) - Disable(key string) error - Reset(key string) error - Delete(key string) error - List() ([]*AccessKey, error) -} diff --git a/s3/accesskey/instance.go b/s3/accesskey/instance.go new file mode 100644 index 000000000..f3d22947d --- /dev/null +++ b/s3/accesskey/instance.go @@ -0,0 +1,170 @@ +package accesskey + +import ( + "github.com/bittorrent/go-btfs/s3/utils/random" + "github.com/bittorrent/go-btfs/transaction/storage" + "github.com/google/uuid" + "sync" + "time" +) + +var _ Service = &service{} + +type service struct { + config *Config + store storage.StateStorer + locks sync.Map +} + +func newService(config *Config, store storage.StateStorer) *service { + return &service{ + config: config, + store: store, + locks: sync.Map{}, + } +} + +func (s *service) Generate() (ack *AccessKey, err error) { + now := time.Now() + ack = &AccessKey{ + Key: s.newKey(), + Secret: s.newSecret(), + Enable: true, + IsDeleted: false, + CreatedAt: now, + UpdatedAt: now, + } + err = s.store.Put(s.getStoreKey(ack.Key), ack) + return +} + +func (s *service) Enable(key string) (err error) { + enable := true + err = s.update(key, &updateArgs{ + Enable: &enable, + }) + return +} + +func (s *service) Disable(key string) (err error) { + enable := false + err = s.update(key, &updateArgs{ + Enable: &enable, + }) + return +} + +func (s *service) Reset(key string) (err error) { + secret := s.newSecret() + err = s.update(key, &updateArgs{ + Secret: &secret, + }) + return +} + +func (s *service) 
Delete(key string) (err error) { + isDelete := true + err = s.update(key, &updateArgs{ + IsDelete: &isDelete, + }) + return +} + +func (s *service) Get(key string) (ack *AccessKey, err error) { + ack = &AccessKey{} + err = s.store.Get(s.getStoreKey(key), ack) + if err != nil && err != storage.ErrNotFound { + return + } + if err == storage.ErrNotFound || ack.IsDeleted { + err = ErrNotFound + } + return +} + +func (s *service) List() (list []*AccessKey, err error) { + err = s.store.Iterate(s.config.StorePrefix, func(key, _ []byte) (stop bool, er error) { + ack := &AccessKey{} + er = s.store.Get(string(key), ack) + if er != nil { + return + } + if ack.IsDeleted { + return + } + list = append(list, ack) + return + }) + return +} + +func (s *service) newKey() (key string) { + key = uuid.NewString() + return +} + +func (s *service) newSecret() (secret string) { + secret = random.NewString(s.config.SecretLength) + return +} + +func (s *service) getStoreKey(key string) (storeKey string) { + storeKey = s.config.StorePrefix + key + return +} + +func (s *service) lock(key string) (unlock func()) { + loaded := true + for loaded { + _, loaded = s.locks.LoadOrStore(key, nil) + time.Sleep(10 * time.Millisecond) + } + unlock = func() { + s.locks.Delete(key) + } + return +} + +type updateArgs struct { + Enable *bool + Secret *string + IsDelete *bool +} + +func (s *service) update(key string, args *updateArgs) (err error) { + unlock := s.lock(key) + defer unlock() + + ack := &AccessKey{} + stk := s.getStoreKey(key) + + err = s.store.Get(stk, ack) + if err != nil && err != storage.ErrNotFound { + return + } + if err == storage.ErrNotFound || ack.IsDeleted { + err = ErrNotFound + return + } + + if ack.IsDeleted { + err = ErrNotFound + return + } + + if args.Enable != nil { + ack.Enable = *args.Enable + } + if args.Secret != nil { + ack.Secret = *args.Secret + } + if args.IsDelete != nil { + ack.IsDeleted = *args.IsDelete + } + + ack.UpdatedAt = time.Now() + + err = s.store.Put(stk, 
ack) + + return +} diff --git a/s3/accesskey/interface.go b/s3/accesskey/interface.go new file mode 100644 index 000000000..40509a1c4 --- /dev/null +++ b/s3/accesskey/interface.go @@ -0,0 +1,43 @@ +package accesskey + +import ( + "errors" + "time" +) + +var ErrNotFound = errors.New("not found") + +type Config struct { + SecretLength int + StorePrefix string +} + +type AccessKey struct { + Key string `json:"key"` + Secret string `json:"secret"` + Enable bool `json:"enable"` + IsDeleted bool `json:"is_deleted"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +type Bucket struct { + ID string `json:"id"` + Name string `json:"name"` + Owner string `json:"owner"` + ACL string `json:"acl"` + CID string `json:"cid"` + IsDeleted bool `json:"is_deleted"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` +} + +type Service interface { + Generate() (ack *AccessKey, err error) + Enable(key string) (err error) + Disable(key string) (err error) + Reset(key string) (err error) + Delete(key string) (err error) + Get(key string) (ack *AccessKey, err error) + List() (list []*AccessKey, err error) +} diff --git a/s3/accesskey/service.go b/s3/accesskey/service.go new file mode 100644 index 000000000..ba0f0b89e --- /dev/null +++ b/s3/accesskey/service.go @@ -0,0 +1,39 @@ +package accesskey + +import ( + "github.com/bittorrent/go-btfs/transaction/storage" +) + +var svc Service + +func InitService(config *Config, store storage.StateStorer) { + svc = newService(config, store) +} + +func Generate() (ack *AccessKey, err error) { + return svc.Generate() +} + +func Enable(key string) (err error) { + return svc.Enable(key) +} + +func Disable(key string) (err error) { + return svc.Disable(key) +} + +func Reset(key string) (err error) { + return svc.Reset(key) +} + +func Delete(key string) (err error) { + return svc.Delete(key) +} + +func Get(key string) (ack *AccessKey, err error) { + return svc.Get(key) +} + +func List() 
(list []*AccessKey, err error) { + return svc.List() +} diff --git a/s3/adaptor/request.go b/s3/adaptor/request.go deleted file mode 100644 index 9dd03dda8..000000000 --- a/s3/adaptor/request.go +++ /dev/null @@ -1,4 +0,0 @@ -package adaptor - -type Request struct { -} diff --git a/s3/adaptor/response.go b/s3/adaptor/response.go deleted file mode 100644 index 5240345fa..000000000 --- a/s3/adaptor/response.go +++ /dev/null @@ -1,4 +0,0 @@ -package adaptor - -type Response struct { -} diff --git a/s3/authentication/acl.go b/s3/authentication/acl.go deleted file mode 100644 index de188d77d..000000000 --- a/s3/authentication/acl.go +++ /dev/null @@ -1,9 +0,0 @@ -package authentication - -type ACLKey string - -const ( - ACLKeyPrivate ACLKey = "private" - ACLKeyPublicRead ACLKey = "public-read" - ACLKeyPublicReadWrite ACLKey = "public-read-write" -) diff --git a/s3/authentication/auth _error.go b/s3/authentication/auth _error.go deleted file mode 100644 index 132b655f1..000000000 --- a/s3/authentication/auth _error.go +++ /dev/null @@ -1,8 +0,0 @@ -package authentication - -type AuthErr struct { -} - -func (err AuthErr) Error() string { - return "" -} diff --git a/s3/authentication/service.go b/s3/authentication/service.go deleted file mode 100644 index 0b1ddfc48..000000000 --- a/s3/authentication/service.go +++ /dev/null @@ -1,7 +0,0 @@ -package authentication - -import "github.com/bittorrent/go-btfs/s3/adaptor" - -type Service interface { - VerifyRequest(req *adaptor.Request) *AuthErr -} diff --git a/s3/service.go b/s3/service.go deleted file mode 100644 index eb659b752..000000000 --- a/s3/service.go +++ /dev/null @@ -1,13 +0,0 @@ -package s3 - -import ( - cfg "github.com/bittorrent/go-btfs-config" -) - -type Request struct { -} - -type Service interface { - Start(config *cfg.S3CompatibleAPI) (err error) - Stop() (err error) -} diff --git a/s3/accesskey/helper.go b/s3/utils/random/string.go similarity index 84% rename from s3/accesskey/helper.go rename to 
s3/utils/random/string.go index 1c71609c9..90071d6d9 100644 --- a/s3/accesskey/helper.go +++ b/s3/utils/random/string.go @@ -1,4 +1,4 @@ -package accesskey +package random import ( "math/rand" @@ -11,7 +11,7 @@ func init() { var letters = []rune("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") -func GetRandStr(l int) string { +func NewString(l int) string { b := make([]rune, l) for i := range b { b[i] = letters[rand.Intn(len(letters))] From 959d35d41d8b1e98ae476888540a70259c48c7b5 Mon Sep 17 00:00:00 2001 From: Steve Date: Thu, 27 Jul 2023 23:17:03 +0800 Subject: [PATCH 003/139] fix: remove redunt error condition --- s3/accesskey/instance.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/s3/accesskey/instance.go b/s3/accesskey/instance.go index f3d22947d..14aa34273 100644 --- a/s3/accesskey/instance.go +++ b/s3/accesskey/instance.go @@ -147,11 +147,6 @@ func (s *service) update(key string, args *updateArgs) (err error) { return } - if ack.IsDeleted { - err = ErrNotFound - return - } - if args.Enable != nil { ack.Enable = *args.Enable } From 4ddc47647d2c1e0ffa0df1ba2d14b86043dd7325 Mon Sep 17 00:00:00 2001 From: Steve Date: Thu, 27 Jul 2023 23:41:59 +0800 Subject: [PATCH 004/139] opt: handle wrapper error and remove unused bucket type define in access-key module --- s3/accesskey/instance.go | 9 +++++---- s3/accesskey/interface.go | 11 ----------- 2 files changed, 5 insertions(+), 15 deletions(-) diff --git a/s3/accesskey/instance.go b/s3/accesskey/instance.go index 14aa34273..7488322cd 100644 --- a/s3/accesskey/instance.go +++ b/s3/accesskey/instance.go @@ -1,6 +1,7 @@ package accesskey import ( + "errors" "github.com/bittorrent/go-btfs/s3/utils/random" "github.com/bittorrent/go-btfs/transaction/storage" "github.com/google/uuid" @@ -73,10 +74,10 @@ func (s *service) Delete(key string) (err error) { func (s *service) Get(key string) (ack *AccessKey, err error) { ack = &AccessKey{} err = s.store.Get(s.getStoreKey(key), ack) - if err != nil && 
err != storage.ErrNotFound { + if err != nil && !errors.Is(err, storage.ErrNotFound) { return } - if err == storage.ErrNotFound || ack.IsDeleted { + if errors.Is(err, storage.ErrNotFound) || ack.IsDeleted { err = ErrNotFound } return @@ -139,10 +140,10 @@ func (s *service) update(key string, args *updateArgs) (err error) { stk := s.getStoreKey(key) err = s.store.Get(stk, ack) - if err != nil && err != storage.ErrNotFound { + if err != nil && !errors.Is(err, storage.ErrNotFound) { return } - if err == storage.ErrNotFound || ack.IsDeleted { + if errors.Is(err, storage.ErrNotFound) || ack.IsDeleted { err = ErrNotFound return } diff --git a/s3/accesskey/interface.go b/s3/accesskey/interface.go index 40509a1c4..6a422e48b 100644 --- a/s3/accesskey/interface.go +++ b/s3/accesskey/interface.go @@ -21,17 +21,6 @@ type AccessKey struct { UpdatedAt time.Time `json:"updated_at"` } -type Bucket struct { - ID string `json:"id"` - Name string `json:"name"` - Owner string `json:"owner"` - ACL string `json:"acl"` - CID string `json:"cid"` - IsDeleted bool `json:"is_deleted"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` -} - type Service interface { Generate() (ack *AccessKey, err error) Enable(key string) (err error) From a791ada39665bfb01c8f6f37a857f2a457e584ef Mon Sep 17 00:00:00 2001 From: Shawn-Huang-Tron <107823650+Shawn-Huang-Tron@users.noreply.github.com> Date: Fri, 28 Jul 2023 14:42:52 +0800 Subject: [PATCH 005/139] fix: return an explicit error instead of panic and optimize the error log (#339) (#341) --- chain/chain.go | 5 +++-- core/commands/commands.go | 2 +- core/commands/storage/upload/upload/upload.go | 17 ++++++++++++++--- 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/chain/chain.go b/chain/chain.go index 6f4c8df3e..a81ecfade 100644 --- a/chain/chain.go +++ b/chain/chain.go @@ -5,12 +5,13 @@ import ( "encoding/hex" "errors" "fmt" - "github.com/bittorrent/go-btfs/chain/tokencfg" "io" "math/big" "strings" "time" + 
"github.com/bittorrent/go-btfs/chain/tokencfg" + "github.com/bittorrent/go-btfs/accounting" "github.com/bittorrent/go-btfs/chain/config" "github.com/bittorrent/go-btfs/settlement" @@ -348,7 +349,7 @@ func initSwap( priceOracle := priceoracle.New(currentPriceOracleAddress, transactionService) _, err := priceOracle.CheckNewPrice(tokencfg.GetWbttToken()) // CheckNewPrice when node starts if err != nil { - return nil, nil, errors.New("CheckNewPrice " + err.Error()) + return nil, nil, errors.New("CheckNewPrice error, it may happens when contract call failed if bttc chain rpc is down, please try again") } swapProtocol := swapprotocol.New(overlayEthAddress, priceOracle) diff --git a/core/commands/commands.go b/core/commands/commands.go index 4f92bd252..3d1d6ecc1 100644 --- a/core/commands/commands.go +++ b/core/commands/commands.go @@ -12,7 +12,7 @@ import ( "sort" "strings" - "github.com/bittorrent/go-btfs-cmds" + cmds "github.com/bittorrent/go-btfs-cmds" ) type commandEncoder struct { diff --git a/core/commands/storage/upload/upload/upload.go b/core/commands/storage/upload/upload/upload.go index 7a8c7b248..390b3e41e 100644 --- a/core/commands/storage/upload/upload/upload.go +++ b/core/commands/storage/upload/upload/upload.go @@ -4,15 +4,18 @@ import ( "context" "errors" "fmt" - "github.com/bittorrent/go-btfs/chain/tokencfg" - "github.com/bittorrent/go-btfs/utils" "strconv" "strings" "time" + "github.com/bittorrent/go-btfs/chain/tokencfg" + "github.com/bittorrent/go-btfs/utils" + coreiface "github.com/bittorrent/interface-go-btfs-core" + "github.com/bittorrent/go-btfs/settlement/swap/swapprotocol" "github.com/bittorrent/go-btfs/chain" + "github.com/bittorrent/go-btfs/core/commands/cmdenv" "github.com/bittorrent/go-btfs/core/commands/storage/hosts" "github.com/bittorrent/go-btfs/core/commands/storage/upload/helper" "github.com/bittorrent/go-btfs/core/commands/storage/upload/offline" @@ -111,7 +114,15 @@ Use status command to check for completion: }, RunTimeout: 15 * 
time.Minute, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - err := utils.CheckSimpleMode(env) + nd, err := cmdenv.GetNode(env) + if err != nil { + return err + } + + if !nd.IsOnline { + return coreiface.ErrOffline + } + err = utils.CheckSimpleMode(env) if err != nil { return err } From 92436c7d950ee458abbc0739d1c942034373e68d Mon Sep 17 00:00:00 2001 From: steve Date: Tue, 8 Aug 2023 00:49:59 +0800 Subject: [PATCH 006/139] feat: add daemon check before execute accesskey commands --- cmd/btfs/daemon.go | 11 +++-- core/commands/accesskey.go | 88 ++++++++++++++++++++++++++++---------- 2 files changed, 72 insertions(+), 27 deletions(-) diff --git a/cmd/btfs/daemon.go b/cmd/btfs/daemon.go index 6c47abc5d..be2f7d64a 100644 --- a/cmd/btfs/daemon.go +++ b/cmd/btfs/daemon.go @@ -423,10 +423,13 @@ If the user need to start multiple nodes on the same machine, the configuration }() // access-key init - accesskey.InitService(&accesskey.Config{ - SecretLength: 32, - StorePrefix: "access-keys-test-", - }, statestore) + accesskey.InitService( + &accesskey.Config{ + SecretLength: 32, + StorePrefix: "access-keys-", + }, + statestore, + ) if SimpleMode == false { chainid, stored, err := getChainID(req, cfg, statestore) diff --git a/core/commands/accesskey.go b/core/commands/accesskey.go index 990a96926..f1cee49b1 100644 --- a/core/commands/accesskey.go +++ b/core/commands/accesskey.go @@ -1,12 +1,12 @@ package commands import ( + "errors" cmds "github.com/bittorrent/go-btfs-cmds" + "github.com/bittorrent/go-btfs/core/commands/cmdenv" "github.com/bittorrent/go-btfs/s3/accesskey" ) -const () - var AccessKeyCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "", @@ -25,6 +25,17 @@ var AccessKeyCmd = &cmds.Command{ }, } +func checkDaemon(env cmds.Environment) (err error) { + node, err := cmdenv.GetNode(env) + if err != nil { + return + } + if !node.IsDaemon { + err = errors.New("please start the node first") + } + return +} + var 
accessKeyGenerateCmd = &cmds.Command{ Helptext: cmds.HelpText{ Tagline: "", @@ -32,12 +43,17 @@ var accessKeyGenerateCmd = &cmds.Command{ `, }, Arguments: []cmds.Argument{}, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) { + err = checkDaemon(env) + if err != nil { + return + } ack, err := accesskey.Generate() if err != nil { - return err + return } - return cmds.EmitOnce(res, ack) + err = cmds.EmitOnce(res, ack) + return }, } @@ -50,10 +66,14 @@ var accessKeyEnableCmd = &cmds.Command{ Arguments: []cmds.Argument{ cmds.StringArg("key", true, true, "The key").EnableStdin(), }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) { + err = checkDaemon(env) + if err != nil { + return + } key := req.Arguments[0] - err := accesskey.Enable(key) - return err + err = accesskey.Enable(key) + return }, } @@ -66,10 +86,14 @@ var accessKeyDisableCmd = &cmds.Command{ Arguments: []cmds.Argument{ cmds.StringArg("key", true, true, "The key").EnableStdin(), }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) { + err = checkDaemon(env) + if err != nil { + return + } key := req.Arguments[0] - err := accesskey.Disable(key) - return err + err = accesskey.Disable(key) + return }, } @@ -82,10 +106,14 @@ var accessKeyResetCmd = &cmds.Command{ Arguments: []cmds.Argument{ cmds.StringArg("key", true, true, "The key").EnableStdin(), }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) { + err = checkDaemon(env) + if err != nil { + return + } key := req.Arguments[0] - err := 
accesskey.Reset(key) - return err + err = accesskey.Reset(key) + return }, } @@ -98,10 +126,14 @@ var accessKeyDeleteCmd = &cmds.Command{ Arguments: []cmds.Argument{ cmds.StringArg("key", true, true, "The key").EnableStdin(), }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) { + err = checkDaemon(env) + if err != nil { + return + } key := req.Arguments[0] - err := accesskey.Delete(key) - return err + err = accesskey.Delete(key) + return }, } @@ -114,13 +146,18 @@ var accessKeyGetCmd = &cmds.Command{ Arguments: []cmds.Argument{ cmds.StringArg("key", true, true, "The key").EnableStdin(), }, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) { + err = checkDaemon(env) + if err != nil { + return + } key := req.Arguments[0] ack, err := accesskey.Get(key) if err != nil { - return err + return } - return cmds.EmitOnce(res, ack) + err = cmds.EmitOnce(res, ack) + return }, } @@ -131,11 +168,16 @@ var accessKeyListCmd = &cmds.Command{ `, }, Arguments: []cmds.Argument{}, - Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { + Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) { + err = checkDaemon(env) + if err != nil { + return + } list, err := accesskey.List() if err != nil { - return err + return } - return cmds.EmitOnce(res, list) + err = cmds.EmitOnce(res, list) + return }, } From 3082ff119bb3ac2611b4c989ee62dfe3ac87bf82 Mon Sep 17 00:00:00 2001 From: steve Date: Tue, 8 Aug 2023 00:57:03 +0800 Subject: [PATCH 007/139] optmize: access-key store prefix --- cmd/btfs/daemon.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/btfs/daemon.go b/cmd/btfs/daemon.go index be2f7d64a..6b023bda1 100644 --- a/cmd/btfs/daemon.go +++ 
b/cmd/btfs/daemon.go @@ -426,7 +426,7 @@ If the user need to start multiple nodes on the same machine, the configuration accesskey.InitService( &accesskey.Config{ SecretLength: 32, - StorePrefix: "access-keys-", + StorePrefix: "access-keys:", }, statestore, ) From 5e72e40b8525d91ff2aa90167670500016bd5ecd Mon Sep 17 00:00:00 2001 From: steve Date: Tue, 8 Aug 2023 14:38:21 +0800 Subject: [PATCH 008/139] optmize: not found error --- s3/accesskey/interface.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/s3/accesskey/interface.go b/s3/accesskey/interface.go index 6a422e48b..8112bab12 100644 --- a/s3/accesskey/interface.go +++ b/s3/accesskey/interface.go @@ -5,7 +5,7 @@ import ( "time" ) -var ErrNotFound = errors.New("not found") +var ErrNotFound = errors.New("key is not found") type Config struct { SecretLength int From 3962044927a8dcc5e9682cce0959db7687c46885 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Tue, 8 Aug 2023 17:32:05 +0800 Subject: [PATCH 009/139] feat: add s3 signature --- s3/action/action.go | 274 +++++++ s3/action/action_test.go | 45 ++ s3/apierrors/errors.go | 80 ++ s3/apierrors/s3_error.go | 44 ++ s3/apierrors/s3api_errors.go | 1310 ++++++++++++++++++++++++++++++++ s3/auth/auth_type.go | 182 +++++ s3/auth/cred.go | 69 ++ s3/auth/other.go | 1 + s3/auth/service.go | 1 + s3/auth/service_instance.go | 30 + s3/auth/service_interface.go | 11 + s3/auth/service_test.go | 1 + s3/auth/signature-v4-parser.go | 285 +++++++ s3/auth/signature-v4-utils.go | 177 +++++ s3/auth/signature-v4.go | 261 +++++++ s3/consts/consts.go | 183 +++++ s3/set/match.go | 48 ++ s3/set/match_test.go | 529 +++++++++++++ s3/set/stringset.go | 198 +++++ s3/set/stringset_test.go | 359 +++++++++ s3/utils/bgcontext.go | 35 + s3/utils/encode.go | 88 +++ s3/utils/hash/errors.go | 33 + s3/utils/hash/reader.go | 211 +++++ s3/utils/ip.go | 46 ++ s3/utils/levels.go | 15 + s3/utils/signature.go | 359 +++++++++ s3/utils/utils.go | 9 + s3/utils/xml.go | 26 + 29 
files changed, 4910 insertions(+) create mode 100644 s3/action/action.go create mode 100644 s3/action/action_test.go create mode 100644 s3/apierrors/errors.go create mode 100644 s3/apierrors/s3_error.go create mode 100644 s3/apierrors/s3api_errors.go create mode 100644 s3/auth/auth_type.go create mode 100644 s3/auth/cred.go create mode 100644 s3/auth/other.go create mode 100644 s3/auth/service.go create mode 100644 s3/auth/service_instance.go create mode 100644 s3/auth/service_interface.go create mode 100644 s3/auth/service_test.go create mode 100644 s3/auth/signature-v4-parser.go create mode 100644 s3/auth/signature-v4-utils.go create mode 100644 s3/auth/signature-v4.go create mode 100644 s3/consts/consts.go create mode 100644 s3/set/match.go create mode 100644 s3/set/match_test.go create mode 100644 s3/set/stringset.go create mode 100644 s3/set/stringset_test.go create mode 100644 s3/utils/bgcontext.go create mode 100644 s3/utils/encode.go create mode 100644 s3/utils/hash/errors.go create mode 100644 s3/utils/hash/reader.go create mode 100644 s3/utils/ip.go create mode 100644 s3/utils/levels.go create mode 100644 s3/utils/signature.go create mode 100644 s3/utils/utils.go create mode 100644 s3/utils/xml.go diff --git a/s3/action/action.go b/s3/action/action.go new file mode 100644 index 000000000..4744e0252 --- /dev/null +++ b/s3/action/action.go @@ -0,0 +1,274 @@ +package s3action + +type Action string + +// ActionSet - set of actions. +// https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3.html#amazons3-actions-as-permissions +const ( + //--- bucket + + // CreateBucketAction - CreateBucket Rest API action. + CreateBucketAction = "s3:CreateBucket" + + // HeadBucketAction - HeadBucket Rest API action. + HeadBucketAction = "s3:HeadBucket" + + // ListBucketAction - ListBucket Rest API action. + ListBucketAction = "s3:ListBucket" + + // DeleteBucketAction - DeleteBucket Rest API action. 
+ DeleteBucketAction = "s3:DeleteBucket" + + // PutBucketAclAction - PutBucketAcl Rest API action. + PutBucketAclAction = "s3:PutBucketAcl" + + // GetBucketAclAction - GetBucketAcl Rest API action. + GetBucketAclAction = "s3:GetBucketAcl" + + //--- object + + // ListObjectsAction - ListObjects Rest API action. + ListObjectsAction = "s3:ListObjects" + + // ListObjectsV2Action - ListObjectsV2 Rest API action. + ListObjectsV2Action = "s3:ListObjectsV2" + + // HeadObjectAction - HeadObject Rest API action. + HeadObjectAction = "s3:HeadObject" + + // PutObjectAction - PutObject Rest API action. + PutObjectAction = "s3:PutObject" + + // GetObjectAction - GetObject Rest API action. + GetObjectAction = "s3:GetObject" + + // CopyObjectAction - CopyObject Rest API action. + CopyObjectAction = "s3:CopyObject" + + // DeleteObjectAction - DeleteObject Rest API action. + DeleteObjectAction = "s3:DeleteObject" + + // DeleteObjectsAction - DeleteObjects Rest API action. + DeleteObjectsAction = "s3:DeleteObjects" + + //--- multipart upload + + // CreateMultipartUploadAction - CreateMultipartUpload Rest API action. + CreateMultipartUploadAction Action = "s3:CreateMultipartUpload" + + // AbortMultipartUploadAction - AbortMultipartUpload Rest API action. + AbortMultipartUploadAction Action = "s3:AbortMultipartUpload" + + // CompleteMultipartUploadAction - CompleteMultipartUpload Rest API action. + CompleteMultipartUploadAction Action = "s3:CompleteMultipartUpload" + + // UploadPartAction - UploadPartUpload Rest API action. + UploadPartAction Action = "s3:UploadPartUpload" +) + +// SupportedActions List of all supported actions. 
+var SupportedActions = map[Action]struct{}{ + CreateBucketAction: {}, + HeadBucketAction: {}, + ListBucketAction: {}, + DeleteBucketAction: {}, + PutBucketAclAction: {}, + GetBucketAclAction: {}, + + ListObjectsAction: {}, + ListObjectsV2Action: {}, + HeadObjectAction: {}, + PutObjectAction: {}, + GetObjectAction: {}, + CopyObjectAction: {}, + DeleteObjectAction: {}, + DeleteObjectsAction: {}, + + CreateMultipartUploadAction: {}, + AbortMultipartUploadAction: {}, + CompleteMultipartUploadAction: {}, + UploadPartAction: {}, +} + +// IsValid - checks if action is valid or not. +func (action Action) IsValid() bool { + for supAction := range SupportedActions { + if action.Match(supAction) { + return true + } + } + return false +} + +// Match - matches action name with action patter. +func (action Action) Match(a Action) bool { + //return set.Match(string(action), string(a)) + return true +} + +// List of all supported object actions. +var supportedObjectActions = map[Action]struct{}{ + ListObjectsAction: {}, + ListObjectsV2Action: {}, + HeadObjectAction: {}, + PutObjectAction: {}, + GetObjectAction: {}, + CopyObjectAction: {}, + DeleteObjectAction: {}, + DeleteObjectsAction: {}, + + CreateMultipartUploadAction: {}, + AbortMultipartUploadAction: {}, + CompleteMultipartUploadAction: {}, + UploadPartAction: {}, +} + +// IsObjectAction - returns whether action is object type or not. 
+func (action Action) IsObjectAction() bool { + _, ok := supportedObjectActions[action] + return ok +} + +//func createActionConditionKeyMap() map[Action]condition.KeySet { +// commonKeys := []condition.Key{} +// for _, keyName := range condition.CommonKeys { +// commonKeys = append(commonKeys, keyName.ToKey()) +// } +// +// return map[Action]condition.KeySet{ +// AbortMultipartUploadAction: condition.NewKeySet(commonKeys...), +// +// CreateBucketAction: condition.NewKeySet(commonKeys...), +// +// DeleteObjectAction: condition.NewKeySet(commonKeys...), +// +// GetBucketLocationAction: condition.NewKeySet(commonKeys...), +// +// GetBucketPolicyStatusAction: condition.NewKeySet(commonKeys...), +// +// GetObjectAction: condition.NewKeySet( +// append([]condition.Key{ +// condition.S3XAmzServerSideEncryption.ToKey(), +// condition.S3XAmzServerSideEncryptionCustomerAlgorithm.ToKey(), +// }, commonKeys...)...), +// +// HeadBucketAction: condition.NewKeySet(commonKeys...), +// +// ListAllMyBucketsAction: condition.NewKeySet(commonKeys...), +// +// ListBucketAction: condition.NewKeySet( +// append([]condition.Key{ +// condition.S3Prefix.ToKey(), +// condition.S3Delimiter.ToKey(), +// condition.S3MaxKeys.ToKey(), +// }, commonKeys...)...), +// +// ListBucketVersionsAction: condition.NewKeySet( +// append([]condition.Key{ +// condition.S3Prefix.ToKey(), +// condition.S3Delimiter.ToKey(), +// condition.S3MaxKeys.ToKey(), +// }, commonKeys...)...), +// +// ListBucketMultipartUploadsAction: condition.NewKeySet(commonKeys...), +// +// ListenNotificationAction: condition.NewKeySet(commonKeys...), +// +// ListenBucketNotificationAction: condition.NewKeySet(commonKeys...), +// +// ListMultipartUploadPartsAction: condition.NewKeySet(commonKeys...), +// +// PutObjectAction: condition.NewKeySet( +// append([]condition.Key{ +// condition.S3XAmzCopySource.ToKey(), +// condition.S3XAmzServerSideEncryption.ToKey(), +// condition.S3XAmzServerSideEncryptionCustomerAlgorithm.ToKey(), +// 
condition.S3XAmzMetadataDirective.ToKey(), +// condition.S3XAmzStorageClass.ToKey(), +// condition.S3ObjectLockRetainUntilDate.ToKey(), +// condition.S3ObjectLockMode.ToKey(), +// condition.S3ObjectLockLegalHold.ToKey(), +// condition.S3RequestObjectTagKeys.ToKey(), +// condition.S3RequestObjectTag.ToKey(), +// }, commonKeys...)...), +// +// // https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html +// // LockLegalHold is not supported with PutObjectRetentionAction +// PutObjectRetentionAction: condition.NewKeySet( +// append([]condition.Key{ +// condition.S3ObjectLockRemainingRetentionDays.ToKey(), +// condition.S3ObjectLockRetainUntilDate.ToKey(), +// condition.S3ObjectLockMode.ToKey(), +// }, commonKeys...)...), +// +// GetObjectRetentionAction: condition.NewKeySet(commonKeys...), +// PutObjectLegalHoldAction: condition.NewKeySet( +// append([]condition.Key{ +// condition.S3ObjectLockLegalHold.ToKey(), +// }, commonKeys...)...), +// GetObjectLegalHoldAction: condition.NewKeySet(commonKeys...), +// +// // https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html +// BypassGovernanceRetentionAction: condition.NewKeySet( +// append([]condition.Key{ +// condition.S3ObjectLockRemainingRetentionDays.ToKey(), +// condition.S3ObjectLockRetainUntilDate.ToKey(), +// condition.S3ObjectLockMode.ToKey(), +// condition.S3ObjectLockLegalHold.ToKey(), +// }, commonKeys...)...), +// +// GetBucketObjectLockConfigurationAction: condition.NewKeySet(commonKeys...), +// PutBucketObjectLockConfigurationAction: condition.NewKeySet(commonKeys...), +// GetBucketTaggingAction: condition.NewKeySet(commonKeys...), +// PutBucketTaggingAction: condition.NewKeySet( +// append([]condition.Key{ +// condition.S3RequestObjectTagKeys.ToKey(), +// condition.S3RequestObjectTag.ToKey(), +// }, commonKeys...)...), +// PutObjectTaggingAction: condition.NewKeySet( +// append([]condition.Key{ +// condition.S3RequestObjectTagKeys.ToKey(), +// condition.S3RequestObjectTag.ToKey(), +// 
}, commonKeys...)...), +// GetObjectTaggingAction: condition.NewKeySet(commonKeys...), +// DeleteObjectTaggingAction: condition.NewKeySet( +// append([]condition.Key{ +// condition.S3RequestObjectTagKeys.ToKey(), +// condition.S3RequestObjectTag.ToKey(), +// }, commonKeys...)...), +// PutObjectVersionTaggingAction: condition.NewKeySet( +// append([]condition.Key{ +// condition.S3VersionID.ToKey(), +// condition.S3RequestObjectTagKeys.ToKey(), +// condition.S3RequestObjectTag.ToKey(), +// }, commonKeys...)...), +// GetObjectVersionAction: condition.NewKeySet( +// append([]condition.Key{ +// condition.S3VersionID.ToKey(), +// }, commonKeys...)...), +// GetObjectVersionTaggingAction: condition.NewKeySet( +// append([]condition.Key{ +// condition.S3VersionID.ToKey(), +// }, commonKeys...)...), +// DeleteObjectVersionAction: condition.NewKeySet( +// append([]condition.Key{ +// condition.S3VersionID.ToKey(), +// }, commonKeys...)...), +// DeleteObjectVersionTaggingAction: condition.NewKeySet( +// append([]condition.Key{ +// condition.S3VersionID.ToKey(), +// condition.S3RequestObjectTagKeys.ToKey(), +// condition.S3RequestObjectTag.ToKey(), +// }, commonKeys...)...), +// GetReplicationConfigurationAction: condition.NewKeySet(commonKeys...), +// PutReplicationConfigurationAction: condition.NewKeySet(commonKeys...), +// ReplicateObjectAction: condition.NewKeySet(commonKeys...), +// ReplicateDeleteAction: condition.NewKeySet(commonKeys...), +// ReplicateTagsAction: condition.NewKeySet(commonKeys...), +// GetObjectVersionForReplicationAction: condition.NewKeySet(commonKeys...), +// RestoreObjectAction: condition.NewKeySet(commonKeys...), +// } +//} +// +//// ActionConditionKeyMap - holds mapping of supported condition key for an action. 
+//var ActionConditionKeyMap = createActionConditionKeyMap() diff --git a/s3/action/action_test.go b/s3/action/action_test.go new file mode 100644 index 000000000..a2fcd3e50 --- /dev/null +++ b/s3/action/action_test.go @@ -0,0 +1,45 @@ +package s3action + +import "testing" + +func TestAction_IsValid(t *testing.T) { + testCases := []struct { + action Action + expectedResult bool + }{ + {Action("*"), true}, + {Action(PutObjectAction), true}, + {Action("abcd"), false}, + {Action(PutObjectAction + "*"), true}, + } + for _, testCase := range testCases { + if testCase.action.IsValid() != testCase.expectedResult { + t.Errorf("Test case failed: %s", testCase.action) + } + } +} +func TestAction_Match(t *testing.T) { + testCases := []struct { + name string + action Action + resource Action + expectedResult bool + }{ + {"test1", Action("*"), Action(""), true}, + {"test1", Action("*"), Action(PutObjectAction), true}, + {"test1", Action("*"), Action("abcd"), true}, + {"test2", Action(PutObjectAction), Action(""), false}, + {"test2", Action(PutObjectAction), Action(PutObjectAction), true}, + {"test2", Action(PutObjectAction), Action("abcd"), false}, + {"test3", Action(""), Action("*"), false}, + {"test3", Action(""), Action(PutObjectAction), false}, + {"test3", Action(""), Action("abcd"), false}, + } + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + if testCase.action.Match(testCase.resource) != testCase.expectedResult { + t.Errorf("Test case failed: %s", testCase.action) + } + }) + } +} diff --git a/s3/apierrors/errors.go b/s3/apierrors/errors.go new file mode 100644 index 000000000..ba968163a --- /dev/null +++ b/s3/apierrors/errors.go @@ -0,0 +1,80 @@ +package apierrors + +import ( + "context" + "github.com/yann-y/fds/internal/lock" + "github.com/yann-y/fds/internal/store" + "github.com/yann-y/fds/internal/utils/hash" + "github.com/yann-y/fds/pkg/s3utils" + "golang.org/x/xerrors" + "net/url" +) + +// NotImplemented If a feature is not 
implemented +type NotImplemented struct { + Message string +} + +// ContextCanceled returns whether a context is canceled. +func ContextCanceled(ctx context.Context) bool { + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +func ToApiError(ctx context.Context, err error) ErrorCode { + if ContextCanceled(ctx) { + if ctx.Err() == context.Canceled { + return ErrClientDisconnected + } + } + errCode := ErrInternalError + switch err.(type) { + case lock.OperationTimedOut: + errCode = ErrOperationTimedOut + case hash.SHA256Mismatch: + errCode = ErrContentSHA256Mismatch + case hash.BadDigest: + errCode = ErrBadDigest + case store.BucketNotFound: + errCode = ErrNoSuchBucket + case store.BucketPolicyNotFound: + errCode = ErrNoSuchBucketPolicy + case store.BucketTaggingNotFound: + errCode = ErrBucketTaggingNotFound + case s3utils.BucketNameInvalid: + errCode = ErrInvalidBucketName + case s3utils.ObjectNameInvalid: + errCode = ErrInvalidObjectName + case s3utils.ObjectNameTooLong: + errCode = ErrKeyTooLongError + case s3utils.ObjectNamePrefixAsSlash: + errCode = ErrInvalidObjectNamePrefixSlash + case s3utils.InvalidUploadIDKeyCombination: + errCode = ErrNotImplemented + case s3utils.InvalidMarkerPrefixCombination: + errCode = ErrNotImplemented + case s3utils.MalformedUploadID: + errCode = ErrNoSuchUpload + case s3utils.InvalidUploadID: + errCode = ErrNoSuchUpload + case s3utils.InvalidPart: + errCode = ErrInvalidPart + case s3utils.PartTooSmall: + errCode = ErrEntityTooSmall + case s3utils.PartTooBig: + errCode = ErrEntityTooLarge + case url.EscapeError: + errCode = ErrInvalidObjectName + default: + if xerrors.Is(err, store.ErrObjectNotFound) { + errCode = ErrNoSuchKey + } else if xerrors.Is(err, store.ErrBucketNotEmpty) { + errCode = ErrBucketNotEmpty + } + } + return errCode +} diff --git a/s3/apierrors/s3_error.go b/s3/apierrors/s3_error.go new file mode 100644 index 000000000..c440daa24 --- /dev/null +++ b/s3/apierrors/s3_error.go @@ -0,0 
+1,44 @@ +package apierrors + +// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +var s3ErrorResponseMap = map[string]string{ + "AccessDenied": "Access Denied.", + "BadDigest": "The Content-Md5 you specified did not match what we received.", + "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", + "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", + "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", + "InternalError": "We encountered an internal error, please try again.", + "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", + "InvalidBucketName": "The specified bucket is not valid.", + "InvalidDigest": "The Content-Md5 you specified is not valid.", + "InvalidRange": "The requested range is not satisfiable", + "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", + "MissingContentLength": "You must provide the Content-Length HTTP header.", + "MissingContentMD5": "Missing required header for this request: Content-Md5.", + "MissingRequestBodyError": "Request body is empty.", + "NoSuchBucket": "The specified bucket does not exist.", + "NoSuchBucketPolicy": "The bucket policy does not exist", + "NoSuchKey": "The specified key does not exist.", + "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", + "NotImplemented": "A header you provided implies functionality that is not implemented", + "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", + "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", + "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. 
Check your key and signing method.", + "MethodNotAllowed": "The specified method is not allowed against this resource.", + "InvalidPart": "One or more of the specified parts could not be found.", + "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.", + "InvalidObjectState": "The operation is not valid for the current state of the object.", + "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", + "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", + "BucketNotEmpty": "The bucket you tried to delete is not empty", + "AllAccessDisabled": "All access to this bucket has been disabled.", + "MalformedPolicy": "Policy has invalid resource.", + "MissingFields": "Missing fields in request.", + "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", + "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", + "InvalidDuration": "Duration provided in the request is invalid.", + "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", + // Add new API errors here. 
+ "NoSuchCORSConfiguration": "The CORS configuration does not exist", +} diff --git a/s3/apierrors/s3api_errors.go b/s3/apierrors/s3api_errors.go new file mode 100644 index 000000000..39505c5f5 --- /dev/null +++ b/s3/apierrors/s3api_errors.go @@ -0,0 +1,1310 @@ +package apierrors + +import ( + "encoding/xml" + "fmt" + "net/http" +) + +// APIError structure +type APIError struct { + Code string + Description string + HTTPStatusCode int +} + +// RESTErrorResponse - error response format +type RESTErrorResponse struct { + XMLName xml.Name `xml:"Error" json:"-"` + Code string `xml:"Code" json:"Code"` + Message string `xml:"Message" json:"Message"` + Resource string `xml:"Resource" json:"Resource"` + RequestID string `xml:"RequestId" json:"RequestId"` + Key string `xml:"Key,omitempty" json:"Key,omitempty"` + BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"` +} + +// Error - Returns S3 error string. +func (e RESTErrorResponse) Error() string { + if e.Message == "" { + msg, ok := s3ErrorResponseMap[e.Code] + if !ok { + msg = fmt.Sprintf("Error response code %s.", e.Code) + } + return msg + } + return e.Message +} + +// ErrorCode type of error status. 
+type ErrorCode int + +// Error codes, non exhaustive list - http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +const ( + ErrNone ErrorCode = iota + ErrAccessDenied + ErrBadDigest + ErrEntityTooSmall + ErrEntityTooLarge + ErrIncompleteBody + ErrInternalError + ErrInvalidAccessKeyID + ErrAccessKeyDisabled + ErrInvalidBucketName + ErrInvalidDigest + ErrInvalidRange + ErrInvalidRangePartNumber + ErrInvalidCopyPartRange + ErrInvalidCopyPartRangeSource + ErrInvalidMaxKeys + ErrInvalidEncodingMethod + ErrInvalidMaxUploads + ErrInvalidMaxParts + ErrInvalidPartNumberMarker + ErrInvalidRequestBody + ErrInvalidCopySource + ErrInvalidMetadataDirective + ErrInvalidCopyDest + ErrInvalidPolicyDocument + ErrInvalidObjectState + ErrMalformedXML + ErrMissingContentLength + ErrMissingContentMD5 + ErrMissingRequestBodyError + ErrMissingSecurityHeader + ErrNoSuchUser + ErrUserAlreadyExists + ErrNoSuchUserPolicy + ErrUserPolicyAlreadyExists + ErrNoSuchBucket + ErrNoSuchBucketPolicy + ErrNoSuchLifecycleConfiguration + ErrNoSuchCORSConfiguration + ErrNoSuchWebsiteConfiguration + ErrReplicationConfigurationNotFoundError + ErrReplicationNeedsVersioningError + ErrReplicationBucketNeedsVersioningError + ErrObjectRestoreAlreadyInProgress + ErrNoSuchKey + ErrNoSuchUpload + ErrInvalidVersionID + ErrNoSuchVersion + ErrNotImplemented + ErrPreconditionFailed + ErrRequestTimeTooSkewed + ErrSignatureDoesNotMatch + ErrMethodNotAllowed + ErrInvalidPart + ErrInvalidPartOrder + ErrAuthorizationHeaderMalformed + ErrMalformedDate + ErrMalformedPOSTRequest + ErrPOSTFileRequired + ErrSignatureVersionNotSupported + ErrBucketNotEmpty + ErrAllAccessDisabled + ErrMalformedPolicy + ErrMissingFields + ErrMissingCredTag + ErrCredMalformed + ErrInvalidRegion + + ErrMissingSignTag + ErrMissingSignHeadersTag + + ErrAuthHeaderEmpty + ErrExpiredPresignRequest + ErrRequestNotReadyYet + ErrUnsignedHeaders + ErrMissingDateHeader + + ErrBucketAlreadyOwnedByYou + ErrInvalidDuration + ErrBucketAlreadyExists 
+ ErrMetadataTooLarge + ErrUnsupportedMetadata + + ErrSlowDown + ErrBadRequest + ErrKeyTooLongError + ErrInvalidBucketObjectLockConfiguration + ErrObjectLockConfigurationNotAllowed + ErrNoSuchObjectLockConfiguration + ErrObjectLocked + ErrInvalidRetentionDate + ErrPastObjectLockRetainDate + ErrUnknownWORMModeDirective + ErrBucketTaggingNotFound + ErrObjectLockInvalidHeaders + ErrInvalidTagDirective + // Add new error codes here. + + // SSE-S3 related API errors + ErrInvalidEncryptionMethod + ErrInvalidQueryParams + ErrNoAccessKey + ErrInvalidToken + + // Bucket notification related errors. + ErrEventNotification + ErrARNNotification + ErrRegionNotification + ErrOverlappingFilterNotification + ErrFilterNameInvalid + ErrFilterNamePrefix + ErrFilterNameSuffix + ErrFilterValueInvalid + ErrOverlappingConfigs + + // S3 extended errors. + ErrContentSHA256Mismatch + + // Add new extended error codes here. + ErrInvalidObjectName + ErrInvalidObjectNamePrefixSlash + ErrClientDisconnected + ErrOperationTimedOut + ErrOperationMaxedOut + ErrInvalidRequest + ErrIncorrectContinuationToken + ErrInvalidFormatAccessKey + + // S3 Select Errors + ErrEmptyRequestBody + ErrUnsupportedFunction + ErrInvalidExpressionType + ErrBusy + ErrUnauthorizedAccess + ErrExpressionTooLong + ErrIllegalSQLFunctionArgument + ErrInvalidKeyPath + ErrInvalidCompressionFormat + ErrInvalidFileHeaderInfo + ErrInvalidJSONType + ErrInvalidQuoteFields + ErrInvalidRequestParameter + ErrInvalidDataType + ErrInvalidTextEncoding + ErrInvalidDataSource + ErrInvalidTableAlias + ErrMissingRequiredParameter + ErrObjectSerializationConflict + ErrUnsupportedSQLOperation + ErrUnsupportedSQLStructure + ErrUnsupportedSyntax + ErrUnsupportedRangeHeader + ErrLexerInvalidChar + ErrLexerInvalidOperator + ErrLexerInvalidLiteral + ErrLexerInvalidIONLiteral + ErrParseExpectedDatePart + ErrParseExpectedKeyword + ErrParseExpectedTokenType + ErrParseExpected2TokenTypes + ErrParseExpectedNumber + 
ErrParseExpectedRightParenBuiltinFunctionCall + ErrParseExpectedTypeName + ErrParseExpectedWhenClause + ErrParseUnsupportedToken + ErrParseUnsupportedLiteralsGroupBy + ErrParseExpectedMember + ErrParseUnsupportedSelect + ErrParseUnsupportedCase + ErrParseUnsupportedCaseClause + ErrParseUnsupportedAlias + ErrParseUnsupportedSyntax + ErrParseUnknownOperator + ErrParseMissingIdentAfterAt + ErrParseUnexpectedOperator + ErrParseUnexpectedTerm + ErrParseUnexpectedToken + ErrParseUnexpectedKeyword + ErrParseExpectedExpression + ErrParseExpectedLeftParenAfterCast + ErrParseExpectedLeftParenValueConstructor + ErrParseExpectedLeftParenBuiltinFunctionCall + ErrParseExpectedArgumentDelimiter + ErrParseCastArity + ErrParseInvalidTypeParam + ErrParseEmptySelect + ErrParseSelectMissingFrom + ErrParseExpectedIdentForGroupName + ErrParseExpectedIdentForAlias + ErrParseUnsupportedCallWithStar + ErrParseNonUnaryAgregateFunctionCall + ErrParseMalformedJoin + ErrParseExpectedIdentForAt + ErrParseAsteriskIsNotAloneInSelectList + ErrParseCannotMixSqbAndWildcardInSelectList + ErrParseInvalidContextForWildcardInSelectList + ErrIncorrectSQLFunctionArgumentType + ErrValueParseFailure + ErrEvaluatorInvalidArguments + ErrIntegerOverflow + ErrLikeInvalidInputs + ErrCastFailed + ErrInvalidCast + ErrEvaluatorInvalidTimestampFormatPattern + ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing + ErrEvaluatorTimestampFormatPatternDuplicateFields + ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch + ErrEvaluatorUnterminatedTimestampFormatPatternToken + ErrEvaluatorInvalidTimestampFormatPatternToken + ErrEvaluatorInvalidTimestampFormatPatternSymbol + ErrEvaluatorBindingDoesNotExist + ErrMissingHeaders + ErrInvalidColumnIndex + ErrPostPolicyConditionInvalidFormat + + ErrMalformedJSON +) + +// error code to APIError structure, these fields carry respective +// descriptions for all the error responses. 
+var errorCodeResponse = map[ErrorCode]APIError{ + ErrInvalidCopyDest: { + Code: "InvalidRequest", + Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidCopySource: { + Code: "InvalidArgument", + Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidMetadataDirective: { + Code: "InvalidArgument", + Description: "Unknown metadata directive.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidRequestBody: { + Code: "InvalidArgument", + Description: "Body shouldn't be set for this request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidMaxUploads: { + Code: "InvalidArgument", + Description: "Argument max-uploads must be an integer between 0 and 2147483647", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidMaxKeys: { + Code: "InvalidArgument", + Description: "Argument maxKeys must be an integer between 0 and 2147483647", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidEncodingMethod: { + Code: "InvalidArgument", + Description: "Invalid Encoding Method specified in Request", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidMaxParts: { + Code: "InvalidArgument", + Description: "Part number must be an integer between 1 and 10000, inclusive", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidPartNumberMarker: { + Code: "InvalidArgument", + Description: "Argument partNumberMarker must be an integer.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidPolicyDocument: { + Code: "InvalidPolicyDocument", + Description: "The content of the form does not meet the conditions specified in the policy document.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrAccessDenied: { + Code: "AccessDenied", + Description: 
"Access Denied.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrBadDigest: { + Code: "BadDigest", + Description: "The Content-Md5 you specified did not match what we received.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEntityTooSmall: { + Code: "EntityTooSmall", + Description: "Your proposed upload is smaller than the minimum allowed object size.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEntityTooLarge: { + Code: "EntityTooLarge", + Description: "Your proposed upload exceeds the maximum allowed object size.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrIncompleteBody: { + Code: "IncompleteBody", + Description: "You did not provide the number of bytes specified by the Content-Length HTTP header.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInternalError: { + Code: "InternalError", + Description: "We encountered an internal error, please try again.", + HTTPStatusCode: http.StatusInternalServerError, + }, + ErrInvalidAccessKeyID: { + Code: "InvalidAccessKeyId", + Description: "The Access Key Id you provided does not exist in our records.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrAccessKeyDisabled: { + Code: "InvalidAccessKeyId", + Description: "Your account is disabled; please contact your administrator.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrInvalidBucketName: { + Code: "InvalidBucketName", + Description: "The specified bucket is not valid.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidDigest: { + Code: "InvalidDigest", + Description: "The Content-Md5 you specified is not valid.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidRange: { + Code: "InvalidRange", + Description: "The requested range is not satisfiable", + HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + ErrInvalidRangePartNumber: { + Code: "InvalidRequest", + Description: "Cannot specify both Range header and partNumber query parameter", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedXML: { 
+ Code: "MalformedXML", + Description: "The XML you provided was not well-formed or did not validate against our published schema.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingContentLength: { + Code: "MissingContentLength", + Description: "You must provide the Content-Length HTTP header.", + HTTPStatusCode: http.StatusLengthRequired, + }, + ErrMissingContentMD5: { + Code: "MissingContentMD5", + Description: "Missing required header for this request: Content-Md5.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingSecurityHeader: { + Code: "MissingSecurityHeader", + Description: "Your request was missing a required header", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingRequestBodyError: { + Code: "MissingRequestBodyError", + Description: "Request body is empty.", + HTTPStatusCode: http.StatusLengthRequired, + }, + ErrNoSuchBucket: { + Code: "NoSuchBucket", + Description: "The specified bucket does not exist", + HTTPStatusCode: http.StatusNotFound, + }, + ErrNoSuchBucketPolicy: { + Code: "NoSuchBucketPolicy", + Description: "The bucket policy does not exist", + HTTPStatusCode: http.StatusNotFound, + }, + ErrNoSuchLifecycleConfiguration: { + Code: "NoSuchLifecycleConfiguration", + Description: "The lifecycle configuration does not exist", + HTTPStatusCode: http.StatusNotFound, + }, + ErrNoSuchUser: { + Code: "NoSuchUser", + Description: "The specified user does not exist", + HTTPStatusCode: http.StatusConflict, + }, + ErrUserAlreadyExists: { + Code: "UserAlreadyExists", + Description: "The request was rejected because it attempted to create a resource that already exists .", + HTTPStatusCode: http.StatusConflict, + }, + ErrNoSuchUserPolicy: { + Code: "NoSuchUserPolicy", + Description: "The specified user policy does not exist", + HTTPStatusCode: http.StatusConflict, + }, + ErrUserPolicyAlreadyExists: { + Code: "UserPolicyAlreadyExists", + Description: "The same user policy already exists .", + HTTPStatusCode: http.StatusConflict, + 
}, + ErrNoSuchKey: { + Code: "NoSuchKey", + Description: "The specified key does not exist.", + HTTPStatusCode: http.StatusNotFound, + }, + ErrNoSuchUpload: { + Code: "NoSuchUpload", + Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", + HTTPStatusCode: http.StatusNotFound, + }, + ErrInvalidVersionID: { + Code: "InvalidArgument", + Description: "Invalid version id specified", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNoSuchVersion: { + Code: "NoSuchVersion", + Description: "The specified version does not exist.", + HTTPStatusCode: http.StatusNotFound, + }, + ErrNotImplemented: { + Code: "NotImplemented", + Description: "A header you provided implies functionality that is not implemented", + HTTPStatusCode: http.StatusNotImplemented, + }, + ErrPreconditionFailed: { + Code: "PreconditionFailed", + Description: "At least one of the pre-conditions you specified did not hold", + HTTPStatusCode: http.StatusPreconditionFailed, + }, + ErrRequestTimeTooSkewed: { + Code: "RequestTimeTooSkewed", + Description: "The difference between the request time and the server's time is too large.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrSignatureDoesNotMatch: { + Code: "SignatureDoesNotMatch", + Description: "The request signature we calculated does not match the signature you provided. Check your key and signing method.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrMethodNotAllowed: { + Code: "MethodNotAllowed", + Description: "The specified method is not allowed against this resource.", + HTTPStatusCode: http.StatusMethodNotAllowed, + }, + ErrInvalidPart: { + Code: "InvalidPart", + Description: "One or more of the specified parts could not be found. 
The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidPartOrder: { + Code: "InvalidPartOrder", + Description: "The list of parts was not in ascending order. The parts list must be specified in order by part number.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidObjectState: { + Code: "InvalidObjectState", + Description: "The operation is not valid for the current state of the object.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrAuthorizationHeaderMalformed: { + Code: "AuthorizationHeaderMalformed", + Description: "The authorization header is malformed; the region is wrong; expecting 'us-east-1'.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedPOSTRequest: { + Code: "MalformedPOSTRequest", + Description: "The body of your POST request is not well-formed multipart/form-data.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrPOSTFileRequired: { + Code: "InvalidArgument", + Description: "POST requires exactly one file upload per request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrSignatureVersionNotSupported: { + Code: "InvalidRequest", + Description: "The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrBucketNotEmpty: { + Code: "BucketNotEmpty", + Description: "The bucket you tried to delete is not empty", + HTTPStatusCode: http.StatusConflict, + }, + ErrBucketAlreadyExists: { + Code: "BucketAlreadyExists", + Description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. 
Please select a different name and try again.", + HTTPStatusCode: http.StatusConflict, + }, + ErrAllAccessDisabled: { + Code: "AllAccessDisabled", + Description: "All access to this resource has been disabled.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrMalformedPolicy: { + Code: "MalformedPolicy", + Description: "Policy has invalid resource.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingCredTag: { + Code: "InvalidRequest", + Description: "Missing Credential field for this request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidRegion: { + Code: "InvalidRegion", + Description: "Region does not match.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingSignTag: { + Code: "AccessDenied", + Description: "Signature header missing Signature field.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingSignHeadersTag: { + Code: "InvalidArgument", + Description: "Signature header missing SignedHeaders field.", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrAuthHeaderEmpty: { + Code: "InvalidArgument", + Description: "Authorization header is invalid -- one and only one ' ' (space) required.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingDateHeader: { + Code: "AccessDenied", + Description: "AWS authentication requires a valid Date or x-amz-date header", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrExpiredPresignRequest: { + Code: "AccessDenied", + Description: "Request has expired", + HTTPStatusCode: http.StatusForbidden, + }, + ErrRequestNotReadyYet: { + Code: "AccessDenied", + Description: "Request is not valid yet", + HTTPStatusCode: http.StatusForbidden, + }, + ErrSlowDown: { + Code: "SlowDown", + Description: "Resource requested is unreadable, please reduce your request rate", + HTTPStatusCode: http.StatusServiceUnavailable, + }, + ErrBadRequest: { + Code: "BadRequest", + Description: "400 BadRequest", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrKeyTooLongError: { + Code: "KeyTooLongError", 
+ Description: "Your key is too long", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsignedHeaders: { + Code: "AccessDenied", + Description: "There were headers present in the request which were not signed", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrBucketAlreadyOwnedByYou: { + Code: "BucketAlreadyOwnedByYou", + Description: "Your previous request to create the named bucket succeeded and you already own it.", + HTTPStatusCode: http.StatusConflict, + }, + ErrInvalidDuration: { + Code: "InvalidDuration", + Description: "Duration provided in the request is invalid.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidBucketObjectLockConfiguration: { + Code: "InvalidRequest", + Description: "Bucket is missing ObjectLockConfiguration", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrBucketTaggingNotFound: { + Code: "NoSuchTagSet", + Description: "The TagSet does not exist", + HTTPStatusCode: http.StatusNotFound, + }, + ErrObjectLockConfigurationNotAllowed: { + Code: "InvalidBucketState", + Description: "Object Lock configuration cannot be enabled on existing buckets", + HTTPStatusCode: http.StatusConflict, + }, + ErrNoSuchCORSConfiguration: { + Code: "NoSuchCORSConfiguration", + Description: "The CORS configuration does not exist", + HTTPStatusCode: http.StatusNotFound, + }, + ErrNoSuchWebsiteConfiguration: { + Code: "NoSuchWebsiteConfiguration", + Description: "The specified bucket does not have a website configuration", + HTTPStatusCode: http.StatusNotFound, + }, + ErrReplicationConfigurationNotFoundError: { + Code: "ReplicationConfigurationNotFoundError", + Description: "The replication configuration was not found", + HTTPStatusCode: http.StatusNotFound, + }, + ErrReplicationNeedsVersioningError: { + Code: "InvalidRequest", + Description: "Versioning must be 'Enabled' on the bucket to apply a replication configuration", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrReplicationBucketNeedsVersioningError: { + Code: "InvalidRequest", + 
Description: "Versioning must be 'Enabled' on the bucket to add a replication target", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNoSuchObjectLockConfiguration: { + Code: "NoSuchObjectLockConfiguration", + Description: "The specified object does not have a ObjectLock configuration", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrObjectLocked: { + Code: "InvalidRequest", + Description: "Object is WORM protected and cannot be overwritten", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidRetentionDate: { + Code: "InvalidRequest", + Description: "Date must be provided in ISO 8601 format", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrPastObjectLockRetainDate: { + Code: "InvalidRequest", + Description: "the retain until date must be in the future", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnknownWORMModeDirective: { + Code: "InvalidRequest", + Description: "unknown wormMode directive", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrObjectLockInvalidHeaders: { + Code: "InvalidRequest", + Description: "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrObjectRestoreAlreadyInProgress: { + Code: "RestoreAlreadyInProgress", + Description: "Object restore is already in progress", + HTTPStatusCode: http.StatusConflict, + }, + // Bucket notification related errors. + ErrEventNotification: { + Code: "InvalidArgument", + Description: "A specified event is not supported for notifications.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrARNNotification: { + Code: "InvalidArgument", + Description: "A specified destination ARN does not exist or is not well-formed. Verify the destination ARN.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrRegionNotification: { + Code: "InvalidArgument", + Description: "A specified destination is in a different region than the bucket. 
You must use a destination that resides in the same region as the bucket.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrOverlappingFilterNotification: { + Code: "InvalidArgument", + Description: "An object key name filtering rule defined with overlapping prefixes, overlapping suffixes, or overlapping combinations of prefixes and suffixes for the same event types.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrFilterNameInvalid: { + Code: "InvalidArgument", + Description: "filter rule name must be either prefix or suffix", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrFilterNamePrefix: { + Code: "InvalidArgument", + Description: "Cannot specify more than one prefix rule in a filter.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrFilterNameSuffix: { + Code: "InvalidArgument", + Description: "Cannot specify more than one suffix rule in a filter.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrFilterValueInvalid: { + Code: "InvalidArgument", + Description: "Size of filter rule value cannot exceed 1024 bytes in UTF-8 representation", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrOverlappingConfigs: { + Code: "InvalidArgument", + Description: "Configurations overlap. 
Configurations on the same bucket cannot share a common event type.", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrInvalidCopyPartRange: { + Code: "InvalidArgument", + Description: "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidCopyPartRangeSource: { + Code: "InvalidArgument", + Description: "Range specified is not valid for source object", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMetadataTooLarge: { + Code: "MetadataTooLarge", + Description: "Your metadata headers exceed the maximum allowed metadata size.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidTagDirective: { + Code: "InvalidArgument", + Description: "Unknown tag directive.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidEncryptionMethod: { + Code: "InvalidRequest", + Description: "The encryption method specified is not supported", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidQueryParams: { + Code: "AuthorizationQueryParametersError", + Description: "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNoAccessKey: { + Code: "AccessDenied", + Description: "No AWSAccessKey was presented", + HTTPStatusCode: http.StatusForbidden, + }, + ErrInvalidToken: { + Code: "InvalidTokenId", + Description: "The security token included in the request is invalid", + HTTPStatusCode: http.StatusForbidden, + }, + + // S3 extensions. 
+ ErrInvalidObjectName: { + Code: "InvalidObjectName", + Description: "Object name contains unsupported characters.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidObjectNamePrefixSlash: { + Code: "InvalidObjectName", + Description: "Object name contains a leading slash.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrClientDisconnected: { + Code: "ClientDisconnected", + Description: "Client disconnected before response was ready", + HTTPStatusCode: 499, // No official code, use nginx value. + }, + ErrOperationTimedOut: { + Code: "RequestTimeout", + Description: "A timeout occurred while trying to lock a resource, please reduce your request rate", + HTTPStatusCode: http.StatusServiceUnavailable, + }, + ErrOperationMaxedOut: { + Code: "SlowDown", + Description: "A timeout exceeded while waiting to proceed with the request, please reduce your request rate", + HTTPStatusCode: http.StatusServiceUnavailable, + }, + ErrUnsupportedMetadata: { + Code: "InvalidArgument", + Description: "Your metadata headers are not supported.", + HTTPStatusCode: http.StatusBadRequest, + }, + // Generic Invalid-Request error. Should be used for response errors only for unlikely + // corner case errors for which introducing new APIErrorCode is not worth it. LogIf() + // should be used to log the error at the source of the error for debugging purposes. 
+ ErrInvalidRequest: { + Code: "InvalidRequest", + Description: "Invalid Request", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrIncorrectContinuationToken: { + Code: "InvalidArgument", + Description: "The continuation token provided is incorrect", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidFormatAccessKey: { + Code: "InvalidAccessKeyId", + Description: "The Access Key Id you provided contains invalid characters.", + HTTPStatusCode: http.StatusBadRequest, + }, + // S3 Select API Errors + ErrEmptyRequestBody: { + Code: "EmptyRequestBody", + Description: "Request body cannot be empty.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsupportedFunction: { + Code: "UnsupportedFunction", + Description: "Encountered an unsupported SQL function.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidDataSource: { + Code: "InvalidDataSource", + Description: "Invalid data source type. Only CSV and JSON are supported at this time.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidExpressionType: { + Code: "InvalidExpressionType", + Description: "The ExpressionType is invalid. Only SQL expressions are supported at this time.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrBusy: { + Code: "Busy", + Description: "The service is unavailable. 
Please retry.", + HTTPStatusCode: http.StatusServiceUnavailable, + }, + ErrUnauthorizedAccess: { + Code: "UnauthorizedAccess", + Description: "You are not authorized to perform this operation", + HTTPStatusCode: http.StatusUnauthorized, + }, + ErrExpressionTooLong: { + Code: "ExpressionTooLong", + Description: "The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrIllegalSQLFunctionArgument: { + Code: "IllegalSqlFunctionArgument", + Description: "Illegal argument was used in the SQL function.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidKeyPath: { + Code: "InvalidKeyPath", + Description: "Key path in the SQL expression is invalid.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidCompressionFormat: { + Code: "InvalidCompressionFormat", + Description: "The file is not in a supported compression format. Only GZIP is supported at this time.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidFileHeaderInfo: { + Code: "InvalidFileHeaderInfo", + Description: "The FileHeaderInfo is invalid. Only NONE, USE, and IGNORE are supported.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidJSONType: { + Code: "InvalidJsonType", + Description: "The JsonType is invalid. Only DOCUMENT and LINES are supported at this time.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidQuoteFields: { + Code: "InvalidQuoteFields", + Description: "The QuoteFields is invalid. Only ALWAYS and ASNEEDED are supported.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidRequestParameter: { + Code: "InvalidRequestParameter", + Description: "The value of a parameter in SelectRequest element is invalid. 
Check the service API documentation and try again.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidDataType: { + Code: "InvalidDataType", + Description: "The SQL expression contains an invalid data type.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidTextEncoding: { + Code: "InvalidTextEncoding", + Description: "Invalid encoding type. Only UTF-8 encoding is supported at this time.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidTableAlias: { + Code: "InvalidTableAlias", + Description: "The SQL expression contains an invalid table alias.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingRequiredParameter: { + Code: "MissingRequiredParameter", + Description: "The SelectRequest entity is missing a required parameter. Check the service documentation and try again.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrObjectSerializationConflict: { + Code: "ObjectSerializationConflict", + Description: "The SelectRequest entity can only contain one of CSV or JSON. Check the service documentation and try again.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsupportedSQLOperation: { + Code: "UnsupportedSqlOperation", + Description: "Encountered an unsupported SQL operation.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsupportedSQLStructure: { + Code: "UnsupportedSqlStructure", + Description: "Encountered an unsupported SQL structure. 
Check the SQL Reference.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsupportedSyntax: { + Code: "UnsupportedSyntax", + Description: "Encountered invalid syntax.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsupportedRangeHeader: { + Code: "UnsupportedRangeHeader", + Description: "Range header is not supported for this operation.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrLexerInvalidChar: { + Code: "LexerInvalidChar", + Description: "The SQL expression contains an invalid character.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrLexerInvalidOperator: { + Code: "LexerInvalidOperator", + Description: "The SQL expression contains an invalid literal.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrLexerInvalidLiteral: { + Code: "LexerInvalidLiteral", + Description: "The SQL expression contains an invalid operator.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrLexerInvalidIONLiteral: { + Code: "LexerInvalidIONLiteral", + Description: "The SQL expression contains an invalid operator.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedDatePart: { + Code: "ParseExpectedDatePart", + Description: "Did not find the expected date part in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedKeyword: { + Code: "ParseExpectedKeyword", + Description: "Did not find the expected keyword in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedTokenType: { + Code: "ParseExpectedTokenType", + Description: "Did not find the expected token in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpected2TokenTypes: { + Code: "ParseExpected2TokenTypes", + Description: "Did not find the expected token in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedNumber: { + Code: "ParseExpectedNumber", + Description: "Did not find the expected number in the SQL expression.", + HTTPStatusCode: 
http.StatusBadRequest, + }, + ErrParseExpectedRightParenBuiltinFunctionCall: { + Code: "ParseExpectedRightParenBuiltinFunctionCall", + Description: "Did not find the expected right parenthesis character in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedTypeName: { + Code: "ParseExpectedTypeName", + Description: "Did not find the expected type name in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedWhenClause: { + Code: "ParseExpectedWhenClause", + Description: "Did not find the expected WHEN clause in the SQL expression. CASE is not supported.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedToken: { + Code: "ParseUnsupportedToken", + Description: "The SQL expression contains an unsupported token.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedLiteralsGroupBy: { + Code: "ParseUnsupportedLiteralsGroupBy", + Description: "The SQL expression contains an unsupported use of GROUP BY.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedMember: { + Code: "ParseExpectedMember", + Description: "The SQL expression contains an unsupported use of MEMBER.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedSelect: { + Code: "ParseUnsupportedSelect", + Description: "The SQL expression contains an unsupported use of SELECT.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedCase: { + Code: "ParseUnsupportedCase", + Description: "The SQL expression contains an unsupported use of CASE.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedCaseClause: { + Code: "ParseUnsupportedCaseClause", + Description: "The SQL expression contains an unsupported use of CASE.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedAlias: { + Code: "ParseUnsupportedAlias", + Description: "The SQL expression contains an unsupported use of ALIAS.", + HTTPStatusCode: http.StatusBadRequest, + }, + 
ErrParseUnsupportedSyntax: { + Code: "ParseUnsupportedSyntax", + Description: "The SQL expression contains unsupported syntax.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnknownOperator: { + Code: "ParseUnknownOperator", + Description: "The SQL expression contains an invalid operator.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseMissingIdentAfterAt: { + Code: "ParseMissingIdentAfterAt", + Description: "Did not find the expected identifier after the @ symbol in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnexpectedOperator: { + Code: "ParseUnexpectedOperator", + Description: "The SQL expression contains an unexpected operator.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnexpectedTerm: { + Code: "ParseUnexpectedTerm", + Description: "The SQL expression contains an unexpected term.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnexpectedToken: { + Code: "ParseUnexpectedToken", + Description: "The SQL expression contains an unexpected token.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnexpectedKeyword: { + Code: "ParseUnexpectedKeyword", + Description: "The SQL expression contains an unexpected keyword.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedExpression: { + Code: "ParseExpectedExpression", + Description: "Did not find the expected SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedLeftParenAfterCast: { + Code: "ParseExpectedLeftParenAfterCast", + Description: "Did not find expected the left parenthesis in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedLeftParenValueConstructor: { + Code: "ParseExpectedLeftParenValueConstructor", + Description: "Did not find expected the left parenthesis in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedLeftParenBuiltinFunctionCall: { + Code: "ParseExpectedLeftParenBuiltinFunctionCall", + Description: 
"Did not find the expected left parenthesis in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedArgumentDelimiter: { + Code: "ParseExpectedArgumentDelimiter", + Description: "Did not find the expected argument delimiter in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseCastArity: { + Code: "ParseCastArity", + Description: "The SQL expression CAST has incorrect arity.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseInvalidTypeParam: { + Code: "ParseInvalidTypeParam", + Description: "The SQL expression contains an invalid parameter value.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseEmptySelect: { + Code: "ParseEmptySelect", + Description: "The SQL expression contains an empty SELECT.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseSelectMissingFrom: { + Code: "ParseSelectMissingFrom", + Description: "GROUP is not supported in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedIdentForGroupName: { + Code: "ParseExpectedIdentForGroupName", + Description: "GROUP is not supported in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedIdentForAlias: { + Code: "ParseExpectedIdentForAlias", + Description: "Did not find the expected identifier for the alias in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedCallWithStar: { + Code: "ParseUnsupportedCallWithStar", + Description: "Only COUNT with (*) as a parameter is supported in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseNonUnaryAgregateFunctionCall: { + Code: "ParseNonUnaryAgregateFunctionCall", + Description: "Only one argument is supported for aggregate functions in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseMalformedJoin: { + Code: "ParseMalformedJoin", + Description: "JOIN is not supported in the SQL expression.", + HTTPStatusCode: 
http.StatusBadRequest, + }, + ErrParseExpectedIdentForAt: { + Code: "ParseExpectedIdentForAt", + Description: "Did not find the expected identifier for AT name in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseAsteriskIsNotAloneInSelectList: { + Code: "ParseAsteriskIsNotAloneInSelectList", + Description: "Other expressions are not allowed in the SELECT list when '*' is used without dot notation in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseCannotMixSqbAndWildcardInSelectList: { + Code: "ParseCannotMixSqbAndWildcardInSelectList", + Description: "Cannot mix [] and * in the same expression in a SELECT list in SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseInvalidContextForWildcardInSelectList: { + Code: "ParseInvalidContextForWildcardInSelectList", + Description: "Invalid use of * in SELECT list in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrIncorrectSQLFunctionArgumentType: { + Code: "IncorrectSqlFunctionArgumentType", + Description: "Incorrect type of arguments in function call in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrValueParseFailure: { + Code: "ValueParseFailure", + Description: "Time stamp parse failure in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorInvalidArguments: { + Code: "EvaluatorInvalidArguments", + Description: "Incorrect number of arguments in the function call in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrIntegerOverflow: { + Code: "IntegerOverflow", + Description: "Int overflow or underflow in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrLikeInvalidInputs: { + Code: "LikeInvalidInputs", + Description: "Invalid argument given to the LIKE clause in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrCastFailed: { + Code: "CastFailed", + Description: "Attempt to convert from one data 
type to another using CAST failed in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidCast: { + Code: "InvalidCast", + Description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorInvalidTimestampFormatPattern: { + Code: "EvaluatorInvalidTimestampFormatPattern", + Description: "Time stamp format pattern requires additional fields in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing: { + Code: "EvaluatorInvalidTimestampFormatPatternSymbolForParsing", + Description: "Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorTimestampFormatPatternDuplicateFields: { + Code: "EvaluatorTimestampFormatPatternDuplicateFields", + Description: "Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch: { + Code: "EvaluatorUnterminatedTimestampFormatPatternToken", + Description: "Time stamp format pattern contains unterminated token in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorUnterminatedTimestampFormatPatternToken: { + Code: "EvaluatorInvalidTimestampFormatPatternToken", + Description: "Time stamp format pattern contains an invalid token in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorInvalidTimestampFormatPatternToken: { + Code: "EvaluatorInvalidTimestampFormatPatternToken", + Description: "Time stamp format pattern contains an invalid token in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorInvalidTimestampFormatPatternSymbol: { + Code: 
"EvaluatorInvalidTimestampFormatPatternSymbol", + Description: "Time stamp format pattern contains an invalid symbol in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorBindingDoesNotExist: { + Code: "ErrEvaluatorBindingDoesNotExist", + Description: "A column name or a path provided does not exist in the SQL expression", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingHeaders: { + Code: "MissingHeaders", + Description: "Some headers in the query are missing from the file. Check the file and try again.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidColumnIndex: { + Code: "InvalidColumnIndex", + Description: "The column index is invalid. Please check the service documentation and try again.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrPostPolicyConditionInvalidFormat: { + Code: "PostPolicyInvalidKeyName", + Description: "Invalid according to Policy: Policy Conditions failed", + HTTPStatusCode: http.StatusForbidden, + }, + // Add your error structure here. + ErrMalformedJSON: { + Code: "MalformedJSON", + Description: "The JSON was not well-formed or did not validate against our published format.", + HTTPStatusCode: http.StatusBadRequest, + }, +} + +// GetAPIError provides API Error for input API error code. +func GetAPIError(code ErrorCode) APIError { + return errorCodeResponse[code] +} + +// STSErrorCode type of error status. 
+type STSErrorCode int + +// STSError structure +type STSError struct { + Code string + Description string + HTTPStatusCode int +} + +// Error codes,list - http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithSAML.html +const ( + ErrSTSNone STSErrorCode = iota + ErrSTSAccessDenied + ErrSTSMissingParameter + ErrSTSInvalidParameterValue + ErrSTSInternalError +) + +type stsErrorCodeMap map[STSErrorCode]STSError + +//ToSTSErr code to err +func (e stsErrorCodeMap) ToSTSErr(errCode STSErrorCode) STSError { + apiErr, ok := e[errCode] + if !ok { + return e[ErrSTSInternalError] + } + return apiErr +} + +// StsErrCodes error code to STSError structure, these fields carry respective +// descriptions for all the error responses. +var StsErrCodes = stsErrorCodeMap{ + ErrSTSAccessDenied: { + Code: "AccessDenied", + Description: "Generating temporary credentials not allowed for this request.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrSTSMissingParameter: { + Code: "MissingParameter", + Description: "A required parameter for the specified action is not supplied.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrSTSInvalidParameterValue: { + Code: "InvalidParameterValue", + Description: "An invalid or out-of-range value was supplied for the input parameter.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrSTSInternalError: { + Code: "InternalError", + Description: "We encountered an internal error generating credentials, please try again.", + HTTPStatusCode: http.StatusInternalServerError, + }, +} diff --git a/s3/auth/auth_type.go b/s3/auth/auth_type.go new file mode 100644 index 000000000..10bbe6567 --- /dev/null +++ b/s3/auth/auth_type.go @@ -0,0 +1,182 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package auth + +import ( + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + + "github.com/bittorrent/go-btfs/s3/apierrors" + "github.com/bittorrent/go-btfs/s3/consts" +) + +// http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the +// client did not calculate sha256 of the payload. +const unsignedPayload = "UNSIGNED-PAYLOAD" + +// isValidRegion - verify if incoming region value is valid with configured Region. +func isValidRegion(reqRegion string, confRegion string) bool { + if confRegion == "" { + return true + } + if confRegion == "US" { + confRegion = consts.DefaultRegion + } + // Some older s3 clients set region as "US" instead of + // globalDefaultRegion, handle it. + if reqRegion == "US" { + reqRegion = consts.DefaultRegion + } + return reqRegion == confRegion +} + +func contains(slice interface{}, elem interface{}) bool { + v := reflect.ValueOf(slice) + if v.Kind() == reflect.Slice { + for i := 0; i < v.Len(); i++ { + if v.Index(i).Interface() == elem { + return true + } + } + } + return false +} + +// extractSignedHeaders extract signed headers from Authorization header +func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, apierrors.ErrorCode) { + reqHeaders := r.Header + reqQueries := r.Form + // find whether "host" is part of list of signed headers. + // if not return ErrUnsignedHeaders. "host" is mandatory. 
+ if !contains(signedHeaders, "host") { + return nil, apierrors.ErrUnsignedHeaders + } + extractedSignedHeaders := make(http.Header) + for _, header := range signedHeaders { + // `host` will not be found in the headers, can be found in r.Host. + // but its alway necessary that the list of signed headers containing host in it. + val, ok := reqHeaders[http.CanonicalHeaderKey(header)] + if !ok { + // try to set headers from Query String + val, ok = reqQueries[header] + } + if ok { + extractedSignedHeaders[http.CanonicalHeaderKey(header)] = val + continue + } + switch header { + case "expect": + // Golang http server strips off 'Expect' header, if the + // client sent this as part of signed headers we need to + // handle otherwise we would see a signature mismatch. + // `aws-cli` sets this as part of signed headers. + // + // According to + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20 + // Expect header is always of form: + // + // Expect = "Expect" ":" 1#expectation + // expectation = "100-continue" | expectation-extension + // + // So it safe to assume that '100-continue' is what would + // be sent, for the time being keep this work around. + // Adding a *TODO* to remove this later when Golang server + // doesn't filter out the 'Expect' header. + extractedSignedHeaders.Set(header, "100-continue") + case "host": + // Go http server removes "host" from Request.Header + + //extractedSignedHeaders.Set(header, r.Host) + // todo use r.Host, or filedag-web deal with + //value := strings.Split(r.Host, ":") + extractedSignedHeaders.Set(header, r.Host) + case "transfer-encoding": + // Go http server removes "host" from Request.Header + extractedSignedHeaders[http.CanonicalHeaderKey(header)] = r.TransferEncoding + case "content-length": + // Signature-V4 spec excludes Content-Length from signed headers list for signature calculation. + // But some clients deviate from this rule. 
Hence we consider Content-Length for signature + // calculation to be compatible with such clients. + extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) + default: + return nil, apierrors.ErrUnsignedHeaders + } + } + return extractedSignedHeaders, apierrors.ErrNone +} + +// isRequestSignatureV4 Verify if request has AWS Signature Version '4'. +func isRequestSignatureV4(r *http.Request) bool { + return strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) +} + +// Verify if request has AWS PreSign Version '4'. +func isRequestPresignedSignatureV4(r *http.Request) bool { + _, ok := r.URL.Query()["X-Amz-Credential"] + return ok +} + + +// List of all supported auth types. +const ( + AuthTypeUnknown AuthType = iota + AuthTypeAnonymous + AuthTypePresigned + AuthTypePresignedV2 + AuthTypePostPolicy + AuthTypeStreamingSigned + AuthTypeSigned + AuthTypeSignedV2 + AuthTypeJWT + AuthTypeSTS +) + +// GetRequestAuthType Get request authentication type. +func GetRequestAuthType(r *http.Request) AuthType { + if r.URL != nil { + var err error + r.Form, err = url.ParseQuery(r.URL.RawQuery) + if err != nil { + log.Infof("parse query failed, query: %s, error: %v", r.URL.RawQuery, err) + return AuthTypeUnknown + } + } + if isRequestSignatureV2(r) { + return AuthTypeSignedV2 + } else if isRequestPresignedSignatureV2(r) { + return AuthTypePresignedV2 + } else if isRequestSignStreamingV4(r) { + return AuthTypeStreamingSigned + } else if IsRequestSignatureV4(r) { + return AuthTypeSigned + } else if isRequestPresignedSignatureV4(r) { + return AuthTypePresigned + } else if isRequestJWT(r) { + return AuthTypeJWT + } else if isRequestPostPolicySignatureV4(r) { + return AuthTypePostPolicy + } else if _, ok := r.Form[consts.StsAction]; ok { + return AuthTypeSTS + } else if _, ok := r.Header[consts.Authorization]; !ok { + return AuthTypeAnonymous + } + return AuthTypeUnknown +} diff --git a/s3/auth/cred.go b/s3/auth/cred.go new file mode 100644 index 
000000000..d43adadd2 --- /dev/null +++ b/s3/auth/cred.go @@ -0,0 +1,69 @@ +package auth + +import ( + "github.com/bittorrent/go-btfs/s3/apierrors" + "time" +) + +var timeSentinel = time.Unix(0, 0).UTC() + +// Credentials holds access and secret keys. +type Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + CreateTime time.Time `xml:"CreateTime" json:"createTime,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken"` + Status string `xml:"-" json:"status,omitempty"` + ParentUser string `xml:"-" json:"parentUser,omitempty"` +} + +// IsValid - returns whether credential is valid or not. +func (cred *Credentials) IsValid() bool { + return true +} + +// IsExpired - returns whether Credential is expired or not. +func (cred *Credentials) IsExpired() bool { + return false +} + +func CheckAccessKeyValid(accessKey string) (*Credentials, apierrors.ErrorCode) { + + ////check it + //cred, bl: = mp[accessKey] + //if bl { + // return cred, nil + //} else { + // return nil, errors.New("node found accessKey! ") + //} + + return &Credentials{AccessKey: accessKey}, apierrors.ErrNone +} + +const ( + // Minimum length for access key. + accessKeyMinLen = 3 + + // Maximum length for access key. + // There is no max length enforcement for access keys + accessKeyMaxLen = 20 + + // Minimum length for secret key for both server and gateway mode. + secretKeyMinLen = 8 + + // Maximum secret key length , this + // is used when autogenerating new credentials. + // There is no max length enforcement for secret keys + secretKeyMaxLen = 40 +) + +// IsAccessKeyValid - validate access key for right length. +func IsAccessKeyValid(accessKey string) bool { + return len(accessKey) >= accessKeyMinLen +} + +// IsSecretKeyValid - validate secret key for right length. 
+func IsSecretKeyValid(secretKey string) bool { + return len(secretKey) >= secretKeyMinLen +} diff --git a/s3/auth/other.go b/s3/auth/other.go new file mode 100644 index 000000000..8832b06d1 --- /dev/null +++ b/s3/auth/other.go @@ -0,0 +1 @@ +package auth diff --git a/s3/auth/service.go b/s3/auth/service.go new file mode 100644 index 000000000..8832b06d1 --- /dev/null +++ b/s3/auth/service.go @@ -0,0 +1 @@ +package auth diff --git a/s3/auth/service_instance.go b/s3/auth/service_instance.go new file mode 100644 index 000000000..de5e1687a --- /dev/null +++ b/s3/auth/service_instance.go @@ -0,0 +1,30 @@ +package auth + +import ( + "github.com/bittorrent/go-btfs/s3/apierrors" + "net/http" +) + +type service struct { +} + +func newService() (svc *service, err error) { + svc = &service{} + return +} + +func (s *service) CheckSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { + sha256sum := getContentSha256Cksum(r, stype) + switch { + case isRequestSignatureV4(r): + return DoesSignatureMatch(sha256sum, r, region, stype) + case isRequestPresignedSignatureV4(r): + return DoesPresignedSignatureMatch(sha256sum, r, region, stype) + default: + return apierrors.ErrAccessDenied + } +} + +func (s *service) CheckACL(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { + return +} diff --git a/s3/auth/service_interface.go b/s3/auth/service_interface.go new file mode 100644 index 000000000..2295095e6 --- /dev/null +++ b/s3/auth/service_interface.go @@ -0,0 +1,11 @@ +package auth + +import ( + "github.com/bittorrent/go-btfs/s3/apierrors" + "net/http" +) + +type Service interface { + CheckSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) + CheckACL(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) +} diff --git a/s3/auth/service_test.go b/s3/auth/service_test.go new file mode 100644 index 000000000..8832b06d1 --- /dev/null +++ 
b/s3/auth/service_test.go @@ -0,0 +1 @@ +package auth diff --git a/s3/auth/signature-v4-parser.go b/s3/auth/signature-v4-parser.go new file mode 100644 index 000000000..559105b4f --- /dev/null +++ b/s3/auth/signature-v4-parser.go @@ -0,0 +1,285 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package auth + +import ( + "net/url" + "strings" + "time" + + "github.com/bittorrent/go-btfs/s3/apierrors" + "github.com/bittorrent/go-btfs/s3/consts" +) + +// credentialHeader data type represents structured form of Credential +// string from authorization header. +type credentialHeader struct { + accessKey string + scope struct { + date time.Time + region string + service string + request string + } +} + +// Return scope string. +func (c credentialHeader) getScope() string { + return strings.Join([]string{ + c.scope.date.Format(yyyymmdd), + c.scope.region, + c.scope.service, + c.scope.request, + }, consts.SlashSeparator) +} + +// parse credentialHeader string into its structured form. 
+func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, aec apierrors.ErrorCode) { + creds := strings.SplitN(strings.TrimSpace(credElement), "=", 2) + if len(creds) != 2 { + return ch, apierrors.ErrMissingFields + } + if creds[0] != "Credential" { + return ch, apierrors.ErrMissingCredTag + } + credElements := strings.Split(strings.TrimSpace(creds[1]), consts.SlashSeparator) + if len(credElements) < 5 { + return ch, apierrors.ErrCredMalformed + } + accessKey := strings.Join(credElements[:len(credElements)-4], consts.SlashSeparator) // The access key may contain one or more `/` + if !IsAccessKeyValid(accessKey) { + return ch, apierrors.ErrInvalidAccessKeyID + } + // Save access key id. + cred := credentialHeader{ + accessKey: accessKey, + } + credElements = credElements[len(credElements)-4:] + var e error + cred.scope.date, e = time.Parse(yyyymmdd, credElements[0]) + if e != nil { + return ch, apierrors.ErrAuthorizationHeaderMalformed + } + + cred.scope.region = credElements[1] + // Verify if region is valid. + sRegion := cred.scope.region + // Region is set to be empty, we use whatever was sent by the + // request and proceed further. This is a work-around to address + // an important problem for ListBuckets() getting signed with + // different regions. + if region == "" { + region = sRegion + } + // Should validate region, only if region is set. + if !isValidRegion(sRegion, region) { + return ch, apierrors.ErrAuthorizationHeaderMalformed + } + if credElements[2] != string(stype) { + //switch stype { + //case ServiceSTS: + // return ch, apierrors.ErrAuthorizationHeaderMalformed + //} + return ch, apierrors.ErrAuthorizationHeaderMalformed + } + cred.scope.service = credElements[2] + if credElements[3] != "aws4_request" { + return ch, apierrors.ErrAuthorizationHeaderMalformed + } + cred.scope.request = credElements[3] + return cred, apierrors.ErrNone +} + +// Parse signature from signature tag. 
+func parseSignature(signElement string) (string, apierrors.ErrorCode) { + signFields := strings.Split(strings.TrimSpace(signElement), "=") + if len(signFields) != 2 { + return "", apierrors.ErrMissingFields + } + if signFields[0] != "Signature" { + return "", apierrors.ErrMissingSignTag + } + if signFields[1] == "" { + return "", apierrors.ErrMissingFields + } + signature := signFields[1] + return signature, apierrors.ErrNone +} + +// Parse slice of signed headers from signed headers tag. +func parseSignedHeader(signedHdrElement string) ([]string, apierrors.ErrorCode) { + signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") + if len(signedHdrFields) != 2 { + return nil, apierrors.ErrMissingFields + } + if signedHdrFields[0] != "SignedHeaders" { + return nil, apierrors.ErrMissingSignHeadersTag + } + if signedHdrFields[1] == "" { + return nil, apierrors.ErrMissingFields + } + signedHeaders := strings.Split(signedHdrFields[1], ";") + return signedHeaders, apierrors.ErrNone +} + +// signValues data type represents structured form of AWS Signature V4 header. +type signValues struct { + Credential credentialHeader + SignedHeaders []string + Signature string +} + +// preSignValues data type represents structued form of AWS Signature V4 query string. +type preSignValues struct { + signValues + Date time.Time + Expires time.Duration +} + +// Parses signature version '4' query string of the following form. +// +// querystring = X-Amz-Algorithm=algorithm +// querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope) +// querystring += &X-Amz-Date=date +// querystring += &X-Amz-Expires=timeout interval +// querystring += &X-Amz-SignedHeaders=signed_headers +// querystring += &X-Amz-Signature=signature +// +// verifies if any of the necessary query params are missing in the presigned request. 
+func doesV4PresignParamsExist(query url.Values) apierrors.ErrorCode { + v4PresignQueryParams := []string{consts.AmzAlgorithm, consts.AmzCredential, consts.AmzSignature, consts.AmzDate, consts.AmzSignedHeaders, consts.AmzExpires} + for _, v4PresignQueryParam := range v4PresignQueryParams { + if _, ok := query[v4PresignQueryParam]; !ok { + return apierrors.ErrInvalidQueryParams + } + } + return apierrors.ErrNone +} + +// Parses all the presigned signature values into separate elements. +func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, aec apierrors.ErrorCode) { + // verify whether the required query params exist. + aec = doesV4PresignParamsExist(query) + if aec != apierrors.ErrNone { + return psv, aec + } + + // Verify if the query algorithm is supported or not. + if query.Get(consts.AmzAlgorithm) != signV4Algorithm { + return psv, apierrors.ErrAuthorizationHeaderMalformed + } + + // Initialize signature version '4' structured header. + preSignV4Values := preSignValues{} + + // Save credential. + preSignV4Values.Credential, aec = parseCredentialHeader("Credential="+query.Get(consts.AmzCredential), region, stype) + if aec != apierrors.ErrNone { + return psv, aec + } + + var e error + // Save date in native time.Time. + preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get(consts.AmzDate)) + if e != nil { + return psv, apierrors.ErrAuthorizationHeaderMalformed + } + + // Save expires in native time.Duration. + preSignV4Values.Expires, e = time.ParseDuration(query.Get(consts.AmzExpires) + "s") + if e != nil { + return psv, apierrors.ErrAuthorizationHeaderMalformed + } + + if preSignV4Values.Expires < 0 { + return psv, apierrors.ErrAuthorizationHeaderMalformed + } + + // Check if Expiry time is less than 7 days (value in seconds). + if preSignV4Values.Expires.Seconds() > 604800 { + return psv, apierrors.ErrAuthorizationHeaderMalformed + } + + // Save signed headers. 
+ preSignV4Values.SignedHeaders, aec = parseSignedHeader("SignedHeaders=" + query.Get(consts.AmzSignedHeaders)) + if aec != apierrors.ErrNone { + return psv, aec + } + + // Save signature. + preSignV4Values.Signature, aec = parseSignature("Signature=" + query.Get(consts.AmzSignature)) + if aec != apierrors.ErrNone { + return psv, aec + } + + // Return structed form of signature query string. + return preSignV4Values, apierrors.ErrNone +} + +// Parses signature version '4' header of the following form. +// +// Authorization: algorithm Credential=accessKeyID/credScope, \ +// SignedHeaders=signedHeaders, Signature=signature +func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, aec apierrors.ErrorCode) { + // credElement is fetched first to skip replacing the space in access key. + credElement := strings.TrimPrefix(strings.Split(strings.TrimSpace(v4Auth), ",")[0], signV4Algorithm) + // Replace all spaced strings, some clients can send spaced + // parameters and some won't. So we pro-actively remove any spaces + // to make parsing easier. + v4Auth = strings.ReplaceAll(v4Auth, " ", "") + if v4Auth == "" { + return sv, apierrors.ErrAuthHeaderEmpty + } + + // Verify if the header algorithm is supported or not. + if !strings.HasPrefix(v4Auth, signV4Algorithm) { + return sv, apierrors.ErrSignatureVersionNotSupported + } + + // Strip off the Algorithm prefix. + v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm) + authFields := strings.Split(strings.TrimSpace(v4Auth), ",") + if len(authFields) != 3 { + return sv, apierrors.ErrMissingFields + } + + // Initialize signature version '4' structured header. + signV4Values := signValues{} + + var s3Err apierrors.ErrorCode + // Save credentail values. + signV4Values.Credential, s3Err = parseCredentialHeader(strings.TrimSpace(credElement), region, stype) + if s3Err != apierrors.ErrNone { + return sv, s3Err + } + + // Save signed headers. 
+ signV4Values.SignedHeaders, s3Err = parseSignedHeader(authFields[1]) + if s3Err != apierrors.ErrNone { + return sv, s3Err + } + + // Save signature. + signV4Values.Signature, s3Err = parseSignature(authFields[2]) + if s3Err != apierrors.ErrNone { + return sv, s3Err + } + + // Return the structure here. + return signV4Values, apierrors.ErrNone +} diff --git a/s3/auth/signature-v4-utils.go b/s3/auth/signature-v4-utils.go new file mode 100644 index 000000000..2c72776cc --- /dev/null +++ b/s3/auth/signature-v4-utils.go @@ -0,0 +1,177 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package auth + +import ( + "net/http" + "reflect" + "strconv" + "strings" + + "github.com/bittorrent/go-btfs/s3/apierrors" + "github.com/bittorrent/go-btfs/s3/consts" +) + +// http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the +// client did not calculate sha256 of the payload. +const unsignedPayload = "UNSIGNED-PAYLOAD" + +// isValidRegion - verify if incoming region value is valid with configured Region. +func isValidRegion(reqRegion string, confRegion string) bool { + if confRegion == "" { + return true + } + if confRegion == "US" { + confRegion = consts.DefaultRegion + } + // Some older s3 clients set region as "US" instead of + // globalDefaultRegion, handle it. 
+ if reqRegion == "US" { + reqRegion = consts.DefaultRegion + } + return reqRegion == confRegion +} + +func contains(slice interface{}, elem interface{}) bool { + v := reflect.ValueOf(slice) + if v.Kind() == reflect.Slice { + for i := 0; i < v.Len(); i++ { + if v.Index(i).Interface() == elem { + return true + } + } + } + return false +} + +// extractSignedHeaders extract signed headers from Authorization header +func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, apierrors.ErrorCode) { + reqHeaders := r.Header + reqQueries := r.Form + // find whether "host" is part of list of signed headers. + // if not return ErrUnsignedHeaders. "host" is mandatory. + if !contains(signedHeaders, "host") { + return nil, apierrors.ErrUnsignedHeaders + } + extractedSignedHeaders := make(http.Header) + for _, header := range signedHeaders { + // `host` will not be found in the headers, can be found in r.Host. + // but its alway necessary that the list of signed headers containing host in it. + val, ok := reqHeaders[http.CanonicalHeaderKey(header)] + if !ok { + // try to set headers from Query String + val, ok = reqQueries[header] + } + if ok { + extractedSignedHeaders[http.CanonicalHeaderKey(header)] = val + continue + } + switch header { + case "expect": + // Golang http server strips off 'Expect' header, if the + // client sent this as part of signed headers we need to + // handle otherwise we would see a signature mismatch. + // `aws-cli` sets this as part of signed headers. + // + // According to + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20 + // Expect header is always of form: + // + // Expect = "Expect" ":" 1#expectation + // expectation = "100-continue" | expectation-extension + // + // So it safe to assume that '100-continue' is what would + // be sent, for the time being keep this work around. + // Adding a *TODO* to remove this later when Golang server + // doesn't filter out the 'Expect' header. 
+ extractedSignedHeaders.Set(header, "100-continue") + case "host": + // Go http server removes "host" from Request.Header + + //extractedSignedHeaders.Set(header, r.Host) + // todo use r.Host, or filedag-web deal with + //value := strings.Split(r.Host, ":") + extractedSignedHeaders.Set(header, r.Host) + case "transfer-encoding": + // Go http server removes "host" from Request.Header + extractedSignedHeaders[http.CanonicalHeaderKey(header)] = r.TransferEncoding + case "content-length": + // Signature-V4 spec excludes Content-Length from signed headers list for signature calculation. + // But some clients deviate from this rule. Hence we consider Content-Length for signature + // calculation to be compatible with such clients. + extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) + default: + return nil, apierrors.ErrUnsignedHeaders + } + } + return extractedSignedHeaders, apierrors.ErrNone +} + +// Returns SHA256 for calculating canonical-request. +func getContentSha256Cksum(r *http.Request, stype serviceType) string { + //if stype == ServiceSTS { + // payload, err := ioutil.ReadAll(io.LimitReader(r.Body, consts.StsRequestBodyLimit)) + // if err != nil { + // //log.Errorf("ServiceSTS ReadAll err:%v", err) + // } + // sum256 := sha256.Sum256(payload) + // r.Body = ioutil.NopCloser(bytes.NewReader(payload)) + // return hex.EncodeToString(sum256[:]) + //} + + var ( + defaultSha256Cksum string + v []string + ok bool + ) + + // For a presigned request we look at the query param for sha256. + if isRequestPresignedSignatureV4(r) { + // X-Amz-Content-Sha256, if not set in presigned requests, checksum + // will default to 'UNSIGNED-PAYLOAD'. + defaultSha256Cksum = unsignedPayload + v, ok = r.Form[consts.AmzContentSha256] + if !ok { + v, ok = r.Header[consts.AmzContentSha256] + } + } else { + // X-Amz-Content-Sha256, if not set in signed requests, checksum + // will default to sha256([]byte("")). 
+ defaultSha256Cksum = consts.EmptySHA256 + v, ok = r.Header[consts.AmzContentSha256] + } + + // We found 'X-Amz-Content-Sha256' return the captured value. + if ok { + return v[0] + } + + // We couldn't find 'X-Amz-Content-Sha256'. + return defaultSha256Cksum +} + +// isRequestSignatureV4 Verify if request has AWS Signature Version '4'. +func isRequestSignatureV4(r *http.Request) bool { + return strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) +} + +// Verify if request has AWS PreSign Version '4'. +func isRequestPresignedSignatureV4(r *http.Request) bool { + _, ok := r.URL.Query()["X-Amz-Credential"] + return ok +} diff --git a/s3/auth/signature-v4.go b/s3/auth/signature-v4.go new file mode 100644 index 000000000..7ff394b8c --- /dev/null +++ b/s3/auth/signature-v4.go @@ -0,0 +1,261 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package auth + +import ( + "crypto/subtle" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/bittorrent/go-btfs/s3/apierrors" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/set" + "github.com/bittorrent/go-btfs/s3/utils" +) + +// AWS Signature Version '4' constants. 
+const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601Format = "20060102T150405Z" + yyyymmdd = "20060102" +) + +type serviceType string + +const ( + ServiceS3 serviceType = "s3" + ////ServiceSTS STS + //ServiceSTS serviceType = "sts" +) + +// compareSignatureV4 returns true if and only if both signatures +// are equal. The signatures are expected to be HEX encoded strings +// according to the AWS S3 signature V4 spec. +func compareSignatureV4(sig1, sig2 string) bool { + // The CTC using []byte(str) works because the hex encoding + // is unique for a sequence of bytes. See also compareSignatureV2. + return subtle.ConstantTimeCompare([]byte(sig1), []byte(sig2)) == 1 +} + +// DoesPresignedSignatureMatch - Verify queryString headers with presigned signature +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html +// +// returns apierrors.ErrNone if the signature matches. +func DoesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) apierrors.ErrorCode { + // Copy request + req := *r + + // Parse request query string. + pSignValues, err := parsePreSignV4(req.Form, region, stype) + if err != apierrors.ErrNone { + return err + } + + // get access_info by accessKey + cred, s3Err := CheckAccessKeyValid(pSignValues.Credential.accessKey) + if s3Err != apierrors.ErrNone { + return s3Err + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r) + if errCode != apierrors.ErrNone { + return errCode + } + + // If the host which signed the request is slightly ahead in time (by less than MaxSkewTime) the + // request should still be allowed. + if pSignValues.Date.After(time.Now().UTC().Add(consts.MaxSkewTime)) { + return apierrors.ErrRequestNotReadyYet + } + + if time.Now().UTC().Sub(pSignValues.Date) > pSignValues.Expires { + return apierrors.ErrExpiredPresignRequest + } + + // Save the date and expires. 
+ t := pSignValues.Date + expireSeconds := int(pSignValues.Expires / time.Second) + + // Construct new query. + query := make(url.Values) + clntHashedPayload := req.Form.Get(consts.AmzContentSha256) + if clntHashedPayload != "" { + query.Set(consts.AmzContentSha256, hashedPayload) + } + + token := req.Form.Get(consts.AmzSecurityToken) + if token != "" { + query.Set(consts.AmzSecurityToken, cred.SessionToken) + } + + query.Set(consts.AmzAlgorithm, signV4Algorithm) + + // Construct the query. + query.Set(consts.AmzDate, t.Format(iso8601Format)) + query.Set(consts.AmzExpires, strconv.Itoa(expireSeconds)) + query.Set(consts.AmzSignedHeaders, utils.GetSignedHeaders(extractedSignedHeaders)) + query.Set(consts.AmzCredential, cred.AccessKey+consts.SlashSeparator+pSignValues.Credential.getScope()) + + defaultSigParams := set.CreateStringSet( + consts.AmzContentSha256, + consts.AmzSecurityToken, + consts.AmzAlgorithm, + consts.AmzDate, + consts.AmzExpires, + consts.AmzSignedHeaders, + consts.AmzCredential, + consts.AmzSignature, + ) + + // Add missing query parameters if any provided in the request URL + for k, v := range req.Form { + if !defaultSigParams.Contains(k) { + query[k] = v + } + } + + // Get the encoded query. + encodedQuery := query.Encode() + + // Verify if date query is same. + if req.Form.Get(consts.AmzDate) != query.Get(consts.AmzDate) { + return apierrors.ErrSignatureDoesNotMatch + } + // Verify if expires query is same. + if req.Form.Get(consts.AmzExpires) != query.Get(consts.AmzExpires) { + return apierrors.ErrSignatureDoesNotMatch + } + // Verify if signed headers query is same. + if req.Form.Get(consts.AmzSignedHeaders) != query.Get(consts.AmzSignedHeaders) { + return apierrors.ErrSignatureDoesNotMatch + } + // Verify if credential query is same. + if req.Form.Get(consts.AmzCredential) != query.Get(consts.AmzCredential) { + return apierrors.ErrSignatureDoesNotMatch + } + // Verify if sha256 payload query is same. 
+ if clntHashedPayload != "" && clntHashedPayload != query.Get(consts.AmzContentSha256) { + return apierrors.ErrContentSHA256Mismatch + } + // Verify if security token is correct. + if token != "" && subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 { + return apierrors.ErrInvalidToken + } + + // Verify finally if signature is same. + + // Get canonical request. + presignedCanonicalReq := utils.GetCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + presignedStringToSign := utils.GetStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope()) + + // Get hmac presigned signing key. + presignedSigningKey := utils.GetSigningKey(cred.SecretKey, pSignValues.Credential.scope.date, + pSignValues.Credential.scope.region, string(stype)) + + // Get new signature. + newSignature := utils.GetSignature(presignedSigningKey, presignedStringToSign) + + // Verify signature. + if !compareSignatureV4(req.Form.Get(consts.AmzSignature), newSignature) { + return apierrors.ErrSignatureDoesNotMatch + } + return apierrors.ErrNone +} + +// DoesSignatureMatch - Verify authorization header with calculated header in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +// +// returns apierrors.ErrNone if signature matches. +func DoesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) apierrors.ErrorCode { + // Copy request. + req := *r + + // Save authorization header. + v4Auth := req.Header.Get(consts.Authorization) + + // Parse signature version '4' header. + signV4Values, err := parseSignV4(v4Auth, region, stype) + if err != apierrors.ErrNone { + return err + } + + // Extract all the signed headers along with its values. 
+ extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) + if errCode != apierrors.ErrNone { + return errCode + } + + cred, s3Err := CheckAccessKeyValid(signV4Values.Credential.accessKey) + if s3Err != apierrors.ErrNone { + return s3Err + } + + // Extract date, if not present throw error. + var date string + if date = req.Header.Get(consts.AmzDate); date == "" { + if date = r.Header.Get(consts.Date); date == "" { + return apierrors.ErrMissingDateHeader + } + } + + // Parse date header. + t, e := time.Parse(iso8601Format, date) + if e != nil { + return apierrors.ErrAuthorizationHeaderMalformed + } + + // Query string. + queryStr := req.URL.Query().Encode() + + // Get canonical request. + canonicalRequest := utils.GetCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + stringToSign := utils.GetStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) + + // Get hmac signing key. + signingKey := utils.GetSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, + signV4Values.Credential.scope.region, string(stype)) + + // Calculate signature. + newSignature := utils.GetSignature(signingKey, stringToSign) + + // Verify if signature match. + if !compareSignatureV4(newSignature, signV4Values.Signature) { + return apierrors.ErrSignatureDoesNotMatch + } + + // Return error none. + return apierrors.ErrNone +} + +//// getScope generate a string of a specific date, an AWS region, and a service. 
+//func getScope(t time.Time, region string) string { +// scope := strings.Join([]string{ +// t.Format(yyyymmdd), +// region, +// string(ServiceS3), +// "aws4_request", +// }, consts.SlashSeparator) +// return scope +//} diff --git a/s3/consts/consts.go b/s3/consts/consts.go new file mode 100644 index 000000000..2bb2d09a8 --- /dev/null +++ b/s3/consts/consts.go @@ -0,0 +1,183 @@ +package consts + +import ( + "github.com/dustin/go-humanize" + "time" +) + +//some const +const ( + // Iso8601TimeFormat RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z + Iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision. + + StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + + // MaxLocationConstraintSize Limit of location constraint XML for unauthenticated PUT bucket operations. + MaxLocationConstraintSize = 3 * humanize.MiByte + EmptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + StsRequestBodyLimit = 10 * (1 << 20) // 10 MiB + DefaultRegion = "" + SlashSeparator = "/" + + MaxSkewTime = 15 * time.Minute // 15 minutes skew allowed. + + // STS API version. 
+ StsAPIVersion = "2011-06-15" + StsVersion = "Version" + StsAction = "Action" + AssumeRole = "AssumeRole" + SignV4Algorithm = "AWS4-HMAC-SHA256" + + DefaultOwnerID = "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4" + DisplayName = "FileDagStorage" + DefaultStorageClass = "DAGSTORE" +) + +// Standard S3 HTTP request constants +const ( + IfModifiedSince = "If-Modified-Since" + IfUnmodifiedSince = "If-Unmodified-Since" + IfMatch = "If-Match" + IfNoneMatch = "If-None-Match" + + // S3 storage class + AmzStorageClass = "x-amz-storage-class" + + // S3 object version ID + AmzVersionID = "x-amz-version-id" + AmzDeleteMarker = "x-amz-delete-marker" + + // S3 object tagging + AmzObjectTagging = "X-Amz-Tagging" + AmzTagCount = "x-amz-tagging-count" + AmzTagDirective = "X-Amz-Tagging-Directive" + + // S3 transition restore + AmzRestore = "x-amz-restore" + AmzRestoreExpiryDays = "X-Amz-Restore-Expiry-Days" + AmzRestoreRequestDate = "X-Amz-Restore-Request-Date" + AmzRestoreOutputPath = "x-amz-restore-output-path" + + // S3 extensions + AmzCopySourceIfModifiedSince = "x-amz-copy-source-if-modified-since" + AmzCopySourceIfUnmodifiedSince = "x-amz-copy-source-if-unmodified-since" + + AmzCopySourceIfNoneMatch = "x-amz-copy-source-if-none-match" + AmzCopySourceIfMatch = "x-amz-copy-source-if-match" + + AmzCopySource = "X-Amz-Copy-Source" + AmzCopySourceVersionID = "X-Amz-Copy-Source-Version-Id" + AmzCopySourceRange = "X-Amz-Copy-Source-Range" + AmzMetadataDirective = "X-Amz-Metadata-Directive" + AmzObjectLockMode = "X-Amz-Object-Lock-Mode" + AmzObjectLockRetainUntilDate = "X-Amz-Object-Lock-Retain-Until-Date" + AmzObjectLockLegalHold = "X-Amz-Object-Lock-Legal-Hold" + AmzObjectLockBypassGovernance = "X-Amz-Bypass-Governance-Retention" + AmzBucketReplicationStatus = "X-Amz-Replication-Status" + AmzSnowballExtract = "X-Amz-Meta-Snowball-Auto-Extract" + + // Multipart parts count + AmzMpPartsCount = "x-amz-mp-parts-count" + + // Object date/time of expiration + 
AmzExpiration = "x-amz-expiration" + + // Dummy putBucketACL + AmzACL = "x-amz-acl" + + // Signature V4 related contants. + AmzContentSha256 = "X-Amz-Content-Sha256" + AmzDate = "X-Amz-Date" + AmzAlgorithm = "X-Amz-Algorithm" + AmzExpires = "X-Amz-Expires" + AmzSignedHeaders = "X-Amz-SignedHeaders" + AmzSignature = "X-Amz-Signature" + AmzCredential = "X-Amz-Credential" + AmzSecurityToken = "X-Amz-Security-Token" + AmzDecodedContentLength = "X-Amz-Decoded-Content-Length" + + AmzMetaUnencryptedContentLength = "X-Amz-Meta-X-Amz-Unencrypted-Content-Length" + AmzMetaUnencryptedContentMD5 = "X-Amz-Meta-X-Amz-Unencrypted-Content-Md5" + + // AWS server-side encryption headers for SSE-S3, SSE-KMS and SSE-C. + AmzServerSideEncryption = "X-Amz-Server-Side-Encryption" + AmzServerSideEncryptionKmsID = AmzServerSideEncryption + "-Aws-Kms-Key-Id" + AmzServerSideEncryptionKmsContext = AmzServerSideEncryption + "-Context" + AmzServerSideEncryptionCustomerAlgorithm = AmzServerSideEncryption + "-Customer-Algorithm" + AmzServerSideEncryptionCustomerKey = AmzServerSideEncryption + "-Customer-Key" + AmzServerSideEncryptionCustomerKeyMD5 = AmzServerSideEncryption + "-Customer-Key-Md5" + AmzServerSideEncryptionCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" + AmzServerSideEncryptionCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" + AmzServerSideEncryptionCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" + + AmzEncryptionAES = "AES256" + AmzEncryptionKMS = "aws:kms" + + // Signature v2 related constants + AmzSignatureV2 = "Signature" + AmzAccessKeyID = "AWSAccessKeyId" + + // Response request id. 
+ AmzRequestID = "x-amz-request-id" +) + +// Standard S3 HTTP response constants +const ( + LastModified = "Last-Modified" + Date = "Date" + ETag = "ETag" + ContentType = "Content-Type" + ContentMD5 = "Content-Md5" + ContentEncoding = "Content-Encoding" + Expires = "Expires" + ContentLength = "Content-Length" + ContentLanguage = "Content-Language" + ContentRange = "Content-Range" + Connection = "Connection" + AcceptRanges = "Accept-Ranges" + AmzBucketRegion = "X-Amz-Bucket-Region" + ServerInfo = "Server" + RetryAfter = "Retry-After" + Location = "Location" + CacheControl = "Cache-Control" + ContentDisposition = "Content-Disposition" + Authorization = "Authorization" + Action = "Action" + Range = "Range" +) + +//object const +const ( + MaxObjectSize = 5 * humanize.TiByte + + // Minimum Part size for multipart upload is 5MiB + MinPartSize = 5 * humanize.MiByte + + // Maximum Part size for multipart upload is 5GiB + MaxPartSize = 5 * humanize.GiByte + + // Maximum Part ID for multipart upload is 10000 + // (Acceptable values range from 1 to 10000 inclusive) + MaxPartID = 10000 + + MaxObjectList = 1000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse. + MaxDeleteList = 1000 // Limit number of objects deleted in a delete call. + MaxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse. + MaxPartsList = 10000 // Limit number of parts in a listPartsResponse. +) + +// Common http query params S3 API +const ( + VersionID = "versionId" + + PartNumber = "partNumber" + + UploadID = "uploadId" +) + +// limit +const ( + // The maximum allowed time difference between the incoming request + // date and server date during signature verification. + GlobalMaxSkewTime = 15 * time.Minute // 15 minutes skew allowed. 
+)
diff --git a/s3/set/match.go b/s3/set/match.go
new file mode 100644
index 000000000..aa57c4f4c
--- /dev/null
+++ b/s3/set/match.go
+package set
+
+// MatchSimple - finds whether the text matches/satisfies the pattern string.
+// supports only '*' wildcard in the pattern.
+// considers a file system path as a flat name space.
+func MatchSimple(pattern, name string) bool {
+	if pattern == "" {
+		return name == pattern
+	}
+	if pattern == "*" {
+		return true
+	}
+	// Does only wildcard '*' match.
+	return deepMatchRune([]rune(name), []rune(pattern), true)
+}
+
+// deepMatchRune - recursive wildcard matcher over runes.
+// In simple mode only '*' is honored as a wildcard (and a '?' facing an
+// exhausted text is accepted); otherwise both '*' and '?' are wildcards.
+func deepMatchRune(str, pattern []rune, simple bool) bool {
+	for len(pattern) > 0 {
+		switch pattern[0] {
+		default:
+			if len(str) == 0 || str[0] != pattern[0] {
+				return false
+			}
+		case '?':
+			// Stop when the text is exhausted at a '?': falling through to
+			// str[1:] on an empty slice panics (slice bounds out of range).
+			// Simple mode accepts the trailing '?' (matches upstream minio
+			// wildcard behavior); regular mode requires one more character.
+			if len(str) == 0 {
+				return simple
+			}
+		case '*':
+			return deepMatchRune(str, pattern[1:], simple) ||
+				(len(str) > 0 && deepMatchRune(str[1:], pattern, simple))
+		}
+		str = str[1:]
+		pattern = pattern[1:]
+	}
+	return len(str) == 0 && len(pattern) == 0
+}
+
+//Match regular match
+func Match(pattern, name string) (matched bool) {
+	if pattern == "" {
+		return name == pattern
+	}
+	if pattern == "*" {
+		return true
+	}
+	// Does extended wildcard '*' and '?' match.
+	return deepMatchRune([]rune(name), []rune(pattern), false)
+}
diff --git a/s3/set/match_test.go b/s3/set/match_test.go
new file mode 100644
index 000000000..eec6df487
--- /dev/null
+++ b/s3/set/match_test.go
+package set
+
+import (
+	"fmt"
+	"testing"
+)
+
+// TestMatch - Tests validate the logic of wild card matching.
+// `Match` supports '*' and '?' wildcards.
+// Sample usage: In resource matching for bucket policy validation.
+func TestMatch(t *testing.T) {
+	testCases := []struct {
+		pattern string
+		text    string
+		matched bool
+	}{
+		// Test case - 1.
+		// Test case with pattern "*". Expected to match any text.
+		{
+			pattern: "*",
+			text:    "s3:GetObject",
+			matched: true,
+		},
+		// Test case - 2.
+ // Test case with empty pattern. This only matches empty string. + { + pattern: "", + text: "s3:GetObject", + matched: false, + }, + // Test case - 3. + // Test case with empty pattern. This only matches empty string. + { + pattern: "", + text: "", + matched: true, + }, + // Test case - 4. + // Test case with single "*" at the end. + { + pattern: "s3:*", + text: "s3:ListMultipartUploadParts", + matched: true, + }, + // Test case - 5. + // Test case with a no "*". In this case the pattern and text should be the same. + { + pattern: "s3:ListBucketMultipartUploads", + text: "s3:ListBucket", + matched: false, + }, + // Test case - 6. + // Test case with a no "*". In this case the pattern and text should be the same. + { + pattern: "s3:ListBucket", + text: "s3:ListBucket", + matched: true, + }, + // Test case - 7. + // Test case with a no "*". In this case the pattern and text should be the same. + { + pattern: "s3:ListBucketMultipartUploads", + text: "s3:ListBucketMultipartUploads", + matched: true, + }, + // Test case - 8. + // Test case with pattern containing key name with a prefix. Should accept the same text without a "*". + { + pattern: "my-bucket/oo*", + text: "my-bucket/oo", + matched: true, + }, + // Test case - 9. + // Test case with "*" at the end of the pattern. + { + pattern: "my-bucket/In*", + text: "my-bucket/India/Karnataka/", + matched: true, + }, + // Test case - 10. + // Test case with prefixes shuffled. + // This should fail. + { + pattern: "my-bucket/In*", + text: "my-bucket/Karnataka/India/", + matched: false, + }, + // Test case - 11. + // Test case with text expanded to the wildcards in the pattern. + { + pattern: "my-bucket/In*/Ka*/Ban", + text: "my-bucket/India/Karnataka/Ban", + matched: true, + }, + // Test case - 12. + // Test case with the keyname part is repeated as prefix several times. + // This is valid. 
+ { + pattern: "my-bucket/In*/Ka*/Ban", + text: "my-bucket/India/Karnataka/Ban/Ban/Ban/Ban/Ban", + matched: true, + }, + // Test case - 13. + // Test case to validate that `*` can be expanded into multiple prefixes. + { + pattern: "my-bucket/In*/Ka*/Ban", + text: "my-bucket/India/Karnataka/Area1/Area2/Area3/Ban", + matched: true, + }, + // Test case - 14. + // Test case to validate that `*` can be expanded into multiple prefixes. + { + pattern: "my-bucket/In*/Ka*/Ban", + text: "my-bucket/India/State1/State2/Karnataka/Area1/Area2/Area3/Ban", + matched: true, + }, + // Test case - 15. + // Test case where the keyname part of the pattern is expanded in the text. + { + pattern: "my-bucket/In*/Ka*/Ban", + text: "my-bucket/India/Karnataka/Bangalore", + matched: false, + }, + // Test case - 16. + // Test case with prefixes and wildcard expanded for all "*". + { + pattern: "my-bucket/In*/Ka*/Ban*", + text: "my-bucket/India/Karnataka/Bangalore", + matched: true, + }, + // Test case - 17. + // Test case with keyname part being a wildcard in the pattern. + { + pattern: "my-bucket/*", + text: "my-bucket/India", + matched: true, + }, + // Test case - 18. + { + pattern: "my-bucket/oo*", + text: "my-bucket/odo", + matched: false, + }, + + // Test case with pattern containing wildcard '?'. + // Test case - 19. + // "my-bucket?/" matches "my-bucket1/", "my-bucket2/", "my-bucket3" etc... + // doesn't match "mybucket/". + { + pattern: "my-bucket?/abc*", + text: "mybucket/abc", + matched: false, + }, + // Test case - 20. + { + pattern: "my-bucket?/abc*", + text: "my-bucket1/abc", + matched: true, + }, + // Test case - 21. + { + pattern: "my-?-bucket/abc*", + text: "my--bucket/abc", + matched: false, + }, + // Test case - 22. + { + pattern: "my-?-bucket/abc*", + text: "my-1-bucket/abc", + matched: true, + }, + // Test case - 23. + { + pattern: "my-?-bucket/abc*", + text: "my-k-bucket/abc", + matched: true, + }, + // Test case - 24. 
+ { + pattern: "my??bucket/abc*", + text: "mybucket/abc", + matched: false, + }, + // Test case - 25. + { + pattern: "my??bucket/abc*", + text: "my4abucket/abc", + matched: true, + }, + // Test case - 26. + { + pattern: "my-bucket?abc*", + text: "my-bucket/abc", + matched: true, + }, + // Test case 27-28. + // '?' matches '/' too. (works with s3). + // This is because the namespace is considered flat. + // "abc?efg" matches both "abcdefg" and "abc/efg". + { + pattern: "my-bucket/abc?efg", + text: "my-bucket/abcdefg", + matched: true, + }, + { + pattern: "my-bucket/abc?efg", + text: "my-bucket/abc/efg", + matched: true, + }, + // Test case - 29. + { + pattern: "my-bucket/abc????", + text: "my-bucket/abc", + matched: false, + }, + // Test case - 30. + { + pattern: "my-bucket/abc????", + text: "my-bucket/abcde", + matched: false, + }, + // Test case - 31. + { + pattern: "my-bucket/abc????", + text: "my-bucket/abcdefg", + matched: true, + }, + // Test case 32-34. + // test case with no '*'. + { + pattern: "my-bucket/abc?", + text: "my-bucket/abc", + matched: false, + }, + { + pattern: "my-bucket/abc?", + text: "my-bucket/abcd", + matched: true, + }, + { + pattern: "my-bucket/abc?", + text: "my-bucket/abcde", + matched: false, + }, + // Test case 35. + { + pattern: "my-bucket/mnop*?", + text: "my-bucket/mnop", + matched: false, + }, + // Test case 36. + { + pattern: "my-bucket/mnop*?", + text: "my-bucket/mnopqrst/mnopqr", + matched: true, + }, + // Test case 37. + { + pattern: "my-bucket/mnop*?", + text: "my-bucket/mnopqrst/mnopqrs", + matched: true, + }, + // Test case 38. + { + pattern: "my-bucket/mnop*?", + text: "my-bucket/mnop", + matched: false, + }, + // Test case 39. + { + pattern: "my-bucket/mnop*?", + text: "my-bucket/mnopq", + matched: true, + }, + // Test case 40. + { + pattern: "my-bucket/mnop*?", + text: "my-bucket/mnopqr", + matched: true, + }, + // Test case 41. 
+ { + pattern: "my-bucket/mnop*?and", + text: "my-bucket/mnopqand", + matched: true, + }, + // Test case 42. + { + pattern: "my-bucket/mnop*?and", + text: "my-bucket/mnopand", + matched: false, + }, + // Test case 43. + { + pattern: "my-bucket/mnop*?and", + text: "my-bucket/mnopqand", + matched: true, + }, + // Test case 44. + { + pattern: "my-bucket/mnop*?", + text: "my-bucket/mn", + matched: false, + }, + // Test case 45. + { + pattern: "my-bucket/mnop*?", + text: "my-bucket/mnopqrst/mnopqrs", + matched: true, + }, + // Test case 46. + { + pattern: "my-bucket/mnop*??", + text: "my-bucket/mnopqrst", + matched: true, + }, + // Test case 47. + { + pattern: "my-bucket/mnop*qrst", + text: "my-bucket/mnopabcdegqrst", + matched: true, + }, + // Test case 48. + { + pattern: "my-bucket/mnop*?and", + text: "my-bucket/mnopqand", + matched: true, + }, + // Test case 49. + { + pattern: "my-bucket/mnop*?and", + text: "my-bucket/mnopand", + matched: false, + }, + // Test case 50. + { + pattern: "my-bucket/mnop*?and?", + text: "my-bucket/mnopqanda", + matched: true, + }, + // Test case 51. + { + pattern: "my-bucket/mnop*?and", + text: "my-bucket/mnopqanda", + matched: false, + }, + // Test case 52. + + { + pattern: "my-?-bucket/abc*", + text: "my-bucket/mnopqanda", + matched: false, + }, + } + // Iterating over the test cases, call the function under test and asert the output. + for i, testCase := range testCases { + t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) { + actualResult := Match(testCase.pattern, testCase.text) + if testCase.matched != actualResult { + t.Errorf("Test %d: Expected the result to be `%v`, but instead found it to be `%v`", i+1, testCase.matched, actualResult) + } + }) + } +} + +// TestMatchSimple - Tests validate the logic of wild card matching. +// `MatchSimple` supports matching for only '*' in the pattern string. +func TestMatchSimple(t *testing.T) { + testCases := []struct { + pattern string + text string + matched bool + }{ + // Test case - 1. 
+ // Test case with pattern "*". Expected to match any text. + { + pattern: "*", + text: "s3:GetObject", + matched: true, + }, + // Test case - 2. + // Test case with empty pattern. This only matches empty string. + { + pattern: "", + text: "s3:GetObject", + matched: false, + }, + // Test case - 3. + // Test case with empty pattern. This only matches empty string. + { + pattern: "", + text: "", + matched: true, + }, + // Test case - 4. + // Test case with single "*" at the end. + { + pattern: "s3:*", + text: "s3:ListMultipartUploadParts", + matched: true, + }, + // Test case - 5. + // Test case with a no "*". In this case the pattern and text should be the same. + { + pattern: "s3:ListBucketMultipartUploads", + text: "s3:ListBucket", + matched: false, + }, + // Test case - 6. + // Test case with a no "*". In this case the pattern and text should be the same. + { + pattern: "s3:ListBucket", + text: "s3:ListBucket", + matched: true, + }, + // Test case - 7. + // Test case with a no "*". In this case the pattern and text should be the same. + { + pattern: "s3:ListBucketMultipartUploads", + text: "s3:ListBucketMultipartUploads", + matched: true, + }, + // Test case - 8. + // Test case with pattern containing key name with a prefix. Should accept the same text without a "*". + { + pattern: "my-bucket/oo*", + text: "my-bucket/oo", + matched: true, + }, + // Test case - 9. + // Test case with "*" at the end of the pattern. + { + pattern: "my-bucket/In*", + text: "my-bucket/India/Karnataka/", + matched: true, + }, + // Test case - 10. + // Test case with prefixes shuffled. + // This should fail. + { + pattern: "my-bucket/In*", + text: "my-bucket/Karnataka/India/", + matched: false, + }, + // Test case - 11. + // Test case with text expanded to the wildcards in the pattern. + { + pattern: "my-bucket/In*/Ka*/Ban", + text: "my-bucket/India/Karnataka/Ban", + matched: true, + }, + // Test case - 12. + // Test case with the keyname part is repeated as prefix several times. 
+ // This is valid. + { + pattern: "my-bucket/In*/Ka*/Ban", + text: "my-bucket/India/Karnataka/Ban/Ban/Ban/Ban/Ban", + matched: true, + }, + // Test case - 13. + // Test case to validate that `*` can be expanded into multiple prefixes. + { + pattern: "my-bucket/In*/Ka*/Ban", + text: "my-bucket/India/Karnataka/Area1/Area2/Area3/Ban", + matched: true, + }, + // Test case - 14. + // Test case to validate that `*` can be expanded into multiple prefixes. + { + pattern: "my-bucket/In*/Ka*/Ban", + text: "my-bucket/India/State1/State2/Karnataka/Area1/Area2/Area3/Ban", + matched: true, + }, + // Test case - 15. + // Test case where the keyname part of the pattern is expanded in the text. + { + pattern: "my-bucket/In*/Ka*/Ban", + text: "my-bucket/India/Karnataka/Bangalore", + matched: false, + }, + // Test case - 16. + // Test case with prefixes and wildcard expanded for all "*". + { + pattern: "my-bucket/In*/Ka*/Ban*", + text: "my-bucket/India/Karnataka/Bangalore", + matched: true, + }, + // Test case - 17. + // Test case with keyname part being a wildcard in the pattern. + { + pattern: "my-bucket/*", + text: "my-bucket/India", + matched: true, + }, + // Test case - 18. + { + pattern: "my-bucket/oo*", + text: "my-bucket/odo", + matched: false, + }, + // Test case - 11. + { + pattern: "my-bucket/oo?*", + text: "my-bucket/oo???", + matched: true, + }, + // Test case - 12: + { + pattern: "my-bucket/oo??*", + text: "my-bucket/odo", + matched: false, + }, + // Test case - 13: + { + pattern: "?h?*", + text: "?h?hello", + matched: true, + }, + } + // Iterating over the test cases, call the function under test and asert the output. 
+ for i, testCase := range testCases { + t.Run(fmt.Sprintf("Test case %d", i+1), func(t *testing.T) { + actualResult := MatchSimple(testCase.pattern, testCase.text) + if testCase.matched != actualResult { + t.Errorf("Test %d: Expected the result to be `%v`, but instead found it to be `%v`", i+1, testCase.matched, actualResult) + } + }) + } +} diff --git a/s3/set/stringset.go b/s3/set/stringset.go new file mode 100644 index 000000000..5e7ba6f9c --- /dev/null +++ b/s3/set/stringset.go @@ -0,0 +1,198 @@ +package set + +import ( + "fmt" + "github.com/vmihailenco/msgpack/v5" + "sort" + + jsoniter "github.com/json-iterator/go" +) + +// StringSet - uses map as set of strings. +type StringSet map[string]struct{} + +var json = jsoniter.ConfigCompatibleWithStandardLibrary + +// ToSlice - returns StringSet as string slice. +func (set StringSet) ToSlice() []string { + keys := make([]string, 0, len(set)) + for k := range set { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// IsEmpty - returns whether the set is empty or not. +func (set StringSet) IsEmpty() bool { + return len(set) == 0 +} + +// Add - adds string to the set. +func (set StringSet) Add(s string) { + set[s] = struct{}{} +} + +// Remove - removes string in the set. It does nothing if string does not exist in the set. +func (set StringSet) Remove(s string) { + delete(set, s) +} + +// Contains - checks if string is in the set. +func (set StringSet) Contains(s string) bool { + _, ok := set[s] + return ok +} + +// FuncMatch - returns new set containing each value who passes match function. +// A 'matchFn' should accept element in a set as first argument and +// 'matchString' as second argument. The function can do any logic to +// compare both the arguments and should return true to accept element in +// a set to include in output set else the element is ignored. 
+func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet {
+	// Keep every member the predicate accepts for matchString.
+	matched := NewStringSet()
+	for member := range set {
+		if matchFn(member, matchString) {
+			matched.Add(member)
+		}
+	}
+	return matched
+}
+
+// ApplyFunc - returns a new set whose members are the results of running
+// 'applyFn' over every member of this set. The function may apply any
+// transformation; duplicate results collapse into a single member.
+func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet {
+	mapped := NewStringSet()
+	for member := range set {
+		mapped.Add(applyFn(member))
+	}
+	return mapped
+}
+
+// Equals - reports whether both sets contain exactly the same members.
+func (set StringSet) Equals(other StringSet) bool {
+	// Different cardinalities can never be equal.
+	if len(set) != len(other) {
+		return false
+	}
+	// Same size, so a one-way membership check suffices.
+	for member := range set {
+		if _, found := other[member]; !found {
+			return false
+		}
+	}
+	return true
+}
+
+// Intersection - returns a new set holding the members present in both sets.
+func (set StringSet) Intersection(other StringSet) StringSet {
+	common := NewStringSet()
+	for member := range set {
+		if _, found := other[member]; found {
+			common.Add(member)
+		}
+	}
+	return common
+}
+
+// Difference - returns a new set holding the members of this set that are
+// absent from the given set.
+func (set StringSet) Difference(other StringSet) StringSet {
+	missing := NewStringSet()
+	for member := range set {
+		if _, found := other[member]; !found {
+			missing.Add(member)
+		}
+	}
+	return missing
+}
+
+// Union - returns a new set holding every member of either set.
+func (set StringSet) Union(other StringSet) StringSet {
+	merged := NewStringSet()
+	for _, src := range []StringSet{set, other} {
+		for member := range src {
+			merged.Add(member)
+		}
+	}
+	return merged
+}
+
+// MarshalJSON - converts to JSON data.
+func (set StringSet) MarshalJSON() ([]byte, error) { + return json.Marshal(set.ToSlice()) +} + +// UnmarshalJSON - parses JSON data and creates new set with it. +// If 'data' contains JSON string array, the set contains each string. +// If 'data' contains JSON string, the set contains the string as one element. +// If 'data' contains Other JSON types, JSON parse error is returned. +func (set *StringSet) UnmarshalJSON(data []byte) error { + var sl []string + var err error + if err = json.Unmarshal(data, &sl); err == nil { + *set = make(StringSet) + for _, s := range sl { + set.Add(s) + } + } else { + var s string + if err = json.Unmarshal(data, &s); err == nil { + *set = make(StringSet) + set.Add(s) + } + } + + return err +} + +func (set StringSet) MarshalMsgpack() ([]byte, error) { + return msgpack.Marshal(set.ToSlice()) +} + +func (set *StringSet) UnmarshalMsgpack(data []byte) error { + var sl []string + var err error + if err = msgpack.Unmarshal(data, &sl); err == nil { + *set = make(StringSet) + for _, s := range sl { + set.Add(s) + } + } else { + var s string + if err = msgpack.Unmarshal(data, &s); err == nil { + *set = make(StringSet) + set.Add(s) + } + } + + return err +} + +// String - returns printable string of the set. +func (set StringSet) String() string { + return fmt.Sprintf("%s", set.ToSlice()) +} + +// NewStringSet - creates new string set. +func NewStringSet() StringSet { + return make(StringSet) +} + +// CreateStringSet - creates new string set with given string values. +func CreateStringSet(sl ...string) StringSet { + set := make(StringSet) + for _, k := range sl { + set.Add(k) + } + return set +} diff --git a/s3/set/stringset_test.go b/s3/set/stringset_test.go new file mode 100644 index 000000000..4c1996de6 --- /dev/null +++ b/s3/set/stringset_test.go @@ -0,0 +1,359 @@ +package set + +import ( + "fmt" + "strings" + "testing" +) + +// NewStringSet() is called and the result is validated. 
+func TestNewStringSet(t *testing.T) { + if ss := NewStringSet(); !ss.IsEmpty() { + t.Fatalf("expected: true, got: false") + } +} + +// CreateStringSet() is called and the result is validated. +func TestCreateStringSet(t *testing.T) { + ss := CreateStringSet("foo") + if str := ss.String(); str != `[foo]` { + t.Fatalf("expected: %s, got: %s", `["foo"]`, str) + } +} + +// StringSet.Add() is called with series of cases for valid and erroneous inputs and the result is validated. +func TestStringSetAdd(t *testing.T) { + testCases := []struct { + name string + value string + expectedResult string + }{ + // Test first addition. + {"test1", "foo", `[foo]`}, + // Test duplicate addition. + {"test2", "foo", `[foo]`}, + // Test new addition. + {"test3", "bar", `[bar foo]`}, + } + + ss := NewStringSet() + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + ss.Add(testCase.value) + if str := ss.String(); str != testCase.expectedResult { + t.Fatalf("test %v expected: %s, got: %s", testCase.name, testCase.expectedResult, str) + } + }) + } +} + +// StringSet.Remove() is called with series of cases for valid and erroneous inputs and the result is validated. +func TestStringSetRemove(t *testing.T) { + ss := CreateStringSet("foo", "bar") + testCases := []struct { + name string + value string + expectedResult string + }{ + // Test removing non-existen item. + {"test1", "baz", `[bar foo]`}, + // Test remove existing item. + {"test2", "foo", `[bar]`}, + // Test remove existing item again. + {"test2", "foo", `[bar]`}, + // Test remove to make set to empty. + {"test3", "bar", `[]`}, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + ss.Remove(testCase.value) + if str := ss.String(); str != testCase.expectedResult { + t.Fatalf("expected: %s, got: %s", testCase.expectedResult, str) + } + }) + } +} + +// StringSet.Contains() is called with series of cases for valid and erroneous inputs and the result is validated. 
+func TestStringSetContains(t *testing.T) { + ss := CreateStringSet("foo") + testCases := []struct { + name string + value string + expectedResult bool + }{ + // Test to check non-existent item. + {"test1", "bar", false}, + // Test to check existent item. + {"test2", "foo", true}, + // Test to verify case sensitivity. + {"test3", "Foo", false}, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + if result := ss.Contains(testCase.value); result != testCase.expectedResult { + t.Fatalf("expected: %t, got: %t", testCase.expectedResult, result) + } + }) + } +} + +// StringSet.FuncMatch() is called with series of cases for valid and erroneous inputs and the result is validated. +func TestStringSetFuncMatch(t *testing.T) { + ss := CreateStringSet("foo", "bar") + testCases := []struct { + name string + matchFn func(string, string) bool + value string + expectedResult string + }{ + // Test to check match function doing case insensive compare. + {"test1", func(setValue string, compareValue string) bool { + return strings.EqualFold(setValue, compareValue) + }, "Bar", `[bar]`}, + // Test to check match function doing prefix check. + {"test2", func(setValue string, compareValue string) bool { + return strings.HasPrefix(compareValue, setValue) + }, "foobar", `[foo]`}, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + s := ss.FuncMatch(testCase.matchFn, testCase.value) + if result := s.String(); result != testCase.expectedResult { + t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result) + } + }) + } +} + +// StringSet.ApplyFunc() is called with series of cases for valid and erroneous inputs and the result is validated. +func TestStringSetApplyFunc(t *testing.T) { + ss := CreateStringSet("foo", "bar") + testCases := []struct { + name string + applyFn func(string) string + expectedResult string + }{ + // Test to apply function prepending a known string. 
+ {"test1", func(setValue string) string { return "mybucket/" + setValue }, `[mybucket/bar mybucket/foo]`}, + // Test to apply function modifying values. + {"test2", func(setValue string) string { return setValue[1:] }, `[ar oo]`}, + } + + for _, testCase := range testCases { + s := ss.ApplyFunc(testCase.applyFn) + if result := s.String(); result != testCase.expectedResult { + t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result) + } + } +} + +// StringSet.Equals() is called with series of cases for valid and erroneous inputs and the result is validated. +func TestStringSetEquals(t *testing.T) { + testCases := []struct { + name string + set1 StringSet + set2 StringSet + expectedResult bool + }{ + // Test equal set + {"test1", CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar"), true}, + // Test second set with more items + {"test2", CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar", "baz"), false}, + // Test second set with less items + {"test3", CreateStringSet("foo", "bar"), CreateStringSet("bar"), false}, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + if result := testCase.set1.Equals(testCase.set2); result != testCase.expectedResult { + t.Fatalf("expected: %t, got: %t", testCase.expectedResult, result) + } + }) + } +} + +// StringSet.Intersection() is called with series of cases for valid and erroneous inputs and the result is validated. +func TestStringSetIntersection(t *testing.T) { + testCases := []struct { + name string + set1 StringSet + set2 StringSet + expectedResult StringSet + }{ + // Test intersecting all values. + {"test1", CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar")}, + // Test intersecting all values in second set. + {"test2", CreateStringSet("foo", "bar", "baz"), CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar")}, + // Test intersecting different values in second set. 
+ {"test3", CreateStringSet("foo", "baz"), CreateStringSet("baz", "bar"), CreateStringSet("baz")}, + // Test intersecting none. + {"test4", CreateStringSet("foo", "baz"), CreateStringSet("poo", "bar"), NewStringSet()}, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + if result := testCase.set1.Intersection(testCase.set2); !result.Equals(testCase.expectedResult) { + t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result) + } + }) + } +} + +// StringSet.Difference() is called with series of cases for valid and erroneous inputs and the result is validated. +func TestStringSetDifference(t *testing.T) { + testCases := []struct { + name string + set1 StringSet + set2 StringSet + expectedResult StringSet + }{ + // Test differing none. + {"test1", CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar"), NewStringSet()}, + // Test differing in first set. + {"test2", CreateStringSet("foo", "bar", "baz"), CreateStringSet("foo", "bar"), CreateStringSet("baz")}, + // Test differing values in both set. + {"test3", CreateStringSet("foo", "baz"), CreateStringSet("baz", "bar"), CreateStringSet("foo")}, + // Test differing all values. + {"test4", CreateStringSet("foo", "baz"), CreateStringSet("poo", "bar"), CreateStringSet("foo", "baz")}, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + if result := testCase.set1.Difference(testCase.set2); !result.Equals(testCase.expectedResult) { + t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result) + } + }) + } +} + +// StringSet.Union() is called with series of cases for valid and erroneous inputs and the result is validated. +func TestStringSetUnion(t *testing.T) { + testCases := []struct { + name string + set1 StringSet + set2 StringSet + expectedResult StringSet + }{ + // Test union same values. 
+ {"test1", CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar")}, + // Test union same values in second set. + {"test2", CreateStringSet("foo", "bar", "baz"), CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar", "baz")}, + // Test union different values in both set. + {"test2", CreateStringSet("foo", "baz"), CreateStringSet("baz", "bar"), CreateStringSet("foo", "baz", "bar")}, + // Test union all different values. + {"test2", CreateStringSet("foo", "baz"), CreateStringSet("poo", "bar"), CreateStringSet("foo", "baz", "poo", "bar")}, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + if result := testCase.set1.Union(testCase.set2); !result.Equals(testCase.expectedResult) { + t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result) + } + }) + } +} + +// StringSet.MarshalJSON() is called with series of cases for valid and erroneous inputs and the result is validated. +func TestStringSetMarshalJSON(t *testing.T) { + testCases := []struct { + name string + set StringSet + expectedResult string + }{ + // Test set with values. + {"test1", CreateStringSet("foo", "bar"), `["bar","foo"]`}, + // Test empty set. + {"test2", NewStringSet(), "[]"}, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + if result, _ := testCase.set.MarshalJSON(); string(result) != testCase.expectedResult { + t.Fatalf("expected: %s, got: %s", testCase.expectedResult, string(result)) + } + }) + } +} + +// StringSet.UnmarshalJSON() is called with series of cases for valid and erroneous inputs and the result is validated. +func TestStringSetUnmarshalJSON(t *testing.T) { + testCases := []struct { + name string + data []byte + expectedResult string + }{ + // Test to convert JSON array to set. + {"test1", []byte(`["bar","foo"]`), `[bar foo]`}, + // Test to convert JSON string to set. + {"test2", []byte(`"bar"`), `[bar]`}, + // Test to convert JSON empty array to set. 
+ {"test3", []byte(`[]`), `[]`}, + // Test to convert JSON empty string to set. + {"test4", []byte(`""`), `[]`}, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + var set StringSet + set.UnmarshalJSON(testCase.data) + if result := set.String(); result != testCase.expectedResult { + t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result) + } + }) + } +} + +// StringSet.String() is called with series of cases for valid and erroneous inputs and the result is validated. +func TestStringSetString(t *testing.T) { + testCases := []struct { + name string + set StringSet + expectedResult string + }{ + // Test empty set. + {"test1", NewStringSet(), `[]`}, + // Test set with empty value. + {"test2", CreateStringSet(""), `[]`}, + // Test set with value. + {"test3", CreateStringSet("foo"), `[foo]`}, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + if str := testCase.set.String(); str != testCase.expectedResult { + t.Fatalf("expected: %s, got: %s", testCase.expectedResult, str) + } + }) + } +} + +// StringSet.ToSlice() is called with series of cases for valid and erroneous inputs and the result is validated. +func TestStringSetToSlice(t *testing.T) { + testCases := []struct { + name string + set StringSet + expectedResult string + }{ + // Test empty set. + {"test1", NewStringSet(), `[]`}, + // Test set with empty value. + {"test2", CreateStringSet(""), `[]`}, + // Test set with value. + {"test3", CreateStringSet("foo"), `[foo]`}, + // Test set with value. 
+ {"test4", CreateStringSet("foo", "bar"), `[bar foo]`}, + } + + for _, testCase := range testCases { + t.Run("testCase.name", func(t *testing.T) { + sslice := testCase.set.ToSlice() + if str := fmt.Sprintf("%s", sslice); str != testCase.expectedResult { + t.Fatalf("expected: %s, got: %s", testCase.expectedResult, str) + } + }) + } +} diff --git a/s3/utils/bgcontext.go b/s3/utils/bgcontext.go new file mode 100644 index 000000000..3ad10e230 --- /dev/null +++ b/s3/utils/bgcontext.go @@ -0,0 +1,35 @@ +package utils + +import ( + "context" + "time" +) + +// BgContext returns a context that can be used for async operations. +// Cancellation/timeouts are removed, so parent cancellations/timeout will +// not propagate from parent. +// Context values are preserved. +// This can be used for goroutines that live beyond the parent context. +func BgContext(parent context.Context) context.Context { + return bgCtx{parent: parent} +} + +type bgCtx struct { + parent context.Context +} + +func (a bgCtx) Done() <-chan struct{} { + return nil +} + +func (a bgCtx) Err() error { + return nil +} + +func (a bgCtx) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +func (a bgCtx) Value(key interface{}) interface{} { + return a.parent.Value(key) +} diff --git a/s3/utils/encode.go b/s3/utils/encode.go new file mode 100644 index 000000000..d8fc042cd --- /dev/null +++ b/s3/utils/encode.go @@ -0,0 +1,88 @@ +package utils + +import "strings" + +// S3EncodeName encodes string in response when encodingType is specified in AWS S3 requests. 
+func S3EncodeName(name string, encodingType string) (result string) { + // Quick path to exit + if encodingType == "" { + return name + } + encodingType = strings.ToLower(encodingType) + switch encodingType { + case "url": + return s3URLEncode(name) + } + return name +} + +// s3URLEncode is based on Golang's url.QueryEscape() code, +// while considering some S3 exceptions: +// - Avoid encoding '/' and '*' +// - Force encoding of '~' +func s3URLEncode(s string) string { + spaceCount, hexCount := 0, 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(c) { + if c == ' ' { + spaceCount++ + } else { + hexCount++ + } + } + } + + if spaceCount == 0 && hexCount == 0 { + return s + } + + var buf [64]byte + var t []byte + + required := len(s) + 2*hexCount + if required <= len(buf) { + t = buf[:required] + } else { + t = make([]byte, required) + } + + if hexCount == 0 { + copy(t, s) + for i := 0; i < len(s); i++ { + if s[i] == ' ' { + t[i] = '+' + } + } + return string(t) + } + + j := 0 + for i := 0; i < len(s); i++ { + switch c := s[i]; { + case c == ' ': + t[j] = '+' + j++ + case shouldEscape(c): + t[j] = '%' + t[j+1] = "0123456789ABCDEF"[c>>4] + t[j+2] = "0123456789ABCDEF"[c&15] + j += 3 + default: + t[j] = s[i] + j++ + } + } + return string(t) +} +func shouldEscape(c byte) bool { + if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { + return false + } + + switch c { + case '-', '_', '.', '/', '*': + return false + } + return true +} diff --git a/s3/utils/hash/errors.go b/s3/utils/hash/errors.go new file mode 100644 index 000000000..2ffea70d9 --- /dev/null +++ b/s3/utils/hash/errors.go @@ -0,0 +1,33 @@ +package hash + +import "fmt" + +// SHA256Mismatch - when content sha256 does not match with what was sent from client. 
+type SHA256Mismatch struct { + ExpectedSHA256 string + CalculatedSHA256 string +} + +func (e SHA256Mismatch) Error() string { + return "Bad sha256: Expected " + e.ExpectedSHA256 + " does not match calculated " + e.CalculatedSHA256 +} + +// BadDigest - Content-MD5 you specified did not match what we received. +type BadDigest struct { + ExpectedMD5 string + CalculatedMD5 string +} + +func (e BadDigest) Error() string { + return "Bad digest: Expected " + e.ExpectedMD5 + " does not match calculated " + e.CalculatedMD5 +} + +// ErrSizeMismatch error size mismatch +type ErrSizeMismatch struct { + Want int64 + Got int64 +} + +func (e ErrSizeMismatch) Error() string { + return fmt.Sprintf("Size mismatch: got %d, want %d", e.Got, e.Want) +} diff --git a/s3/utils/hash/reader.go b/s3/utils/hash/reader.go new file mode 100644 index 000000000..26157490d --- /dev/null +++ b/s3/utils/hash/reader.go @@ -0,0 +1,211 @@ +package hash + +import ( + "bytes" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "errors" + "github.com/yann-y/fds/pkg/etag" + "hash" + "io" +) + +// A Reader wraps an io.Reader and computes the MD5 checksum +// of the read content as ETag. Optionally, it also computes +// the SHA256 checksum of the content. +// +// If the reference values for the ETag and content SHA26 +// are not empty then it will check whether the computed +// match the reference values. +type Reader struct { + src io.Reader + bytesRead int64 + + size int64 + actualSize int64 + + contentSHA256 []byte + checksum etag.ETag + sha256 hash.Hash +} + +// NewReader returns a new Reader that wraps src and computes +// MD5 checksum of everything it reads as ETag. +// +// It also computes the SHA256 checksum of everything it reads +// if sha256Hex is not the empty string. +// +// If size resp. actualSize is unknown at the time of calling +// NewReader then it should be set to -1. 
+// +// NewReader may try merge the given size, MD5 and SHA256 values +// into src - if src is a Reader - to avoid computing the same +// checksums multiple times. +func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) (*Reader, error) { + MD5, err := hex.DecodeString(md5Hex) + if err != nil { + return nil, BadDigest{ // TODO(aead): Return an error that indicates that an invalid ETag has been specified + ExpectedMD5: md5Hex, + CalculatedMD5: "", + } + } + SHA256, err := hex.DecodeString(sha256Hex) + if err != nil { + return nil, SHA256Mismatch{ // TODO(aead): Return an error that indicates that an invalid Content-SHA256 has been specified + ExpectedSHA256: sha256Hex, + CalculatedSHA256: "", + } + } + + // Merge the size, MD5 and SHA256 values if src is a Reader. + // The size may be set to -1 by callers if unknown. + if r, ok := src.(*Reader); ok { + if r.bytesRead > 0 { + return nil, errors.New("h: already read from h reader") + } + if len(r.checksum) != 0 && len(MD5) != 0 && !etag.Equal(r.checksum, etag.ETag(MD5)) { + return nil, BadDigest{ + ExpectedMD5: r.checksum.String(), + CalculatedMD5: md5Hex, + } + } + if len(r.contentSHA256) != 0 && len(SHA256) != 0 && !bytes.Equal(r.contentSHA256, SHA256) { + return nil, SHA256Mismatch{ + ExpectedSHA256: hex.EncodeToString(r.contentSHA256), + CalculatedSHA256: sha256Hex, + } + } + if r.size >= 0 && size >= 0 && r.size != size { + return nil, ErrSizeMismatch{Want: r.size, Got: size} + } + + r.checksum = etag.ETag(MD5) + r.contentSHA256 = SHA256 + if r.size < 0 && size >= 0 { + r.src = etag.Wrap(io.LimitReader(r.src, size), r.src) + r.size = size + } + if r.actualSize <= 0 && actualSize >= 0 { + r.actualSize = actualSize + } + return r, nil + } + + if size >= 0 { + r := io.LimitReader(src, size) + if _, ok := src.(etag.Tagger); !ok { + src = etag.NewReader(r, etag.ETag(MD5)) + } else { + src = etag.Wrap(r, src) + } + } else if _, ok := src.(etag.Tagger); !ok { + src = etag.NewReader(src, 
etag.ETag(MD5)) + } + var h hash.Hash + if len(SHA256) != 0 { + h = newSHA256() + } + return &Reader{ + src: src, + size: size, + actualSize: actualSize, + checksum: etag.ETag(MD5), + contentSHA256: SHA256, + sha256: h, + }, nil +} + +func (r *Reader) Read(p []byte) (int, error) { + n, err := r.src.Read(p) + r.bytesRead += int64(n) + if r.sha256 != nil { + r.sha256.Write(p[:n]) + } + + if err == io.EOF { // Verify content SHA256, if set. + if r.sha256 != nil { + if sum := r.sha256.Sum(nil); !bytes.Equal(r.contentSHA256, sum) { + return n, SHA256Mismatch{ + ExpectedSHA256: hex.EncodeToString(r.contentSHA256), + CalculatedSHA256: hex.EncodeToString(sum), + } + } + } + } + if err != nil && err != io.EOF { + if v, ok := err.(etag.VerifyError); ok { + return n, BadDigest{ + ExpectedMD5: v.Expected.String(), + CalculatedMD5: v.Computed.String(), + } + } + } + return n, err +} + +// Size returns the absolute number of bytes the Reader +// will return during reading. It returns -1 for unlimited +// data. +func (r *Reader) Size() int64 { return r.size } + +// ActualSize returns the pre-modified size of the object. +// DecompressedSize - For compressed objects. +func (r *Reader) ActualSize() int64 { return r.actualSize } + +// ETag returns the ETag computed by an underlying etag.Tagger. +// If the underlying io.Reader does not implement etag.Tagger +// it returns nil. +func (r *Reader) ETag() etag.ETag { + if t, ok := r.src.(etag.Tagger); ok { + return t.ETag() + } + return nil +} + +// MD5 returns the MD5 checksum set as reference value. +// +// It corresponds to the checksum that is expected and +// not the actual MD5 checksum of the content. +// Therefore, refer to MD5Current. +func (r *Reader) MD5() []byte { + return r.checksum +} + +// MD5Current returns the MD5 checksum of the content +// that has been read so far. +// +// Calling MD5Current again after reading more data may +// result in a different checksum. 
+func (r *Reader) MD5Current() []byte { + return r.ETag()[:] +} + +// SHA256 returns the SHA256 checksum set as reference value. +// +// It corresponds to the checksum that is expected and +// not the actual SHA256 checksum of the content. +func (r *Reader) SHA256() []byte { + return r.contentSHA256 +} + +// MD5HexString returns a hex representation of the MD5. +func (r *Reader) MD5HexString() string { + return hex.EncodeToString(r.checksum) +} + +// MD5Base64String returns a hex representation of the MD5. +func (r *Reader) MD5Base64String() string { + return base64.StdEncoding.EncodeToString(r.checksum) +} + +// SHA256HexString returns a hex representation of the SHA256. +func (r *Reader) SHA256HexString() string { + return hex.EncodeToString(r.contentSHA256) +} + +var _ io.Closer = (*Reader)(nil) // compiler check + +// Close and release resources. +func (r *Reader) Close() error { return nil } +func newSHA256() hash.Hash { return sha256.New() } diff --git a/s3/utils/ip.go b/s3/utils/ip.go new file mode 100644 index 000000000..d80841629 --- /dev/null +++ b/s3/utils/ip.go @@ -0,0 +1,46 @@ +package utils + +import ( + logging "github.com/ipfs/go-log/v2" + "github.com/yann-y/fds/internal/iam/set" + "net" + "runtime" +) + +var log = logging.Logger("utils") + +// MustGetLocalIP4 returns IPv4 addresses of localhost. It panics on error. 
+func MustGetLocalIP4() (ipList set.StringSet) { + ipList = set.NewStringSet() + ifs, err := net.Interfaces() + if err != nil { + log.Errorf("Unable to get IP addresses of this host %v", err) + + } + + for _, interf := range ifs { + addrs, err := interf.Addrs() + if err != nil { + continue + } + if runtime.GOOS == "windows" && interf.Flags&net.FlagUp == 0 { + continue + } + + for _, addr := range addrs { + var ip net.IP + switch v := addr.(type) { + case *net.IPNet: + ip = v.IP + case *net.IPAddr: + ip = v.IP + } + + if ip.To4() != nil { + ipList.Add(ip.String()) + } + } + } + + return ipList +} diff --git a/s3/utils/levels.go b/s3/utils/levels.go new file mode 100644 index 000000000..07af02999 --- /dev/null +++ b/s3/utils/levels.go @@ -0,0 +1,15 @@ +package utils + +import ( + logging "github.com/ipfs/go-log/v2" + "os" +) + +func SetupLogLevels() { + if _, set := os.LookupEnv("GOLOG_LOG_LEVEL"); !set { + _ = logging.SetLogLevel("*", "INFO") + + } else { + _ = logging.SetLogLevel("*", os.Getenv("GOLOG_LOG_LEVEL")) + } +} diff --git a/s3/utils/signature.go b/s3/utils/signature.go new file mode 100644 index 000000000..e96c3bf53 --- /dev/null +++ b/s3/utils/signature.go @@ -0,0 +1,359 @@ +package utils + +import ( + "bytes" + "crypto/hmac" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "github.com/yann-y/fds/internal/consts" + "github.com/yann-y/fds/internal/iam/auth" + "io" + "net/http" + "regexp" + "sort" + "strings" + "testing" + "time" + "unicode/utf8" +) + +var ignoredHeaders = map[string]bool{ + "Authorization": true, + "Content-Type": true, + "Content-Length": true, + "User-Agent": true, +} + +// AWS Signature Version '4' constants. 
+const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601Format = "20060102T150405Z" + yyyymmdd = "20060102" +) + +type ServiceType string + +// MustNewSignedV4Request NewSignedV4Request +func MustNewSignedV4Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, st ServiceType, accessKey, secretKey string, t *testing.T) *http.Request { + req, err := NewRequest(method, urlStr, contentLength, body) + if err != nil { + t.Fatalf("newTestRequest fail err:%v", err) + } + cred := &auth.Credentials{AccessKey: accessKey, SecretKey: secretKey} + if err := SignRequestV4(req, cred.AccessKey, cred.SecretKey, st); err != nil { + t.Fatalf("Unable to inititalized new signed http request %s", err) + } + return req +} + +// SignRequestV4 Sign given request using Signature V4. +func SignRequestV4(req *http.Request, accessKey, secretKey string, st ServiceType) error { + // Get hashed payload. + hashedPayload := getContentSha256Cksum(req) + currTime := time.Now().UTC() + + // Set x-amz-date. + req.Header.Set("x-amz-date", currTime.Format(iso8601Format)) + req.Header.Set(consts.ContentType, "application/x-www-form-urlencoded") + // Query string. + // final Authorization header + // Get header keys. + // Get header map. + headerMap := make(map[string][]string) + for k, vv := range req.Header { + // If request header key is not in ignored headers, then add it. + if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; !ok { + headerMap[strings.ToLower(k)] = vv + } + } + headers := []string{"host"} + for k := range headerMap { + headers = append(headers, k) + } + sort.Strings(headers) + + // Get canonical headers. 
+ var buf bytes.Buffer + for _, k := range headers { + buf.WriteString(k) + buf.WriteByte(':') + switch { + case k == "host": + buf.WriteString(req.URL.Host) + fallthrough + default: + for idx, v := range headerMap[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(v) + } + buf.WriteByte('\n') + } + } + headerMap["host"] = append(headerMap["host"], req.URL.Host) + + // Get signed headers. + signedHeaders := strings.Join(headers, ";") + //a,_:=io.ReadAll(req.Body) + //b:=req.URL.Query().Encode() + //req.Form=url.Values{} + //req.Form.Add(b,string(a)) + //queryStr := req.Form.Encode() + queryStr := req.URL.Query().Encode() + region := consts.DefaultRegion + // Get scope. + scope := strings.Join([]string{ + currTime.Format(yyyymmdd), + region, + string(st), + "aws4_request", + }, "/") + // Get canonical request. + canonicalRequest := GetCanonicalRequest(headerMap, hashedPayload, queryStr, req.URL.Path, req.Method) + // Get string to sign from canonical request. + stringToSign := GetStringToSign(canonicalRequest, currTime, scope) + + // Get hmac signing key. + signingKey := GetSigningKey(secretKey, currTime, region, string(st)) + + // Calculate signature. 
+ newSignature := GetSignature(signingKey, stringToSign) + + parts := []string{ + "AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope, + "SignedHeaders=" + signedHeaders, + "Signature=" + newSignature, + } + author := strings.Join(parts, ", ") + req.Header.Set("Authorization", author) + + return nil +} + +// if object matches reserved string, no need to encode them +var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. +func EncodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} + +// NewRequest Returns new HTTP request object. 
+func NewRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) { + if method == "" { + method = "POST" + } + + // Save for subsequent use + var hashedPayload string + var md5Base64 string + switch { + case body == nil: + hashedPayload = getSHA256Hash([]byte{}) + default: + payloadBytes, err := io.ReadAll(body) + if err != nil { + return nil, err + } + hashedPayload = getSHA256Hash(payloadBytes) + md5Base64 = getMD5HashBase64(payloadBytes) + } + // Seek back to beginning. + if body != nil { + body.Seek(0, 0) + } else { + body = bytes.NewReader([]byte("")) + } + req, err := http.NewRequest(method, urlStr, body) + if err != nil { + return nil, err + } + if md5Base64 != "" { + req.Header.Set("Content-Md5", md5Base64) + } + req.Header.Set("x-amz-content-sha256", hashedPayload) + + // Add Content-Length + req.ContentLength = contentLength + + return req, nil +} + +// getSHA256Hash returns SHA-256 hash in hex encoding of given data. +func getSHA256Hash(data []byte) string { + return hex.EncodeToString(getSHA256Sum(data)) +} + +// getMD5HashBase64 returns MD5 hash in base64 encoding of given data. +func getMD5HashBase64(data []byte) string { + return base64.StdEncoding.EncodeToString(getMD5Sum(data)) +} + +// getSHA256Hash returns SHA-256 sum of given data. +func getSHA256Sum(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +// getMD5Sum returns MD5 sum of given data. +func getMD5Sum(data []byte) []byte { + hash := md5.New() + hash.Write(data) + return hash.Sum(nil) +} + +// Returns SHA256 for calculating canonical-request. +func getContentSha256Cksum(r *http.Request) string { + var ( + defaultSha256Cksum string + v []string + ok bool + ) + + // X-Amz-Content-Sha256, if not set in signed requests, checksum + // will default to sha256([]byte("")). 
+ defaultSha256Cksum = consts.EmptySHA256 + v, ok = r.Header[consts.AmzContentSha256] + + // We found 'X-Amz-Content-Sha256' return the captured value. + if ok { + return v[0] + } + + // We couldn't find 'X-Amz-Content-Sha256'. + return defaultSha256Cksum +} + +// GetCanonicalRequest generate a canonical request of style +// +// canonicalRequest = +// +// \n +// \n +// \n +// \n +// \n +// +func GetCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string { + rawQuery := strings.ReplaceAll(queryStr, "+", "%20") + encodedPath := EncodePath(urlPath) + canonicalRequest := strings.Join([]string{ + method, + encodedPath, + rawQuery, + getCanonicalHeaders(extractedSignedHeaders), + GetSignedHeaders(extractedSignedHeaders), + payload, + }, "\n") + return canonicalRequest +} + +// GetSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names +func GetSignedHeaders(signedHeaders http.Header) string { + var headers []string + for k := range signedHeaders { + headers = append(headers, strings.ToLower(k)) + } + sort.Strings(headers) + return strings.Join(headers, ";") +} + +// getCanonicalHeaders generate a list of request headers with their values +func getCanonicalHeaders(signedHeaders http.Header) string { + var headers []string + vals := make(http.Header) + for k, vv := range signedHeaders { + headers = append(headers, strings.ToLower(k)) + vals[strings.ToLower(k)] = vv + } + sort.Strings(headers) + + var buf bytes.Buffer + for _, k := range headers { + buf.WriteString(k) + buf.WriteByte(':') + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(signV4TrimAll(v)) + } + buf.WriteByte('\n') + } + return buf.String() +} + +// GetStringToSign a string based on selected query values. 
+func GetStringToSign(canonicalRequest string, t time.Time, scope string) string { + stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n" + stringToSign += scope + "\n" + canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest)) + stringToSign += hex.EncodeToString(canonicalRequestBytes[:]) + return stringToSign +} + +// GetSigningKey hmac seed to calculate final signature. +func GetSigningKey(secretKey string, t time.Time, region string, serviceType string) []byte { + date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd))) + regionBytes := sumHMAC(date, []byte(region)) + service := sumHMAC(regionBytes, []byte(serviceType)) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// GetSignature final signature in hexadecimal form. +func GetSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// sumHMAC calculate hmac between two input byte array. 
+func sumHMAC(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall() +// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +func signV4TrimAll(input string) string { + // Compress adjacent spaces (a space is determined by + // unicode.IsSpace() internally here) to one space and return + return strings.Join(strings.Fields(input), " ") +} diff --git a/s3/utils/utils.go b/s3/utils/utils.go new file mode 100644 index 000000000..73842b663 --- /dev/null +++ b/s3/utils/utils.go @@ -0,0 +1,9 @@ +package utils + +func CloneMapSS(src map[string]string) map[string]string { + r := make(map[string]string, len(src)) + for k, v := range src { + r[k] = v + } + return r +} diff --git a/s3/utils/xml.go b/s3/utils/xml.go new file mode 100644 index 000000000..3374b0db1 --- /dev/null +++ b/s3/utils/xml.go @@ -0,0 +1,26 @@ +package utils + +import ( + "encoding/xml" + "io" +) + +// XmlDecoder provide decoded value in xml. +func XmlDecoder(body io.Reader, v interface{}, size int64) error { + var lbody io.Reader + if size > 0 { + lbody = io.LimitReader(body, size) + } else { + lbody = body + } + d := xml.NewDecoder(lbody) + // Ignore any encoding set in the XML body + d.CharsetReader = nopCharsetConverter + return d.Decode(v) +} + +// nopCharsetConverter is a dummy charset convert which just copies input to output, +// it is used to ignore custom encoding charset in S3 XML body. 
+func nopCharsetConverter(label string, input io.Reader) (io.Reader, error) { + return input, nil +} From 7d259469aedbae42cd1a3645aaaf6eb57740f6f7 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Tue, 8 Aug 2023 17:32:35 +0800 Subject: [PATCH 010/139] chore: --- s3/action/action.go | 2 +- s3/action/action_test.go | 2 +- s3/auth/auth_type.go | 182 --------------------------------------- s3/auth/other.go | 1 - s3/set/stringset.go | 2 +- 5 files changed, 3 insertions(+), 186 deletions(-) delete mode 100644 s3/auth/auth_type.go delete mode 100644 s3/auth/other.go diff --git a/s3/action/action.go b/s3/action/action.go index 4744e0252..a354fcc6d 100644 --- a/s3/action/action.go +++ b/s3/action/action.go @@ -1,4 +1,4 @@ -package s3action +package action type Action string diff --git a/s3/action/action_test.go b/s3/action/action_test.go index a2fcd3e50..696846a67 100644 --- a/s3/action/action_test.go +++ b/s3/action/action_test.go @@ -1,4 +1,4 @@ -package s3action +package action import "testing" diff --git a/s3/auth/auth_type.go b/s3/auth/auth_type.go deleted file mode 100644 index 10bbe6567..000000000 --- a/s3/auth/auth_type.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - * The following code tries to reverse engineer the Amazon S3 APIs, - * and is mostly copied from minio implementation. - */ - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. 
- -package auth - -import ( - "net/http" - "net/url" - "reflect" - "strconv" - "strings" - - "github.com/bittorrent/go-btfs/s3/apierrors" - "github.com/bittorrent/go-btfs/s3/consts" -) - -// http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the -// client did not calculate sha256 of the payload. -const unsignedPayload = "UNSIGNED-PAYLOAD" - -// isValidRegion - verify if incoming region value is valid with configured Region. -func isValidRegion(reqRegion string, confRegion string) bool { - if confRegion == "" { - return true - } - if confRegion == "US" { - confRegion = consts.DefaultRegion - } - // Some older s3 clients set region as "US" instead of - // globalDefaultRegion, handle it. - if reqRegion == "US" { - reqRegion = consts.DefaultRegion - } - return reqRegion == confRegion -} - -func contains(slice interface{}, elem interface{}) bool { - v := reflect.ValueOf(slice) - if v.Kind() == reflect.Slice { - for i := 0; i < v.Len(); i++ { - if v.Index(i).Interface() == elem { - return true - } - } - } - return false -} - -// extractSignedHeaders extract signed headers from Authorization header -func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, apierrors.ErrorCode) { - reqHeaders := r.Header - reqQueries := r.Form - // find whether "host" is part of list of signed headers. - // if not return ErrUnsignedHeaders. "host" is mandatory. - if !contains(signedHeaders, "host") { - return nil, apierrors.ErrUnsignedHeaders - } - extractedSignedHeaders := make(http.Header) - for _, header := range signedHeaders { - // `host` will not be found in the headers, can be found in r.Host. - // but its alway necessary that the list of signed headers containing host in it. 
- val, ok := reqHeaders[http.CanonicalHeaderKey(header)] - if !ok { - // try to set headers from Query String - val, ok = reqQueries[header] - } - if ok { - extractedSignedHeaders[http.CanonicalHeaderKey(header)] = val - continue - } - switch header { - case "expect": - // Golang http server strips off 'Expect' header, if the - // client sent this as part of signed headers we need to - // handle otherwise we would see a signature mismatch. - // `aws-cli` sets this as part of signed headers. - // - // According to - // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20 - // Expect header is always of form: - // - // Expect = "Expect" ":" 1#expectation - // expectation = "100-continue" | expectation-extension - // - // So it safe to assume that '100-continue' is what would - // be sent, for the time being keep this work around. - // Adding a *TODO* to remove this later when Golang server - // doesn't filter out the 'Expect' header. - extractedSignedHeaders.Set(header, "100-continue") - case "host": - // Go http server removes "host" from Request.Header - - //extractedSignedHeaders.Set(header, r.Host) - // todo use r.Host, or filedag-web deal with - //value := strings.Split(r.Host, ":") - extractedSignedHeaders.Set(header, r.Host) - case "transfer-encoding": - // Go http server removes "host" from Request.Header - extractedSignedHeaders[http.CanonicalHeaderKey(header)] = r.TransferEncoding - case "content-length": - // Signature-V4 spec excludes Content-Length from signed headers list for signature calculation. - // But some clients deviate from this rule. Hence we consider Content-Length for signature - // calculation to be compatible with such clients. - extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) - default: - return nil, apierrors.ErrUnsignedHeaders - } - } - return extractedSignedHeaders, apierrors.ErrNone -} - -// isRequestSignatureV4 Verify if request has AWS Signature Version '4'. 
-func isRequestSignatureV4(r *http.Request) bool { - return strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) -} - -// Verify if request has AWS PreSign Version '4'. -func isRequestPresignedSignatureV4(r *http.Request) bool { - _, ok := r.URL.Query()["X-Amz-Credential"] - return ok -} - - -// List of all supported auth types. -const ( - AuthTypeUnknown AuthType = iota - AuthTypeAnonymous - AuthTypePresigned - AuthTypePresignedV2 - AuthTypePostPolicy - AuthTypeStreamingSigned - AuthTypeSigned - AuthTypeSignedV2 - AuthTypeJWT - AuthTypeSTS -) - -// GetRequestAuthType Get request authentication type. -func GetRequestAuthType(r *http.Request) AuthType { - if r.URL != nil { - var err error - r.Form, err = url.ParseQuery(r.URL.RawQuery) - if err != nil { - log.Infof("parse query failed, query: %s, error: %v", r.URL.RawQuery, err) - return AuthTypeUnknown - } - } - if isRequestSignatureV2(r) { - return AuthTypeSignedV2 - } else if isRequestPresignedSignatureV2(r) { - return AuthTypePresignedV2 - } else if isRequestSignStreamingV4(r) { - return AuthTypeStreamingSigned - } else if IsRequestSignatureV4(r) { - return AuthTypeSigned - } else if isRequestPresignedSignatureV4(r) { - return AuthTypePresigned - } else if isRequestJWT(r) { - return AuthTypeJWT - } else if isRequestPostPolicySignatureV4(r) { - return AuthTypePostPolicy - } else if _, ok := r.Form[consts.StsAction]; ok { - return AuthTypeSTS - } else if _, ok := r.Header[consts.Authorization]; !ok { - return AuthTypeAnonymous - } - return AuthTypeUnknown -} diff --git a/s3/auth/other.go b/s3/auth/other.go deleted file mode 100644 index 8832b06d1..000000000 --- a/s3/auth/other.go +++ /dev/null @@ -1 +0,0 @@ -package auth diff --git a/s3/set/stringset.go b/s3/set/stringset.go index 5e7ba6f9c..82d619e19 100644 --- a/s3/set/stringset.go +++ b/s3/set/stringset.go @@ -2,7 +2,7 @@ package set import ( "fmt" - "github.com/vmihailenco/msgpack/v5" + "github.com/vmihailenco/msgpack/v4" "sort" jsoniter 
"github.com/json-iterator/go" From d7ad67de3737cdc4b046fe6c4226db56615795cb Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Wed, 9 Aug 2023 10:19:44 +0800 Subject: [PATCH 011/139] chore: --- {s3 => s3d}/action/action.go | 0 {s3 => s3d}/action/action_test.go | 0 {s3 => s3d}/apierrors/errors.go | 0 {s3 => s3d}/apierrors/s3_error.go | 0 {s3 => s3d}/apierrors/s3api_errors.go | 0 {s3 => s3d}/auth/cred.go | 0 {s3 => s3d}/auth/service.go | 0 {s3 => s3d}/auth/service_instance.go | 0 {s3 => s3d}/auth/service_interface.go | 0 {s3 => s3d}/auth/service_test.go | 0 {s3 => s3d}/auth/signature-v4-parser.go | 0 {s3 => s3d}/auth/signature-v4-utils.go | 0 {s3 => s3d}/auth/signature-v4.go | 0 {s3 => s3d}/consts/consts.go | 0 {s3 => s3d}/set/match.go | 0 {s3 => s3d}/set/match_test.go | 0 {s3 => s3d}/set/stringset.go | 0 {s3 => s3d}/set/stringset_test.go | 0 {s3 => s3d}/utils/bgcontext.go | 0 {s3 => s3d}/utils/encode.go | 0 {s3 => s3d}/utils/hash/errors.go | 0 {s3 => s3d}/utils/hash/reader.go | 0 {s3 => s3d}/utils/ip.go | 0 {s3 => s3d}/utils/levels.go | 0 {s3 => s3d}/utils/signature.go | 0 {s3 => s3d}/utils/utils.go | 0 {s3 => s3d}/utils/xml.go | 0 27 files changed, 0 insertions(+), 0 deletions(-) rename {s3 => s3d}/action/action.go (100%) rename {s3 => s3d}/action/action_test.go (100%) rename {s3 => s3d}/apierrors/errors.go (100%) rename {s3 => s3d}/apierrors/s3_error.go (100%) rename {s3 => s3d}/apierrors/s3api_errors.go (100%) rename {s3 => s3d}/auth/cred.go (100%) rename {s3 => s3d}/auth/service.go (100%) rename {s3 => s3d}/auth/service_instance.go (100%) rename {s3 => s3d}/auth/service_interface.go (100%) rename {s3 => s3d}/auth/service_test.go (100%) rename {s3 => s3d}/auth/signature-v4-parser.go (100%) rename {s3 => s3d}/auth/signature-v4-utils.go (100%) rename {s3 => s3d}/auth/signature-v4.go (100%) rename {s3 => s3d}/consts/consts.go (100%) rename {s3 => s3d}/set/match.go (100%) rename {s3 => s3d}/set/match_test.go (100%) rename {s3 => s3d}/set/stringset.go 
(100%) rename {s3 => s3d}/set/stringset_test.go (100%) rename {s3 => s3d}/utils/bgcontext.go (100%) rename {s3 => s3d}/utils/encode.go (100%) rename {s3 => s3d}/utils/hash/errors.go (100%) rename {s3 => s3d}/utils/hash/reader.go (100%) rename {s3 => s3d}/utils/ip.go (100%) rename {s3 => s3d}/utils/levels.go (100%) rename {s3 => s3d}/utils/signature.go (100%) rename {s3 => s3d}/utils/utils.go (100%) rename {s3 => s3d}/utils/xml.go (100%) diff --git a/s3/action/action.go b/s3d/action/action.go similarity index 100% rename from s3/action/action.go rename to s3d/action/action.go diff --git a/s3/action/action_test.go b/s3d/action/action_test.go similarity index 100% rename from s3/action/action_test.go rename to s3d/action/action_test.go diff --git a/s3/apierrors/errors.go b/s3d/apierrors/errors.go similarity index 100% rename from s3/apierrors/errors.go rename to s3d/apierrors/errors.go diff --git a/s3/apierrors/s3_error.go b/s3d/apierrors/s3_error.go similarity index 100% rename from s3/apierrors/s3_error.go rename to s3d/apierrors/s3_error.go diff --git a/s3/apierrors/s3api_errors.go b/s3d/apierrors/s3api_errors.go similarity index 100% rename from s3/apierrors/s3api_errors.go rename to s3d/apierrors/s3api_errors.go diff --git a/s3/auth/cred.go b/s3d/auth/cred.go similarity index 100% rename from s3/auth/cred.go rename to s3d/auth/cred.go diff --git a/s3/auth/service.go b/s3d/auth/service.go similarity index 100% rename from s3/auth/service.go rename to s3d/auth/service.go diff --git a/s3/auth/service_instance.go b/s3d/auth/service_instance.go similarity index 100% rename from s3/auth/service_instance.go rename to s3d/auth/service_instance.go diff --git a/s3/auth/service_interface.go b/s3d/auth/service_interface.go similarity index 100% rename from s3/auth/service_interface.go rename to s3d/auth/service_interface.go diff --git a/s3/auth/service_test.go b/s3d/auth/service_test.go similarity index 100% rename from s3/auth/service_test.go rename to 
s3d/auth/service_test.go diff --git a/s3/auth/signature-v4-parser.go b/s3d/auth/signature-v4-parser.go similarity index 100% rename from s3/auth/signature-v4-parser.go rename to s3d/auth/signature-v4-parser.go diff --git a/s3/auth/signature-v4-utils.go b/s3d/auth/signature-v4-utils.go similarity index 100% rename from s3/auth/signature-v4-utils.go rename to s3d/auth/signature-v4-utils.go diff --git a/s3/auth/signature-v4.go b/s3d/auth/signature-v4.go similarity index 100% rename from s3/auth/signature-v4.go rename to s3d/auth/signature-v4.go diff --git a/s3/consts/consts.go b/s3d/consts/consts.go similarity index 100% rename from s3/consts/consts.go rename to s3d/consts/consts.go diff --git a/s3/set/match.go b/s3d/set/match.go similarity index 100% rename from s3/set/match.go rename to s3d/set/match.go diff --git a/s3/set/match_test.go b/s3d/set/match_test.go similarity index 100% rename from s3/set/match_test.go rename to s3d/set/match_test.go diff --git a/s3/set/stringset.go b/s3d/set/stringset.go similarity index 100% rename from s3/set/stringset.go rename to s3d/set/stringset.go diff --git a/s3/set/stringset_test.go b/s3d/set/stringset_test.go similarity index 100% rename from s3/set/stringset_test.go rename to s3d/set/stringset_test.go diff --git a/s3/utils/bgcontext.go b/s3d/utils/bgcontext.go similarity index 100% rename from s3/utils/bgcontext.go rename to s3d/utils/bgcontext.go diff --git a/s3/utils/encode.go b/s3d/utils/encode.go similarity index 100% rename from s3/utils/encode.go rename to s3d/utils/encode.go diff --git a/s3/utils/hash/errors.go b/s3d/utils/hash/errors.go similarity index 100% rename from s3/utils/hash/errors.go rename to s3d/utils/hash/errors.go diff --git a/s3/utils/hash/reader.go b/s3d/utils/hash/reader.go similarity index 100% rename from s3/utils/hash/reader.go rename to s3d/utils/hash/reader.go diff --git a/s3/utils/ip.go b/s3d/utils/ip.go similarity index 100% rename from s3/utils/ip.go rename to s3d/utils/ip.go diff --git 
a/s3/utils/levels.go b/s3d/utils/levels.go similarity index 100% rename from s3/utils/levels.go rename to s3d/utils/levels.go diff --git a/s3/utils/signature.go b/s3d/utils/signature.go similarity index 100% rename from s3/utils/signature.go rename to s3d/utils/signature.go diff --git a/s3/utils/utils.go b/s3d/utils/utils.go similarity index 100% rename from s3/utils/utils.go rename to s3d/utils/utils.go diff --git a/s3/utils/xml.go b/s3d/utils/xml.go similarity index 100% rename from s3/utils/xml.go rename to s3d/utils/xml.go From 21d4705425a51d24cb4b6c4838b4d0f13913eacd Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Wed, 9 Aug 2023 10:20:36 +0800 Subject: [PATCH 012/139] chore: --- s3d/action/action.go | 36 ++++++++++++++-------------- s3d/auth/cred.go | 4 ++-- s3d/auth/service_instance.go | 2 +- s3d/auth/service_interface.go | 2 +- s3d/auth/signature-v4-parser.go | 4 ++-- s3d/auth/signature-v4-utils.go | 6 ++--- s3d/auth/signature-v4.go | 10 ++++---- s3d/set/match_test.go | 42 ++++++++++++++++----------------- 8 files changed, 53 insertions(+), 53 deletions(-) diff --git a/s3d/action/action.go b/s3d/action/action.go index a354fcc6d..e6225fcca 100644 --- a/s3d/action/action.go +++ b/s3d/action/action.go @@ -8,62 +8,62 @@ const ( //--- bucket // CreateBucketAction - CreateBucket Rest API action. - CreateBucketAction = "s3:CreateBucket" + CreateBucketAction = "s3d:CreateBucket" // HeadBucketAction - HeadBucket Rest API action. - HeadBucketAction = "s3:HeadBucket" + HeadBucketAction = "s3d:HeadBucket" // ListBucketAction - ListBucket Rest API action. - ListBucketAction = "s3:ListBucket" + ListBucketAction = "s3d:ListBucket" // DeleteBucketAction - DeleteBucket Rest API action. - DeleteBucketAction = "s3:DeleteBucket" + DeleteBucketAction = "s3d:DeleteBucket" // PutBucketAclAction - PutBucketAcl Rest API action. - PutBucketAclAction = "s3:PutBucketAcl" + PutBucketAclAction = "s3d:PutBucketAcl" // GetBucketAclAction - GetBucketAcl Rest API action. 
- GetBucketAclAction = "s3:GetBucketAcl" + GetBucketAclAction = "s3d:GetBucketAcl" //--- object // ListObjectsAction - ListObjects Rest API action. - ListObjectsAction = "s3:ListObjects" + ListObjectsAction = "s3d:ListObjects" // ListObjectsV2Action - ListObjectsV2 Rest API action. - ListObjectsV2Action = "s3:ListObjectsV2" + ListObjectsV2Action = "s3d:ListObjectsV2" // HeadObjectAction - HeadObject Rest API action. - HeadObjectAction = "s3:HeadObject" + HeadObjectAction = "s3d:HeadObject" // PutObjectAction - PutObject Rest API action. - PutObjectAction = "s3:PutObject" + PutObjectAction = "s3d:PutObject" // GetObjectAction - GetObject Rest API action. - GetObjectAction = "s3:GetObject" + GetObjectAction = "s3d:GetObject" // CopyObjectAction - CopyObject Rest API action. - CopyObjectAction = "s3:CopyObject" + CopyObjectAction = "s3d:CopyObject" // DeleteObjectAction - DeleteObject Rest API action. - DeleteObjectAction = "s3:DeleteObject" + DeleteObjectAction = "s3d:DeleteObject" // DeleteObjectsAction - DeleteObjects Rest API action. - DeleteObjectsAction = "s3:DeleteObjects" + DeleteObjectsAction = "s3d:DeleteObjects" //--- multipart upload // CreateMultipartUploadAction - CreateMultipartUpload Rest API action. - CreateMultipartUploadAction Action = "s3:CreateMultipartUpload" + CreateMultipartUploadAction Action = "s3d:CreateMultipartUpload" // AbortMultipartUploadAction - AbortMultipartUpload Rest API action. - AbortMultipartUploadAction Action = "s3:AbortMultipartUpload" + AbortMultipartUploadAction Action = "s3d:AbortMultipartUpload" // CompleteMultipartUploadAction - CompleteMultipartUpload Rest API action. - CompleteMultipartUploadAction Action = "s3:CompleteMultipartUpload" + CompleteMultipartUploadAction Action = "s3d:CompleteMultipartUpload" // UploadPartAction - UploadPartUpload Rest API action. - UploadPartAction Action = "s3:UploadPartUpload" + UploadPartAction Action = "s3d:UploadPartUpload" ) // SupportedActions List of all supported actions. 
diff --git a/s3d/auth/cred.go b/s3d/auth/cred.go index d43adadd2..44b488772 100644 --- a/s3d/auth/cred.go +++ b/s3d/auth/cred.go @@ -1,7 +1,7 @@ package auth import ( - "github.com/bittorrent/go-btfs/s3/apierrors" + "github.com/bittorrent/go-btfs/s3d/apierrors" "time" ) @@ -44,7 +44,7 @@ func CheckAccessKeyValid(accessKey string) (*Credentials, apierrors.ErrorCode) { const ( // Minimum length for access key. accessKeyMinLen = 3 - + // Maximum length for access key. // There is no max length enforcement for access keys accessKeyMaxLen = 20 diff --git a/s3d/auth/service_instance.go b/s3d/auth/service_instance.go index de5e1687a..d52a7aab9 100644 --- a/s3d/auth/service_instance.go +++ b/s3d/auth/service_instance.go @@ -1,7 +1,7 @@ package auth import ( - "github.com/bittorrent/go-btfs/s3/apierrors" + "github.com/bittorrent/go-btfs/s3d/apierrors" "net/http" ) diff --git a/s3d/auth/service_interface.go b/s3d/auth/service_interface.go index 2295095e6..586a03055 100644 --- a/s3d/auth/service_interface.go +++ b/s3d/auth/service_interface.go @@ -1,7 +1,7 @@ package auth import ( - "github.com/bittorrent/go-btfs/s3/apierrors" + "github.com/bittorrent/go-btfs/s3d/apierrors" "net/http" ) diff --git a/s3d/auth/signature-v4-parser.go b/s3d/auth/signature-v4-parser.go index 559105b4f..1813e1e1c 100644 --- a/s3d/auth/signature-v4-parser.go +++ b/s3d/auth/signature-v4-parser.go @@ -22,8 +22,8 @@ import ( "strings" "time" - "github.com/bittorrent/go-btfs/s3/apierrors" - "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3d/apierrors" + "github.com/bittorrent/go-btfs/s3d/consts" ) // credentialHeader data type represents structured form of Credential diff --git a/s3d/auth/signature-v4-utils.go b/s3d/auth/signature-v4-utils.go index 2c72776cc..f1ff4e0de 100644 --- a/s3d/auth/signature-v4-utils.go +++ b/s3d/auth/signature-v4-utils.go @@ -23,8 +23,8 @@ import ( "strconv" "strings" - "github.com/bittorrent/go-btfs/s3/apierrors" - 
"github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3d/apierrors" + "github.com/bittorrent/go-btfs/s3d/consts" ) // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the @@ -39,7 +39,7 @@ func isValidRegion(reqRegion string, confRegion string) bool { if confRegion == "US" { confRegion = consts.DefaultRegion } - // Some older s3 clients set region as "US" instead of + // Some older s3d clients set region as "US" instead of // globalDefaultRegion, handle it. if reqRegion == "US" { reqRegion = consts.DefaultRegion diff --git a/s3d/auth/signature-v4.go b/s3d/auth/signature-v4.go index 7ff394b8c..ca160a605 100644 --- a/s3d/auth/signature-v4.go +++ b/s3d/auth/signature-v4.go @@ -24,10 +24,10 @@ import ( "strconv" "time" - "github.com/bittorrent/go-btfs/s3/apierrors" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/set" - "github.com/bittorrent/go-btfs/s3/utils" + "github.com/bittorrent/go-btfs/s3d/apierrors" + "github.com/bittorrent/go-btfs/s3d/consts" + "github.com/bittorrent/go-btfs/s3d/set" + "github.com/bittorrent/go-btfs/s3d/utils" ) // AWS Signature Version '4' constants. @@ -40,7 +40,7 @@ const ( type serviceType string const ( - ServiceS3 serviceType = "s3" + ServiceS3 serviceType = "s3d" ////ServiceSTS STS //ServiceSTS serviceType = "sts" ) diff --git a/s3d/set/match_test.go b/s3d/set/match_test.go index eec6df487..bd903c55b 100644 --- a/s3d/set/match_test.go +++ b/s3d/set/match_test.go @@ -18,14 +18,14 @@ func TestMatch(t *testing.T) { // Test case with pattern "*". Expected to match any text. { pattern: "*", - text: "s3:GetObject", + text: "s3d:GetObject", matched: true, }, // Test case - 2. // Test case with empty pattern. This only matches empty string. { pattern: "", - text: "s3:GetObject", + text: "s3d:GetObject", matched: false, }, // Test case - 3. @@ -38,29 +38,29 @@ func TestMatch(t *testing.T) { // Test case - 4. // Test case with single "*" at the end. 
{ - pattern: "s3:*", - text: "s3:ListMultipartUploadParts", + pattern: "s3d:*", + text: "s3d:ListMultipartUploadParts", matched: true, }, // Test case - 5. // Test case with a no "*". In this case the pattern and text should be the same. { - pattern: "s3:ListBucketMultipartUploads", - text: "s3:ListBucket", + pattern: "s3d:ListBucketMultipartUploads", + text: "s3d:ListBucket", matched: false, }, // Test case - 6. // Test case with a no "*". In this case the pattern and text should be the same. { - pattern: "s3:ListBucket", - text: "s3:ListBucket", + pattern: "s3d:ListBucket", + text: "s3d:ListBucket", matched: true, }, // Test case - 7. // Test case with a no "*". In this case the pattern and text should be the same. { - pattern: "s3:ListBucketMultipartUploads", - text: "s3:ListBucketMultipartUploads", + pattern: "s3d:ListBucketMultipartUploads", + text: "s3d:ListBucketMultipartUploads", matched: true, }, // Test case - 8. @@ -194,7 +194,7 @@ func TestMatch(t *testing.T) { matched: true, }, // Test case 27-28. - // '?' matches '/' too. (works with s3). + // '?' matches '/' too. (works with s3d). // This is because the namespace is considered flat. // "abc?efg" matches both "abcdefg" and "abc/efg". { @@ -375,14 +375,14 @@ func TestMatchSimple(t *testing.T) { // Test case with pattern "*". Expected to match any text. { pattern: "*", - text: "s3:GetObject", + text: "s3d:GetObject", matched: true, }, // Test case - 2. // Test case with empty pattern. This only matches empty string. { pattern: "", - text: "s3:GetObject", + text: "s3d:GetObject", matched: false, }, // Test case - 3. @@ -395,29 +395,29 @@ func TestMatchSimple(t *testing.T) { // Test case - 4. // Test case with single "*" at the end. { - pattern: "s3:*", - text: "s3:ListMultipartUploadParts", + pattern: "s3d:*", + text: "s3d:ListMultipartUploadParts", matched: true, }, // Test case - 5. // Test case with a no "*". In this case the pattern and text should be the same. 
{ - pattern: "s3:ListBucketMultipartUploads", - text: "s3:ListBucket", + pattern: "s3d:ListBucketMultipartUploads", + text: "s3d:ListBucket", matched: false, }, // Test case - 6. // Test case with a no "*". In this case the pattern and text should be the same. { - pattern: "s3:ListBucket", - text: "s3:ListBucket", + pattern: "s3d:ListBucket", + text: "s3d:ListBucket", matched: true, }, // Test case - 7. // Test case with a no "*". In this case the pattern and text should be the same. { - pattern: "s3:ListBucketMultipartUploads", - text: "s3:ListBucketMultipartUploads", + pattern: "s3d:ListBucketMultipartUploads", + text: "s3d:ListBucketMultipartUploads", matched: true, }, // Test case - 8. From 7d0c76fcb7c59dd4c46b11872956ac3ba2e4e791 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Wed, 9 Aug 2023 14:34:01 +0800 Subject: [PATCH 013/139] chore: --- s3d/auth/cred_temp.go | 69 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 s3d/auth/cred_temp.go diff --git a/s3d/auth/cred_temp.go b/s3d/auth/cred_temp.go new file mode 100644 index 000000000..44b488772 --- /dev/null +++ b/s3d/auth/cred_temp.go @@ -0,0 +1,69 @@ +package auth + +import ( + "github.com/bittorrent/go-btfs/s3d/apierrors" + "time" +) + +var timeSentinel = time.Unix(0, 0).UTC() + +// Credentials holds access and secret keys. +type Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + CreateTime time.Time `xml:"CreateTime" json:"createTime,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken"` + Status string `xml:"-" json:"status,omitempty"` + ParentUser string `xml:"-" json:"parentUser,omitempty"` +} + +// IsValid - returns whether credential is valid or not. 
+func (cred *Credentials) IsValid() bool { + return true +} + +// IsExpired - returns whether Credential is expired or not. +func (cred *Credentials) IsExpired() bool { + return false +} + +func CheckAccessKeyValid(accessKey string) (*Credentials, apierrors.ErrorCode) { + + ////check it + //cred, bl: = mp[accessKey] + //if bl { + // return cred, nil + //} else { + // return nil, errors.New("node found accessKey! ") + //} + + return &Credentials{AccessKey: accessKey}, apierrors.ErrNone +} + +const ( + // Minimum length for access key. + accessKeyMinLen = 3 + + // Maximum length for access key. + // There is no max length enforcement for access keys + accessKeyMaxLen = 20 + + // Minimum length for secret key for both server and gateway mode. + secretKeyMinLen = 8 + + // Maximum secret key length , this + // is used when autogenerating new credentials. + // There is no max length enforcement for secret keys + secretKeyMaxLen = 40 +) + +// IsAccessKeyValid - validate access key for right length. +func IsAccessKeyValid(accessKey string) bool { + return len(accessKey) >= accessKeyMinLen +} + +// IsSecretKeyValid - validate secret key for right length. +func IsSecretKeyValid(secretKey string) bool { + return len(secretKey) >= secretKeyMinLen +} From 25baad402d339b9d06a3accb1908b75a35c16573 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Wed, 9 Aug 2023 14:34:16 +0800 Subject: [PATCH 014/139] chore: --- s3d/auth/cred.go | 69 ------------------------------------------------ 1 file changed, 69 deletions(-) delete mode 100644 s3d/auth/cred.go diff --git a/s3d/auth/cred.go b/s3d/auth/cred.go deleted file mode 100644 index 44b488772..000000000 --- a/s3d/auth/cred.go +++ /dev/null @@ -1,69 +0,0 @@ -package auth - -import ( - "github.com/bittorrent/go-btfs/s3d/apierrors" - "time" -) - -var timeSentinel = time.Unix(0, 0).UTC() - -// Credentials holds access and secret keys. 
-type Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - CreateTime time.Time `xml:"CreateTime" json:"createTime,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken"` - Status string `xml:"-" json:"status,omitempty"` - ParentUser string `xml:"-" json:"parentUser,omitempty"` -} - -// IsValid - returns whether credential is valid or not. -func (cred *Credentials) IsValid() bool { - return true -} - -// IsExpired - returns whether Credential is expired or not. -func (cred *Credentials) IsExpired() bool { - return false -} - -func CheckAccessKeyValid(accessKey string) (*Credentials, apierrors.ErrorCode) { - - ////check it - //cred, bl: = mp[accessKey] - //if bl { - // return cred, nil - //} else { - // return nil, errors.New("node found accessKey! ") - //} - - return &Credentials{AccessKey: accessKey}, apierrors.ErrNone -} - -const ( - // Minimum length for access key. - accessKeyMinLen = 3 - - // Maximum length for access key. - // There is no max length enforcement for access keys - accessKeyMaxLen = 20 - - // Minimum length for secret key for both server and gateway mode. - secretKeyMinLen = 8 - - // Maximum secret key length , this - // is used when autogenerating new credentials. - // There is no max length enforcement for secret keys - secretKeyMaxLen = 40 -) - -// IsAccessKeyValid - validate access key for right length. -func IsAccessKeyValid(accessKey string) bool { - return len(accessKey) >= accessKeyMinLen -} - -// IsSecretKeyValid - validate secret key for right length. 
-func IsSecretKeyValid(secretKey string) bool { - return len(secretKey) >= secretKeyMinLen -} From 7711c8256a7952d23d831523d93c276369c9deeb Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Thu, 10 Aug 2023 10:56:57 +0800 Subject: [PATCH 015/139] feat: add store --- s3d/action/action.go | 166 +++--------------------- s3d/auth/auth_type.go | 107 ++++++++++++++++ s3d/auth/check_handler_auth.go | 171 +++++++++++++++++++++++++ s3d/auth/service_instance.go | 28 ++--- s3d/auth/service_interface.go | 3 +- s3d/auth/signature-v4-parser.go | 32 +++++ s3d/auth/signature-v4-utils.go | 43 +++++++ s3d/etag/etag.go | 194 ++++++++++++++++++++++++++++ s3d/etag/etag_test.go | 210 +++++++++++++++++++++++++++++++ s3d/etag/reader.go | 139 ++++++++++++++++++++ s3d/lock/lock.go | 217 ++++++++++++++++++++++++++++++++ s3d/lock/rwmutex.go | 154 +++++++++++++++++++++++ s3d/policy/policy.go | 74 +++++++++++ s3d/store/bucket.go | 150 ++++++++++++++++++++++ s3d/store/bucket_acl.go | 34 +++++ s3d/store/err.go | 34 +++++ s3d/store/service.go | 1 + s3d/store/service_instance.go | 5 + s3d/store/service_interface.go | 16 +++ s3d/store/service_test.go | 1 + s3d/uleveldb/leveldb.go | 117 +++++++++++++++++ s3d/uleveldb/uleveldb_test.go | 24 ++++ 22 files changed, 1757 insertions(+), 163 deletions(-) create mode 100644 s3d/auth/auth_type.go create mode 100644 s3d/auth/check_handler_auth.go create mode 100644 s3d/etag/etag.go create mode 100644 s3d/etag/etag_test.go create mode 100644 s3d/etag/reader.go create mode 100644 s3d/lock/lock.go create mode 100644 s3d/lock/rwmutex.go create mode 100644 s3d/policy/policy.go create mode 100644 s3d/store/bucket.go create mode 100644 s3d/store/bucket_acl.go create mode 100644 s3d/store/err.go create mode 100644 s3d/store/service.go create mode 100644 s3d/store/service_instance.go create mode 100644 s3d/store/service_interface.go create mode 100644 s3d/store/service_test.go create mode 100644 s3d/uleveldb/leveldb.go create mode 100644 
s3d/uleveldb/uleveldb_test.go diff --git a/s3d/action/action.go b/s3d/action/action.go index e6225fcca..dedaaa599 100644 --- a/s3d/action/action.go +++ b/s3d/action/action.go @@ -1,5 +1,7 @@ package action +import "github.com/bittorrent/go-btfs/s3d/set" + type Action string // ActionSet - set of actions. @@ -102,8 +104,24 @@ func (action Action) IsValid() bool { // Match - matches action name with action patter. func (action Action) Match(a Action) bool { - //return set.Match(string(action), string(a)) - return true + return set.Match(string(action), string(a)) + //return true +} + +// List of all supported object actions. +var supportedBucketActions = map[Action]struct{}{ + CreateBucketAction: {}, + HeadBucketAction: {}, + ListBucketAction: {}, + DeleteBucketAction: {}, + PutBucketAclAction: {}, + GetBucketAclAction: {}, +} + +// IsBucketAction - returns whether action is bucket type or not. +func (action Action) IsBucketAction() bool { + _, ok := supportedBucketActions[action] + return ok } // List of all supported object actions. 
@@ -128,147 +146,3 @@ func (action Action) IsObjectAction() bool { _, ok := supportedObjectActions[action] return ok } - -//func createActionConditionKeyMap() map[Action]condition.KeySet { -// commonKeys := []condition.Key{} -// for _, keyName := range condition.CommonKeys { -// commonKeys = append(commonKeys, keyName.ToKey()) -// } -// -// return map[Action]condition.KeySet{ -// AbortMultipartUploadAction: condition.NewKeySet(commonKeys...), -// -// CreateBucketAction: condition.NewKeySet(commonKeys...), -// -// DeleteObjectAction: condition.NewKeySet(commonKeys...), -// -// GetBucketLocationAction: condition.NewKeySet(commonKeys...), -// -// GetBucketPolicyStatusAction: condition.NewKeySet(commonKeys...), -// -// GetObjectAction: condition.NewKeySet( -// append([]condition.Key{ -// condition.S3XAmzServerSideEncryption.ToKey(), -// condition.S3XAmzServerSideEncryptionCustomerAlgorithm.ToKey(), -// }, commonKeys...)...), -// -// HeadBucketAction: condition.NewKeySet(commonKeys...), -// -// ListAllMyBucketsAction: condition.NewKeySet(commonKeys...), -// -// ListBucketAction: condition.NewKeySet( -// append([]condition.Key{ -// condition.S3Prefix.ToKey(), -// condition.S3Delimiter.ToKey(), -// condition.S3MaxKeys.ToKey(), -// }, commonKeys...)...), -// -// ListBucketVersionsAction: condition.NewKeySet( -// append([]condition.Key{ -// condition.S3Prefix.ToKey(), -// condition.S3Delimiter.ToKey(), -// condition.S3MaxKeys.ToKey(), -// }, commonKeys...)...), -// -// ListBucketMultipartUploadsAction: condition.NewKeySet(commonKeys...), -// -// ListenNotificationAction: condition.NewKeySet(commonKeys...), -// -// ListenBucketNotificationAction: condition.NewKeySet(commonKeys...), -// -// ListMultipartUploadPartsAction: condition.NewKeySet(commonKeys...), -// -// PutObjectAction: condition.NewKeySet( -// append([]condition.Key{ -// condition.S3XAmzCopySource.ToKey(), -// condition.S3XAmzServerSideEncryption.ToKey(), -// 
condition.S3XAmzServerSideEncryptionCustomerAlgorithm.ToKey(), -// condition.S3XAmzMetadataDirective.ToKey(), -// condition.S3XAmzStorageClass.ToKey(), -// condition.S3ObjectLockRetainUntilDate.ToKey(), -// condition.S3ObjectLockMode.ToKey(), -// condition.S3ObjectLockLegalHold.ToKey(), -// condition.S3RequestObjectTagKeys.ToKey(), -// condition.S3RequestObjectTag.ToKey(), -// }, commonKeys...)...), -// -// // https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html -// // LockLegalHold is not supported with PutObjectRetentionAction -// PutObjectRetentionAction: condition.NewKeySet( -// append([]condition.Key{ -// condition.S3ObjectLockRemainingRetentionDays.ToKey(), -// condition.S3ObjectLockRetainUntilDate.ToKey(), -// condition.S3ObjectLockMode.ToKey(), -// }, commonKeys...)...), -// -// GetObjectRetentionAction: condition.NewKeySet(commonKeys...), -// PutObjectLegalHoldAction: condition.NewKeySet( -// append([]condition.Key{ -// condition.S3ObjectLockLegalHold.ToKey(), -// }, commonKeys...)...), -// GetObjectLegalHoldAction: condition.NewKeySet(commonKeys...), -// -// // https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html -// BypassGovernanceRetentionAction: condition.NewKeySet( -// append([]condition.Key{ -// condition.S3ObjectLockRemainingRetentionDays.ToKey(), -// condition.S3ObjectLockRetainUntilDate.ToKey(), -// condition.S3ObjectLockMode.ToKey(), -// condition.S3ObjectLockLegalHold.ToKey(), -// }, commonKeys...)...), -// -// GetBucketObjectLockConfigurationAction: condition.NewKeySet(commonKeys...), -// PutBucketObjectLockConfigurationAction: condition.NewKeySet(commonKeys...), -// GetBucketTaggingAction: condition.NewKeySet(commonKeys...), -// PutBucketTaggingAction: condition.NewKeySet( -// append([]condition.Key{ -// condition.S3RequestObjectTagKeys.ToKey(), -// condition.S3RequestObjectTag.ToKey(), -// }, commonKeys...)...), -// PutObjectTaggingAction: condition.NewKeySet( -// append([]condition.Key{ -// 
condition.S3RequestObjectTagKeys.ToKey(), -// condition.S3RequestObjectTag.ToKey(), -// }, commonKeys...)...), -// GetObjectTaggingAction: condition.NewKeySet(commonKeys...), -// DeleteObjectTaggingAction: condition.NewKeySet( -// append([]condition.Key{ -// condition.S3RequestObjectTagKeys.ToKey(), -// condition.S3RequestObjectTag.ToKey(), -// }, commonKeys...)...), -// PutObjectVersionTaggingAction: condition.NewKeySet( -// append([]condition.Key{ -// condition.S3VersionID.ToKey(), -// condition.S3RequestObjectTagKeys.ToKey(), -// condition.S3RequestObjectTag.ToKey(), -// }, commonKeys...)...), -// GetObjectVersionAction: condition.NewKeySet( -// append([]condition.Key{ -// condition.S3VersionID.ToKey(), -// }, commonKeys...)...), -// GetObjectVersionTaggingAction: condition.NewKeySet( -// append([]condition.Key{ -// condition.S3VersionID.ToKey(), -// }, commonKeys...)...), -// DeleteObjectVersionAction: condition.NewKeySet( -// append([]condition.Key{ -// condition.S3VersionID.ToKey(), -// }, commonKeys...)...), -// DeleteObjectVersionTaggingAction: condition.NewKeySet( -// append([]condition.Key{ -// condition.S3VersionID.ToKey(), -// condition.S3RequestObjectTagKeys.ToKey(), -// condition.S3RequestObjectTag.ToKey(), -// }, commonKeys...)...), -// GetReplicationConfigurationAction: condition.NewKeySet(commonKeys...), -// PutReplicationConfigurationAction: condition.NewKeySet(commonKeys...), -// ReplicateObjectAction: condition.NewKeySet(commonKeys...), -// ReplicateDeleteAction: condition.NewKeySet(commonKeys...), -// ReplicateTagsAction: condition.NewKeySet(commonKeys...), -// GetObjectVersionForReplicationAction: condition.NewKeySet(commonKeys...), -// RestoreObjectAction: condition.NewKeySet(commonKeys...), -// } -//} -// -//// ActionConditionKeyMap - holds mapping of supported condition key for an action. 
-//var ActionConditionKeyMap = createActionConditionKeyMap() diff --git a/s3d/auth/auth_type.go b/s3d/auth/auth_type.go new file mode 100644 index 000000000..74ce2e913 --- /dev/null +++ b/s3d/auth/auth_type.go @@ -0,0 +1,107 @@ +package auth + +import ( + "net/http" + "net/url" + "strings" + + "github.com/bittorrent/go-btfs/s3d/consts" +) + +// Verify if request has JWT. +func isRequestJWT(r *http.Request) bool { + return strings.HasPrefix(r.Header.Get("Authorization"), "Bearer") +} + +// IsRequestSignatureV4 Verify if request has AWS Signature Version '4'. +func IsRequestSignatureV4(r *http.Request) bool { + return strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) +} + +// Signature and API related constants. +const ( + signV2Algorithm = "AWS" +) + +// Verify if request has AWS Signature Version '2'. +func isRequestSignatureV2(r *http.Request) bool { + return !strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) && + strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm) +} + +// Verify if request has AWS PreSign Version '4'. already exist in signature-v4-utils +//func isRequestPresignedSignatureV4(r *http.Request) bool { +// _, ok := r.URL.Query()["X-Amz-Credential"] +// return ok +//} + +// Verify request has AWS PreSign Version '2'. +func isRequestPresignedSignatureV2(r *http.Request) bool { + _, ok := r.URL.Query()["AWSAccessKeyId"] + return ok +} + +// Verify if request has AWS Post policy Signature Version '4'. +func isRequestPostPolicySignatureV4(r *http.Request) bool { + return strings.Contains(r.Header.Get("Content-Type"), "multipart/form-data") && + r.Method == http.MethodPost +} + +// Verify if the request has AWS Streaming Signature Version '4'. This is only valid for 'PUT' operation. +func isRequestSignStreamingV4(r *http.Request) bool { + return r.Header.Get("x-amz-content-sha256") == consts.StreamingContentSHA256 && + r.Method == http.MethodPut +} + +// AuthType Authorization type. 
+type AuthType int + +// List of all supported auth types. +const ( + AuthTypeUnknown AuthType = iota + AuthTypeAnonymous + AuthTypePresigned + AuthTypePresignedV2 + AuthTypePostPolicy + AuthTypeStreamingSigned + AuthTypeSigned + AuthTypeSignedV2 + AuthTypeJWT + AuthTypeSTS +) + +// GetRequestAuthType Get request authentication type. +func GetRequestAuthType(r *http.Request) AuthType { + if r.URL != nil { + var err error + r.Form, err = url.ParseQuery(r.URL.RawQuery) + if err != nil { + //log.Infof("parse query failed, query: %s, error: %v", r.URL.RawQuery, err) + return AuthTypeUnknown + } + } + if isRequestSignatureV2(r) { + return AuthTypeSignedV2 + } else if isRequestPresignedSignatureV2(r) { + return AuthTypePresignedV2 + } else if isRequestSignStreamingV4(r) { + return AuthTypeStreamingSigned + } else if IsRequestSignatureV4(r) { + return AuthTypeSigned + } else if isRequestPresignedSignatureV4(r) { + return AuthTypePresigned + } else if isRequestJWT(r) { + return AuthTypeJWT + } else if isRequestPostPolicySignatureV4(r) { + return AuthTypePostPolicy + } else if _, ok := r.Form[consts.StsAction]; ok { + return AuthTypeSTS + } else if _, ok := r.Header[consts.Authorization]; !ok { + return AuthTypeAnonymous + } + return AuthTypeUnknown +} + +func IsAuthTypeStreamingSigned(atype AuthType) bool { + return atype == AuthTypeStreamingSigned +} diff --git a/s3d/auth/check_handler_auth.go b/s3d/auth/check_handler_auth.go new file mode 100644 index 000000000..207c0af30 --- /dev/null +++ b/s3d/auth/check_handler_auth.go @@ -0,0 +1,171 @@ +package auth + +import ( + "bytes" + "context" + "encoding/hex" + "github.com/bittorrent/go-btfs/s3d/store" + "io" + "net/http" + + s3action "github.com/bittorrent/go-btfs/s3d/action" + "github.com/bittorrent/go-btfs/s3d/apierrors" + "github.com/bittorrent/go-btfs/s3d/consts" + "github.com/bittorrent/go-btfs/s3d/etag" + "github.com/bittorrent/go-btfs/s3d/policy" + "github.com/bittorrent/go-btfs/s3d/utils/hash" +) + +// AuthSys auth 
and sign system +type AuthSys struct{} + +// NewAuthSys new an AuthSys +func NewAuthSys() *AuthSys { + return &AuthSys{} +} + +// CheckRequestAuthTypeCredential Check request auth type verifies the incoming http request +// - validates the request signature +// - validates the policy action if anonymous tests bucket policies if any, +// for authenticated requests validates IAM policies. +// +// returns APIErrorCode if any to be replied to the client. +// Additionally, returns the accessKey used in the request, and if this request is by an admin. +func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request, action s3action.Action, bucketName string, bmSys *store.BucketMetadataSys) (cred Credentials, s3Err apierrors.ErrorCode) { + //todo 是否需要判断 + if bucketName == "" { + return cred, apierrors.ErrNoSuchBucket + } + + // 1.check signature + switch GetRequestAuthType(r) { + case AuthTypeUnknown, AuthTypeStreamingSigned: + return cred, apierrors.ErrSignatureVersionNotSupported + case AuthTypePresignedV2, AuthTypeSignedV2: + return cred, apierrors.ErrSignatureVersionNotSupported + case AuthTypeSigned, AuthTypePresigned: + region := "" + if s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3); s3Err != apierrors.ErrNone { + return cred, s3Err + } + cred, s3Err = GetReqAccessKeyV4(r, region, ServiceS3) + } + if s3Err != apierrors.ErrNone { + return cred, s3Err + } + + // CreateBucketAction + if action == s3action.CreateBucketAction { + // To extract region from XML in request body, get copy of request body. + payload, err := io.ReadAll(io.LimitReader(r.Body, consts.MaxLocationConstraintSize)) + if err != nil { + //log.Errorf("ReadAll err:%v", err) + return cred, apierrors.ErrMalformedXML + } + + // Populate payload to extract location constraint. 
+ r.Body = io.NopCloser(bytes.NewReader(payload)) + //todo check HasBucket + if bmSys.HasBucket(ctx, bucketName) { + return cred, apierrors.ErrBucketAlreadyExists + } + } + + // 2.check acl + //todo 获取bucket用户信息:owner, acl + meta, err := bmSys.GetBucketMeta(ctx, bucketName) + if err != nil { + return cred, apierrors.ErrAccessDenied + } + + if policy.IsAllowed(meta.Owner == cred.AccessKey, meta.Acl, action) == false { + return cred, apierrors.ErrAccessDenied + } + + return cred, apierrors.ErrNone +} + +func (s *AuthSys) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { + sha256sum := getContentSha256Cksum(r, stype) + switch { + case IsRequestSignatureV4(r): + return DoesSignatureMatch(sha256sum, r, region, stype) + case isRequestPresignedSignatureV4(r): + return DoesPresignedSignatureMatch(sha256sum, r, region, stype) + default: + return apierrors.ErrAccessDenied + } +} + +// IsReqAuthenticated Verify if request has valid AWS Signature Version '4'. +func (s *AuthSys) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { + if errCode := s.ReqSignatureV4Verify(r, region, stype); errCode != apierrors.ErrNone { + return errCode + } + clientETag, err := etag.FromContentMD5(r.Header) + if err != nil { + return apierrors.ErrInvalidDigest + } + + // Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned) + // Do not verify 'X-Amz-Content-Sha256' if skipSHA256. 
+ var contentSHA256 []byte + if skipSHA256 := SkipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) { + if sha256Sum, ok := r.Form[consts.AmzContentSha256]; ok && len(sha256Sum) > 0 { + contentSHA256, err = hex.DecodeString(sha256Sum[0]) + if err != nil { + return apierrors.ErrContentSHA256Mismatch + } + } + } else if _, ok := r.Header[consts.AmzContentSha256]; !skipSHA256 && ok { + contentSHA256, err = hex.DecodeString(r.Header.Get(consts.AmzContentSha256)) + if err != nil || len(contentSHA256) == 0 { + return apierrors.ErrContentSHA256Mismatch + } + } + + // Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present. + // The verification happens implicit during reading. + reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1) + if err != nil { + return apierrors.ErrInternalError + } + r.Body = reader + return apierrors.ErrNone +} + +//// ValidateAdminSignature validate admin Signature +//func (s *AuthSys) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (Credentials, map[string]interface{}, bool, apierrors.ErrorCode) { +// var cred Credentials +// var owner bool +// s3Err := apierrors.ErrAccessDenied +// if _, ok := r.Header[consts.AmzContentSha256]; ok && +// GetRequestAuthType(r) == AuthTypeSigned { +// // We only support admin credentials to access admin APIs. 
+// cred, s3Err = GetReqAccessKeyV4(r, region, ServiceS3) +// if s3Err != apierrors.ErrNone { +// return cred, nil, owner, s3Err +// } +// +// // we only support V4 (no presign) with auth body +// s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3) +// } +// if s3Err != apierrors.ErrNone { +// return cred, nil, owner, s3Err +// } +// +// return cred, nil, owner, apierrors.ErrNone +//} +//// +//func (s *AuthSys) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err apierrors.ErrorCode) { +// switch GetRequestAuthType(r) { +// case AuthTypeUnknown: +// s3Err = apierrors.ErrSignatureVersionNotSupported +// case AuthTypeSignedV2, AuthTypePresignedV2: +// cred, owner, s3Err = s.getReqAccessKeyV2(r) +// case AuthTypeStreamingSigned, AuthTypePresigned, AuthTypeSigned: +// region := "" +// cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) +// } +// return +//} diff --git a/s3d/auth/service_instance.go b/s3d/auth/service_instance.go index d52a7aab9..9a56dfcce 100644 --- a/s3d/auth/service_instance.go +++ b/s3d/auth/service_instance.go @@ -1,30 +1,28 @@ package auth import ( + "context" + s3action "github.com/bittorrent/go-btfs/s3d/action" "github.com/bittorrent/go-btfs/s3d/apierrors" + "github.com/bittorrent/go-btfs/s3d/store" "net/http" ) type service struct { + au *AuthSys + bmSys *store.BucketMetadataSys } -func newService() (svc *service, err error) { - svc = &service{} +func newService(bmSys *store.BucketMetadataSys) (svc *service, err error) { + svc = &service{ + au: NewAuthSys(), + bmSys: bmSys, + } return } -func (s *service) CheckSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { - sha256sum := getContentSha256Cksum(r, stype) - switch { - case isRequestSignatureV4(r): - return DoesSignatureMatch(sha256sum, r, region, stype) - case isRequestPresignedSignatureV4(r): - return DoesPresignedSignatureMatch(sha256sum, r, region, stype) - default: - return apierrors.ErrAccessDenied - } -} 
+func (s *service) CheckSignatureAndAcl(ctx context.Context, r *http.Request, action s3action.Action, bucketName string) ( + cred Credentials, s3Error apierrors.ErrorCode) { -func (s *service) CheckACL(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { - return + return s.au.CheckRequestAuthTypeCredential(ctx, r, action, bucketName, s.bmSys) } diff --git a/s3d/auth/service_interface.go b/s3d/auth/service_interface.go index 586a03055..634ca4ba4 100644 --- a/s3d/auth/service_interface.go +++ b/s3d/auth/service_interface.go @@ -6,6 +6,5 @@ import ( ) type Service interface { - CheckSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) - CheckACL(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) + CheckSignatureAndAcl(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) } diff --git a/s3d/auth/signature-v4-parser.go b/s3d/auth/signature-v4-parser.go index 1813e1e1c..2c60d2a09 100644 --- a/s3d/auth/signature-v4-parser.go +++ b/s3d/auth/signature-v4-parser.go @@ -18,6 +18,7 @@ package auth import ( + "net/http" "net/url" "strings" "time" @@ -283,3 +284,34 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues // Return the structure here. return signV4Values, apierrors.ErrNone } + +func GetReqAccessKeyV4(r *http.Request, region string, stype serviceType) (Credentials, apierrors.ErrorCode) { + ch, s3Err := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) + if s3Err != apierrors.ErrNone { + // Strip off the Algorithm prefix. 
+ v4Auth := strings.TrimPrefix(r.Header.Get("Authorization"), signV4Algorithm) + authFields := strings.Split(strings.TrimSpace(v4Auth), ",") + if len(authFields) != 3 { + return Credentials{}, apierrors.ErrMissingFields + } + ch, s3Err = parseCredentialHeader(authFields[0], region, stype) + if s3Err != apierrors.ErrNone { + return Credentials{}, s3Err + } + } + // TODO: Why should a temporary user be replaced with the parent user's account name? + //cerd, _ := s.Iam.GetUser(r.Context(), ch.accessKey) + //if cerd.IsTemp() { + // ch.accessKey = cerd.ParentUser + //} + return checkAccessKeyValid(ch.accessKey) +} + +// check if the access key is valid and recognized, additionally +func checkAccessKeyValid(accessKey string) (Credentials, apierrors.ErrorCode) { + + //todo 根据accessKey获取accessKey + cred := Credentials{} + + return cred, apierrors.ErrNone +} diff --git a/s3d/auth/signature-v4-utils.go b/s3d/auth/signature-v4-utils.go index f1ff4e0de..3cab7aa58 100644 --- a/s3d/auth/signature-v4-utils.go +++ b/s3d/auth/signature-v4-utils.go @@ -175,3 +175,46 @@ func isRequestPresignedSignatureV4(r *http.Request) bool { _, ok := r.URL.Query()["X-Amz-Credential"] return ok } + +// SkipContentSha256Cksum returns true if caller needs to skip +// payload checksum, false if not. +func SkipContentSha256Cksum(r *http.Request) bool { + var ( + v []string + ok bool + ) + + if isRequestPresignedSignatureV4(r) { + v, ok = r.Form[consts.AmzContentSha256] + if !ok { + v, ok = r.Header[consts.AmzContentSha256] + } + } else { + v, ok = r.Header[consts.AmzContentSha256] + } + + // Skip if no header was set. + if !ok { + return true + } + + // If x-amz-content-sha256 is set and the value is not + // 'UNSIGNED-PAYLOAD' we should validate the content sha256. 
+ switch v[0] { + case unsignedPayload: + return true + case consts.EmptySHA256: + // some broken clients set empty-sha256 + // with > 0 content-length in the body, + // we should skip such clients and allow + // blindly such insecure clients only if + // S3 strict compatibility is disabled. + if r.ContentLength > 0 { + // We return true only in situations when + // deployment has asked MinIO to allow for + // such broken clients and content-length > 0. + return true + } + } + return false +} diff --git a/s3d/etag/etag.go b/s3d/etag/etag.go new file mode 100644 index 000000000..d68e73814 --- /dev/null +++ b/s3d/etag/etag.go @@ -0,0 +1,194 @@ +package etag + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "net/http" + "strconv" + "strings" +) + +// ETag is a single S3 ETag. +// +// An S3 ETag sometimes corresponds to the MD5 of +// the S3 object content. However, when an object +// is encrypted, compressed or uploaded using +// the S3 multipart API then its ETag is not +// necessarily the MD5 of the object content. +// +// For a more detailed description of S3 ETags +// take a look at the package documentation. +type ETag []byte + +// String returns the string representation of the ETag. +// +// The returned string is a hex representation of the +// binary ETag with an optional '-' suffix. +func (e ETag) String() string { + if e.IsMultipart() { + return hex.EncodeToString(e[:16]) + string(e[16:]) + } + return hex.EncodeToString(e) +} + +// IsEncrypted reports whether the ETag is encrypted. +func (e ETag) IsEncrypted() bool { + return len(e) > 16 && !bytes.ContainsRune(e, '-') +} + +// IsMultipart reports whether the ETag belongs to an +// object that has been uploaded using the S3 multipart +// API. +// An S3 multipart ETag has a - suffix. +func (e ETag) IsMultipart() bool { + return len(e) > 16 && bytes.ContainsRune(e, '-') +} + +// Parts returns the number of object parts that are +// referenced by this ETag. 
It returns 1 if the object +// has been uploaded using the S3 singlepart API. +// +// Parts may panic if the ETag is an invalid multipart +// ETag. +func (e ETag) Parts() int { + if !e.IsMultipart() { + return 1 + } + + n := bytes.IndexRune(e, '-') + parts, err := strconv.Atoi(string(e[n+1:])) + if err != nil { + panic(err) // malformed ETag + } + return parts +} + +var _ Tagger = ETag{} // compiler check + +// ETag returns the ETag itself. +// +// By providing this method ETag implements +// the Tagger interface. +func (e ETag) ETag() ETag { return e } + +// FromContentMD5 decodes and returns the Content-MD5 +// as ETag, if set. If no Content-MD5 header is set +// it returns an empty ETag and no error. +func FromContentMD5(h http.Header) (ETag, error) { + v, ok := h["Content-Md5"] + if !ok { + return nil, nil + } + if v[0] == "" { + return nil, errors.New("etag: content-md5 is set but contains no value") + } + b, err := base64.StdEncoding.Strict().DecodeString(v[0]) + if err != nil { + return nil, err + } + if len(b) != md5.Size { + return nil, errors.New("etag: invalid content-md5") + } + return ETag(b), nil +} + +// Multipart computes an S3 multipart ETag given a list of +// S3 singlepart ETags. It returns nil if the list of +// ETags is empty. +// +// Any encrypted or multipart ETag will be ignored and not +// used to compute the returned ETag. +func Multipart(etags ...ETag) ETag { + if len(etags) == 0 { + return nil + } + + var n int64 + h := md5.New() + for _, etag := range etags { + if !etag.IsMultipart() && !etag.IsEncrypted() { + h.Write(etag) + n++ + } + } + etag := append(h.Sum(nil), '-') + return strconv.AppendInt(etag, n, 10) +} + +// Equal returns true if and only if the two ETags are +// identical. +func Equal(a, b ETag) bool { return bytes.Equal(a, b) } + +// Parse parses s as an S3 ETag, returning the result. +// The string can be an encrypted, singlepart +// or multipart S3 ETag. 
It returns an error if s is +// not a valid textual representation of an ETag. +func Parse(s string) (ETag, error) { + const strict = false + return parse(s, strict) +} + +// parse parses s as an S3 ETag, returning the result. +// It operates in one of two modes: +// - strict +// - non-strict +// +// In strict mode, parse only accepts ETags that +// are AWS S3 compatible. In particular, an AWS +// S3 ETag always consists of a 128 bit checksum +// value and an optional - suffix. +// Therefore, s must have the following form in +// strict mode: <32-hex-characters>[-] +// +// In non-strict mode, parse also accepts ETags +// that are not AWS S3 compatible - e.g. encrypted +// ETags. +func parse(s string, strict bool) (ETag, error) { + // An S3 ETag may be a double-quoted string. + // Therefore, we remove double quotes at the + // start and end, if any. + if strings.HasPrefix(s, `"`) && strings.HasSuffix(s, `"`) { + s = s[1 : len(s)-1] + } + + // An S3 ETag may be a multipart ETag that + // contains a '-' followed by a number. + // If the ETag does not contain a '-' it is either + // a singlepart or encrypted ETag. 
+ n := strings.IndexRune(s, '-') + if n == -1 { + etag, err := hex.DecodeString(s) + if err != nil { + return nil, err + } + if strict && len(etag) != 16 { // AWS S3 ETags are always 128 bit long + return nil, fmt.Errorf("etag: invalid length %d", len(etag)) + } + return ETag(etag), nil + } + + prefix, suffix := s[:n], s[n:] + if len(prefix) != 32 { + return nil, fmt.Errorf("etag: invalid prefix length %d", len(prefix)) + } + if len(suffix) <= 1 { + return nil, errors.New("etag: suffix is not a part number") + } + + etag, err := hex.DecodeString(prefix) + if err != nil { + return nil, err + } + partNumber, err := strconv.Atoi(suffix[1:]) // suffix[0] == '-' Therefore, we start parsing at suffix[1] + if err != nil { + return nil, err + } + if strict && (partNumber == 0 || partNumber > 10000) { + return nil, fmt.Errorf("etag: invalid part number %d", partNumber) + } + return ETag(append(etag, suffix...)), nil +} diff --git a/s3d/etag/etag_test.go b/s3d/etag/etag_test.go new file mode 100644 index 000000000..70b8c01ea --- /dev/null +++ b/s3d/etag/etag_test.go @@ -0,0 +1,210 @@ +package etag + +import ( + "io" + "io/ioutil" + "net/http" + "strings" + "testing" +) + +var _ Tagger = Wrap(nil, nil).(Tagger) // runtime check that wrapReader implements Tagger + +var parseTests = []struct { + String string + ETag ETag + ShouldFail bool +}{ + {String: "3b83ef96387f1465", ETag: ETag{59, 131, 239, 150, 56, 127, 20, 101}}, // 0 + {String: "3b83ef96387f14655fc854ddc3c6bd57", ETag: ETag{59, 131, 239, 150, 56, 127, 20, 101, 95, 200, 84, 221, 195, 198, 189, 87}}, // 1 + {String: `"3b83ef96387f14655fc854ddc3c6bd57"`, ETag: ETag{59, 131, 239, 150, 56, 127, 20, 101, 95, 200, 84, 221, 195, 198, 189, 87}}, // 2 + {String: "ceb8853ddc5086cc4ab9e149f8f09c88-1", ETag: ETag{206, 184, 133, 61, 220, 80, 134, 204, 74, 185, 225, 73, 248, 240, 156, 136, 45, 49}}, // 3 + {String: `"ceb8853ddc5086cc4ab9e149f8f09c88-2"`, ETag: ETag{206, 184, 133, 61, 220, 80, 134, 204, 74, 185, 225, 73, 248, 240, 
156, 136, 45, 50}}, // 4 + { // 5 + String: "90402c78d2dccddee1e9e86222ce2c6361675f3529d26000ae2e900ff216b3cb59e130e092d8a2981e776f4d0bd60941", + ETag: ETag{144, 64, 44, 120, 210, 220, 205, 222, 225, 233, 232, 98, 34, 206, 44, 99, 97, 103, 95, 53, 41, 210, 96, 0, 174, 46, 144, 15, 242, 22, 179, 203, 89, 225, 48, 224, 146, 216, 162, 152, 30, 119, 111, 77, 11, 214, 9, 65}, + }, + + {String: `"3b83ef96387f14655fc854ddc3c6bd57`, ShouldFail: true}, // 6 + {String: "ceb8853ddc5086cc4ab9e149f8f09c88-", ShouldFail: true}, // 7 + {String: "ceb8853ddc5086cc4ab9e149f8f09c88-2a", ShouldFail: true}, // 8 + {String: "ceb8853ddc5086cc4ab9e149f8f09c88-2-1", ShouldFail: true}, // 9 + {String: "90402c78d2dccddee1e9e86222ce2c-1", ShouldFail: true}, // 10 + {String: "90402c78d2dccddee1e9e86222ce2c6361675f3529d26000ae2e900ff216b3cb59e130e092d8a2981e776f4d0bd60941-1", ShouldFail: true}, // 11 +} + +func TestParse(t *testing.T) { + for i, test := range parseTests { + etag, err := Parse(test.String) + if err == nil && test.ShouldFail { + t.Fatalf("Test %d: parse should have failed but succeeded", i) + } + if err != nil && !test.ShouldFail { + t.Fatalf("Test %d: failed to parse ETag %q: %v", i, test.String, err) + } + if !Equal(etag, test.ETag) { + t.Log([]byte(etag)) + t.Fatalf("Test %d: ETags don't match", i) + } + } +} + +var stringTests = []struct { + ETag ETag + String string +}{ + {ETag: ETag{59, 131, 239, 150, 56, 127, 20, 101}, String: "3b83ef96387f1465"}, // 0 + {ETag: ETag{59, 131, 239, 150, 56, 127, 20, 101, 95, 200, 84, 221, 195, 198, 189, 87}, String: "3b83ef96387f14655fc854ddc3c6bd57"}, // 1 + {ETag: ETag{206, 184, 133, 61, 220, 80, 134, 204, 74, 185, 225, 73, 248, 240, 156, 136, 45, 49}, String: "ceb8853ddc5086cc4ab9e149f8f09c88-1"}, // 2 + {ETag: ETag{206, 184, 133, 61, 220, 80, 134, 204, 74, 185, 225, 73, 248, 240, 156, 136, 45, 50}, String: "ceb8853ddc5086cc4ab9e149f8f09c88-2"}, // 3 + { // 4 + ETag: ETag{144, 64, 44, 120, 210, 220, 205, 222, 225, 233, 232, 98, 34, 206, 
44, 99, 97, 103, 95, 53, 41, 210, 96, 0, 174, 46, 144, 15, 242, 22, 179, 203, 89, 225, 48, 224, 146, 216, 162, 152, 30, 119, 111, 77, 11, 214, 9, 65}, + String: "90402c78d2dccddee1e9e86222ce2c6361675f3529d26000ae2e900ff216b3cb59e130e092d8a2981e776f4d0bd60941", + }, +} + +func TestString(t *testing.T) { + for i, test := range stringTests { + s := test.ETag.String() + if s != test.String { + t.Fatalf("Test %d: got %s - want %s", i, s, test.String) + } + } +} + +var equalTests = []struct { + A string + B string + Equal bool +}{ + {A: "3b83ef96387f14655fc854ddc3c6bd57", B: "3b83ef96387f14655fc854ddc3c6bd57", Equal: true}, // 0 + {A: "3b83ef96387f14655fc854ddc3c6bd57", B: `"3b83ef96387f14655fc854ddc3c6bd57"`, Equal: true}, // 1 + + {A: "3b83ef96387f14655fc854ddc3c6bd57", B: "3b83ef96387f14655fc854ddc3c6bd57-2", Equal: false}, // 2 + {A: "3b83ef96387f14655fc854ddc3c6bd57", B: "ceb8853ddc5086cc4ab9e149f8f09c88", Equal: false}, // 3 +} + +func TestEqual(t *testing.T) { + for i, test := range equalTests { + A, err := Parse(test.A) + if err != nil { + t.Fatalf("Test %d: %v", i, err) + } + B, err := Parse(test.B) + if err != nil { + t.Fatalf("Test %d: %v", i, err) + } + if equal := Equal(A, B); equal != test.Equal { + t.Fatalf("Test %d: got %v - want %v", i, equal, test.Equal) + } + } +} + +var readerTests = []struct { // Reference values computed by: echo | md5sum + Content string + ETag ETag +}{ + { + Content: "", ETag: ETag{212, 29, 140, 217, 143, 0, 178, 4, 233, 128, 9, 152, 236, 248, 66, 126}, + }, + { + Content: " ", ETag: ETag{114, 21, 238, 156, 125, 157, 194, 41, 210, 146, 26, 64, 232, 153, 236, 95}, + }, + { + Content: "Hello World", ETag: ETag{177, 10, 141, 177, 100, 224, 117, 65, 5, 183, 169, 155, 231, 46, 63, 229}, + }, +} + +func TestReader(t *testing.T) { + for i, test := range readerTests { + reader := NewReader(strings.NewReader(test.Content), test.ETag) + if _, err := io.Copy(ioutil.Discard, reader); err != nil { + t.Fatalf("Test %d: read failed: %v", i, err) 
+ } + if ETag := reader.ETag(); !Equal(ETag, test.ETag) { + t.Fatalf("Test %d: ETag mismatch: got %q - want %q", i, ETag, test.ETag) + } + } +} + +var multipartTests = []struct { // Test cases have been generated using AWS S3 + ETags []ETag + Multipart ETag +}{ + { + ETags: []ETag{}, + Multipart: ETag{}, + }, + { + ETags: []ETag{must("b10a8db164e0754105b7a99be72e3fe5")}, + Multipart: must("7b976cc68452e003eec7cb0eb631a19a-1"), + }, + { + ETags: []ETag{must("5f363e0e58a95f06cbe9bbc662c5dfb6"), must("5f363e0e58a95f06cbe9bbc662c5dfb6")}, + Multipart: must("a7d414b9133d6483d9a1c4e04e856e3b-2"), + }, + { + ETags: []ETag{must("5f363e0e58a95f06cbe9bbc662c5dfb6"), must("a096eb5968d607c2975fb2c4af9ab225"), must("b10a8db164e0754105b7a99be72e3fe5")}, + Multipart: must("9a0d1febd9265f59f368ceb652770bc2-3"), + }, + { // Check that multipart ETags are ignored + ETags: []ETag{must("5f363e0e58a95f06cbe9bbc662c5dfb6"), must("5f363e0e58a95f06cbe9bbc662c5dfb6"), must("ceb8853ddc5086cc4ab9e149f8f09c88-1")}, + Multipart: must("a7d414b9133d6483d9a1c4e04e856e3b-2"), + }, + { // Check that encrypted ETags are ignored + ETags: []ETag{ + must("90402c78d2dccddee1e9e86222ce2c6361675f3529d26000ae2e900ff216b3cb59e130e092d8a2981e776f4d0bd60941"), + must("5f363e0e58a95f06cbe9bbc662c5dfb6"), must("5f363e0e58a95f06cbe9bbc662c5dfb6"), + }, + Multipart: must("a7d414b9133d6483d9a1c4e04e856e3b-2"), + }, +} + +func TestMultipart(t *testing.T) { + for i, test := range multipartTests { + if multipart := Multipart(test.ETags...); !Equal(multipart, test.Multipart) { + t.Fatalf("Test %d: got %q - want %q", i, multipart, test.Multipart) + } + } +} + +var fromContentMD5Tests = []struct { + Header http.Header + ETag ETag + ShouldFail bool +}{ + {Header: http.Header{}, ETag: nil}, // 0 + {Header: http.Header{"Content-Md5": []string{"1B2M2Y8AsgTpgAmY7PhCfg=="}}, ETag: must("d41d8cd98f00b204e9800998ecf8427e")}, // 1 + {Header: http.Header{"Content-Md5": []string{"sQqNsWTgdUEFt6mb5y4/5Q=="}}, ETag: 
must("b10a8db164e0754105b7a99be72e3fe5")}, // 2 + {Header: http.Header{"Content-MD5": []string{"1B2M2Y8AsgTpgAmY7PhCfg=="}}, ETag: nil}, // 3 (Content-MD5 vs Content-Md5) + {Header: http.Header{"Content-Md5": []string{"sQqNsWTgdUEFt6mb5y4/5Q==", "1B2M2Y8AsgTpgAmY7PhCfg=="}}, ETag: must("b10a8db164e0754105b7a99be72e3fe5")}, // 4 + + {Header: http.Header{"Content-Md5": []string{""}}, ShouldFail: true}, // 5 (empty value) + {Header: http.Header{"Content-Md5": []string{"", "sQqNsWTgdUEFt6mb5y4/5Q=="}}, ShouldFail: true}, // 6 (empty value) + {Header: http.Header{"Content-Md5": []string{"d41d8cd98f00b204e9800998ecf8427e"}}, ShouldFail: true}, // 7 (content-md5 is invalid b64 / of invalid length) +} + +func TestFromContentMD5(t *testing.T) { + for i, test := range fromContentMD5Tests { + ETag, err := FromContentMD5(test.Header) + if err != nil && !test.ShouldFail { + t.Fatalf("Test %d: failed to convert Content-MD5 to ETag: %v", i, err) + } + if err == nil && test.ShouldFail { + t.Fatalf("Test %d: should have failed but succeeded", i) + } + if err == nil { + if !Equal(ETag, test.ETag) { + t.Fatalf("Test %d: got %q - want %q", i, ETag, test.ETag) + } + } + } +} + +func must(s string) ETag { + t, err := Parse(s) + if err != nil { + panic(err) + } + return t +} diff --git a/s3d/etag/reader.go b/s3d/etag/reader.go new file mode 100644 index 000000000..c19f7733f --- /dev/null +++ b/s3d/etag/reader.go @@ -0,0 +1,139 @@ +package etag + +import ( + "crypto/md5" + "fmt" + "hash" + "io" +) + +// Tagger is the interface that wraps the basic ETag method. +type Tagger interface { + ETag() ETag +} + +type wrapReader struct { + io.Reader + Tagger +} + +var _ Tagger = wrapReader{} // compiler check + +// ETag returns the ETag of the underlying Tagger. +func (r wrapReader) ETag() ETag { + if r.Tagger == nil { + return nil + } + return r.Tagger.ETag() +} + +// Wrap returns an io.Reader that reads from the wrapped +// io.Reader and implements the Tagger interaface. 
+// +// If content implements Tagger then the returned Reader +// returns ETag of the content. Otherwise, it returns +// nil as ETag. +// +// Wrap provides an adapter for io.Reader implemetations +// that don't implement the Tagger interface. +// It is mainly used to provide a high-level io.Reader +// access to the ETag computed by a low-level io.Reader: +// +// content := etag.NewReader(r.Body, nil) +// +// compressedContent := Compress(content) +// encryptedContent := Encrypt(compressedContent) +// +// // Now, we need an io.Reader that can access +// // the ETag computed over the content. +// reader := etag.Wrap(encryptedContent, content) +// +func Wrap(wrapped, content io.Reader) io.Reader { + if t, ok := content.(Tagger); ok { + return wrapReader{ + Reader: wrapped, + Tagger: t, + } + } + return wrapReader{ + Reader: wrapped, + } +} + +// A Reader wraps an io.Reader and computes the +// MD5 checksum of the read content as ETag. +// +// Optionally, a Reader can also verify that +// the computed ETag matches an expected value. +// Therefore, it compares both ETags once the +// underlying io.Reader returns io.EOF. +// If the computed ETag does not match the +// expected ETag then Read returns a VerifyError. +// +// Reader implements the Tagger interface. +type Reader struct { + src io.Reader + + md5 hash.Hash + checksum ETag + + readN int64 +} + +// NewReader returns a new Reader that computes the +// MD5 checksum of the content read from r as ETag. +// +// If the provided etag is not nil the returned +// Reader compares the etag with the computed +// MD5 sum once the r returns io.EOF. +func NewReader(r io.Reader, etag ETag) *Reader { + if er, ok := r.(*Reader); ok { + if er.readN == 0 && Equal(etag, er.checksum) { + return er + } + } + return &Reader{ + src: r, + md5: md5.New(), + checksum: etag, + } +} + +// Read reads up to len(p) bytes from the underlying +// io.Reader as specified by the io.Reader interface. 
+func (r *Reader) Read(p []byte) (int, error) { + n, err := r.src.Read(p) + r.readN += int64(n) + r.md5.Write(p[:n]) + + if err == io.EOF && len(r.checksum) != 0 { + if etag := r.ETag(); !Equal(etag, r.checksum) { + return n, VerifyError{ + Expected: r.checksum, + Computed: etag, + } + } + } + return n, err +} + +// ETag returns the ETag of all the content read +// so far. Reading more content changes the MD5 +// checksum. Therefore, calling ETag multiple +// times may return different results. +func (r *Reader) ETag() ETag { + sum := r.md5.Sum(nil) + return ETag(sum) +} + +// VerifyError is an error signaling that a +// computed ETag does not match an expected +// ETag. +type VerifyError struct { + Expected ETag + Computed ETag +} + +func (v VerifyError) Error() string { + return fmt.Sprintf("etag: expected ETag %q does not match computed ETag %q", v.Expected, v.Computed) +} diff --git a/s3d/lock/lock.go b/s3d/lock/lock.go new file mode 100644 index 000000000..1d457ac8d --- /dev/null +++ b/s3d/lock/lock.go @@ -0,0 +1,217 @@ +package lock + +import ( + "context" + "errors" + logging "github.com/ipfs/go-log/v2" + "path" + "sort" + "strings" + "sync" + "time" +) + +var log = logging.Logger("nslocker") + +// OperationTimedOut - a timeout occurred. +type OperationTimedOut struct{} + +func (e OperationTimedOut) Error() string { + return "Operation timed out" +} + +// RWLocker - locker interface to introduce GetRLock, RUnlock. +type RWLocker interface { + GetLock(ctx context.Context, timeout time.Duration) (lkCtx LockContext, timedOutErr error) + Unlock(cancel context.CancelFunc) + GetRLock(ctx context.Context, timeout time.Duration) (lkCtx LockContext, timedOutErr error) + RUnlock(cancel context.CancelFunc) +} + +// LockContext lock context holds the lock backed context and canceler for the context. 
+type LockContext struct { + ctx context.Context + cancel context.CancelFunc +} + +// Context returns lock context +func (l LockContext) Context() context.Context { + return l.ctx +} + +// Cancel function calls cancel() function +func (l LockContext) Cancel() { + if l.cancel != nil { + l.cancel() + } +} + +// NewNSLock - return a new name space lock map. +func NewNSLock() *NsLockMap { + return &NsLockMap{ + lockMap: make(map[string]*nsLock), + } +} + +// nsLock - provides primitives for locking critical namespace regions. +type nsLock struct { + ref int32 + *TRWMutex +} + +// NsLockMap - namespace lock map, provides primitives to Lock, +// Unlock, RLock and RUnlock. +type NsLockMap struct { + lockMap map[string]*nsLock + lockMapMutex sync.Mutex +} + +// Lock the namespace resource. +func (n *NsLockMap) lock(ctx context.Context, volume string, path string, readLock bool, timeout time.Duration) (locked bool) { + resource := PathJoin(volume, path) + + n.lockMapMutex.Lock() + nsLk, found := n.lockMap[resource] + if !found { + nsLk = &nsLock{ + TRWMutex: NewTRWMutex(), + } + } + nsLk.ref++ + n.lockMap[resource] = nsLk + n.lockMapMutex.Unlock() + + // Locking here will block (until timeout). + if readLock { + locked = nsLk.GetRLock(ctx, timeout) + } else { + locked = nsLk.GetLock(ctx, timeout) + } + + if !locked { // We failed to get the lock + // Decrement ref count since we failed to get the lock + n.lockMapMutex.Lock() + n.lockMap[resource].ref-- + if n.lockMap[resource].ref < 0 { + log.Error(errors.New("resource reference count was lower than 0")) + } + if n.lockMap[resource].ref == 0 { + // Remove from the map if there are no more references. + delete(n.lockMap, resource) + } + n.lockMapMutex.Unlock() + } + + return +} + +// Unlock the namespace resource. 
+func (n *NsLockMap) unlock(volume string, path string, readLock bool) { + resource := PathJoin(volume, path) + + n.lockMapMutex.Lock() + defer n.lockMapMutex.Unlock() + if _, found := n.lockMap[resource]; !found { + return + } + if readLock { + n.lockMap[resource].RUnlock() + } else { + n.lockMap[resource].Unlock() + } + n.lockMap[resource].ref-- + if n.lockMap[resource].ref < 0 { + log.Error(errors.New("resource reference count was lower than 0")) + } + if n.lockMap[resource].ref == 0 { + // Remove from the map if there are no more references. + delete(n.lockMap, resource) + } +} + +// localLockInstance - frontend/top-level interface for namespace locks. +type localLockInstance struct { + ns *NsLockMap + volume string + paths []string +} + +// NewNSLock - returns a lock instance for a given volume and +// path. The returned lockInstance object encapsulates the nsLockMap, +// volume, path and operation ID. +func (n *NsLockMap) NewNSLock(volume string, paths ...string) RWLocker { + sort.Strings(paths) + return &localLockInstance{n, volume, paths} +} + +// GetLock - block until write lock is taken or timeout has occurred. +func (li *localLockInstance) GetLock(ctx context.Context, timeout time.Duration) (_ LockContext, timedOutErr error) { + const readLock = false + success := make([]int, len(li.paths)) + for i, path := range li.paths { + if !li.ns.lock(ctx, li.volume, path, readLock, timeout) { + for si, sint := range success { + if sint == 1 { + li.ns.unlock(li.volume, li.paths[si], readLock) + } + } + return LockContext{}, OperationTimedOut{} + } + success[i] = 1 + } + return LockContext{ctx: ctx, cancel: func() {}}, nil +} + +// Unlock - block until write lock is released. +func (li *localLockInstance) Unlock(cancel context.CancelFunc) { + if cancel != nil { + cancel() + } + const readLock = false + for _, path := range li.paths { + li.ns.unlock(li.volume, path, readLock) + } +} + +// GetRLock - block until read lock is taken or timeout has occurred. 
+func (li *localLockInstance) GetRLock(ctx context.Context, timeout time.Duration) (_ LockContext, timedOutErr error) { + const readLock = true + success := make([]int, len(li.paths)) + for i, path := range li.paths { + if !li.ns.lock(ctx, li.volume, path, readLock, timeout) { + for si, sint := range success { + if sint == 1 { + li.ns.unlock(li.volume, li.paths[si], readLock) + } + } + return LockContext{}, OperationTimedOut{} + } + success[i] = 1 + } + return LockContext{ctx: ctx, cancel: func() {}}, nil +} + +// RUnlock - block until read lock is released. +func (li *localLockInstance) RUnlock(cancel context.CancelFunc) { + if cancel != nil { + cancel() + } + const readLock = true + for _, path := range li.paths { + li.ns.unlock(li.volume, path, readLock) + } +} + +// SlashSeparator - slash separator. +const SlashSeparator = "/" + +// PathJoin - like path.Join() but retains trailing SlashSeparator of the last element +func PathJoin(elem ...string) string { + trailingSlash := "" + if len(elem) > 0 { + if strings.HasSuffix(elem[len(elem)-1], SlashSeparator) { + trailingSlash = SlashSeparator + } + } + return path.Join(elem...) + trailingSlash +} diff --git a/s3d/lock/rwmutex.go b/s3d/lock/rwmutex.go new file mode 100644 index 000000000..11416272c --- /dev/null +++ b/s3d/lock/rwmutex.go @@ -0,0 +1,154 @@ +package lock + +import ( + "context" + "math" + "math/rand" + "sync" + "time" +) + +// A TRWMutex is a mutual exclusion lock with timeouts. +type TRWMutex struct { + isWriteLock bool + ref int + mu sync.Mutex // Mutex to prevent multiple simultaneous locks +} + +// NewTRWMutex - initializes a new lsync RW mutex. +func NewTRWMutex() *TRWMutex { + return &TRWMutex{} +} + +// Lock holds a write lock on lm. +// +// If the lock is already in use, the calling go routine +// blocks until the mutex is available. 
+func (m *TRWMutex) Lock() { + const isWriteLock = true + m.lockLoop(context.Background(), math.MaxInt64, isWriteLock) +} + +// GetLock tries to get a write lock on lm before the timeout occurs. +func (m *TRWMutex) GetLock(ctx context.Context, timeout time.Duration) (locked bool) { + const isWriteLock = true + return m.lockLoop(ctx, timeout, isWriteLock) +} + +// RLock holds a read lock on lm. +// +// If one or more read lock are already in use, it will grant another lock. +// Otherwise the calling go routine blocks until the mutex is available. +func (m *TRWMutex) RLock() { + const isWriteLock = false + m.lockLoop(context.Background(), 1<<63-1, isWriteLock) +} + +// GetRLock tries to get a read lock on lm before the timeout occurs. +func (m *TRWMutex) GetRLock(ctx context.Context, timeout time.Duration) (locked bool) { + const isWriteLock = false + return m.lockLoop(ctx, timeout, isWriteLock) +} + +func (m *TRWMutex) lock(isWriteLock bool) (locked bool) { + m.mu.Lock() + defer m.mu.Unlock() + + if isWriteLock { + if m.ref == 0 && !m.isWriteLock { + m.ref = 1 + m.isWriteLock = true + locked = true + } + } else { + if !m.isWriteLock { + m.ref++ + locked = true + } + } + + return locked +} + +const ( + lockRetryInterval = 50 * time.Millisecond +) + +// lockLoop will acquire either a read or a write lock +// +// The call will block until the lock is granted using a built-in +// timing randomized back-off algorithm to try again until successful +func (m *TRWMutex) lockLoop(ctx context.Context, timeout time.Duration, isWriteLock bool) (locked bool) { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + retryCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + for { + select { + case <-retryCtx.Done(): + // Caller context canceled or we timedout, + // return false anyways for both situations. 
+ return false + default: + if m.lock(isWriteLock) { + return true + } + time.Sleep(time.Duration(r.Float64() * float64(lockRetryInterval))) + } + } +} + +// Unlock unlocks the write lock. +// +// It is a run-time error if lm is not locked on entry to Unlock. +func (m *TRWMutex) Unlock() { + isWriteLock := true + success := m.unlock(isWriteLock) + if !success { + panic("Trying to Unlock() while no Lock() is active") + } +} + +// RUnlock releases a read lock held on lm. +// +// It is a run-time error if lm is not locked on entry to RUnlock. +func (m *TRWMutex) RUnlock() { + isWriteLock := false + success := m.unlock(isWriteLock) + if !success { + panic("Trying to RUnlock() while no RLock() is active") + } +} + +func (m *TRWMutex) unlock(isWriteLock bool) (unlocked bool) { + m.mu.Lock() + defer m.mu.Unlock() + + // Try to release lock. + if isWriteLock { + if m.isWriteLock && m.ref == 1 { + m.ref = 0 + m.isWriteLock = false + unlocked = true + } + } else { + if !m.isWriteLock { + if m.ref > 0 { + m.ref-- + unlocked = true + } + } + } + + return unlocked +} + +// ForceUnlock will forcefully clear a write or read lock. 
+func (m *TRWMutex) ForceUnlock() { + m.mu.Lock() + defer m.mu.Unlock() + + m.ref = 0 + m.isWriteLock = false +} diff --git a/s3d/policy/policy.go b/s3d/policy/policy.go new file mode 100644 index 000000000..d38c09ccc --- /dev/null +++ b/s3d/policy/policy.go @@ -0,0 +1,74 @@ +package policy + +import s3action "github.com/bittorrent/go-btfs/s3d/action" + +const ( + // PublicReadWrite 公开读写,适用于桶ACL和对象ACL + PublicReadWrite = "public-read-write" + + // PublicRead 公开读,适用于桶ACL和对象ACL + PublicRead = "public-read" + + // Private 私有,适用于桶ACL和对象ACL + Private = "private" +) + +// 支持匿名公开读写的action集合 +var rwActionMap = map[s3action.Action]struct{}{ + s3action.ListObjectsAction: {}, + s3action.ListObjectsV2Action: {}, + s3action.HeadObjectAction: {}, + s3action.PutObjectAction: {}, + s3action.GetObjectAction: {}, + s3action.CopyObjectAction: {}, + s3action.DeleteObjectAction: {}, + s3action.DeleteObjectsAction: {}, + + s3action.CreateMultipartUploadAction: {}, + s3action.AbortMultipartUploadAction: {}, + s3action.CompleteMultipartUploadAction: {}, + s3action.UploadPartAction: {}, +} + +// checkActionInPublicReadWrite - returns whether action is RW or not. +func checkActionInPublicReadWrite(action s3action.Action) bool { + _, ok := rwActionMap[action] + return ok +} + +// 支持匿名公开读的action集合 +var rdActionMap = map[s3action.Action]struct{}{ + s3action.ListObjectsAction: {}, + s3action.ListObjectsV2Action: {}, + s3action.HeadObjectAction: {}, + s3action.GetObjectAction: {}, +} + +// checkActionInPublicRead - returns whether action is Read or not. 
+func checkActionInPublicRead(action s3action.Action) bool { + _, ok := rdActionMap[action] + return ok +} + +func IsAllowed(own bool, acl string, action s3action.Action) (allow bool) { + a := s3action.Action(action) + + // 1.if bucket + if a.IsBucketAction() { + return own + } + + // 2.if object + if a.IsObjectAction() { + switch acl { + case Private: + return own + case PublicRead: + return checkActionInPublicRead(action) + case PublicReadWrite: + return checkActionInPublicReadWrite(action) + } + } + + return false +} diff --git a/s3d/store/bucket.go b/s3d/store/bucket.go new file mode 100644 index 000000000..210857e80 --- /dev/null +++ b/s3d/store/bucket.go @@ -0,0 +1,150 @@ +package store + +import ( + "context" + "github.com/bittorrent/go-btfs/s3d/lock" + "github.com/bittorrent/go-btfs/s3d/uleveldb" + "github.com/syndtr/goleveldb/leveldb" + "time" +) + +const ( + bucketPrefix = "bkt/" +) + +const ( + globalOperationTimeout = 5 * time.Minute + deleteOperationTimeout = 1 * time.Minute +) + +// BucketMetadata contains bucket metadata. +type BucketMetadata struct { + Name string + Region string + Owner string + Acl string + Created time.Time +} + +// NewBucketMetadata creates BucketMetadata with the supplied name and Created to Now. +func NewBucketMetadata(name, region, accessKey, acl string) *BucketMetadata { + return &BucketMetadata{ + Name: name, + Region: region, + Owner: accessKey, + Acl: acl, + Created: time.Now().UTC(), + } +} + +// BucketMetadataSys captures all bucket metadata for a given cluster. +type BucketMetadataSys struct { + db *uleveldb.ULevelDB + nsLock *lock.NsLockMap + emptyBucket func(ctx context.Context, bucket string) (bool, error) +} + +// NewBucketMetadataSys - creates new policy system. +func NewBucketMetadataSys(db *uleveldb.ULevelDB) *BucketMetadataSys { + return &BucketMetadataSys{ + db: db, + nsLock: lock.NewNSLock(), + } +} + +// NewNSLock - initialize a new namespace RWLocker instance. 
+func (sys *BucketMetadataSys) NewNSLock(bucket string) lock.RWLocker { + return sys.nsLock.NewNSLock("meta", bucket) +} + +func (sys *BucketMetadataSys) SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) { + sys.emptyBucket = emptyBucket +} + +// setBucketMeta - sets a new metadata in-db +func (sys *BucketMetadataSys) setBucketMeta(bucket string, meta *BucketMetadata) error { + return sys.db.Put(bucketPrefix+bucket, meta) +} + +// CreateBucket - create a new Bucket +func (sys *BucketMetadataSys) CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error { + lk := sys.NewNSLock(bucket) + lkctx, err := lk.GetLock(ctx, globalOperationTimeout) + if err != nil { + return err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx.Cancel) + + return sys.setBucketMeta(bucket, NewBucketMetadata(bucket, region, accessKey, acl)) +} + +func (sys *BucketMetadataSys) getBucketMeta(bucket string) (meta BucketMetadata, err error) { + err = sys.db.Get(bucketPrefix+bucket, &meta) + if err == leveldb.ErrNotFound { + err = BucketNotFound{Bucket: bucket, Err: err} + } + return meta, err +} + +// GetBucketMeta metadata for a bucket. +func (sys *BucketMetadataSys) GetBucketMeta(ctx context.Context, bucket string) (meta BucketMetadata, err error) { + lk := sys.NewNSLock(bucket) + lkctx, err := lk.GetRLock(ctx, globalOperationTimeout) + if err != nil { + return BucketMetadata{}, err + } + ctx = lkctx.Context() + defer lk.RUnlock(lkctx.Cancel) + + return sys.getBucketMeta(bucket) +} + +// HasBucket metadata for a bucket. +func (sys *BucketMetadataSys) HasBucket(ctx context.Context, bucket string) bool { + _, err := sys.GetBucketMeta(ctx, bucket) + return err == nil +} + +// DeleteBucket bucket. 
+func (sys *BucketMetadataSys) DeleteBucket(ctx context.Context, bucket string) error { + lk := sys.NewNSLock(bucket) + lkctx, err := lk.GetLock(ctx, deleteOperationTimeout) + if err != nil { + return err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx.Cancel) + + if _, err = sys.getBucketMeta(bucket); err != nil { + return err + } + + if empty, err := sys.emptyBucket(ctx, bucket); err != nil { + return err + } else if !empty { + return ErrBucketNotEmpty + } + + return sys.db.Delete(bucketPrefix + bucket) +} + +// GetAllBucketsOfUser metadata for all bucket. +func (sys *BucketMetadataSys) GetAllBucketsOfUser(ctx context.Context, username string) ([]BucketMetadata, error) { + var m []BucketMetadata + all, err := sys.db.ReadAllChan(ctx, bucketPrefix, "") + if err != nil { + return nil, err + } + for entry := range all { + data := BucketMetadata{} + if err = entry.UnmarshalValue(&data); err != nil { + continue + } + if data.Owner != username { + continue + } + m = append(m, data) + } + return m, nil +} diff --git a/s3d/store/bucket_acl.go b/s3d/store/bucket_acl.go new file mode 100644 index 000000000..cbecf0376 --- /dev/null +++ b/s3d/store/bucket_acl.go @@ -0,0 +1,34 @@ +package store + +import ( + "context" +) + +func (sys *BucketMetadataSys) UpdateBucketAcl(ctx context.Context, bucket, acl, accessKey string) error { + lk := sys.NewNSLock(bucket) + lkctx, err := lk.GetLock(ctx, globalOperationTimeout) + if err != nil { + return err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx.Cancel) + + meta, err := sys.getBucketMeta(bucket) + if err != nil { + return err + } + + meta.Acl = acl + return sys.setBucketMeta(bucket, &meta) +} +func (sys *BucketMetadataSys) GetBucketAcl(ctx context.Context, bucket string) (string, error) { + meta, err := sys.GetBucketMeta(ctx, bucket) + if err != nil { + switch err.(type) { + case BucketNotFound: + return "", BucketTaggingNotFound{Bucket: bucket} + } + return "", err + } + return meta.Acl, nil +} diff --git a/s3d/store/err.go 
b/s3d/store/err.go new file mode 100644 index 000000000..5cc5ebeb2 --- /dev/null +++ b/s3d/store/err.go @@ -0,0 +1,34 @@ +package store + +import "errors" + +var ErrBucketNotEmpty = errors.New("bucket not empty") + +// BucketPolicyNotFound - no bucket policy found. +type BucketPolicyNotFound struct { + Bucket string + Err error +} + +func (e BucketPolicyNotFound) Error() string { + return "No bucket policy configuration found for bucket: " + e.Bucket +} + +// BucketNotFound - no bucket found. +type BucketNotFound struct { + Bucket string + Err error +} + +func (e BucketNotFound) Error() string { + return "Not found for bucket: " + e.Bucket +} + +type BucketTaggingNotFound struct { + Bucket string + Err error +} + +func (e BucketTaggingNotFound) Error() string { + return "No bucket tagging configuration found for bucket: " + e.Bucket +} diff --git a/s3d/store/service.go b/s3d/store/service.go new file mode 100644 index 000000000..72440ea2a --- /dev/null +++ b/s3d/store/service.go @@ -0,0 +1 @@ +package store diff --git a/s3d/store/service_instance.go b/s3d/store/service_instance.go new file mode 100644 index 000000000..aa4d9317d --- /dev/null +++ b/s3d/store/service_instance.go @@ -0,0 +1,5 @@ +package store + +//bucket.go + +//bucket_acl.go diff --git a/s3d/store/service_interface.go b/s3d/store/service_interface.go new file mode 100644 index 000000000..4ec4a123b --- /dev/null +++ b/s3d/store/service_interface.go @@ -0,0 +1,16 @@ +package store + +import ( + "context" + "github.com/bittorrent/go-btfs/s3d/lock" +) + +type Service interface { + NewNSLock(bucket string) lock.RWLocker + SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) + CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error + GetBucketMeta(ctx context.Context, bucket string) (meta BucketMetadata, err error) + HasBucket(ctx context.Context, bucket string) bool + DeleteBucket(ctx context.Context, bucket string) error + GetAllBucketsOfUser(ctx 
context.Context, username string) ([]BucketMetadata, error) +} diff --git a/s3d/store/service_test.go b/s3d/store/service_test.go new file mode 100644 index 000000000..72440ea2a --- /dev/null +++ b/s3d/store/service_test.go @@ -0,0 +1 @@ +package store diff --git a/s3d/uleveldb/leveldb.go b/s3d/uleveldb/leveldb.go new file mode 100644 index 000000000..781625528 --- /dev/null +++ b/s3d/uleveldb/leveldb.go @@ -0,0 +1,117 @@ +package uleveldb + +import ( + "context" + logging "github.com/ipfs/go-log/v2" + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" + "github.com/vmihailenco/msgpack/v4" + "go.uber.org/zap/buffer" +) + +var log = logging.Logger("leveldb") + +//ULevelDB level db store key-struct +type ULevelDB struct { + DB *leveldb.DB +} + +// OpenDb open a db client +func OpenDb(path string) (*ULevelDB, error) { + newDb, err := leveldb.OpenFile(path, nil) + if _, corrupted := err.(*errors.ErrCorrupted); corrupted { + newDb, err = leveldb.RecoverFile(path, nil) + } + if err != nil { + log.Errorf("Open Db path: %v,err:%v,", path, err) + return nil, err + } + return &ULevelDB{ + DB: newDb, + }, nil +} + +//Close db close +func (l *ULevelDB) Close() error { + return l.DB.Close() +} + +// Put +// * @param {string} key +// * @param {interface{}} value +func (l *ULevelDB) Put(key string, value interface{}) error { + result, err := msgpack.Marshal(value) + if err != nil { + log.Errorf("marshal error%v", err) + return err + } + return l.DB.Put([]byte(key), result, nil) +} + +// Get +// * @param {string} key +// * @param {interface{}} value +func (l *ULevelDB) Get(key string, value interface{}) error { + get, err := l.DB.Get([]byte(key), nil) + if err != nil { + return err + } + return msgpack.Unmarshal(get, value) +} + +// Delete +// * @param {string} key +// * @param {interface{}} value +func (l 
*ULevelDB) Delete(key string) error { + return l.DB.Delete([]byte(key), nil) +} + +// NewIterator /** +func (l *ULevelDB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + return l.DB.NewIterator(slice, ro) +} + +type entry struct { + Key string + Value []byte +} + +func (e *entry) UnmarshalValue(value interface{}) error { + return msgpack.Unmarshal(e.Value, value) +} + +//ReadAllChan read all key value +func (l *ULevelDB) ReadAllChan(ctx context.Context, prefix string, seekKey string) (<-chan *entry, error) { + ch := make(chan *entry) + var slice *util.Range + if prefix != "" { + slice = util.BytesPrefix([]byte(prefix)) + } + iter := l.NewIterator(slice, nil) + if seekKey != "" { + iter.Seek([]byte(seekKey)) + } + go func() { + defer func() { + iter.Release() + close(ch) + }() + for iter.Next() { + key := string(iter.Key()) + buf := buffer.Buffer{} + buf.Write(iter.Value()) + select { + case <-ctx.Done(): + return + case ch <- &entry{ + Key: key, + Value: buf.Bytes(), + }: + } + } + }() + return ch, nil +} diff --git a/s3d/uleveldb/uleveldb_test.go b/s3d/uleveldb/uleveldb_test.go new file mode 100644 index 000000000..e758e6d98 --- /dev/null +++ b/s3d/uleveldb/uleveldb_test.go @@ -0,0 +1,24 @@ +package uleveldb + +import ( + "fmt" + "testing" +) + +func TestULeveldb(t *testing.T) { + db, err := OpenDb(t.TempDir()) + if err != nil { + t.Fatal(err) + } + err = db.Put("a", 10) + if err != nil { + return + } + var a int + err = db.Get("a", &a) + db.Close() + if err != nil { + return + } + fmt.Println(a) +} From f4324c08296ebb7fc9df1db8266b9dbe63cecc3d Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Thu, 10 Aug 2023 11:09:49 +0800 Subject: [PATCH 016/139] chore: --- s3d/apierrors/errors.go | 156 ++++++++++++++++++++-------------------- s3d/utils/signature.go | 8 +-- 2 files changed, 82 insertions(+), 82 deletions(-) diff --git a/s3d/apierrors/errors.go b/s3d/apierrors/errors.go index ba968163a..e8870e73d 100644 --- 
a/s3d/apierrors/errors.go +++ b/s3d/apierrors/errors.go @@ -1,80 +1,80 @@ package apierrors -import ( - "context" - "github.com/yann-y/fds/internal/lock" - "github.com/yann-y/fds/internal/store" - "github.com/yann-y/fds/internal/utils/hash" - "github.com/yann-y/fds/pkg/s3utils" - "golang.org/x/xerrors" - "net/url" -) - -// NotImplemented If a feature is not implemented -type NotImplemented struct { - Message string -} - -// ContextCanceled returns whether a context is canceled. -func ContextCanceled(ctx context.Context) bool { - select { - case <-ctx.Done(): - return true - default: - return false - } -} - -func ToApiError(ctx context.Context, err error) ErrorCode { - if ContextCanceled(ctx) { - if ctx.Err() == context.Canceled { - return ErrClientDisconnected - } - } - errCode := ErrInternalError - switch err.(type) { - case lock.OperationTimedOut: - errCode = ErrOperationTimedOut - case hash.SHA256Mismatch: - errCode = ErrContentSHA256Mismatch - case hash.BadDigest: - errCode = ErrBadDigest - case store.BucketNotFound: - errCode = ErrNoSuchBucket - case store.BucketPolicyNotFound: - errCode = ErrNoSuchBucketPolicy - case store.BucketTaggingNotFound: - errCode = ErrBucketTaggingNotFound - case s3utils.BucketNameInvalid: - errCode = ErrInvalidBucketName - case s3utils.ObjectNameInvalid: - errCode = ErrInvalidObjectName - case s3utils.ObjectNameTooLong: - errCode = ErrKeyTooLongError - case s3utils.ObjectNamePrefixAsSlash: - errCode = ErrInvalidObjectNamePrefixSlash - case s3utils.InvalidUploadIDKeyCombination: - errCode = ErrNotImplemented - case s3utils.InvalidMarkerPrefixCombination: - errCode = ErrNotImplemented - case s3utils.MalformedUploadID: - errCode = ErrNoSuchUpload - case s3utils.InvalidUploadID: - errCode = ErrNoSuchUpload - case s3utils.InvalidPart: - errCode = ErrInvalidPart - case s3utils.PartTooSmall: - errCode = ErrEntityTooSmall - case s3utils.PartTooBig: - errCode = ErrEntityTooLarge - case url.EscapeError: - errCode = ErrInvalidObjectName - 
default: - if xerrors.Is(err, store.ErrObjectNotFound) { - errCode = ErrNoSuchKey - } else if xerrors.Is(err, store.ErrBucketNotEmpty) { - errCode = ErrBucketNotEmpty - } - } - return errCode -} +//import ( +// "context" +// "github.com/yann-y/fds/internal/lock" +// "github.com/yann-y/fds/internal/store" +// "github.com/yann-y/fds/internal/utils/hash" +// "github.com/yann-y/fds/pkg/s3utils" +// "golang.org/x/xerrors" +// "net/url" +//) +// +//// NotImplemented If a feature is not implemented +//type NotImplemented struct { +// Message string +//} +// +//// ContextCanceled returns whether a context is canceled. +//func ContextCanceled(ctx context.Context) bool { +// select { +// case <-ctx.Done(): +// return true +// default: +// return false +// } +//} +// +//func ToApiError(ctx context.Context, err error) ErrorCode { +// if ContextCanceled(ctx) { +// if ctx.Err() == context.Canceled { +// return ErrClientDisconnected +// } +// } +// errCode := ErrInternalError +// switch err.(type) { +// case lock.OperationTimedOut: +// errCode = ErrOperationTimedOut +// case hash.SHA256Mismatch: +// errCode = ErrContentSHA256Mismatch +// case hash.BadDigest: +// errCode = ErrBadDigest +// case store.BucketNotFound: +// errCode = ErrNoSuchBucket +// case store.BucketPolicyNotFound: +// errCode = ErrNoSuchBucketPolicy +// case store.BucketTaggingNotFound: +// errCode = ErrBucketTaggingNotFound +// case s3utils.BucketNameInvalid: +// errCode = ErrInvalidBucketName +// case s3utils.ObjectNameInvalid: +// errCode = ErrInvalidObjectName +// case s3utils.ObjectNameTooLong: +// errCode = ErrKeyTooLongError +// case s3utils.ObjectNamePrefixAsSlash: +// errCode = ErrInvalidObjectNamePrefixSlash +// case s3utils.InvalidUploadIDKeyCombination: +// errCode = ErrNotImplemented +// case s3utils.InvalidMarkerPrefixCombination: +// errCode = ErrNotImplemented +// case s3utils.MalformedUploadID: +// errCode = ErrNoSuchUpload +// case s3utils.InvalidUploadID: +// errCode = ErrNoSuchUpload +// case 
s3utils.InvalidPart: +// errCode = ErrInvalidPart +// case s3utils.PartTooSmall: +// errCode = ErrEntityTooSmall +// case s3utils.PartTooBig: +// errCode = ErrEntityTooLarge +// case url.EscapeError: +// errCode = ErrInvalidObjectName +// default: +// if xerrors.Is(err, store.ErrObjectNotFound) { +// errCode = ErrNoSuchKey +// } else if xerrors.Is(err, store.ErrBucketNotEmpty) { +// errCode = ErrBucketNotEmpty +// } +// } +// return errCode +//} diff --git a/s3d/utils/signature.go b/s3d/utils/signature.go index e96c3bf53..fe6a99d49 100644 --- a/s3d/utils/signature.go +++ b/s3d/utils/signature.go @@ -7,8 +7,6 @@ import ( "crypto/sha256" "encoding/base64" "encoding/hex" - "github.com/yann-y/fds/internal/consts" - "github.com/yann-y/fds/internal/iam/auth" "io" "net/http" "regexp" @@ -17,6 +15,8 @@ import ( "testing" "time" "unicode/utf8" + + "github.com/bittorrent/go-btfs/s3d/consts" ) var ignoredHeaders = map[string]bool{ @@ -41,8 +41,8 @@ func MustNewSignedV4Request(method string, urlStr string, contentLength int64, b if err != nil { t.Fatalf("newTestRequest fail err:%v", err) } - cred := &auth.Credentials{AccessKey: accessKey, SecretKey: secretKey} - if err := SignRequestV4(req, cred.AccessKey, cred.SecretKey, st); err != nil { + + if err := SignRequestV4(req, accessKey, secretKey, st); err != nil { t.Fatalf("Unable to inititalized new signed http request %s", err) } return req From 5c57bec602f18118d575d2378148c3d9ac3a4667 Mon Sep 17 00:00:00 2001 From: steve Date: Thu, 10 Aug 2023 14:12:16 +0800 Subject: [PATCH 017/139] feat: s3 access-key, server, handlers, statestore, filestore --- cmd/btfs/daemon.go | 11 +- core/commands/accesskey.go | 4 +- go.mod | 1 + go.sum | 2 + s3/accesskey/instance.go | 166 ----------------- s3/accesskey/interface.go | 32 ---- s3/accesskey/service.go | 39 ---- s3/consts/consts.go | 183 +++++++++++++++++++ s3/handlers/accesskey/accesskey.go | 178 ++++++++++++++++++ s3/handlers/accesskey/instance.go | 43 +++++ 
s3/handlers/accesskey/options.go | 15 ++ s3/handlers/filestore/local_shell.go | 18 ++ s3/handlers/handlers.go | 86 +++++++++ s3/handlers/interface.go | 46 +++++ s3/handlers/options.go | 9 + s3/handlers/statestore/storage_proxy.go | 43 +++++ s3/interface.go | 10 + s3/options.go | 9 + s3/server.go | 90 +++++++++ s3/utils/random/string.go => utils/random.go | 4 +- 20 files changed, 740 insertions(+), 249 deletions(-) delete mode 100644 s3/accesskey/instance.go delete mode 100644 s3/accesskey/interface.go delete mode 100644 s3/accesskey/service.go create mode 100644 s3/consts/consts.go create mode 100644 s3/handlers/accesskey/accesskey.go create mode 100644 s3/handlers/accesskey/instance.go create mode 100644 s3/handlers/accesskey/options.go create mode 100644 s3/handlers/filestore/local_shell.go create mode 100644 s3/handlers/handlers.go create mode 100644 s3/handlers/interface.go create mode 100644 s3/handlers/options.go create mode 100644 s3/handlers/statestore/storage_proxy.go create mode 100644 s3/interface.go create mode 100644 s3/options.go create mode 100644 s3/server.go rename s3/utils/random/string.go => utils/random.go (85%) diff --git a/cmd/btfs/daemon.go b/cmd/btfs/daemon.go index 6b023bda1..03d5ffd3d 100644 --- a/cmd/btfs/daemon.go +++ b/cmd/btfs/daemon.go @@ -7,7 +7,8 @@ import ( "errors" _ "expvar" "fmt" - "github.com/bittorrent/go-btfs/s3/accesskey" + "github.com/bittorrent/go-btfs/s3/handlers/accesskey" + s3statestore "github.com/bittorrent/go-btfs/s3/handlers/statestore" "io/ioutil" "math/rand" "net" @@ -423,13 +424,7 @@ If the user need to start multiple nodes on the same machine, the configuration }() // access-key init - accesskey.InitService( - &accesskey.Config{ - SecretLength: 32, - StorePrefix: "access-keys:", - }, - statestore, - ) + accesskey.InitInstance(s3statestore.NewStorageStateStoreProxy(statestore)) if SimpleMode == false { chainid, stored, err := getChainID(req, cfg, statestore) diff --git a/core/commands/accesskey.go 
b/core/commands/accesskey.go index f1cee49b1..6279078ef 100644 --- a/core/commands/accesskey.go +++ b/core/commands/accesskey.go @@ -4,7 +4,7 @@ import ( "errors" cmds "github.com/bittorrent/go-btfs-cmds" "github.com/bittorrent/go-btfs/core/commands/cmdenv" - "github.com/bittorrent/go-btfs/s3/accesskey" + "github.com/bittorrent/go-btfs/s3/handlers/accesskey" ) var AccessKeyCmd = &cmds.Command{ @@ -13,7 +13,6 @@ var AccessKeyCmd = &cmds.Command{ ShortDescription: ` `, }, - Subcommands: map[string]*cmds.Command{ "generate": accessKeyGenerateCmd, "enable": accessKeyEnableCmd, @@ -23,6 +22,7 @@ var AccessKeyCmd = &cmds.Command{ "get": accessKeyGetCmd, "list": accessKeyListCmd, }, + NoLocal: true, } func checkDaemon(env cmds.Environment) (err error) { diff --git a/go.mod b/go.mod index 5417489f9..58953f52d 100644 --- a/go.mod +++ b/go.mod @@ -174,6 +174,7 @@ require ( github.com/golang/mock v1.6.0 // indirect github.com/google/btree v1.1.2 // indirect github.com/google/pprof v0.0.0-20221203041831-ce31453925ec // indirect + github.com/gorilla/mux v1.8.0 // indirect github.com/huandu/xstrings v1.3.2 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect github.com/ipfs/go-ipld-legacy v0.1.1 // indirect diff --git a/go.sum b/go.sum index 3b0cea235..a27c84df3 100644 --- a/go.sum +++ b/go.sum @@ -602,6 +602,8 @@ github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORR github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= diff --git a/s3/accesskey/instance.go b/s3/accesskey/instance.go deleted file mode 100644 index 7488322cd..000000000 --- a/s3/accesskey/instance.go +++ /dev/null @@ -1,166 +0,0 @@ -package accesskey - -import ( - "errors" - "github.com/bittorrent/go-btfs/s3/utils/random" - "github.com/bittorrent/go-btfs/transaction/storage" - "github.com/google/uuid" - "sync" - "time" -) - -var _ Service = &service{} - -type service struct { - config *Config - store storage.StateStorer - locks sync.Map -} - -func newService(config *Config, store storage.StateStorer) *service { - return &service{ - config: config, - store: store, - locks: sync.Map{}, - } -} - -func (s *service) Generate() (ack *AccessKey, err error) { - now := time.Now() - ack = &AccessKey{ - Key: s.newKey(), - Secret: s.newSecret(), - Enable: true, - IsDeleted: false, - CreatedAt: now, - UpdatedAt: now, - } - err = s.store.Put(s.getStoreKey(ack.Key), ack) - return -} - -func (s *service) Enable(key string) (err error) { - enable := true - err = s.update(key, &updateArgs{ - Enable: &enable, - }) - return -} - -func (s *service) Disable(key string) (err error) { - enable := false - err = s.update(key, &updateArgs{ - Enable: &enable, - }) - return -} - -func (s *service) Reset(key string) (err error) { - secret := s.newSecret() - err = s.update(key, &updateArgs{ - Secret: &secret, - }) - return -} - -func (s *service) Delete(key string) (err error) { - isDelete := true - err = s.update(key, &updateArgs{ - IsDelete: &isDelete, - }) - return -} - -func (s *service) Get(key string) (ack *AccessKey, err error) { - ack = &AccessKey{} - err = s.store.Get(s.getStoreKey(key), ack) - if err != nil && !errors.Is(err, storage.ErrNotFound) { - return - } - if errors.Is(err, storage.ErrNotFound) || ack.IsDeleted { - err = ErrNotFound - } - return -} - -func (s *service) List() (list 
[]*AccessKey, err error) { - err = s.store.Iterate(s.config.StorePrefix, func(key, _ []byte) (stop bool, er error) { - ack := &AccessKey{} - er = s.store.Get(string(key), ack) - if er != nil { - return - } - if ack.IsDeleted { - return - } - list = append(list, ack) - return - }) - return -} - -func (s *service) newKey() (key string) { - key = uuid.NewString() - return -} - -func (s *service) newSecret() (secret string) { - secret = random.NewString(s.config.SecretLength) - return -} - -func (s *service) getStoreKey(key string) (storeKey string) { - storeKey = s.config.StorePrefix + key - return -} - -func (s *service) lock(key string) (unlock func()) { - loaded := true - for loaded { - _, loaded = s.locks.LoadOrStore(key, nil) - time.Sleep(10 * time.Millisecond) - } - unlock = func() { - s.locks.Delete(key) - } - return -} - -type updateArgs struct { - Enable *bool - Secret *string - IsDelete *bool -} - -func (s *service) update(key string, args *updateArgs) (err error) { - unlock := s.lock(key) - defer unlock() - - ack := &AccessKey{} - stk := s.getStoreKey(key) - - err = s.store.Get(stk, ack) - if err != nil && !errors.Is(err, storage.ErrNotFound) { - return - } - if errors.Is(err, storage.ErrNotFound) || ack.IsDeleted { - err = ErrNotFound - return - } - - if args.Enable != nil { - ack.Enable = *args.Enable - } - if args.Secret != nil { - ack.Secret = *args.Secret - } - if args.IsDelete != nil { - ack.IsDeleted = *args.IsDelete - } - - ack.UpdatedAt = time.Now() - - err = s.store.Put(stk, ack) - - return -} diff --git a/s3/accesskey/interface.go b/s3/accesskey/interface.go deleted file mode 100644 index 8112bab12..000000000 --- a/s3/accesskey/interface.go +++ /dev/null @@ -1,32 +0,0 @@ -package accesskey - -import ( - "errors" - "time" -) - -var ErrNotFound = errors.New("key is not found") - -type Config struct { - SecretLength int - StorePrefix string -} - -type AccessKey struct { - Key string `json:"key"` - Secret string `json:"secret"` - Enable bool 
`json:"enable"` - IsDeleted bool `json:"is_deleted"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` -} - -type Service interface { - Generate() (ack *AccessKey, err error) - Enable(key string) (err error) - Disable(key string) (err error) - Reset(key string) (err error) - Delete(key string) (err error) - Get(key string) (ack *AccessKey, err error) - List() (list []*AccessKey, err error) -} diff --git a/s3/accesskey/service.go b/s3/accesskey/service.go deleted file mode 100644 index ba0f0b89e..000000000 --- a/s3/accesskey/service.go +++ /dev/null @@ -1,39 +0,0 @@ -package accesskey - -import ( - "github.com/bittorrent/go-btfs/transaction/storage" -) - -var svc Service - -func InitService(config *Config, store storage.StateStorer) { - svc = newService(config, store) -} - -func Generate() (ack *AccessKey, err error) { - return svc.Generate() -} - -func Enable(key string) (err error) { - return svc.Enable(key) -} - -func Disable(key string) (err error) { - return svc.Disable(key) -} - -func Reset(key string) (err error) { - return svc.Reset(key) -} - -func Delete(key string) (err error) { - return svc.Delete(key) -} - -func Get(key string) (ack *AccessKey, err error) { - return svc.Get(key) -} - -func List() (list []*AccessKey, err error) { - return svc.List() -} diff --git a/s3/consts/consts.go b/s3/consts/consts.go new file mode 100644 index 000000000..2bb2d09a8 --- /dev/null +++ b/s3/consts/consts.go @@ -0,0 +1,183 @@ +package consts + +import ( + "github.com/dustin/go-humanize" + "time" +) + +//some const +const ( + // Iso8601TimeFormat RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z + Iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision. + + StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + + // MaxLocationConstraintSize Limit of location constraint XML for unauthenticated PUT bucket operations. 
+ MaxLocationConstraintSize = 3 * humanize.MiByte + EmptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + StsRequestBodyLimit = 10 * (1 << 20) // 10 MiB + DefaultRegion = "" + SlashSeparator = "/" + + MaxSkewTime = 15 * time.Minute // 15 minutes skew allowed. + + // STS API version. + StsAPIVersion = "2011-06-15" + StsVersion = "Version" + StsAction = "Action" + AssumeRole = "AssumeRole" + SignV4Algorithm = "AWS4-HMAC-SHA256" + + DefaultOwnerID = "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4" + DisplayName = "FileDagStorage" + DefaultStorageClass = "DAGSTORE" +) + +// Standard S3 HTTP request constants +const ( + IfModifiedSince = "If-Modified-Since" + IfUnmodifiedSince = "If-Unmodified-Since" + IfMatch = "If-Match" + IfNoneMatch = "If-None-Match" + + // S3 storage class + AmzStorageClass = "x-amz-storage-class" + + // S3 object version ID + AmzVersionID = "x-amz-version-id" + AmzDeleteMarker = "x-amz-delete-marker" + + // S3 object tagging + AmzObjectTagging = "X-Amz-Tagging" + AmzTagCount = "x-amz-tagging-count" + AmzTagDirective = "X-Amz-Tagging-Directive" + + // S3 transition restore + AmzRestore = "x-amz-restore" + AmzRestoreExpiryDays = "X-Amz-Restore-Expiry-Days" + AmzRestoreRequestDate = "X-Amz-Restore-Request-Date" + AmzRestoreOutputPath = "x-amz-restore-output-path" + + // S3 extensions + AmzCopySourceIfModifiedSince = "x-amz-copy-source-if-modified-since" + AmzCopySourceIfUnmodifiedSince = "x-amz-copy-source-if-unmodified-since" + + AmzCopySourceIfNoneMatch = "x-amz-copy-source-if-none-match" + AmzCopySourceIfMatch = "x-amz-copy-source-if-match" + + AmzCopySource = "X-Amz-Copy-Source" + AmzCopySourceVersionID = "X-Amz-Copy-Source-Version-Id" + AmzCopySourceRange = "X-Amz-Copy-Source-Range" + AmzMetadataDirective = "X-Amz-Metadata-Directive" + AmzObjectLockMode = "X-Amz-Object-Lock-Mode" + AmzObjectLockRetainUntilDate = "X-Amz-Object-Lock-Retain-Until-Date" + AmzObjectLockLegalHold = 
"X-Amz-Object-Lock-Legal-Hold" + AmzObjectLockBypassGovernance = "X-Amz-Bypass-Governance-Retention" + AmzBucketReplicationStatus = "X-Amz-Replication-Status" + AmzSnowballExtract = "X-Amz-Meta-Snowball-Auto-Extract" + + // Multipart parts count + AmzMpPartsCount = "x-amz-mp-parts-count" + + // Object date/time of expiration + AmzExpiration = "x-amz-expiration" + + // Dummy putBucketACL + AmzACL = "x-amz-acl" + + // Signature V4 related contants. + AmzContentSha256 = "X-Amz-Content-Sha256" + AmzDate = "X-Amz-Date" + AmzAlgorithm = "X-Amz-Algorithm" + AmzExpires = "X-Amz-Expires" + AmzSignedHeaders = "X-Amz-SignedHeaders" + AmzSignature = "X-Amz-Signature" + AmzCredential = "X-Amz-Credential" + AmzSecurityToken = "X-Amz-Security-Token" + AmzDecodedContentLength = "X-Amz-Decoded-Content-Length" + + AmzMetaUnencryptedContentLength = "X-Amz-Meta-X-Amz-Unencrypted-Content-Length" + AmzMetaUnencryptedContentMD5 = "X-Amz-Meta-X-Amz-Unencrypted-Content-Md5" + + // AWS server-side encryption headers for SSE-S3, SSE-KMS and SSE-C. 
+ AmzServerSideEncryption = "X-Amz-Server-Side-Encryption" + AmzServerSideEncryptionKmsID = AmzServerSideEncryption + "-Aws-Kms-Key-Id" + AmzServerSideEncryptionKmsContext = AmzServerSideEncryption + "-Context" + AmzServerSideEncryptionCustomerAlgorithm = AmzServerSideEncryption + "-Customer-Algorithm" + AmzServerSideEncryptionCustomerKey = AmzServerSideEncryption + "-Customer-Key" + AmzServerSideEncryptionCustomerKeyMD5 = AmzServerSideEncryption + "-Customer-Key-Md5" + AmzServerSideEncryptionCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" + AmzServerSideEncryptionCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" + AmzServerSideEncryptionCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" + + AmzEncryptionAES = "AES256" + AmzEncryptionKMS = "aws:kms" + + // Signature v2 related constants + AmzSignatureV2 = "Signature" + AmzAccessKeyID = "AWSAccessKeyId" + + // Response request id. + AmzRequestID = "x-amz-request-id" +) + +// Standard S3 HTTP response constants +const ( + LastModified = "Last-Modified" + Date = "Date" + ETag = "ETag" + ContentType = "Content-Type" + ContentMD5 = "Content-Md5" + ContentEncoding = "Content-Encoding" + Expires = "Expires" + ContentLength = "Content-Length" + ContentLanguage = "Content-Language" + ContentRange = "Content-Range" + Connection = "Connection" + AcceptRanges = "Accept-Ranges" + AmzBucketRegion = "X-Amz-Bucket-Region" + ServerInfo = "Server" + RetryAfter = "Retry-After" + Location = "Location" + CacheControl = "Cache-Control" + ContentDisposition = "Content-Disposition" + Authorization = "Authorization" + Action = "Action" + Range = "Range" +) + +//object const +const ( + MaxObjectSize = 5 * humanize.TiByte + + // Minimum Part size for multipart upload is 5MiB + MinPartSize = 5 * humanize.MiByte + + // Maximum Part size for multipart upload is 5GiB + MaxPartSize = 5 * humanize.GiByte + + // Maximum Part ID for multipart upload is 10000 
+ // (Acceptable values range from 1 to 10000 inclusive) + MaxPartID = 10000 + + MaxObjectList = 1000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse. + MaxDeleteList = 1000 // Limit number of objects deleted in a delete call. + MaxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse. + MaxPartsList = 10000 // Limit number of parts in a listPartsResponse. +) + +// Common http query params S3 API +const ( + VersionID = "versionId" + + PartNumber = "partNumber" + + UploadID = "uploadId" +) + +// limit +const ( + // The maximum allowed time difference between the incoming request + // date and server date during signature verification. + GlobalMaxSkewTime = 15 * time.Minute // 15 minutes skew allowed. +) diff --git a/s3/handlers/accesskey/accesskey.go b/s3/handlers/accesskey/accesskey.go new file mode 100644 index 000000000..9fe8cc209 --- /dev/null +++ b/s3/handlers/accesskey/accesskey.go @@ -0,0 +1,178 @@ +package accesskey + +import ( + "errors" + "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/transaction/storage" + "github.com/bittorrent/go-btfs/utils" + "github.com/google/uuid" + "sync" + "time" +) + +const ( + defaultSecretLength = 32 + defaultStoreKeyPrefix = "access-keys:" +) + +var _ handlers.AccessKeyer = (*AccessKey)(nil) + +type AccessKey struct { + secretLength int + storeKeyPrefix string + stateStore handlers.StateStorer + locks sync.Map +} + +func NewAccessKey(store handlers.StateStorer, options ...Option) (ack *AccessKey) { + ack = &AccessKey{ + secretLength: defaultSecretLength, + storeKeyPrefix: defaultStoreKeyPrefix, + stateStore: store, + locks: sync.Map{}, + } + for _, option := range options { + option(ack) + } + return +} + +func (ack *AccessKey) Generate() (record *handlers.AccessKeyRecord, err error) { + now := time.Now() + record = &handlers.AccessKeyRecord{ + Key: ack.newKey(), + Secret: ack.newSecret(), + Enable: true, + IsDeleted: false, + CreatedAt: now, + 
UpdatedAt: now, + } + err = ack.stateStore.Put(ack.getStoreKey(record.Key), record) + return +} + +func (ack *AccessKey) Enable(key string) (err error) { + enable := true + err = ack.update(key, &updateArgs{ + Enable: &enable, + }) + return +} + +func (ack *AccessKey) Disable(key string) (err error) { + enable := false + err = ack.update(key, &updateArgs{ + Enable: &enable, + }) + return +} + +func (ack *AccessKey) Reset(key string) (err error) { + secret := ack.newSecret() + err = ack.update(key, &updateArgs{ + Secret: &secret, + }) + return +} + +func (ack *AccessKey) Delete(key string) (err error) { + isDelete := true + err = ack.update(key, &updateArgs{ + IsDelete: &isDelete, + }) + return +} + +func (ack *AccessKey) Get(key string) (record *handlers.AccessKeyRecord, err error) { + record = &handlers.AccessKeyRecord{} + err = ack.stateStore.Get(ack.getStoreKey(key), record) + if err != nil && !errors.Is(err, handlers.ErrStateStoreNotFound) { + return + } + if errors.Is(err, handlers.ErrStateStoreNotFound) || record.IsDeleted { + err = handlers.ErrAccessKeyIsNotFound + } + return +} + +func (ack *AccessKey) List() (list []*handlers.AccessKeyRecord, err error) { + err = ack.stateStore.Iterate(ack.storeKeyPrefix, func(key, _ []byte) (stop bool, er error) { + record := &handlers.AccessKeyRecord{} + er = ack.stateStore.Get(string(key), record) + if er != nil { + return + } + if record.IsDeleted { + return + } + list = append(list, record) + return + }) + return +} + +func (ack *AccessKey) newKey() (key string) { + key = uuid.NewString() + return +} + +func (ack *AccessKey) newSecret() (secret string) { + secret = utils.RandomString(ack.secretLength) + return +} + +func (ack *AccessKey) getStoreKey(key string) (storeKey string) { + storeKey = ack.storeKeyPrefix + key + return +} + +func (ack *AccessKey) lock(key string) (unlock func()) { + loaded := true + for loaded { + _, loaded = ack.locks.LoadOrStore(key, nil) + time.Sleep(10 * time.Millisecond) + } + unlock = 
func() { + ack.locks.Delete(key) + } + return +} + +type updateArgs struct { + Enable *bool + Secret *string + IsDelete *bool +} + +func (ack *AccessKey) update(key string, args *updateArgs) (err error) { + unlock := ack.lock(key) + defer unlock() + + record := &handlers.AccessKeyRecord{} + stk := ack.getStoreKey(key) + + err = ack.stateStore.Get(stk, record) + if err != nil && !errors.Is(err, storage.ErrNotFound) { + return + } + if errors.Is(err, storage.ErrNotFound) || record.IsDeleted { + err = handlers.ErrAccessKeyIsNotFound + return + } + + if args.Enable != nil { + record.Enable = *args.Enable + } + if args.Secret != nil { + record.Secret = *args.Secret + } + if args.IsDelete != nil { + record.IsDeleted = *args.IsDelete + } + + record.UpdatedAt = time.Now() + + err = ack.stateStore.Put(stk, record) + + return +} diff --git a/s3/handlers/accesskey/instance.go b/s3/handlers/accesskey/instance.go new file mode 100644 index 000000000..f9a203c22 --- /dev/null +++ b/s3/handlers/accesskey/instance.go @@ -0,0 +1,43 @@ +package accesskey + +import ( + "github.com/bittorrent/go-btfs/s3/handlers" +) + +var instance handlers.AccessKeyer + +func InitInstance(storer handlers.StateStorer, options ...Option) { + instance = NewAccessKey(storer, options...) 
+} + +func GetInstance() handlers.AccessKeyer { + return instance +} + +func Generate() (record *handlers.AccessKeyRecord, err error) { + return instance.Generate() +} + +func Enable(key string) (err error) { + return instance.Enable(key) +} + +func Disable(key string) (err error) { + return instance.Disable(key) +} + +func Reset(key string) (err error) { + return instance.Reset(key) +} + +func Delete(key string) (err error) { + return instance.Delete(key) +} + +func Get(key string) (record *handlers.AccessKeyRecord, err error) { + return instance.Get(key) +} + +func List() (list []*handlers.AccessKeyRecord, err error) { + return instance.List() +} diff --git a/s3/handlers/accesskey/options.go b/s3/handlers/accesskey/options.go new file mode 100644 index 000000000..593856d7e --- /dev/null +++ b/s3/handlers/accesskey/options.go @@ -0,0 +1,15 @@ +package accesskey + +type Option func(ack *AccessKey) + +func WithSecretLength(length int) Option { + return func(ack *AccessKey) { + ack.secretLength = length + } +} + +func WithStoreKeyPrefix(prefix string) Option { + return func(ack *AccessKey) { + ack.storeKeyPrefix = prefix + } +} diff --git a/s3/handlers/filestore/local_shell.go b/s3/handlers/filestore/local_shell.go new file mode 100644 index 000000000..97881d0cc --- /dev/null +++ b/s3/handlers/filestore/local_shell.go @@ -0,0 +1,18 @@ +package filestore + +import ( + shell "github.com/bittorrent/go-btfs-api" + "github.com/bittorrent/go-btfs/s3/handlers" +) + +var _ handlers.FileStorer = (*LocalShell)(nil) + +type LocalShell struct { + *shell.Shell +} + +func NewFileStore() *LocalShell { + return &LocalShell{ + Shell: shell.NewLocalShell(), + } +} diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go new file mode 100644 index 000000000..f2c432c8a --- /dev/null +++ b/s3/handlers/handlers.go @@ -0,0 +1,86 @@ +// Package handlers is an implementation of s3.Handlerser +package handlers + +import ( + "github.com/bittorrent/go-btfs/s3" + 
"github.com/bittorrent/go-btfs/s3/consts" + "github.com/rs/cors" + "net/http" +) + +var ( + defaultCorsAllowOrigins = []string{"*"} + defaultCorsAllowHeaders = []string{ + consts.Date, + consts.ETag, + consts.ServerInfo, + consts.Connection, + consts.AcceptRanges, + consts.ContentRange, + consts.ContentEncoding, + consts.ContentLength, + consts.ContentType, + consts.ContentDisposition, + consts.LastModified, + consts.ContentLanguage, + consts.CacheControl, + consts.RetryAfter, + consts.AmzBucketRegion, + consts.Expires, + consts.Authorization, + consts.Action, + consts.Range, + "X-Amz*", + "x-amz*", + "*", + } + defaultCorsAllowMethods = []string{ + http.MethodGet, + http.MethodPut, + http.MethodHead, + http.MethodPost, + http.MethodDelete, + http.MethodOptions, + http.MethodPatch, + } +) + +var _ s3.Handlerser = (*Handlers)(nil) + +type Handlers struct { + corsAllowOrigins []string + corsAllowHeaders []string + corsAllowMethods []string + fileStore FileStorer + stateStore StateStorer + accessKey AccessKeyer +} + +func NewHandlers(fileStore FileStorer, stateStore StateStorer, accessKey AccessKeyer, options ...Option) (handlers *Handlers) { + handlers = &Handlers{ + corsAllowOrigins: defaultCorsAllowOrigins, + corsAllowHeaders: defaultCorsAllowHeaders, + corsAllowMethods: defaultCorsAllowMethods, + fileStore: fileStore, + stateStore: stateStore, + accessKey: accessKey, + } + for _, option := range options { + option(handlers) + } + return +} + +func (s *Handlers) Cors(handler http.Handler) http.Handler { + return cors.New(cors.Options{ + AllowedOrigins: s.corsAllowOrigins, + AllowedMethods: s.corsAllowMethods, + AllowedHeaders: s.corsAllowHeaders, + ExposedHeaders: s.corsAllowHeaders, + AllowCredentials: true, + }).Handler(handler) +} + +func (s *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { + return +} diff --git a/s3/handlers/interface.go b/s3/handlers/interface.go new file mode 100644 index 000000000..bb445bcb7 --- /dev/null +++ 
b/s3/handlers/interface.go @@ -0,0 +1,46 @@ +package handlers + +import ( + "errors" + "io" + "time" +) + +type FileStorer interface { + AddWithOpts(r io.Reader, pin bool, rawLeaves bool) (hash string, err error) + Remove(hash string) (removed bool) + Cat(path string) (readCloser io.ReadCloser, err error) + Unpin(path string) (err error) +} + +type StateStorer interface { + Get(key string, i interface{}) (err error) + Put(key string, i interface{}) (err error) + Delete(key string) (err error) + Iterate(prefix string, iterFunc StateStoreIterFunc) (err error) +} + +type StateStoreIterFunc func(key, value []byte) (stop bool, err error) + +var ErrStateStoreNotFound = errors.New("not found") + +type AccessKeyer interface { + Generate() (record *AccessKeyRecord, err error) + Enable(key string) (err error) + Disable(key string) (err error) + Reset(key string) (err error) + Delete(key string) (err error) + Get(key string) (record *AccessKeyRecord, err error) + List() (list []*AccessKeyRecord, err error) +} + +type AccessKeyRecord struct { + Key string `json:"key"` + Secret string `json:"secret"` + Enable bool `json:"enable"` + IsDeleted bool `json:"is_deleted"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +var ErrAccessKeyIsNotFound = errors.New("access-key is not found") diff --git a/s3/handlers/options.go b/s3/handlers/options.go new file mode 100644 index 000000000..5caa7f8fc --- /dev/null +++ b/s3/handlers/options.go @@ -0,0 +1,9 @@ +package handlers + +type Option func(handlers *Handlers) + +func WithCorsAllowOrigins(origins []string) Option { + return func(handlers *Handlers) { + handlers.corsAllowOrigins = origins + } +} diff --git a/s3/handlers/statestore/storage_proxy.go b/s3/handlers/statestore/storage_proxy.go new file mode 100644 index 000000000..b468fb84f --- /dev/null +++ b/s3/handlers/statestore/storage_proxy.go @@ -0,0 +1,43 @@ +package statestore + +import ( + "errors" + 
"github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/transaction/storage" +) + +var _ handlers.StateStorer = (*StorageProxy)(nil) + +type StorageProxy struct { + proxy storage.StateStorer +} + +func NewStorageStateStoreProxy(proxy storage.StateStorer) *StorageProxy { + return &StorageProxy{ + proxy: proxy, + } +} + +func (s *StorageProxy) Put(key string, val interface{}) (err error) { + return s.proxy.Put(key, val) +} + +func (s *StorageProxy) Get(key string, i interface{}) (err error) { + err = s.proxy.Get(key, i) + if errors.Is(err, storage.ErrNotFound) { + err = handlers.ErrStateStoreNotFound + } + return +} + +func (s *StorageProxy) Delete(key string) (err error) { + err = s.proxy.Delete(key) + if errors.Is(err, storage.ErrNotFound) { + err = handlers.ErrStateStoreNotFound + } + return +} + +func (s *StorageProxy) Iterate(prefix string, iterFunc handlers.StateStoreIterFunc) (err error) { + return s.proxy.Iterate(prefix, storage.StateIterFunc(iterFunc)) +} diff --git a/s3/interface.go b/s3/interface.go new file mode 100644 index 000000000..bc89ec5b4 --- /dev/null +++ b/s3/interface.go @@ -0,0 +1,10 @@ +package s3 + +import ( + "net/http" +) + +type Handlerser interface { + Cors(handler http.Handler) http.Handler + PutObjectHandler(w http.ResponseWriter, r *http.Request) +} diff --git a/s3/options.go b/s3/options.go new file mode 100644 index 000000000..7c20efe5f --- /dev/null +++ b/s3/options.go @@ -0,0 +1,9 @@ +package s3 + +type Option func(*Server) + +func WithAddress(address string) Option { + return func(s *Server) { + s.address = address + } +} diff --git a/s3/server.go b/s3/server.go new file mode 100644 index 000000000..182e95773 --- /dev/null +++ b/s3/server.go @@ -0,0 +1,90 @@ +package s3 + +import ( + "context" + "errors" + "fmt" + "github.com/gorilla/mux" + "net/http" + "sync" +) + +const defaultServerAddress = ":15001" + +var ( + ErrServerStarted = errors.New("server started") + ErrServerNotStarted = errors.New("server not 
started") +) + +type Server struct { + handlers Handlerser + address string + shutdown func() error + mutex sync.Mutex +} + +func NewServer(handlers Handlerser, options ...Option) (s *Server) { + s = &Server{ + handlers: handlers, + address: defaultServerAddress, + shutdown: nil, + mutex: sync.Mutex{}, + } + for _, option := range options { + option(s) + } + return +} + +func (s *Server) Start() (err error) { + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.shutdown != nil { + err = ErrServerStarted + return + } + + httpSvr := &http.Server{ + Addr: s.address, + Handler: s.registerRouter(), + } + + s.shutdown = func() error { + return httpSvr.Shutdown(context.TODO()) + } + + go func() { + fmt.Printf("start s3-compatible-api server\n") + lErr := httpSvr.ListenAndServe() + if lErr != nil && !errors.Is(lErr, http.ErrServerClosed) { + fmt.Printf("start s3-compatible-api server: %v\n", lErr) + } + }() + + return +} + +func (s *Server) Stop() (err error) { + s.mutex.Lock() + defer s.mutex.Unlock() + if s.shutdown == nil { + err = ErrServerNotStarted + return + } + err = s.shutdown() + s.shutdown = nil + fmt.Printf("stoped s3-compatible-api server: %v\n", err) + return +} + +func (s *Server) registerRouter() http.Handler { + root := mux.NewRouter() + + root.Use(s.handlers.Cors) + + bucket := root.PathPrefix("/{bucket}").Subrouter() + bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(s.handlers.PutObjectHandler) + + return root +} diff --git a/s3/utils/random/string.go b/utils/random.go similarity index 85% rename from s3/utils/random/string.go rename to utils/random.go index 90071d6d9..0a402d9c3 100644 --- a/s3/utils/random/string.go +++ b/utils/random.go @@ -1,4 +1,4 @@ -package random +package utils import ( "math/rand" @@ -11,7 +11,7 @@ func init() { var letters = []rune("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") -func NewString(l int) string { +func RandomString(l int) string { b := make([]rune, l) for i := range b { b[i] = 
letters[rand.Intn(len(letters))] From d434b63d1a8b32313ed3fed8d4027fedce81e025 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Thu, 10 Aug 2023 20:09:42 +0800 Subject: [PATCH 018/139] chore: --- s3d/store/bucket.go | 150 ---------------------------------- s3d/store/service_instance.go | 150 +++++++++++++++++++++++++++++++++- statestore/leveldb/leveldb.go | 46 ++++++++++- transaction/storage/store.go | 17 ++++ 4 files changed, 209 insertions(+), 154 deletions(-) delete mode 100644 s3d/store/bucket.go diff --git a/s3d/store/bucket.go b/s3d/store/bucket.go deleted file mode 100644 index 210857e80..000000000 --- a/s3d/store/bucket.go +++ /dev/null @@ -1,150 +0,0 @@ -package store - -import ( - "context" - "github.com/bittorrent/go-btfs/s3d/lock" - "github.com/bittorrent/go-btfs/s3d/uleveldb" - "github.com/syndtr/goleveldb/leveldb" - "time" -) - -const ( - bucketPrefix = "bkt/" -) - -const ( - globalOperationTimeout = 5 * time.Minute - deleteOperationTimeout = 1 * time.Minute -) - -// BucketMetadata contains bucket metadata. -type BucketMetadata struct { - Name string - Region string - Owner string - Acl string - Created time.Time -} - -// NewBucketMetadata creates BucketMetadata with the supplied name and Created to Now. -func NewBucketMetadata(name, region, accessKey, acl string) *BucketMetadata { - return &BucketMetadata{ - Name: name, - Region: region, - Owner: accessKey, - Acl: acl, - Created: time.Now().UTC(), - } -} - -// BucketMetadataSys captures all bucket metadata for a given cluster. -type BucketMetadataSys struct { - db *uleveldb.ULevelDB - nsLock *lock.NsLockMap - emptyBucket func(ctx context.Context, bucket string) (bool, error) -} - -// NewBucketMetadataSys - creates new policy system. -func NewBucketMetadataSys(db *uleveldb.ULevelDB) *BucketMetadataSys { - return &BucketMetadataSys{ - db: db, - nsLock: lock.NewNSLock(), - } -} - -// NewNSLock - initialize a new namespace RWLocker instance. 
-func (sys *BucketMetadataSys) NewNSLock(bucket string) lock.RWLocker { - return sys.nsLock.NewNSLock("meta", bucket) -} - -func (sys *BucketMetadataSys) SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) { - sys.emptyBucket = emptyBucket -} - -// setBucketMeta - sets a new metadata in-db -func (sys *BucketMetadataSys) setBucketMeta(bucket string, meta *BucketMetadata) error { - return sys.db.Put(bucketPrefix+bucket, meta) -} - -// CreateBucket - create a new Bucket -func (sys *BucketMetadataSys) CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error { - lk := sys.NewNSLock(bucket) - lkctx, err := lk.GetLock(ctx, globalOperationTimeout) - if err != nil { - return err - } - ctx = lkctx.Context() - defer lk.Unlock(lkctx.Cancel) - - return sys.setBucketMeta(bucket, NewBucketMetadata(bucket, region, accessKey, acl)) -} - -func (sys *BucketMetadataSys) getBucketMeta(bucket string) (meta BucketMetadata, err error) { - err = sys.db.Get(bucketPrefix+bucket, &meta) - if err == leveldb.ErrNotFound { - err = BucketNotFound{Bucket: bucket, Err: err} - } - return meta, err -} - -// GetBucketMeta metadata for a bucket. -func (sys *BucketMetadataSys) GetBucketMeta(ctx context.Context, bucket string) (meta BucketMetadata, err error) { - lk := sys.NewNSLock(bucket) - lkctx, err := lk.GetRLock(ctx, globalOperationTimeout) - if err != nil { - return BucketMetadata{}, err - } - ctx = lkctx.Context() - defer lk.RUnlock(lkctx.Cancel) - - return sys.getBucketMeta(bucket) -} - -// HasBucket metadata for a bucket. -func (sys *BucketMetadataSys) HasBucket(ctx context.Context, bucket string) bool { - _, err := sys.GetBucketMeta(ctx, bucket) - return err == nil -} - -// DeleteBucket bucket. 
-func (sys *BucketMetadataSys) DeleteBucket(ctx context.Context, bucket string) error { - lk := sys.NewNSLock(bucket) - lkctx, err := lk.GetLock(ctx, deleteOperationTimeout) - if err != nil { - return err - } - ctx = lkctx.Context() - defer lk.Unlock(lkctx.Cancel) - - if _, err = sys.getBucketMeta(bucket); err != nil { - return err - } - - if empty, err := sys.emptyBucket(ctx, bucket); err != nil { - return err - } else if !empty { - return ErrBucketNotEmpty - } - - return sys.db.Delete(bucketPrefix + bucket) -} - -// GetAllBucketsOfUser metadata for all bucket. -func (sys *BucketMetadataSys) GetAllBucketsOfUser(ctx context.Context, username string) ([]BucketMetadata, error) { - var m []BucketMetadata - all, err := sys.db.ReadAllChan(ctx, bucketPrefix, "") - if err != nil { - return nil, err - } - for entry := range all { - data := BucketMetadata{} - if err = entry.UnmarshalValue(&data); err != nil { - continue - } - if data.Owner != username { - continue - } - m = append(m, data) - } - return m, nil -} diff --git a/s3d/store/service_instance.go b/s3d/store/service_instance.go index aa4d9317d..13cbd96b5 100644 --- a/s3d/store/service_instance.go +++ b/s3d/store/service_instance.go @@ -1,5 +1,151 @@ package store -//bucket.go +import ( + "context" + "time" + + "github.com/bittorrent/go-btfs/s3d/lock" + "github.com/bittorrent/go-btfs/transaction/storage" + "github.com/syndtr/goleveldb/leveldb" +) -//bucket_acl.go +const ( + bucketPrefix = "bkt/" +) + +const ( + globalOperationTimeout = 5 * time.Minute + deleteOperationTimeout = 1 * time.Minute +) + +// BucketMetadata contains bucket metadata. +type BucketMetadata struct { + Name string + Region string + Owner string + Acl string + Created time.Time +} + +// NewBucketMetadata creates BucketMetadata with the supplied name and Created to Now. 
+func NewBucketMetadata(name, region, accessKey, acl string) *BucketMetadata { + return &BucketMetadata{ + Name: name, + Region: region, + Owner: accessKey, + Acl: acl, + Created: time.Now().UTC(), + } +} + +// BucketMetadataSys captures all bucket metadata for a given cluster. +type BucketMetadataSys struct { + db storage.StateStorer + nsLock *lock.NsLockMap + emptyBucket func(ctx context.Context, bucket string) (bool, error) +} + +// NewBucketMetadataSys - creates new policy system. +func NewBucketMetadataSys(db storage.StateStorer) *BucketMetadataSys { + return &BucketMetadataSys{ + db: db, + nsLock: lock.NewNSLock(), + } +} + +// NewNSLock - initialize a new namespace RWLocker instance. +func (sys *BucketMetadataSys) NewNSLock(bucket string) lock.RWLocker { + return sys.nsLock.NewNSLock("meta", bucket) +} + +func (sys *BucketMetadataSys) SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) { + sys.emptyBucket = emptyBucket +} + +// setBucketMeta - sets a new metadata in-db +func (sys *BucketMetadataSys) setBucketMeta(bucket string, meta *BucketMetadata) error { + return sys.db.Put(bucketPrefix+bucket, meta) +} + +// CreateBucket - create a new Bucket +func (sys *BucketMetadataSys) CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error { + lk := sys.NewNSLock(bucket) + lkctx, err := lk.GetLock(ctx, globalOperationTimeout) + if err != nil { + return err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx.Cancel) + + return sys.setBucketMeta(bucket, NewBucketMetadata(bucket, region, accessKey, acl)) +} + +func (sys *BucketMetadataSys) getBucketMeta(bucket string) (meta BucketMetadata, err error) { + err = sys.db.Get(bucketPrefix+bucket, &meta) + if err == leveldb.ErrNotFound { + err = BucketNotFound{Bucket: bucket, Err: err} + } + return meta, err +} + +// GetBucketMeta metadata for a bucket. 
+func (sys *BucketMetadataSys) GetBucketMeta(ctx context.Context, bucket string) (meta BucketMetadata, err error) { + lk := sys.NewNSLock(bucket) + lkctx, err := lk.GetRLock(ctx, globalOperationTimeout) + if err != nil { + return BucketMetadata{}, err + } + ctx = lkctx.Context() + defer lk.RUnlock(lkctx.Cancel) + + return sys.getBucketMeta(bucket) +} + +// HasBucket metadata for a bucket. +func (sys *BucketMetadataSys) HasBucket(ctx context.Context, bucket string) bool { + _, err := sys.GetBucketMeta(ctx, bucket) + return err == nil +} + +// DeleteBucket bucket. +func (sys *BucketMetadataSys) DeleteBucket(ctx context.Context, bucket string) error { + lk := sys.NewNSLock(bucket) + lkctx, err := lk.GetLock(ctx, deleteOperationTimeout) + if err != nil { + return err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx.Cancel) + + if _, err = sys.getBucketMeta(bucket); err != nil { + return err + } + + if empty, err := sys.emptyBucket(ctx, bucket); err != nil { + return err + } else if !empty { + return ErrBucketNotEmpty + } + + return sys.db.Delete(bucketPrefix + bucket) +} + +// GetAllBucketsOfUser metadata for all bucket. 
+func (sys *BucketMetadataSys) GetAllBucketsOfUser(ctx context.Context, username string) ([]BucketMetadata, error) { + var m []BucketMetadata + all, err := sys.db.ReadAllChan(ctx, bucketPrefix, "") + if err != nil { + return nil, err + } + for entry := range all { + data := BucketMetadata{} + if err = entry.UnmarshalValue(&data); err != nil { + continue + } + if data.Owner != username { + continue + } + m = append(m, data) + } + return m, nil +} diff --git a/statestore/leveldb/leveldb.go b/statestore/leveldb/leveldb.go index 3a6de46a8..090f6936c 100644 --- a/statestore/leveldb/leveldb.go +++ b/statestore/leveldb/leveldb.go @@ -1,19 +1,23 @@ package leveldb import ( + "context" "encoding" "encoding/json" "errors" "fmt" "github.com/bittorrent/go-btfs/transaction/storage" - "github.com/syndtr/goleveldb/leveldb" - ldberr "github.com/syndtr/goleveldb/leveldb/errors" logging "github.com/ipfs/go-log" + "github.com/syndtr/goleveldb/leveldb" ldb "github.com/syndtr/goleveldb/leveldb" + ldberr "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" ldbs "github.com/syndtr/goleveldb/leveldb/storage" "github.com/syndtr/goleveldb/leveldb/util" + "go.uber.org/zap/buffer" ) var log = logging.Logger("leveldb") @@ -171,3 +175,41 @@ func (s *store) DB() *leveldb.DB { func (s *store) Close() error { return s.db.Close() } + +// NewIterator /** +func (l *store) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { + return l.db.NewIterator(slice, ro) +} + +//ReadAllChan read all key value +func (l *store) ReadAllChan(ctx context.Context, prefix string, seekKey string) (<-chan *storage.Entry, error) { + ch := make(chan *storage.Entry) + var slice *util.Range + if prefix != "" { + slice = util.BytesPrefix([]byte(prefix)) + } + iter := l.NewIterator(slice, nil) + if seekKey != "" { + iter.Seek([]byte(seekKey)) + } + go func() { + defer func() { + iter.Release() + close(ch) + }() + for 
iter.Next() { + key := string(iter.Key()) + buf := buffer.Buffer{} + buf.Write(iter.Value()) + select { + case <-ctx.Done(): + return + case ch <- &storage.Entry{ + Key: key, + Value: buf.Bytes(), + }: + } + } + }() + return ch, nil +} diff --git a/transaction/storage/store.go b/transaction/storage/store.go index d6df4d3e8..440b254a9 100644 --- a/transaction/storage/store.go +++ b/transaction/storage/store.go @@ -1,6 +1,9 @@ package storage import ( + "context" + "encoding" + "encoding/json" "errors" "io" @@ -158,6 +161,19 @@ var ( // SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan Descriptor, closed <-chan struct{}, stop func()) //} +// Entry 特别注意:Entry是否需要专门处理下,在ReadAllChan解析出来数据的时候 +type Entry struct { + Key string + Value []byte +} + +func (e *Entry) UnmarshalValue(value interface{}) error { + if unmarshaler, ok := value.(encoding.BinaryUnmarshaler); ok { + return unmarshaler.UnmarshalBinary(e.Value) + } + return json.Unmarshal(e.Value, value) +} + // StateStorer defines methods required to get, set, delete values for different keys // and close the underlying resources. type StateStorer interface { @@ -165,6 +181,7 @@ type StateStorer interface { Put(key string, i interface{}) (err error) Delete(key string) (err error) Iterate(prefix string, iterFunc StateIterFunc) (err error) + ReadAllChan(ctx context.Context, prefix string, seekKey string) (<-chan *Entry, error) // DB returns the underlying DB storage. 
DB() *leveldb.DB io.Closer From 3d9d343166c3152f0d781b74f092b9b4a485bde4 Mon Sep 17 00:00:00 2001 From: steve Date: Fri, 11 Aug 2023 14:27:19 +0800 Subject: [PATCH 019/139] optmize: code structure --- cmd/btfs/daemon.go | 18 ++++++- core/commands/accesskey.go | 2 +- s3/handlers/accesskey/instance.go | 43 ---------------- s3/handlers/handlers.go | 21 +++++--- s3/{interface.go => interfaces.go} | 0 .../filestore/local_shell.go | 6 +-- s3/providers/interface.go | 29 +++++++++++ s3/providers/providers/options.go | 3 ++ s3/providers/providers/providers.go | 29 +++++++++++ .../statestore/storage_proxy.go | 12 ++--- .../accesskey/accesskey.go | 49 ++++++++++--------- s3/services/accesskey/instance.go | 49 +++++++++++++++++++ .../accesskey/options.go | 0 s3/{handlers => services}/interface.go | 42 ++++++++-------- s3/services/sign/options.go | 3 ++ s3/services/sign/sign.go | 25 ++++++++++ 16 files changed, 224 insertions(+), 107 deletions(-) delete mode 100644 s3/handlers/accesskey/instance.go rename s3/{interface.go => interfaces.go} (100%) rename s3/{handlers => providers}/filestore/local_shell.go (57%) create mode 100644 s3/providers/interface.go create mode 100644 s3/providers/providers/options.go create mode 100644 s3/providers/providers/providers.go rename s3/{handlers => providers}/statestore/storage_proxy.go (64%) rename s3/{handlers => services}/accesskey/accesskey.go (65%) create mode 100644 s3/services/accesskey/instance.go rename s3/{handlers => services}/accesskey/options.go (100%) rename s3/{handlers => services}/interface.go (51%) create mode 100644 s3/services/sign/options.go create mode 100644 s3/services/sign/sign.go diff --git a/cmd/btfs/daemon.go b/cmd/btfs/daemon.go index 03d5ffd3d..6564254e0 100644 --- a/cmd/btfs/daemon.go +++ b/cmd/btfs/daemon.go @@ -7,8 +7,11 @@ import ( "errors" _ "expvar" "fmt" - "github.com/bittorrent/go-btfs/s3/handlers/accesskey" - s3statestore "github.com/bittorrent/go-btfs/s3/handlers/statestore" + 
"github.com/bittorrent/go-btfs/s3" + "github.com/bittorrent/go-btfs/s3/providers/filestore" + "github.com/bittorrent/go-btfs/s3/providers/providers" + s3statestore "github.com/bittorrent/go-btfs/s3/providers/statestore" + "github.com/bittorrent/go-btfs/s3/services/accesskey" "io/ioutil" "math/rand" "net" @@ -1463,3 +1466,14 @@ func CheckExistLastOnlineReportV2(cfg *config.Config, configRoot string, chainId } return nil } + +func buildS3Providers(storageStore storage.StateStorer) *providers.Providers { + return providers.NewProviders( + s3statestore.NewStorageStateStoreProxy(storageStore), + filestore.NewLocalShell(), + ) +} + +func buildS3Server(providers providers.Providers, address string, corsAllowHeaders []string) *s3.Server { + +} diff --git a/core/commands/accesskey.go b/core/commands/accesskey.go index 6279078ef..0b3a51928 100644 --- a/core/commands/accesskey.go +++ b/core/commands/accesskey.go @@ -4,7 +4,7 @@ import ( "errors" cmds "github.com/bittorrent/go-btfs-cmds" "github.com/bittorrent/go-btfs/core/commands/cmdenv" - "github.com/bittorrent/go-btfs/s3/handlers/accesskey" + "github.com/bittorrent/go-btfs/s3/services/accesskey" ) var AccessKeyCmd = &cmds.Command{ diff --git a/s3/handlers/accesskey/instance.go b/s3/handlers/accesskey/instance.go deleted file mode 100644 index f9a203c22..000000000 --- a/s3/handlers/accesskey/instance.go +++ /dev/null @@ -1,43 +0,0 @@ -package accesskey - -import ( - "github.com/bittorrent/go-btfs/s3/handlers" -) - -var instance handlers.AccessKeyer - -func InitInstance(storer handlers.StateStorer, options ...Option) { - instance = NewAccessKey(storer, options...) 
-} - -func GetInstance() handlers.AccessKeyer { - return instance -} - -func Generate() (record *handlers.AccessKeyRecord, err error) { - return instance.Generate() -} - -func Enable(key string) (err error) { - return instance.Enable(key) -} - -func Disable(key string) (err error) { - return instance.Disable(key) -} - -func Reset(key string) (err error) { - return instance.Reset(key) -} - -func Delete(key string) (err error) { - return instance.Delete(key) -} - -func Get(key string) (record *handlers.AccessKeyRecord, err error) { - return instance.Get(key) -} - -func List() (list []*handlers.AccessKeyRecord, err error) { - return instance.List() -} diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index f2c432c8a..d169b7061 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -4,6 +4,7 @@ package handlers import ( "github.com/bittorrent/go-btfs/s3" "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/services" "github.com/rs/cors" "net/http" ) @@ -51,19 +52,25 @@ type Handlers struct { corsAllowOrigins []string corsAllowHeaders []string corsAllowMethods []string - fileStore FileStorer - stateStore StateStorer - accessKey AccessKeyer + authSvc services.SignService + bucketSvc services.BucketService + objectSvc services.ObjectService + multipartSvc services.MultipartService } -func NewHandlers(fileStore FileStorer, stateStore StateStorer, accessKey AccessKeyer, options ...Option) (handlers *Handlers) { +func NewHandlers( + authSvc services.SignService, bucketSvc services.BucketService, + objectSvc services.ObjectService, multipartSvc services.MultipartService, + options ...Option, +) (handlers *Handlers) { handlers = &Handlers{ corsAllowOrigins: defaultCorsAllowOrigins, corsAllowHeaders: defaultCorsAllowHeaders, corsAllowMethods: defaultCorsAllowMethods, - fileStore: fileStore, - stateStore: stateStore, - accessKey: accessKey, + authSvc: authSvc, + bucketSvc: bucketSvc, + objectSvc: objectSvc, + multipartSvc: 
multipartSvc, } for _, option := range options { option(handlers) diff --git a/s3/interface.go b/s3/interfaces.go similarity index 100% rename from s3/interface.go rename to s3/interfaces.go diff --git a/s3/handlers/filestore/local_shell.go b/s3/providers/filestore/local_shell.go similarity index 57% rename from s3/handlers/filestore/local_shell.go rename to s3/providers/filestore/local_shell.go index 97881d0cc..9dbd8e287 100644 --- a/s3/handlers/filestore/local_shell.go +++ b/s3/providers/filestore/local_shell.go @@ -2,16 +2,16 @@ package filestore import ( shell "github.com/bittorrent/go-btfs-api" - "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/providers" ) -var _ handlers.FileStorer = (*LocalShell)(nil) +var _ providers.FileStorer = (*LocalShell)(nil) type LocalShell struct { *shell.Shell } -func NewFileStore() *LocalShell { +func NewLocalShell() *LocalShell { return &LocalShell{ Shell: shell.NewLocalShell(), } diff --git a/s3/providers/interface.go b/s3/providers/interface.go new file mode 100644 index 000000000..86b8ab806 --- /dev/null +++ b/s3/providers/interface.go @@ -0,0 +1,29 @@ +package providers + +import ( + "errors" + "io" +) + +type Providerser interface { + GetFileStore() FileStorer + GetStateStore() StateStorer +} + +type FileStorer interface { + AddWithOpts(r io.Reader, pin bool, rawLeaves bool) (hash string, err error) + Remove(hash string) (removed bool) + Cat(path string) (readCloser io.ReadCloser, err error) + Unpin(path string) (err error) +} + +type StateStorer interface { + Get(key string, i interface{}) (err error) + Put(key string, i interface{}) (err error) + Delete(key string) (err error) + Iterate(prefix string, iterFunc StateStoreIterFunc) (err error) +} + +type StateStoreIterFunc func(key, value []byte) (stop bool, err error) + +var ErrStateStoreNotFound = errors.New("not found") diff --git a/s3/providers/providers/options.go b/s3/providers/providers/options.go new file mode 100644 index 
000000000..f8792d8b7 --- /dev/null +++ b/s3/providers/providers/options.go @@ -0,0 +1,3 @@ +package providers + +type Option func(providers *Providers) diff --git a/s3/providers/providers/providers.go b/s3/providers/providers/providers.go new file mode 100644 index 000000000..97062e565 --- /dev/null +++ b/s3/providers/providers/providers.go @@ -0,0 +1,29 @@ +package providers + +import "github.com/bittorrent/go-btfs/s3/providers" + +var _ providers.Providerser = (*Providers)(nil) + +type Providers struct { + statestore providers.StateStorer + filestore providers.FileStorer +} + +func NewProviders(statestore providers.StateStorer, filestore providers.FileStorer, options ...Option) *Providers { + p := &Providers{ + statestore: statestore, + filestore: filestore, + } + for _, option := range options { + option(p) + } + return p +} + +func (p *Providers) GetStateStore() providers.StateStorer { + return p.statestore +} + +func (p *Providers) GetFileStore() providers.FileStorer { + return p.filestore +} diff --git a/s3/handlers/statestore/storage_proxy.go b/s3/providers/statestore/storage_proxy.go similarity index 64% rename from s3/handlers/statestore/storage_proxy.go rename to s3/providers/statestore/storage_proxy.go index b468fb84f..947e02ad0 100644 --- a/s3/handlers/statestore/storage_proxy.go +++ b/s3/providers/statestore/storage_proxy.go @@ -2,17 +2,17 @@ package statestore import ( "errors" - "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/providers" "github.com/bittorrent/go-btfs/transaction/storage" ) -var _ handlers.StateStorer = (*StorageProxy)(nil) +var _ providers.StateStorer = (*StorageProxy)(nil) type StorageProxy struct { proxy storage.StateStorer } -func NewStorageStateStoreProxy(proxy storage.StateStorer) *StorageProxy { +func NewStorageStateStoreProxy(proxy storage.StateStorer) providers.StateStorer { return &StorageProxy{ proxy: proxy, } @@ -25,7 +25,7 @@ func (s *StorageProxy) Put(key string, val interface{}) (err 
error) { func (s *StorageProxy) Get(key string, i interface{}) (err error) { err = s.proxy.Get(key, i) if errors.Is(err, storage.ErrNotFound) { - err = handlers.ErrStateStoreNotFound + err = providers.ErrStateStoreNotFound } return } @@ -33,11 +33,11 @@ func (s *StorageProxy) Get(key string, i interface{}) (err error) { func (s *StorageProxy) Delete(key string) (err error) { err = s.proxy.Delete(key) if errors.Is(err, storage.ErrNotFound) { - err = handlers.ErrStateStoreNotFound + err = providers.ErrStateStoreNotFound } return } -func (s *StorageProxy) Iterate(prefix string, iterFunc handlers.StateStoreIterFunc) (err error) { +func (s *StorageProxy) Iterate(prefix string, iterFunc providers.StateStoreIterFunc) (err error) { return s.proxy.Iterate(prefix, storage.StateIterFunc(iterFunc)) } diff --git a/s3/handlers/accesskey/accesskey.go b/s3/services/accesskey/accesskey.go similarity index 65% rename from s3/handlers/accesskey/accesskey.go rename to s3/services/accesskey/accesskey.go index 9fe8cc209..48b81f798 100644 --- a/s3/handlers/accesskey/accesskey.go +++ b/s3/services/accesskey/accesskey.go @@ -2,7 +2,8 @@ package accesskey import ( "errors" - "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/providers" + "github.com/bittorrent/go-btfs/s3/services" "github.com/bittorrent/go-btfs/transaction/storage" "github.com/bittorrent/go-btfs/utils" "github.com/google/uuid" @@ -15,31 +16,31 @@ const ( defaultStoreKeyPrefix = "access-keys:" ) -var _ handlers.AccessKeyer = (*AccessKey)(nil) +var _ services.AccessKeyService = (*AccessKey)(nil) type AccessKey struct { + providers providers.Providerser secretLength int storeKeyPrefix string - stateStore handlers.StateStorer locks sync.Map } -func NewAccessKey(store handlers.StateStorer, options ...Option) (ack *AccessKey) { - ack = &AccessKey{ +func NewAccessKey(providers providers.Providerser, options ...Option) services.AccessKeyService { + ack := &AccessKey{ + providers: providers, 
secretLength: defaultSecretLength, storeKeyPrefix: defaultStoreKeyPrefix, - stateStore: store, locks: sync.Map{}, } for _, option := range options { option(ack) } - return + return ack } -func (ack *AccessKey) Generate() (record *handlers.AccessKeyRecord, err error) { +func (ack *AccessKey) Generate() (record *services.AccessKeyRecord, err error) { now := time.Now() - record = &handlers.AccessKeyRecord{ + record = &services.AccessKeyRecord{ Key: ack.newKey(), Secret: ack.newSecret(), Enable: true, @@ -47,7 +48,7 @@ func (ack *AccessKey) Generate() (record *handlers.AccessKeyRecord, err error) { CreatedAt: now, UpdatedAt: now, } - err = ack.stateStore.Put(ack.getStoreKey(record.Key), record) + err = ack.providers.GetStateStore().Put(ack.getStoreKey(record.Key), record) return } @@ -83,22 +84,22 @@ func (ack *AccessKey) Delete(key string) (err error) { return } -func (ack *AccessKey) Get(key string) (record *handlers.AccessKeyRecord, err error) { - record = &handlers.AccessKeyRecord{} - err = ack.stateStore.Get(ack.getStoreKey(key), record) - if err != nil && !errors.Is(err, handlers.ErrStateStoreNotFound) { +func (ack *AccessKey) Get(key string) (record *services.AccessKeyRecord, err error) { + record = &services.AccessKeyRecord{} + err = ack.providers.GetStateStore().Get(ack.getStoreKey(key), record) + if err != nil && !errors.Is(err, providers.ErrStateStoreNotFound) { return } - if errors.Is(err, handlers.ErrStateStoreNotFound) || record.IsDeleted { - err = handlers.ErrAccessKeyIsNotFound + if errors.Is(err, providers.ErrStateStoreNotFound) || record.IsDeleted { + err = services.ErrAccessKeyIsNotFound } return } -func (ack *AccessKey) List() (list []*handlers.AccessKeyRecord, err error) { - err = ack.stateStore.Iterate(ack.storeKeyPrefix, func(key, _ []byte) (stop bool, er error) { - record := &handlers.AccessKeyRecord{} - er = ack.stateStore.Get(string(key), record) +func (ack *AccessKey) List() (list []*services.AccessKeyRecord, err error) { + err = 
ack.providers.GetStateStore().Iterate(ack.storeKeyPrefix, func(key, _ []byte) (stop bool, er error) { + record := &services.AccessKeyRecord{} + er = ack.providers.GetStateStore().Get(string(key), record) if er != nil { return } @@ -148,15 +149,15 @@ func (ack *AccessKey) update(key string, args *updateArgs) (err error) { unlock := ack.lock(key) defer unlock() - record := &handlers.AccessKeyRecord{} + record := &services.AccessKeyRecord{} stk := ack.getStoreKey(key) - err = ack.stateStore.Get(stk, record) + err = ack.providers.GetStateStore().Get(stk, record) if err != nil && !errors.Is(err, storage.ErrNotFound) { return } if errors.Is(err, storage.ErrNotFound) || record.IsDeleted { - err = handlers.ErrAccessKeyIsNotFound + err = services.ErrAccessKeyIsNotFound return } @@ -172,7 +173,7 @@ func (ack *AccessKey) update(key string, args *updateArgs) (err error) { record.UpdatedAt = time.Now() - err = ack.stateStore.Put(stk, record) + err = ack.providers.GetStateStore().Put(stk, record) return } diff --git a/s3/services/accesskey/instance.go b/s3/services/accesskey/instance.go new file mode 100644 index 000000000..8236d7c52 --- /dev/null +++ b/s3/services/accesskey/instance.go @@ -0,0 +1,49 @@ +package accesskey + +import ( + "github.com/bittorrent/go-btfs/s3/providers" + "github.com/bittorrent/go-btfs/s3/services" + "sync" +) + +var instance services.AccessKeyService + +var once sync.Once + +func InitInstance(providers providers.Providerser, options ...Option) { + once.Do(func() { + instance = NewAccessKey(providers, options...) 
+ }) +} + +func GetInstance() services.AccessKeyService { + return instance +} + +func Generate() (record *services.AccessKeyRecord, err error) { + return instance.Generate() +} + +func Enable(key string) (err error) { + return instance.Enable(key) +} + +func Disable(key string) (err error) { + return instance.Disable(key) +} + +func Reset(key string) (err error) { + return instance.Reset(key) +} + +func Delete(key string) (err error) { + return instance.Delete(key) +} + +func Get(key string) (record *services.AccessKeyRecord, err error) { + return instance.Get(key) +} + +func List() (list []*services.AccessKeyRecord, err error) { + return instance.List() +} diff --git a/s3/handlers/accesskey/options.go b/s3/services/accesskey/options.go similarity index 100% rename from s3/handlers/accesskey/options.go rename to s3/services/accesskey/options.go diff --git a/s3/handlers/interface.go b/s3/services/interface.go similarity index 51% rename from s3/handlers/interface.go rename to s3/services/interface.go index bb445bcb7..0dbbe0c72 100644 --- a/s3/handlers/interface.go +++ b/s3/services/interface.go @@ -1,30 +1,12 @@ -package handlers +package services import ( "errors" - "io" + "net/http" "time" ) -type FileStorer interface { - AddWithOpts(r io.Reader, pin bool, rawLeaves bool) (hash string, err error) - Remove(hash string) (removed bool) - Cat(path string) (readCloser io.ReadCloser, err error) - Unpin(path string) (err error) -} - -type StateStorer interface { - Get(key string, i interface{}) (err error) - Put(key string, i interface{}) (err error) - Delete(key string) (err error) - Iterate(prefix string, iterFunc StateStoreIterFunc) (err error) -} - -type StateStoreIterFunc func(key, value []byte) (stop bool, err error) - -var ErrStateStoreNotFound = errors.New("not found") - -type AccessKeyer interface { +type AccessKeyService interface { Generate() (record *AccessKeyRecord, err error) Enable(key string) (err error) Disable(key string) (err error) @@ -44,3 +26,21 @@ 
type AccessKeyRecord struct { } var ErrAccessKeyIsNotFound = errors.New("access-key is not found") + +type BucketService interface { +} + +type ObjectService interface { +} + +type MultipartService interface { +} + +type SignService interface { + Verify(r *http.Request) (err error) +} + +var ( + ErrSignOutdated = errors.New("sign is outdated") + ErrSignKeyIsNotFound = errors.New("key is not found") +) diff --git a/s3/services/sign/options.go b/s3/services/sign/options.go new file mode 100644 index 000000000..b31e097bf --- /dev/null +++ b/s3/services/sign/options.go @@ -0,0 +1,3 @@ +package sign + +type Option func(*Sign) diff --git a/s3/services/sign/sign.go b/s3/services/sign/sign.go new file mode 100644 index 000000000..268fff7fc --- /dev/null +++ b/s3/services/sign/sign.go @@ -0,0 +1,25 @@ +package sign + +import ( + "github.com/bittorrent/go-btfs/s3/providers" + "net/http" +) + +type Sign struct { + providers providers.Providerser +} + +func NewSign(providers providers.Providerser, options ...Option) (sign *Sign) { + sign = &Sign{ + providers: providers, + } + + for _, option := range options { + option(sign) + } + return +} + +func (s *Sign) Verify(r *http.Request) (err error) { + return +} From a4e6f56f51f70bf83464d2ea7c74c8fa55a05b37 Mon Sep 17 00:00:00 2001 From: Steve Date: Fri, 11 Aug 2023 15:24:52 +0800 Subject: [PATCH 020/139] style: s3 code structure --- cmd/btfs/daemon.go | 6 ++-- s3/{ => common}/consts/consts.go | 0 s3/handlers/handlers.go | 25 ++++++++------- .../interface.go => handlers/services.go} | 2 +- s3/providers/filestore/local_shell.go | 4 +-- s3/providers/{providers => }/options.go | 0 s3/providers/providers.go | 31 ++++++++++++++++++ s3/providers/providers/providers.go | 29 ----------------- s3/providers/statestore/storage_proxy.go | 12 +++---- s3/{interfaces.go => server/handlers.go} | 3 +- s3/{ => server}/options.go | 2 +- s3/{ => server}/server.go | 4 +-- s3/services/accesskey/accesskey.go | 32 +++++++++---------- 
s3/services/accesskey/instance.go | 14 ++++---- .../interface.go => services/providers.go} | 2 +- s3/services/sign/sign.go | 14 +++++--- 16 files changed, 95 insertions(+), 85 deletions(-) rename s3/{ => common}/consts/consts.go (100%) rename s3/{services/interface.go => handlers/services.go} (98%) rename s3/providers/{providers => }/options.go (100%) create mode 100644 s3/providers/providers.go delete mode 100644 s3/providers/providers/providers.go rename s3/{interfaces.go => server/handlers.go} (73%) rename s3/{ => server}/options.go (89%) rename s3/{ => server}/server.go (96%) rename s3/{providers/interface.go => services/providers.go} (97%) diff --git a/cmd/btfs/daemon.go b/cmd/btfs/daemon.go index 6564254e0..c01dc99e4 100644 --- a/cmd/btfs/daemon.go +++ b/cmd/btfs/daemon.go @@ -7,10 +7,10 @@ import ( "errors" _ "expvar" "fmt" - "github.com/bittorrent/go-btfs/s3" + "github.com/bittorrent/go-btfs/s3/providers" "github.com/bittorrent/go-btfs/s3/providers/filestore" - "github.com/bittorrent/go-btfs/s3/providers/providers" s3statestore "github.com/bittorrent/go-btfs/s3/providers/statestore" + "github.com/bittorrent/go-btfs/s3/server" "github.com/bittorrent/go-btfs/s3/services/accesskey" "io/ioutil" "math/rand" @@ -1474,6 +1474,6 @@ func buildS3Providers(storageStore storage.StateStorer) *providers.Providers { ) } -func buildS3Server(providers providers.Providers, address string, corsAllowHeaders []string) *s3.Server { +func buildS3Server(providers providers.Providers, address string, corsAllowHeaders []string) *server.Server { } diff --git a/s3/consts/consts.go b/s3/common/consts/consts.go similarity index 100% rename from s3/consts/consts.go rename to s3/common/consts/consts.go diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index d169b7061..4a4867caf 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -2,9 +2,8 @@ package handlers import ( - "github.com/bittorrent/go-btfs/s3" - "github.com/bittorrent/go-btfs/s3/consts" - 
"github.com/bittorrent/go-btfs/s3/services" + "github.com/bittorrent/go-btfs/s3/common/consts" + "github.com/bittorrent/go-btfs/s3/server" "github.com/rs/cors" "net/http" ) @@ -46,28 +45,28 @@ var ( } ) -var _ s3.Handlerser = (*Handlers)(nil) +var _ server.Handlerser = (*Handlers)(nil) type Handlers struct { corsAllowOrigins []string corsAllowHeaders []string corsAllowMethods []string - authSvc services.SignService - bucketSvc services.BucketService - objectSvc services.ObjectService - multipartSvc services.MultipartService + signSvc SignService + bucketSvc BucketService + objectSvc ObjectService + multipartSvc MultipartService } func NewHandlers( - authSvc services.SignService, bucketSvc services.BucketService, - objectSvc services.ObjectService, multipartSvc services.MultipartService, + signSvc SignService, bucketSvc BucketService, + objectSvc ObjectService, multipartSvc MultipartService, options ...Option, ) (handlers *Handlers) { handlers = &Handlers{ corsAllowOrigins: defaultCorsAllowOrigins, corsAllowHeaders: defaultCorsAllowHeaders, corsAllowMethods: defaultCorsAllowMethods, - authSvc: authSvc, + signSvc: signSvc, bucketSvc: bucketSvc, objectSvc: objectSvc, multipartSvc: multipartSvc, @@ -88,6 +87,10 @@ func (s *Handlers) Cors(handler http.Handler) http.Handler { }).Handler(handler) } +func (s *Handlers) Sign(handler http.Handler) http.Handler { + return nil +} + func (s *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { return } diff --git a/s3/services/interface.go b/s3/handlers/services.go similarity index 98% rename from s3/services/interface.go rename to s3/handlers/services.go index 0dbbe0c72..290ea3546 100644 --- a/s3/services/interface.go +++ b/s3/handlers/services.go @@ -1,4 +1,4 @@ -package services +package handlers import ( "errors" diff --git a/s3/providers/filestore/local_shell.go b/s3/providers/filestore/local_shell.go index 9dbd8e287..6653af683 100644 --- a/s3/providers/filestore/local_shell.go +++ 
b/s3/providers/filestore/local_shell.go @@ -2,10 +2,10 @@ package filestore import ( shell "github.com/bittorrent/go-btfs-api" - "github.com/bittorrent/go-btfs/s3/providers" + "github.com/bittorrent/go-btfs/s3/services" ) -var _ providers.FileStorer = (*LocalShell)(nil) +var _ services.FileStorer = (*LocalShell)(nil) type LocalShell struct { *shell.Shell diff --git a/s3/providers/providers/options.go b/s3/providers/options.go similarity index 100% rename from s3/providers/providers/options.go rename to s3/providers/options.go diff --git a/s3/providers/providers.go b/s3/providers/providers.go new file mode 100644 index 000000000..21bdf1ca0 --- /dev/null +++ b/s3/providers/providers.go @@ -0,0 +1,31 @@ +package providers + +import ( + "github.com/bittorrent/go-btfs/s3/services" +) + +var _ services.Providerser = (*Providers)(nil) + +type Providers struct { + statestore services.StateStorer + filestore services.FileStorer +} + +func NewProviders(statestore services.StateStorer, filestore services.FileStorer, options ...Option) (providers *Providers) { + providers = &Providers{ + statestore: statestore, + filestore: filestore, + } + for _, option := range options { + option(providers) + } + return +} + +func (p *Providers) GetStateStore() services.StateStorer { + return p.statestore +} + +func (p *Providers) GetFileStore() services.FileStorer { + return p.filestore +} diff --git a/s3/providers/providers/providers.go b/s3/providers/providers/providers.go deleted file mode 100644 index 97062e565..000000000 --- a/s3/providers/providers/providers.go +++ /dev/null @@ -1,29 +0,0 @@ -package providers - -import "github.com/bittorrent/go-btfs/s3/providers" - -var _ providers.Providerser = (*Providers)(nil) - -type Providers struct { - statestore providers.StateStorer - filestore providers.FileStorer -} - -func NewProviders(statestore providers.StateStorer, filestore providers.FileStorer, options ...Option) *Providers { - p := &Providers{ - statestore: statestore, - filestore: 
filestore, - } - for _, option := range options { - option(p) - } - return p -} - -func (p *Providers) GetStateStore() providers.StateStorer { - return p.statestore -} - -func (p *Providers) GetFileStore() providers.FileStorer { - return p.filestore -} diff --git a/s3/providers/statestore/storage_proxy.go b/s3/providers/statestore/storage_proxy.go index 947e02ad0..704e1177a 100644 --- a/s3/providers/statestore/storage_proxy.go +++ b/s3/providers/statestore/storage_proxy.go @@ -2,17 +2,17 @@ package statestore import ( "errors" - "github.com/bittorrent/go-btfs/s3/providers" + "github.com/bittorrent/go-btfs/s3/services" "github.com/bittorrent/go-btfs/transaction/storage" ) -var _ providers.StateStorer = (*StorageProxy)(nil) +var _ services.StateStorer = (*StorageProxy)(nil) type StorageProxy struct { proxy storage.StateStorer } -func NewStorageStateStoreProxy(proxy storage.StateStorer) providers.StateStorer { +func NewStorageStateStoreProxy(proxy storage.StateStorer) services.StateStorer { return &StorageProxy{ proxy: proxy, } @@ -25,7 +25,7 @@ func (s *StorageProxy) Put(key string, val interface{}) (err error) { func (s *StorageProxy) Get(key string, i interface{}) (err error) { err = s.proxy.Get(key, i) if errors.Is(err, storage.ErrNotFound) { - err = providers.ErrStateStoreNotFound + err = services.ErrStateStoreNotFound } return } @@ -33,11 +33,11 @@ func (s *StorageProxy) Get(key string, i interface{}) (err error) { func (s *StorageProxy) Delete(key string) (err error) { err = s.proxy.Delete(key) if errors.Is(err, storage.ErrNotFound) { - err = providers.ErrStateStoreNotFound + err = services.ErrStateStoreNotFound } return } -func (s *StorageProxy) Iterate(prefix string, iterFunc providers.StateStoreIterFunc) (err error) { +func (s *StorageProxy) Iterate(prefix string, iterFunc services.StateStoreIterFunc) (err error) { return s.proxy.Iterate(prefix, storage.StateIterFunc(iterFunc)) } diff --git a/s3/interfaces.go b/s3/server/handlers.go similarity index 73% 
rename from s3/interfaces.go rename to s3/server/handlers.go index bc89ec5b4..873c1aad0 100644 --- a/s3/interfaces.go +++ b/s3/server/handlers.go @@ -1,4 +1,4 @@ -package s3 +package server import ( "net/http" @@ -6,5 +6,6 @@ import ( type Handlerser interface { Cors(handler http.Handler) http.Handler + Sign(handler http.Handler) http.Handler PutObjectHandler(w http.ResponseWriter, r *http.Request) } diff --git a/s3/options.go b/s3/server/options.go similarity index 89% rename from s3/options.go rename to s3/server/options.go index 7c20efe5f..7b71e719b 100644 --- a/s3/options.go +++ b/s3/server/options.go @@ -1,4 +1,4 @@ -package s3 +package server type Option func(*Server) diff --git a/s3/server.go b/s3/server/server.go similarity index 96% rename from s3/server.go rename to s3/server/server.go index 182e95773..0856442a5 100644 --- a/s3/server.go +++ b/s3/server/server.go @@ -1,4 +1,4 @@ -package s3 +package server import ( "context" @@ -81,7 +81,7 @@ func (s *Server) Stop() (err error) { func (s *Server) registerRouter() http.Handler { root := mux.NewRouter() - root.Use(s.handlers.Cors) + root.Use(s.handlers.Cors, s.handlers.Sign) bucket := root.PathPrefix("/{bucket}").Subrouter() bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(s.handlers.PutObjectHandler) diff --git a/s3/services/accesskey/accesskey.go b/s3/services/accesskey/accesskey.go index 48b81f798..b0f271480 100644 --- a/s3/services/accesskey/accesskey.go +++ b/s3/services/accesskey/accesskey.go @@ -2,7 +2,7 @@ package accesskey import ( "errors" - "github.com/bittorrent/go-btfs/s3/providers" + "github.com/bittorrent/go-btfs/s3/handlers" "github.com/bittorrent/go-btfs/s3/services" "github.com/bittorrent/go-btfs/transaction/storage" "github.com/bittorrent/go-btfs/utils" @@ -16,17 +16,17 @@ const ( defaultStoreKeyPrefix = "access-keys:" ) -var _ services.AccessKeyService = (*AccessKey)(nil) +var _ handlers.AccessKeyService = (*AccessKey)(nil) type AccessKey struct { - providers 
providers.Providerser + providers services.Providerser secretLength int storeKeyPrefix string locks sync.Map } -func NewAccessKey(providers providers.Providerser, options ...Option) services.AccessKeyService { - ack := &AccessKey{ +func NewAccessKey(providers services.Providerser, options ...Option) (ack *AccessKey) { + ack = &AccessKey{ providers: providers, secretLength: defaultSecretLength, storeKeyPrefix: defaultStoreKeyPrefix, @@ -38,9 +38,9 @@ func NewAccessKey(providers providers.Providerser, options ...Option) services.A return ack } -func (ack *AccessKey) Generate() (record *services.AccessKeyRecord, err error) { +func (ack *AccessKey) Generate() (record *handlers.AccessKeyRecord, err error) { now := time.Now() - record = &services.AccessKeyRecord{ + record = &handlers.AccessKeyRecord{ Key: ack.newKey(), Secret: ack.newSecret(), Enable: true, @@ -84,21 +84,21 @@ func (ack *AccessKey) Delete(key string) (err error) { return } -func (ack *AccessKey) Get(key string) (record *services.AccessKeyRecord, err error) { - record = &services.AccessKeyRecord{} +func (ack *AccessKey) Get(key string) (record *handlers.AccessKeyRecord, err error) { + record = &handlers.AccessKeyRecord{} err = ack.providers.GetStateStore().Get(ack.getStoreKey(key), record) - if err != nil && !errors.Is(err, providers.ErrStateStoreNotFound) { + if err != nil && !errors.Is(err, services.ErrStateStoreNotFound) { return } - if errors.Is(err, providers.ErrStateStoreNotFound) || record.IsDeleted { - err = services.ErrAccessKeyIsNotFound + if errors.Is(err, services.ErrStateStoreNotFound) || record.IsDeleted { + err = handlers.ErrAccessKeyIsNotFound } return } -func (ack *AccessKey) List() (list []*services.AccessKeyRecord, err error) { +func (ack *AccessKey) List() (list []*handlers.AccessKeyRecord, err error) { err = ack.providers.GetStateStore().Iterate(ack.storeKeyPrefix, func(key, _ []byte) (stop bool, er error) { - record := &services.AccessKeyRecord{} + record := 
&handlers.AccessKeyRecord{} er = ack.providers.GetStateStore().Get(string(key), record) if er != nil { return @@ -149,7 +149,7 @@ func (ack *AccessKey) update(key string, args *updateArgs) (err error) { unlock := ack.lock(key) defer unlock() - record := &services.AccessKeyRecord{} + record := &handlers.AccessKeyRecord{} stk := ack.getStoreKey(key) err = ack.providers.GetStateStore().Get(stk, record) @@ -157,7 +157,7 @@ func (ack *AccessKey) update(key string, args *updateArgs) (err error) { return } if errors.Is(err, storage.ErrNotFound) || record.IsDeleted { - err = services.ErrAccessKeyIsNotFound + err = handlers.ErrAccessKeyIsNotFound return } diff --git a/s3/services/accesskey/instance.go b/s3/services/accesskey/instance.go index 8236d7c52..e0ac52f3d 100644 --- a/s3/services/accesskey/instance.go +++ b/s3/services/accesskey/instance.go @@ -1,26 +1,26 @@ package accesskey import ( - "github.com/bittorrent/go-btfs/s3/providers" + "github.com/bittorrent/go-btfs/s3/handlers" "github.com/bittorrent/go-btfs/s3/services" "sync" ) -var instance services.AccessKeyService +var instance handlers.AccessKeyService var once sync.Once -func InitInstance(providers providers.Providerser, options ...Option) { +func InitInstance(providers services.Providerser, options ...Option) { once.Do(func() { instance = NewAccessKey(providers, options...) 
}) } -func GetInstance() services.AccessKeyService { +func GetInstance() handlers.AccessKeyService { return instance } -func Generate() (record *services.AccessKeyRecord, err error) { +func Generate() (record *handlers.AccessKeyRecord, err error) { return instance.Generate() } @@ -40,10 +40,10 @@ func Delete(key string) (err error) { return instance.Delete(key) } -func Get(key string) (record *services.AccessKeyRecord, err error) { +func Get(key string) (record *handlers.AccessKeyRecord, err error) { return instance.Get(key) } -func List() (list []*services.AccessKeyRecord, err error) { +func List() (list []*handlers.AccessKeyRecord, err error) { return instance.List() } diff --git a/s3/providers/interface.go b/s3/services/providers.go similarity index 97% rename from s3/providers/interface.go rename to s3/services/providers.go index 86b8ab806..3ab58d2d1 100644 --- a/s3/providers/interface.go +++ b/s3/services/providers.go @@ -1,4 +1,4 @@ -package providers +package services import ( "errors" diff --git a/s3/services/sign/sign.go b/s3/services/sign/sign.go index 268fff7fc..39450a4bd 100644 --- a/s3/services/sign/sign.go +++ b/s3/services/sign/sign.go @@ -1,19 +1,23 @@ package sign import ( - "github.com/bittorrent/go-btfs/s3/providers" + "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/services" "net/http" ) +var _ handlers.SignService = (*Sign)(nil) + type Sign struct { - providers providers.Providerser + providers services.Providerser + accesskeySvc handlers.AccessKeyService } -func NewSign(providers providers.Providerser, options ...Option) (sign *Sign) { +func NewSign(providers services.Providerser, accesskeySvc handlers.AccessKeyService, options ...Option) (sign *Sign) { sign = &Sign{ - providers: providers, + providers: providers, + accesskeySvc: accesskeySvc, } - for _, option := range options { option(sign) } From 865daf9db9333782b96e43f0aa16f8a283fcf74a Mon Sep 17 00:00:00 2001 From: Steve Date: Fri, 11 Aug 2023 18:19:14 
+0800 Subject: [PATCH 021/139] style: code structure --- {s3d => s3}/action/action.go | 4 +- {s3d => s3}/action/action_test.go | 0 {s3d => s3}/apierrors/errors.go | 0 {s3d => s3}/apierrors/s3_error.go | 0 {s3d => s3}/apierrors/s3api_errors.go | 0 s3/{common => }/consts/consts.go | 0 {s3d => s3}/etag/etag.go | 0 {s3d => s3}/etag/etag_test.go | 0 {s3d => s3}/etag/reader.go | 0 s3/handlers/accesskey.go | 7 ++++ s3/handlers/handlers.go | 8 ++-- s3/handlers/services.go | 24 ++---------- s3/handlers/services_errors.go | 8 ++++ s3/handlers/services_types.go | 15 ++++++++ {s3d => s3}/lock/lock.go | 0 {s3d => s3}/lock/rwmutex.go | 0 {s3d => s3}/policy/policy.go | 6 ++- s3/services/auth/auth.go | 34 +++++++++++++++++ {s3d => s3/services}/auth/auth_type.go | 0 .../services}/auth/check_handler_auth.go | 13 ++++--- {s3d => s3/services}/auth/cred_temp.go | 0 s3/services/auth/options.go | 3 ++ .../services}/auth/signature-v4-parser.go | 0 .../services}/auth/signature-v4-utils.go | 0 {s3d => s3/services}/auth/signature-v4.go | 4 +- s3/services/auth/signature.go | 37 +++++++++++++++++++ s3/services/providers.go | 5 --- s3/services/providers_errors.go | 7 ++++ s3/services/providers_types.go | 3 ++ s3/services/sign/options.go | 3 -- s3/services/sign/sign.go | 29 --------------- {s3d => s3}/set/match.go | 0 {s3d => s3}/set/match_test.go | 0 {s3d => s3}/set/stringset.go | 0 {s3d => s3}/set/stringset_test.go | 0 {s3d => s3}/utils/bgcontext.go | 0 {s3d => s3}/utils/encode.go | 0 {s3d => s3}/utils/hash/errors.go | 0 {s3d => s3}/utils/hash/reader.go | 0 {s3d => s3}/utils/ip.go | 0 {s3d => s3}/utils/levels.go | 0 {s3d => s3}/utils/signature.go | 0 {s3d => s3}/utils/utils.go | 0 {s3d => s3}/utils/xml.go | 0 s3d/auth/service.go | 1 - s3d/auth/service_instance.go | 28 -------------- s3d/auth/service_interface.go | 10 ----- s3d/auth/service_test.go | 1 - s3d/store/service_instance.go | 4 +- s3d/store/service_interface.go | 2 +- 50 files changed, 141 insertions(+), 115 deletions(-) rename {s3d 
=> s3}/action/action.go (98%) rename {s3d => s3}/action/action_test.go (100%) rename {s3d => s3}/apierrors/errors.go (100%) rename {s3d => s3}/apierrors/s3_error.go (100%) rename {s3d => s3}/apierrors/s3api_errors.go (100%) rename s3/{common => }/consts/consts.go (100%) rename {s3d => s3}/etag/etag.go (100%) rename {s3d => s3}/etag/etag_test.go (100%) rename {s3d => s3}/etag/reader.go (100%) create mode 100644 s3/handlers/accesskey.go create mode 100644 s3/handlers/services_errors.go create mode 100644 s3/handlers/services_types.go rename {s3d => s3}/lock/lock.go (100%) rename {s3d => s3}/lock/rwmutex.go (100%) rename {s3d => s3}/policy/policy.go (95%) create mode 100644 s3/services/auth/auth.go rename {s3d => s3/services}/auth/auth_type.go (100%) rename {s3d => s3/services}/auth/check_handler_auth.go (94%) rename {s3d => s3/services}/auth/cred_temp.go (100%) create mode 100644 s3/services/auth/options.go rename {s3d => s3/services}/auth/signature-v4-parser.go (100%) rename {s3d => s3/services}/auth/signature-v4-utils.go (100%) rename {s3d => s3/services}/auth/signature-v4.go (98%) create mode 100644 s3/services/auth/signature.go create mode 100644 s3/services/providers_errors.go create mode 100644 s3/services/providers_types.go delete mode 100644 s3/services/sign/options.go delete mode 100644 s3/services/sign/sign.go rename {s3d => s3}/set/match.go (100%) rename {s3d => s3}/set/match_test.go (100%) rename {s3d => s3}/set/stringset.go (100%) rename {s3d => s3}/set/stringset_test.go (100%) rename {s3d => s3}/utils/bgcontext.go (100%) rename {s3d => s3}/utils/encode.go (100%) rename {s3d => s3}/utils/hash/errors.go (100%) rename {s3d => s3}/utils/hash/reader.go (100%) rename {s3d => s3}/utils/ip.go (100%) rename {s3d => s3}/utils/levels.go (100%) rename {s3d => s3}/utils/signature.go (100%) rename {s3d => s3}/utils/utils.go (100%) rename {s3d => s3}/utils/xml.go (100%) delete mode 100644 s3d/auth/service.go delete mode 100644 s3d/auth/service_instance.go delete mode 
100644 s3d/auth/service_interface.go delete mode 100644 s3d/auth/service_test.go diff --git a/s3d/action/action.go b/s3/action/action.go similarity index 98% rename from s3d/action/action.go rename to s3/action/action.go index dedaaa599..ec7d088fe 100644 --- a/s3d/action/action.go +++ b/s3/action/action.go @@ -1,6 +1,8 @@ package action -import "github.com/bittorrent/go-btfs/s3d/set" +import ( + "github.com/bittorrent/go-btfs/s3/set" +) type Action string diff --git a/s3d/action/action_test.go b/s3/action/action_test.go similarity index 100% rename from s3d/action/action_test.go rename to s3/action/action_test.go diff --git a/s3d/apierrors/errors.go b/s3/apierrors/errors.go similarity index 100% rename from s3d/apierrors/errors.go rename to s3/apierrors/errors.go diff --git a/s3d/apierrors/s3_error.go b/s3/apierrors/s3_error.go similarity index 100% rename from s3d/apierrors/s3_error.go rename to s3/apierrors/s3_error.go diff --git a/s3d/apierrors/s3api_errors.go b/s3/apierrors/s3api_errors.go similarity index 100% rename from s3d/apierrors/s3api_errors.go rename to s3/apierrors/s3api_errors.go diff --git a/s3/common/consts/consts.go b/s3/consts/consts.go similarity index 100% rename from s3/common/consts/consts.go rename to s3/consts/consts.go diff --git a/s3d/etag/etag.go b/s3/etag/etag.go similarity index 100% rename from s3d/etag/etag.go rename to s3/etag/etag.go diff --git a/s3d/etag/etag_test.go b/s3/etag/etag_test.go similarity index 100% rename from s3d/etag/etag_test.go rename to s3/etag/etag_test.go diff --git a/s3d/etag/reader.go b/s3/etag/reader.go similarity index 100% rename from s3d/etag/reader.go rename to s3/etag/reader.go diff --git a/s3/handlers/accesskey.go b/s3/handlers/accesskey.go new file mode 100644 index 000000000..6e767a668 --- /dev/null +++ b/s3/handlers/accesskey.go @@ -0,0 +1,7 @@ +package handlers + +import ( + "errors" +) + +var ErrAccessKeyIsNotFound = errors.New("access-key is not found") diff --git a/s3/handlers/handlers.go 
b/s3/handlers/handlers.go index 4a4867caf..6a0670554 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -2,7 +2,7 @@ package handlers import ( - "github.com/bittorrent/go-btfs/s3/common/consts" + "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/server" "github.com/rs/cors" "net/http" @@ -51,14 +51,14 @@ type Handlers struct { corsAllowOrigins []string corsAllowHeaders []string corsAllowMethods []string - signSvc SignService + authSvc AuthService bucketSvc BucketService objectSvc ObjectService multipartSvc MultipartService } func NewHandlers( - signSvc SignService, bucketSvc BucketService, + authSvc AuthService, bucketSvc BucketService, objectSvc ObjectService, multipartSvc MultipartService, options ...Option, ) (handlers *Handlers) { @@ -66,7 +66,7 @@ func NewHandlers( corsAllowOrigins: defaultCorsAllowOrigins, corsAllowHeaders: defaultCorsAllowHeaders, corsAllowMethods: defaultCorsAllowMethods, - signSvc: signSvc, + authSvc: authSvc, bucketSvc: bucketSvc, objectSvc: objectSvc, multipartSvc: multipartSvc, diff --git a/s3/handlers/services.go b/s3/handlers/services.go index 290ea3546..4826030da 100644 --- a/s3/handlers/services.go +++ b/s3/handlers/services.go @@ -1,9 +1,8 @@ package handlers import ( - "errors" + "github.com/bittorrent/go-btfs/s3/action" "net/http" - "time" ) type AccessKeyService interface { @@ -16,17 +15,6 @@ type AccessKeyService interface { List() (list []*AccessKeyRecord, err error) } -type AccessKeyRecord struct { - Key string `json:"key"` - Secret string `json:"secret"` - Enable bool `json:"enable"` - IsDeleted bool `json:"is_deleted"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` -} - -var ErrAccessKeyIsNotFound = errors.New("access-key is not found") - type BucketService interface { } @@ -36,11 +24,7 @@ type ObjectService interface { type MultipartService interface { } -type SignService interface { - Verify(r *http.Request) (err error) +type AuthService 
interface { + VerifySignature(r *http.Request) (accessKeyRecord *AccessKeyRecord, err error) + CheckACL(accessKeyRecord *AccessKeyRecord, bucketMeta *BucketMeta, action action.Action) (err error) } - -var ( - ErrSignOutdated = errors.New("sign is outdated") - ErrSignKeyIsNotFound = errors.New("key is not found") -) diff --git a/s3/handlers/services_errors.go b/s3/handlers/services_errors.go new file mode 100644 index 000000000..057ea6fde --- /dev/null +++ b/s3/handlers/services_errors.go @@ -0,0 +1,8 @@ +package handlers + +import "errors" + +var ( + ErrBucketNotFound = errors.New("bucket is not found") + ErrSginVersionNotSupport = errors.New("sign version is not support") +) diff --git a/s3/handlers/services_types.go b/s3/handlers/services_types.go new file mode 100644 index 000000000..3efe138dd --- /dev/null +++ b/s3/handlers/services_types.go @@ -0,0 +1,15 @@ +package handlers + +import "time" + +type AccessKeyRecord struct { + Key string `json:"key"` + Secret string `json:"secret"` + Enable bool `json:"enable"` + IsDeleted bool `json:"is_deleted"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +type BucketMeta struct { +} diff --git a/s3d/lock/lock.go b/s3/lock/lock.go similarity index 100% rename from s3d/lock/lock.go rename to s3/lock/lock.go diff --git a/s3d/lock/rwmutex.go b/s3/lock/rwmutex.go similarity index 100% rename from s3d/lock/rwmutex.go rename to s3/lock/rwmutex.go diff --git a/s3d/policy/policy.go b/s3/policy/policy.go similarity index 95% rename from s3d/policy/policy.go rename to s3/policy/policy.go index d38c09ccc..0061c6e6c 100644 --- a/s3d/policy/policy.go +++ b/s3/policy/policy.go @@ -1,6 +1,8 @@ package policy -import s3action "github.com/bittorrent/go-btfs/s3d/action" +import ( + s3action "github.com/bittorrent/go-btfs/s3/action" +) const ( // PublicReadWrite 公开读写,适用于桶ACL和对象ACL @@ -51,7 +53,7 @@ func checkActionInPublicRead(action s3action.Action) bool { } func IsAllowed(own bool, acl string, 
action s3action.Action) (allow bool) { - a := s3action.Action(action) + a := action.Action(action) // 1.if bucket if a.IsBucketAction() { diff --git a/s3/services/auth/auth.go b/s3/services/auth/auth.go new file mode 100644 index 000000000..213e5d672 --- /dev/null +++ b/s3/services/auth/auth.go @@ -0,0 +1,34 @@ +package auth + +import ( + "github.com/bittorrent/go-btfs/s3/action" + "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/services" + "net/http" +) + +var _ handlers.AuthService = (*Service)(nil) + +type Service struct { + providers services.Providerser + accesskeySvc handlers.AccessKeyService +} + +func NewService(providers services.Providerser, accesskeySvc handlers.AccessKeyService, options ...Option) (s *Service) { + s = &Service{ + providers: providers, + accesskeySvc: accesskeySvc, + } + for _, option := range options { + option(s) + } + return +} + +func (s *Service) VerifySignature(r *http.Request) (accessKeyRecord *handlers.AccessKeyRecord, err error) { + return +} + +func (s *Service) CheckACL(accessKeyRecord *handlers.AccessKeyRecord, bucketMeta *handlers.BucketMeta, action action.Action) (err error) { + return +} diff --git a/s3d/auth/auth_type.go b/s3/services/auth/auth_type.go similarity index 100% rename from s3d/auth/auth_type.go rename to s3/services/auth/auth_type.go diff --git a/s3d/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go similarity index 94% rename from s3d/auth/check_handler_auth.go rename to s3/services/auth/check_handler_auth.go index 207c0af30..e87d694d8 100644 --- a/s3d/auth/check_handler_auth.go +++ b/s3/services/auth/check_handler_auth.go @@ -4,16 +4,17 @@ import ( "bytes" "context" "encoding/hex" + s3action "github.com/bittorrent/go-btfs/s3/action" + "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/policy" + "github.com/bittorrent/go-btfs/s3/utils/hash" "github.com/bittorrent/go-btfs/s3d/store" "io" "net/http" - s3action 
"github.com/bittorrent/go-btfs/s3d/action" "github.com/bittorrent/go-btfs/s3d/apierrors" "github.com/bittorrent/go-btfs/s3d/consts" "github.com/bittorrent/go-btfs/s3d/etag" - "github.com/bittorrent/go-btfs/s3d/policy" - "github.com/bittorrent/go-btfs/s3d/utils/hash" ) // AuthSys auth and sign system @@ -31,10 +32,10 @@ func NewAuthSys() *AuthSys { // // returns APIErrorCode if any to be replied to the client. // Additionally, returns the accessKey used in the request, and if this request is by an admin. -func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request, action s3action.Action, bucketName string, bmSys *store.BucketMetadataSys) (cred Credentials, s3Err apierrors.ErrorCode) { +func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request, action s3action.Action, bucketName string, bmSys *store.BucketMetadataSys) (cred Credentials, err error) { //todo 是否需要判断 if bucketName == "" { - return cred, apierrors.ErrNoSuchBucket + return cred, handlers.ErrBucketNotFound } // 1.check signature @@ -55,7 +56,7 @@ func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Re } // CreateBucketAction - if action == s3action.CreateBucketAction { + if action == action.CreateBucketAction { // To extract region from XML in request body, get copy of request body. 
payload, err := io.ReadAll(io.LimitReader(r.Body, consts.MaxLocationConstraintSize)) if err != nil { diff --git a/s3d/auth/cred_temp.go b/s3/services/auth/cred_temp.go similarity index 100% rename from s3d/auth/cred_temp.go rename to s3/services/auth/cred_temp.go diff --git a/s3/services/auth/options.go b/s3/services/auth/options.go new file mode 100644 index 000000000..2b7837297 --- /dev/null +++ b/s3/services/auth/options.go @@ -0,0 +1,3 @@ +package auth + +type Option func(*Service) diff --git a/s3d/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go similarity index 100% rename from s3d/auth/signature-v4-parser.go rename to s3/services/auth/signature-v4-parser.go diff --git a/s3d/auth/signature-v4-utils.go b/s3/services/auth/signature-v4-utils.go similarity index 100% rename from s3d/auth/signature-v4-utils.go rename to s3/services/auth/signature-v4-utils.go diff --git a/s3d/auth/signature-v4.go b/s3/services/auth/signature-v4.go similarity index 98% rename from s3d/auth/signature-v4.go rename to s3/services/auth/signature-v4.go index ca160a605..3bf68335f 100644 --- a/s3d/auth/signature-v4.go +++ b/s3/services/auth/signature-v4.go @@ -19,6 +19,8 @@ package auth import ( "crypto/subtle" + "github.com/bittorrent/go-btfs/s3/set" + "github.com/bittorrent/go-btfs/s3/utils" "net/http" "net/url" "strconv" @@ -26,8 +28,6 @@ import ( "github.com/bittorrent/go-btfs/s3d/apierrors" "github.com/bittorrent/go-btfs/s3d/consts" - "github.com/bittorrent/go-btfs/s3d/set" - "github.com/bittorrent/go-btfs/s3d/utils" ) // AWS Signature Version '4' constants. 
diff --git a/s3/services/auth/signature.go b/s3/services/auth/signature.go new file mode 100644 index 000000000..e10702920 --- /dev/null +++ b/s3/services/auth/signature.go @@ -0,0 +1,37 @@ +package auth + +import ( + "context" + "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/services" + s3action "github.com/bittorrent/go-btfs/s3d/action" + "github.com/bittorrent/go-btfs/s3d/apierrors" + "github.com/bittorrent/go-btfs/s3d/store" + "net/http" +) + +var _ handlers.SignatureService = (*Signature)(nil) + +type Signature struct { + providers services.Providerser + accesskeySvc handlers.AccessKeyService + au *AuthSys + bmSys *store.BucketMetadataSys +} + +func NewSignature(providers services.Providerser, accesskeySvc handlers.AccessKeyService, options ...Option) (signature *Signature) { + signature = &Signature{ + providers: providers, + accesskeySvc: accesskeySvc, + } + for _, option := range options { + + } + return +} + +func (s *service) CheckSignatureAndAcl(ctx context.Context, r *http.Request, action s3action.Action, bucketName string) ( + cred Credentials, s3Error apierrors.ErrorCode) { + + return s.au.CheckRequestAuthTypeCredential(ctx, r, action, bucketName, s.bmSys) +} diff --git a/s3/services/providers.go b/s3/services/providers.go index 3ab58d2d1..796291d97 100644 --- a/s3/services/providers.go +++ b/s3/services/providers.go @@ -1,7 +1,6 @@ package services import ( - "errors" "io" ) @@ -23,7 +22,3 @@ type StateStorer interface { Delete(key string) (err error) Iterate(prefix string, iterFunc StateStoreIterFunc) (err error) } - -type StateStoreIterFunc func(key, value []byte) (stop bool, err error) - -var ErrStateStoreNotFound = errors.New("not found") diff --git a/s3/services/providers_errors.go b/s3/services/providers_errors.go new file mode 100644 index 000000000..0ddf07ef3 --- /dev/null +++ b/s3/services/providers_errors.go @@ -0,0 +1,7 @@ +package services + +import "errors" + +var ( + ErrStateStoreNotFound = 
errors.New("not found") +) diff --git a/s3/services/providers_types.go b/s3/services/providers_types.go new file mode 100644 index 000000000..6808d76d1 --- /dev/null +++ b/s3/services/providers_types.go @@ -0,0 +1,3 @@ +package services + +type StateStoreIterFunc func(key, value []byte) (stop bool, err error) diff --git a/s3/services/sign/options.go b/s3/services/sign/options.go deleted file mode 100644 index b31e097bf..000000000 --- a/s3/services/sign/options.go +++ /dev/null @@ -1,3 +0,0 @@ -package sign - -type Option func(*Sign) diff --git a/s3/services/sign/sign.go b/s3/services/sign/sign.go deleted file mode 100644 index 39450a4bd..000000000 --- a/s3/services/sign/sign.go +++ /dev/null @@ -1,29 +0,0 @@ -package sign - -import ( - "github.com/bittorrent/go-btfs/s3/handlers" - "github.com/bittorrent/go-btfs/s3/services" - "net/http" -) - -var _ handlers.SignService = (*Sign)(nil) - -type Sign struct { - providers services.Providerser - accesskeySvc handlers.AccessKeyService -} - -func NewSign(providers services.Providerser, accesskeySvc handlers.AccessKeyService, options ...Option) (sign *Sign) { - sign = &Sign{ - providers: providers, - accesskeySvc: accesskeySvc, - } - for _, option := range options { - option(sign) - } - return -} - -func (s *Sign) Verify(r *http.Request) (err error) { - return -} diff --git a/s3d/set/match.go b/s3/set/match.go similarity index 100% rename from s3d/set/match.go rename to s3/set/match.go diff --git a/s3d/set/match_test.go b/s3/set/match_test.go similarity index 100% rename from s3d/set/match_test.go rename to s3/set/match_test.go diff --git a/s3d/set/stringset.go b/s3/set/stringset.go similarity index 100% rename from s3d/set/stringset.go rename to s3/set/stringset.go diff --git a/s3d/set/stringset_test.go b/s3/set/stringset_test.go similarity index 100% rename from s3d/set/stringset_test.go rename to s3/set/stringset_test.go diff --git a/s3d/utils/bgcontext.go b/s3/utils/bgcontext.go similarity index 100% rename from 
s3d/utils/bgcontext.go rename to s3/utils/bgcontext.go diff --git a/s3d/utils/encode.go b/s3/utils/encode.go similarity index 100% rename from s3d/utils/encode.go rename to s3/utils/encode.go diff --git a/s3d/utils/hash/errors.go b/s3/utils/hash/errors.go similarity index 100% rename from s3d/utils/hash/errors.go rename to s3/utils/hash/errors.go diff --git a/s3d/utils/hash/reader.go b/s3/utils/hash/reader.go similarity index 100% rename from s3d/utils/hash/reader.go rename to s3/utils/hash/reader.go diff --git a/s3d/utils/ip.go b/s3/utils/ip.go similarity index 100% rename from s3d/utils/ip.go rename to s3/utils/ip.go diff --git a/s3d/utils/levels.go b/s3/utils/levels.go similarity index 100% rename from s3d/utils/levels.go rename to s3/utils/levels.go diff --git a/s3d/utils/signature.go b/s3/utils/signature.go similarity index 100% rename from s3d/utils/signature.go rename to s3/utils/signature.go diff --git a/s3d/utils/utils.go b/s3/utils/utils.go similarity index 100% rename from s3d/utils/utils.go rename to s3/utils/utils.go diff --git a/s3d/utils/xml.go b/s3/utils/xml.go similarity index 100% rename from s3d/utils/xml.go rename to s3/utils/xml.go diff --git a/s3d/auth/service.go b/s3d/auth/service.go deleted file mode 100644 index 8832b06d1..000000000 --- a/s3d/auth/service.go +++ /dev/null @@ -1 +0,0 @@ -package auth diff --git a/s3d/auth/service_instance.go b/s3d/auth/service_instance.go deleted file mode 100644 index 9a56dfcce..000000000 --- a/s3d/auth/service_instance.go +++ /dev/null @@ -1,28 +0,0 @@ -package auth - -import ( - "context" - s3action "github.com/bittorrent/go-btfs/s3d/action" - "github.com/bittorrent/go-btfs/s3d/apierrors" - "github.com/bittorrent/go-btfs/s3d/store" - "net/http" -) - -type service struct { - au *AuthSys - bmSys *store.BucketMetadataSys -} - -func newService(bmSys *store.BucketMetadataSys) (svc *service, err error) { - svc = &service{ - au: NewAuthSys(), - bmSys: bmSys, - } - return -} - -func (s *service) 
CheckSignatureAndAcl(ctx context.Context, r *http.Request, action s3action.Action, bucketName string) ( - cred Credentials, s3Error apierrors.ErrorCode) { - - return s.au.CheckRequestAuthTypeCredential(ctx, r, action, bucketName, s.bmSys) -} diff --git a/s3d/auth/service_interface.go b/s3d/auth/service_interface.go deleted file mode 100644 index 634ca4ba4..000000000 --- a/s3d/auth/service_interface.go +++ /dev/null @@ -1,10 +0,0 @@ -package auth - -import ( - "github.com/bittorrent/go-btfs/s3d/apierrors" - "net/http" -) - -type Service interface { - CheckSignatureAndAcl(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) -} diff --git a/s3d/auth/service_test.go b/s3d/auth/service_test.go deleted file mode 100644 index 8832b06d1..000000000 --- a/s3d/auth/service_test.go +++ /dev/null @@ -1 +0,0 @@ -package auth diff --git a/s3d/store/service_instance.go b/s3d/store/service_instance.go index 13cbd96b5..97348a5fa 100644 --- a/s3d/store/service_instance.go +++ b/s3d/store/service_instance.go @@ -2,9 +2,9 @@ package store import ( "context" + "github.com/bittorrent/go-btfs/s3/lock" "time" - - "github.com/bittorrent/go-btfs/s3d/lock" + "github.com/bittorrent/go-btfs/transaction/storage" "github.com/syndtr/goleveldb/leveldb" ) diff --git a/s3d/store/service_interface.go b/s3d/store/service_interface.go index 4ec4a123b..ebc6ae526 100644 --- a/s3d/store/service_interface.go +++ b/s3d/store/service_interface.go @@ -2,7 +2,7 @@ package store import ( "context" - "github.com/bittorrent/go-btfs/s3d/lock" + "github.com/bittorrent/go-btfs/s3/lock" ) type Service interface { From 6299ba8f4a58f189b68ffe67432e7c66c0844163 Mon Sep 17 00:00:00 2001 From: Steve Date: Sun, 13 Aug 2023 01:07:04 +0800 Subject: [PATCH 022/139] optmize: code structure --- cmd/btfs/daemon.go | 2 +- s3/handlers/handlers.go | 87 +++++-------------- s3/handlers/services.go | 16 ++-- .../{options.go => providers_options.go} | 0 s3/providers/statestore/storage_proxy.go | 2 +- 
.../handlers.go => routers/handlerser.go} | 2 +- s3/routers/routers.go | 31 +++++++ s3/routers/routers_options.go | 3 + s3/server/routerser.go | 7 ++ s3/server/server.go | 20 +---- s3/server/{options.go => server_options.go} | 0 s3/services/accesskey/options.go | 15 ---- .../accesskey/{accesskey.go => service.go} | 74 ++++++++-------- .../{instance.go => service_instance.go} | 24 ++--- s3/services/accesskey/service_options.go | 15 ++++ s3/services/auth/options.go | 3 - s3/services/auth/{auth.go => service.go} | 10 +-- s3/services/auth/service_options.go | 3 + s3/services/cors/service.go | 76 ++++++++++++++++ s3/services/cors/service_options.go | 21 +++++ s3/services/multipart/service.go | 29 +++++++ s3/services/multipart/service_options.go | 3 + s3/services/{providers.go => providerser.go} | 0 ...viders_errors.go => providerser_errors.go} | 0 ...roviders_types.go => providerser_types.go} | 0 25 files changed, 284 insertions(+), 159 deletions(-) rename s3/providers/{options.go => providers_options.go} (100%) rename s3/{server/handlers.go => routers/handlerser.go} (92%) create mode 100644 s3/routers/routers.go create mode 100644 s3/routers/routers_options.go create mode 100644 s3/server/routerser.go rename s3/server/{options.go => server_options.go} (100%) delete mode 100644 s3/services/accesskey/options.go rename s3/services/accesskey/{accesskey.go => service.go} (54%) rename s3/services/accesskey/{instance.go => service_instance.go} (56%) create mode 100644 s3/services/accesskey/service_options.go delete mode 100644 s3/services/auth/options.go rename s3/services/auth/{auth.go => service.go} (59%) create mode 100644 s3/services/auth/service_options.go create mode 100644 s3/services/cors/service.go create mode 100644 s3/services/cors/service_options.go create mode 100644 s3/services/multipart/service.go create mode 100644 s3/services/multipart/service_options.go rename s3/services/{providers.go => providerser.go} (100%) rename s3/services/{providers_errors.go => 
providerser_errors.go} (100%) rename s3/services/{providers_types.go => providerser_types.go} (100%) diff --git a/cmd/btfs/daemon.go b/cmd/btfs/daemon.go index c01dc99e4..c3e71d398 100644 --- a/cmd/btfs/daemon.go +++ b/cmd/btfs/daemon.go @@ -427,7 +427,7 @@ If the user need to start multiple nodes on the same machine, the configuration }() // access-key init - accesskey.InitInstance(s3statestore.NewStorageStateStoreProxy(statestore)) + accesskey.InitService(s3statestore.NewStorageStateStoreProxy(statestore)) if SimpleMode == false { chainid, stored, err := getChainID(req, cfg, statestore) diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 6a0670554..5b5e4f520 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -2,74 +2,35 @@ package handlers import ( - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/server" + "github.com/bittorrent/go-btfs/s3/routers" "github.com/rs/cors" "net/http" ) -var ( - defaultCorsAllowOrigins = []string{"*"} - defaultCorsAllowHeaders = []string{ - consts.Date, - consts.ETag, - consts.ServerInfo, - consts.Connection, - consts.AcceptRanges, - consts.ContentRange, - consts.ContentEncoding, - consts.ContentLength, - consts.ContentType, - consts.ContentDisposition, - consts.LastModified, - consts.ContentLanguage, - consts.CacheControl, - consts.RetryAfter, - consts.AmzBucketRegion, - consts.Expires, - consts.Authorization, - consts.Action, - consts.Range, - "X-Amz*", - "x-amz*", - "*", - } - defaultCorsAllowMethods = []string{ - http.MethodGet, - http.MethodPut, - http.MethodHead, - http.MethodPost, - http.MethodDelete, - http.MethodOptions, - http.MethodPatch, - } -) - -var _ server.Handlerser = (*Handlers)(nil) +var _ routers.Handlerser = (*Handlers)(nil) type Handlers struct { - corsAllowOrigins []string - corsAllowHeaders []string - corsAllowMethods []string - authSvc AuthService - bucketSvc BucketService - objectSvc ObjectService - multipartSvc MultipartService + corsSvc 
CorsService + authSvc AuthService + bucketSvc BucketService + objectSvc ObjectService + multipartSvc MultipartService } func NewHandlers( - authSvc AuthService, bucketSvc BucketService, - objectSvc ObjectService, multipartSvc MultipartService, + corsSvc CorsService, + authSvc AuthService, + bucketSvc BucketService, + objectSvc ObjectService, + multipartSvc MultipartService, options ...Option, ) (handlers *Handlers) { handlers = &Handlers{ - corsAllowOrigins: defaultCorsAllowOrigins, - corsAllowHeaders: defaultCorsAllowHeaders, - corsAllowMethods: defaultCorsAllowMethods, - authSvc: authSvc, - bucketSvc: bucketSvc, - objectSvc: objectSvc, - multipartSvc: multipartSvc, + corsSvc: corsSvc, + authSvc: authSvc, + bucketSvc: bucketSvc, + objectSvc: objectSvc, + multipartSvc: multipartSvc, } for _, option := range options { option(handlers) @@ -77,20 +38,20 @@ func NewHandlers( return } -func (s *Handlers) Cors(handler http.Handler) http.Handler { +func (handlers *Handlers) Cors(handler http.Handler) http.Handler { return cors.New(cors.Options{ - AllowedOrigins: s.corsAllowOrigins, - AllowedMethods: s.corsAllowMethods, - AllowedHeaders: s.corsAllowHeaders, - ExposedHeaders: s.corsAllowHeaders, + AllowedOrigins: handlers.corsSvc.GetAllowOrigins(), + AllowedMethods: handlers.corsSvc.GetAllowMethods(), + AllowedHeaders: handlers.corsSvc.GetAllowHeaders(), + ExposedHeaders: handlers.corsSvc.GetAllowHeaders(), AllowCredentials: true, }).Handler(handler) } -func (s *Handlers) Sign(handler http.Handler) http.Handler { +func (handlers *Handlers) Sign(handler http.Handler) http.Handler { return nil } -func (s *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { +func (handlers *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { return } diff --git a/s3/handlers/services.go b/s3/handlers/services.go index 4826030da..3612f90d2 100644 --- a/s3/handlers/services.go +++ b/s3/handlers/services.go @@ -5,6 +5,12 @@ import ( "net/http" ) +type CorsService 
interface { + GetAllowOrigins() []string + GetAllowMethods() []string + GetAllowHeaders() []string +} + type AccessKeyService interface { Generate() (record *AccessKeyRecord, err error) Enable(key string) (err error) @@ -15,6 +21,11 @@ type AccessKeyService interface { List() (list []*AccessKeyRecord, err error) } +type AuthService interface { + VerifySignature(r *http.Request) (accessKeyRecord *AccessKeyRecord, err error) + CheckACL(accessKeyRecord *AccessKeyRecord, bucketMeta *BucketMeta, action action.Action) (err error) +} + type BucketService interface { } @@ -23,8 +34,3 @@ type ObjectService interface { type MultipartService interface { } - -type AuthService interface { - VerifySignature(r *http.Request) (accessKeyRecord *AccessKeyRecord, err error) - CheckACL(accessKeyRecord *AccessKeyRecord, bucketMeta *BucketMeta, action action.Action) (err error) -} diff --git a/s3/providers/options.go b/s3/providers/providers_options.go similarity index 100% rename from s3/providers/options.go rename to s3/providers/providers_options.go diff --git a/s3/providers/statestore/storage_proxy.go b/s3/providers/statestore/storage_proxy.go index 704e1177a..4ef6045b0 100644 --- a/s3/providers/statestore/storage_proxy.go +++ b/s3/providers/statestore/storage_proxy.go @@ -12,7 +12,7 @@ type StorageProxy struct { proxy storage.StateStorer } -func NewStorageStateStoreProxy(proxy storage.StateStorer) services.StateStorer { +func NewStorageStateStoreProxy(proxy storage.StateStorer) *StorageProxy { return &StorageProxy{ proxy: proxy, } diff --git a/s3/server/handlers.go b/s3/routers/handlerser.go similarity index 92% rename from s3/server/handlers.go rename to s3/routers/handlerser.go index 873c1aad0..d4daef440 100644 --- a/s3/server/handlers.go +++ b/s3/routers/handlerser.go @@ -1,4 +1,4 @@ -package server +package routers import ( "net/http" diff --git a/s3/routers/routers.go b/s3/routers/routers.go new file mode 100644 index 000000000..36fe43ced --- /dev/null +++ 
b/s3/routers/routers.go @@ -0,0 +1,31 @@ +package routers + +import ( + "github.com/gorilla/mux" + "net/http" +) + +type Routers struct { + handlers Handlerser +} + +func NewRouters(handlers Handlerser, options ...Option) (routers *Routers) { + routers = &Routers{ + handlers: handlers, + } + for _, option := range options { + option(routers) + } + return +} + +func (routers *Routers) Register() http.Handler { + root := mux.NewRouter() + + root.Use(routers.handlers.Cors, routers.handlers.Sign) + + bucket := root.PathPrefix("/{bucket}").Subrouter() + bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(routers.handlers.PutObjectHandler) + + return root +} diff --git a/s3/routers/routers_options.go b/s3/routers/routers_options.go new file mode 100644 index 000000000..2dce7e18f --- /dev/null +++ b/s3/routers/routers_options.go @@ -0,0 +1,3 @@ +package routers + +type Option func(routers *Routers) diff --git a/s3/server/routerser.go b/s3/server/routerser.go new file mode 100644 index 000000000..ca4c12b2d --- /dev/null +++ b/s3/server/routerser.go @@ -0,0 +1,7 @@ +package server + +import "net/http" + +type Routerser interface { + Register() http.Handler +} diff --git a/s3/server/server.go b/s3/server/server.go index 0856442a5..d18303f20 100644 --- a/s3/server/server.go +++ b/s3/server/server.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "github.com/gorilla/mux" "net/http" "sync" ) @@ -17,15 +16,15 @@ var ( ) type Server struct { - handlers Handlerser + routers Routerser address string shutdown func() error mutex sync.Mutex } -func NewServer(handlers Handlerser, options ...Option) (s *Server) { +func NewServer(routers Routerser, options ...Option) (s *Server) { s = &Server{ - handlers: handlers, + routers: routers, address: defaultServerAddress, shutdown: nil, mutex: sync.Mutex{}, @@ -47,7 +46,7 @@ func (s *Server) Start() (err error) { httpSvr := &http.Server{ Addr: s.address, - Handler: s.registerRouter(), + Handler: s.routers.Register(), } 
s.shutdown = func() error { @@ -77,14 +76,3 @@ func (s *Server) Stop() (err error) { fmt.Printf("stoped s3-compatible-api server: %v\n", err) return } - -func (s *Server) registerRouter() http.Handler { - root := mux.NewRouter() - - root.Use(s.handlers.Cors, s.handlers.Sign) - - bucket := root.PathPrefix("/{bucket}").Subrouter() - bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(s.handlers.PutObjectHandler) - - return root -} diff --git a/s3/server/options.go b/s3/server/server_options.go similarity index 100% rename from s3/server/options.go rename to s3/server/server_options.go diff --git a/s3/services/accesskey/options.go b/s3/services/accesskey/options.go deleted file mode 100644 index 593856d7e..000000000 --- a/s3/services/accesskey/options.go +++ /dev/null @@ -1,15 +0,0 @@ -package accesskey - -type Option func(ack *AccessKey) - -func WithSecretLength(length int) Option { - return func(ack *AccessKey) { - ack.secretLength = length - } -} - -func WithStoreKeyPrefix(prefix string) Option { - return func(ack *AccessKey) { - ack.storeKeyPrefix = prefix - } -} diff --git a/s3/services/accesskey/accesskey.go b/s3/services/accesskey/service.go similarity index 54% rename from s3/services/accesskey/accesskey.go rename to s3/services/accesskey/service.go index b0f271480..a1c72c5f6 100644 --- a/s3/services/accesskey/accesskey.go +++ b/s3/services/accesskey/service.go @@ -16,77 +16,77 @@ const ( defaultStoreKeyPrefix = "access-keys:" ) -var _ handlers.AccessKeyService = (*AccessKey)(nil) +var _ handlers.AccessKeyService = (*Service)(nil) -type AccessKey struct { +type Service struct { providers services.Providerser secretLength int storeKeyPrefix string locks sync.Map } -func NewAccessKey(providers services.Providerser, options ...Option) (ack *AccessKey) { - ack = &AccessKey{ +func NewService(providers services.Providerser, options ...Option) (svc *Service) { + svc = &Service{ providers: providers, secretLength: defaultSecretLength, storeKeyPrefix: 
defaultStoreKeyPrefix, locks: sync.Map{}, } for _, option := range options { - option(ack) + option(svc) } - return ack + return svc } -func (ack *AccessKey) Generate() (record *handlers.AccessKeyRecord, err error) { +func (svc *Service) Generate() (record *handlers.AccessKeyRecord, err error) { now := time.Now() record = &handlers.AccessKeyRecord{ - Key: ack.newKey(), - Secret: ack.newSecret(), + Key: svc.newKey(), + Secret: svc.newSecret(), Enable: true, IsDeleted: false, CreatedAt: now, UpdatedAt: now, } - err = ack.providers.GetStateStore().Put(ack.getStoreKey(record.Key), record) + err = svc.providers.GetStateStore().Put(svc.getStoreKey(record.Key), record) return } -func (ack *AccessKey) Enable(key string) (err error) { +func (svc *Service) Enable(key string) (err error) { enable := true - err = ack.update(key, &updateArgs{ + err = svc.update(key, &updateArgs{ Enable: &enable, }) return } -func (ack *AccessKey) Disable(key string) (err error) { +func (svc *Service) Disable(key string) (err error) { enable := false - err = ack.update(key, &updateArgs{ + err = svc.update(key, &updateArgs{ Enable: &enable, }) return } -func (ack *AccessKey) Reset(key string) (err error) { - secret := ack.newSecret() - err = ack.update(key, &updateArgs{ +func (svc *Service) Reset(key string) (err error) { + secret := svc.newSecret() + err = svc.update(key, &updateArgs{ Secret: &secret, }) return } -func (ack *AccessKey) Delete(key string) (err error) { +func (svc *Service) Delete(key string) (err error) { isDelete := true - err = ack.update(key, &updateArgs{ + err = svc.update(key, &updateArgs{ IsDelete: &isDelete, }) return } -func (ack *AccessKey) Get(key string) (record *handlers.AccessKeyRecord, err error) { +func (svc *Service) Get(key string) (record *handlers.AccessKeyRecord, err error) { record = &handlers.AccessKeyRecord{} - err = ack.providers.GetStateStore().Get(ack.getStoreKey(key), record) + err = svc.providers.GetStateStore().Get(svc.getStoreKey(key), record) if err 
!= nil && !errors.Is(err, services.ErrStateStoreNotFound) { return } @@ -96,10 +96,10 @@ func (ack *AccessKey) Get(key string) (record *handlers.AccessKeyRecord, err err return } -func (ack *AccessKey) List() (list []*handlers.AccessKeyRecord, err error) { - err = ack.providers.GetStateStore().Iterate(ack.storeKeyPrefix, func(key, _ []byte) (stop bool, er error) { +func (svc *Service) List() (list []*handlers.AccessKeyRecord, err error) { + err = svc.providers.GetStateStore().Iterate(svc.storeKeyPrefix, func(key, _ []byte) (stop bool, er error) { record := &handlers.AccessKeyRecord{} - er = ack.providers.GetStateStore().Get(string(key), record) + er = svc.providers.GetStateStore().Get(string(key), record) if er != nil { return } @@ -112,29 +112,29 @@ func (ack *AccessKey) List() (list []*handlers.AccessKeyRecord, err error) { return } -func (ack *AccessKey) newKey() (key string) { +func (svc *Service) newKey() (key string) { key = uuid.NewString() return } -func (ack *AccessKey) newSecret() (secret string) { - secret = utils.RandomString(ack.secretLength) +func (svc *Service) newSecret() (secret string) { + secret = utils.RandomString(svc.secretLength) return } -func (ack *AccessKey) getStoreKey(key string) (storeKey string) { - storeKey = ack.storeKeyPrefix + key +func (svc *Service) getStoreKey(key string) (storeKey string) { + storeKey = svc.storeKeyPrefix + key return } -func (ack *AccessKey) lock(key string) (unlock func()) { +func (svc *Service) lock(key string) (unlock func()) { loaded := true for loaded { - _, loaded = ack.locks.LoadOrStore(key, nil) + _, loaded = svc.locks.LoadOrStore(key, nil) time.Sleep(10 * time.Millisecond) } unlock = func() { - ack.locks.Delete(key) + svc.locks.Delete(key) } return } @@ -145,14 +145,14 @@ type updateArgs struct { IsDelete *bool } -func (ack *AccessKey) update(key string, args *updateArgs) (err error) { - unlock := ack.lock(key) +func (svc *Service) update(key string, args *updateArgs) (err error) { + unlock := 
svc.lock(key) defer unlock() record := &handlers.AccessKeyRecord{} - stk := ack.getStoreKey(key) + stk := svc.getStoreKey(key) - err = ack.providers.GetStateStore().Get(stk, record) + err = svc.providers.GetStateStore().Get(stk, record) if err != nil && !errors.Is(err, storage.ErrNotFound) { return } @@ -173,7 +173,7 @@ func (ack *AccessKey) update(key string, args *updateArgs) (err error) { record.UpdatedAt = time.Now() - err = ack.providers.GetStateStore().Put(stk, record) + err = svc.providers.GetStateStore().Put(stk, record) return } diff --git a/s3/services/accesskey/instance.go b/s3/services/accesskey/service_instance.go similarity index 56% rename from s3/services/accesskey/instance.go rename to s3/services/accesskey/service_instance.go index e0ac52f3d..6d7686f55 100644 --- a/s3/services/accesskey/instance.go +++ b/s3/services/accesskey/service_instance.go @@ -6,44 +6,44 @@ import ( "sync" ) -var instance handlers.AccessKeyService +var service *Service var once sync.Once -func InitInstance(providers services.Providerser, options ...Option) { +func InitService(providers services.Providerser, options ...Option) { once.Do(func() { - instance = NewAccessKey(providers, options...) + service = NewService(providers, options...) 
}) } -func GetInstance() handlers.AccessKeyService { - return instance +func GetService() *Service { + return service } func Generate() (record *handlers.AccessKeyRecord, err error) { - return instance.Generate() + return service.Generate() } func Enable(key string) (err error) { - return instance.Enable(key) + return service.Enable(key) } func Disable(key string) (err error) { - return instance.Disable(key) + return service.Disable(key) } func Reset(key string) (err error) { - return instance.Reset(key) + return service.Reset(key) } func Delete(key string) (err error) { - return instance.Delete(key) + return service.Delete(key) } func Get(key string) (record *handlers.AccessKeyRecord, err error) { - return instance.Get(key) + return service.Get(key) } func List() (list []*handlers.AccessKeyRecord, err error) { - return instance.List() + return service.List() } diff --git a/s3/services/accesskey/service_options.go b/s3/services/accesskey/service_options.go new file mode 100644 index 000000000..d13493b8a --- /dev/null +++ b/s3/services/accesskey/service_options.go @@ -0,0 +1,15 @@ +package accesskey + +type Option func(svc *Service) + +func WithSecretLength(length int) Option { + return func(svc *Service) { + svc.secretLength = length + } +} + +func WithStoreKeyPrefix(prefix string) Option { + return func(svc *Service) { + svc.storeKeyPrefix = prefix + } +} diff --git a/s3/services/auth/options.go b/s3/services/auth/options.go deleted file mode 100644 index 2b7837297..000000000 --- a/s3/services/auth/options.go +++ /dev/null @@ -1,3 +0,0 @@ -package auth - -type Option func(*Service) diff --git a/s3/services/auth/auth.go b/s3/services/auth/service.go similarity index 59% rename from s3/services/auth/auth.go rename to s3/services/auth/service.go index 213e5d672..44c24af10 100644 --- a/s3/services/auth/auth.go +++ b/s3/services/auth/service.go @@ -14,21 +14,21 @@ type Service struct { accesskeySvc handlers.AccessKeyService } -func NewService(providers 
services.Providerser, accesskeySvc handlers.AccessKeyService, options ...Option) (s *Service) { - s = &Service{ +func NewService(providers services.Providerser, accesskeySvc handlers.AccessKeyService, options ...Option) (svc *Service) { + svc = &Service{ providers: providers, accesskeySvc: accesskeySvc, } for _, option := range options { - option(s) + option(svc) } return } -func (s *Service) VerifySignature(r *http.Request) (accessKeyRecord *handlers.AccessKeyRecord, err error) { +func (svc *Service) VerifySignature(r *http.Request) (accessKeyRecord *handlers.AccessKeyRecord, err error) { return } -func (s *Service) CheckACL(accessKeyRecord *handlers.AccessKeyRecord, bucketMeta *handlers.BucketMeta, action action.Action) (err error) { +func (svc *Service) CheckACL(accessKeyRecord *handlers.AccessKeyRecord, bucketMeta *handlers.BucketMeta, action action.Action) (err error) { return } diff --git a/s3/services/auth/service_options.go b/s3/services/auth/service_options.go new file mode 100644 index 000000000..7b0e351fd --- /dev/null +++ b/s3/services/auth/service_options.go @@ -0,0 +1,3 @@ +package auth + +type Option func(svc *Service) diff --git a/s3/services/cors/service.go b/s3/services/cors/service.go new file mode 100644 index 000000000..ccb76882e --- /dev/null +++ b/s3/services/cors/service.go @@ -0,0 +1,76 @@ +package cors + +import ( + "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3d/consts" + "net/http" +) + +var ( + defaultAllowOrigins = []string{"*"} + defaultAllowMethods = []string{ + http.MethodGet, + http.MethodPut, + http.MethodHead, + http.MethodPost, + http.MethodDelete, + http.MethodOptions, + http.MethodPatch, + } + defaultAllowHeaders = []string{ + consts.Date, + consts.ETag, + consts.ServerInfo, + consts.Connection, + consts.AcceptRanges, + consts.ContentRange, + consts.ContentEncoding, + consts.ContentLength, + consts.ContentType, + consts.ContentDisposition, + consts.LastModified, + consts.ContentLanguage, + 
consts.CacheControl, + consts.RetryAfter, + consts.AmzBucketRegion, + consts.Expires, + consts.Authorization, + consts.Action, + consts.Range, + "X-Amz*", + "x-amz*", + "*", + } +) + +var _ handlers.CorsService = (*Service)(nil) + +type Service struct { + allowOrigins []string + allowMethods []string + allowHeaders []string +} + +func NewService(options ...Option) (svc *Service) { + svc = &Service{ + allowOrigins: defaultAllowOrigins, + allowMethods: defaultAllowMethods, + allowHeaders: defaultAllowHeaders, + } + for _, option := range options { + option(svc) + } + return +} + +func (svc *Service) GetAllowOrigins() []string { + return svc.allowOrigins +} + +func (svc *Service) GetAllowMethods() []string { + return svc.allowMethods +} + +func (svc *Service) GetAllowHeaders() []string { + return svc.allowHeaders +} diff --git a/s3/services/cors/service_options.go b/s3/services/cors/service_options.go new file mode 100644 index 000000000..1b3ee5721 --- /dev/null +++ b/s3/services/cors/service_options.go @@ -0,0 +1,21 @@ +package cors + +type Option func(svc *Service) + +func WithAllowOrigins(origins []string) Option { + return func(svc *Service) { + svc.allowOrigins = origins + } +} + +func WithAllowMethods(methods []string) Option { + return func(svc *Service) { + svc.allowMethods = methods + } +} + +func WithAllowHeaders(headers []string) Option { + return func(svc *Service) { + svc.allowHeaders = headers + } +} diff --git a/s3/services/multipart/service.go b/s3/services/multipart/service.go new file mode 100644 index 000000000..de8c518b3 --- /dev/null +++ b/s3/services/multipart/service.go @@ -0,0 +1,29 @@ +package multipart + +import ( + "github.com/bittorrent/go-btfs/s3/handlers" + "io" +) + +var _ handlers.MultipartService = (*Service)(nil) + +type Service struct { +} + +func NewService(options ...Option) (svc *Service) { + svc = &Service{} + for _, option := range options { + option(svc) + } + return +} + +func (svc *Service) multiReader() io.Reader { + var ( + 
r1 io.Reader + r2 io.Reader + r3 io.Reader + ) + + return io.MultiReader(r1, r2, r3) +} diff --git a/s3/services/multipart/service_options.go b/s3/services/multipart/service_options.go new file mode 100644 index 000000000..38cfa3705 --- /dev/null +++ b/s3/services/multipart/service_options.go @@ -0,0 +1,3 @@ +package multipart + +type Option func(svc *Service) diff --git a/s3/services/providers.go b/s3/services/providerser.go similarity index 100% rename from s3/services/providers.go rename to s3/services/providerser.go diff --git a/s3/services/providers_errors.go b/s3/services/providerser_errors.go similarity index 100% rename from s3/services/providers_errors.go rename to s3/services/providerser_errors.go diff --git a/s3/services/providers_types.go b/s3/services/providerser_types.go similarity index 100% rename from s3/services/providers_types.go rename to s3/services/providerser_types.go From 7aa2ca5283bef70fbcce04e38ea444ef4fd46180 Mon Sep 17 00:00:00 2001 From: Steve Date: Mon, 14 Aug 2023 17:22:01 +0800 Subject: [PATCH 023/139] feat: add multiple context lock --- s3/ctxmu/ctx_rwlock.go | 67 +++++++++++++++ s3/ctxmu/multi_ctx_rwlock.go | 137 ++++++++++++++++++++++++++++++ s3/ctxmu/multi_ctx_rwlock_test.go | 109 ++++++++++++++++++++++++ s3/handlers/options.go | 6 -- s3/services/accesskey/service.go | 36 ++++---- 5 files changed, 330 insertions(+), 25 deletions(-) create mode 100644 s3/ctxmu/ctx_rwlock.go create mode 100644 s3/ctxmu/multi_ctx_rwlock.go create mode 100644 s3/ctxmu/multi_ctx_rwlock_test.go diff --git a/s3/ctxmu/ctx_rwlock.go b/s3/ctxmu/ctx_rwlock.go new file mode 100644 index 000000000..c6b76bcaf --- /dev/null +++ b/s3/ctxmu/ctx_rwlock.go @@ -0,0 +1,67 @@ +package ctxmu + +import ( + "golang.org/x/net/context" + "math/rand" + "sync" + "time" +) + +const lockRetryInterval = 50 * time.Millisecond + +type CtxLocker interface { + Lock(ctx context.Context) (err error) + Unlock() +} + +type CtxRWLocker interface { + CtxLocker + RLock(ctx 
context.Context) (err error) + RUnlock() +} + +type CtxRWMutex struct { + lock sync.RWMutex +} + +func (c *CtxRWMutex) Lock(ctx context.Context) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + if c.lock.TryLock() { + return nil + } + } + time.Sleep(c.getRandInterval()) + } +} + +func (c *CtxRWMutex) Unlock() { + c.lock.Unlock() +} + +func (c *CtxRWMutex) RLock(ctx context.Context) (err error) { + for { + select { + case <-ctx.Done(): + err = ctx.Err() + return + default: + if c.lock.TryRLock() { + return + } + } + time.Sleep(c.getRandInterval()) + } +} + +func (c *CtxRWMutex) RUnlock() { + c.lock.RUnlock() +} + +func (c *CtxRWMutex) getRandInterval() time.Duration { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + return time.Duration(r.Float64() * float64(lockRetryInterval)) +} diff --git a/s3/ctxmu/multi_ctx_rwlock.go b/s3/ctxmu/multi_ctx_rwlock.go new file mode 100644 index 000000000..67df52b6a --- /dev/null +++ b/s3/ctxmu/multi_ctx_rwlock.go @@ -0,0 +1,137 @@ +package ctxmu + +import ( + "context" + "sync" + "sync/atomic" + "time" +) + +type MultiCtxLocker interface { + Lock(ctx context.Context, key interface{}) (err error) + Unlock(key interface{}) +} + +type MultiCtxRWLocker interface { + MultiCtxLocker + RLock(ctx context.Context, key interface{}) (err error) + RUnlock(key interface{}) +} + +type MultiCtxRWMutex struct { + locks sync.Map + pool sync.Pool +} + +func NewDefaultMultiCtxRWMutex() *MultiCtxRWMutex { + return NewMultiCtxRWMutex(func() CtxRWLocker { + return &CtxRWMutex{} + }) +} + +func NewMultiCtxRWMutex(newCtxRWLock func() CtxRWLocker) *MultiCtxRWMutex { + return &MultiCtxRWMutex{ + locks: sync.Map{}, + pool: sync.Pool{ + New: func() interface{} { + return newCtxRWLock() + }, + }, + } +} + +type ctxRWLockRefCounter struct { + count int64 + lock CtxRWLocker +} + +func (m *MultiCtxRWMutex) Lock(ctx context.Context, key interface{}) (err error) { + counter, err := m.incrGetRWLockRefCounter(ctx, key) + if err 
!= nil { + return + } + err = (counter.lock).Lock(ctx) + if err != nil { + m.decrPutRWLockRefCounter(key, counter) + } + return +} + +func (m *MultiCtxRWMutex) LockWithTimout(timeout time.Duration, key interface{}) (err error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err = m.Lock(ctx, key) + return +} + +func (m *MultiCtxRWMutex) Unlock(key interface{}) { + counter := m.mustGetCounter(key) + counter.lock.Unlock() + m.decrPutRWLockRefCounter(key, counter) + return +} + +func (m *MultiCtxRWMutex) RLock(ctx context.Context, key interface{}) (err error) { + counter, err := m.incrGetRWLockRefCounter(ctx, key) + if err != nil { + return + } + err = (counter.lock).RLock(ctx) + if err != nil { + m.decrPutRWLockRefCounter(key, counter) + } + return +} + +func (m *MultiCtxRWMutex) RLockWithTimout(timeout time.Duration, key interface{}) (err error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err = m.RLock(ctx, key) + return +} + +func (m *MultiCtxRWMutex) RUnlock(key interface{}) { + counter := m.mustGetCounter(key) + counter.lock.RUnlock() + m.decrPutRWLockRefCounter(key, counter) + return +} + +func (m *MultiCtxRWMutex) mustGetCounter(key interface{}) (counter *ctxRWLockRefCounter) { + actual, ok := m.locks.Load(key) + if !ok { + panic("key's lock has been invalidly freed") + } + counter = actual.(*ctxRWLockRefCounter) + return +} + +func (m *MultiCtxRWMutex) incrGetRWLockRefCounter(ctx context.Context, key interface{}) (counter *ctxRWLockRefCounter, err error) { + for { + err = ctx.Err() + if err != nil { + return + } + actual, _ := m.locks.LoadOrStore(key, &ctxRWLockRefCounter{ + count: 0, + lock: m.pool.Get().(*CtxRWMutex), + }) + counter = actual.(*ctxRWLockRefCounter) + old := counter.count + if old < 0 { + continue + } + if atomic.CompareAndSwapInt64(&counter.count, old, old+1) { + break + } + } + return +} + +func (m *MultiCtxRWMutex) decrPutRWLockRefCounter(key interface{}, 
counter *ctxRWLockRefCounter) { + atomic.AddInt64(&counter.count, -1) + if atomic.CompareAndSwapInt64(&counter.count, 0, -1) { + m.pool.Put(counter.lock) + m.locks.Delete(key) + } +} diff --git a/s3/ctxmu/multi_ctx_rwlock_test.go b/s3/ctxmu/multi_ctx_rwlock_test.go new file mode 100644 index 000000000..ab3cca3a1 --- /dev/null +++ b/s3/ctxmu/multi_ctx_rwlock_test.go @@ -0,0 +1,109 @@ +package ctxmu + +import ( + "context" + "errors" + "fmt" + "golang.org/x/sync/errgroup" + "math/rand" + "testing" + "time" +) + +func TestMultiCtxRWMutex_Lock(t *testing.T) { + locks := NewMultiCtxRWMutex(func() CtxRWLocker { + return &CtxRWMutex{} + }) + eg := errgroup.Group{} + key := "test_key" + err := locks.Lock(context.Background(), key) + if err != nil { + t.Fatalf("can not lock") + } else { + t.Logf("can lock") + } + eg.Go(func() error { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond) + defer cancel() + lerr := locks.Lock(ctx, key) + if lerr == nil { + t.Fatalf("can lock after locked") + } else if !errors.Is(lerr, context.DeadlineExceeded) { + t.Fatalf("timout lock return non DeadlineExceeded error: %v", lerr) + } else { + t.Logf("can not lock after locked") + } + lerr = locks.RLock(ctx, key) + if lerr == nil { + t.Fatalf("can rlock after locked") + } else if !errors.Is(lerr, context.DeadlineExceeded) { + t.Fatalf("timout rlock return non DeadlineExceeded error: %v", lerr) + } else { + t.Logf("can not rlock after locked") + } + locks.Unlock(key) + lerr = locks.Lock(context.Background(), key) + if lerr != nil { + t.Fatalf("can not lock after unlocked") + } else { + t.Logf("can lock after unlocked") + } + locks.Unlock(key) + lerr = locks.RLock(context.Background(), key) + if lerr != nil { + t.Fatalf("can not rlock after unlocked") + } else { + t.Logf("can rlock after unlocked") + } + locks.RUnlock(key) + return nil + }) + + _ = eg.Wait() +} + +func TestMultiCtxRWMutex_LockWithTimout(t *testing.T) { + rand.Seed(time.Now().UnixNano()) + eg := 
errgroup.Group{} + timeout := 50 * time.Millisecond + locks := NewMultiCtxRWMutex(func() CtxRWLocker { + return &CtxRWMutex{} + }) + for i := 0; i < 1000; i++ { + okey := fmt.Sprintf("key_%d", i) + for j := 0; j < 100; j++ { + key := okey + n := j + wt := rand.Intn(200) + if j == 0 || j == 30 { + eg.Go(func() error { + lerr := locks.LockWithTimout(timeout, key) + if lerr == nil { + defer func() { + t.Logf("%s %d Unlock: %v, %d", key, n, lerr, wt) + locks.Unlock(key) + }() + } + t.Logf("%s %d Lock: %v, %d", key, n, lerr, wt) + time.Sleep(time.Duration(wt) * time.Millisecond) + return nil + }) + } else { + eg.Go(func() error { + lerr := locks.RLockWithTimout(timeout, key) + if lerr == nil { + defer func() { + t.Logf("%s %d RLock: %v, %d", key, n, lerr, wt) + locks.RUnlock(key) + }() + } + t.Logf("%s %d RLock: %v, %d", key, n, lerr, wt) + time.Sleep(time.Duration(wt) * time.Millisecond) + return nil + }) + + } + } + } + _ = eg.Wait() +} diff --git a/s3/handlers/options.go b/s3/handlers/options.go index 5caa7f8fc..ae5643114 100644 --- a/s3/handlers/options.go +++ b/s3/handlers/options.go @@ -1,9 +1,3 @@ package handlers type Option func(handlers *Handlers) - -func WithCorsAllowOrigins(origins []string) Option { - return func(handlers *Handlers) { - handlers.corsAllowOrigins = origins - } -} diff --git a/s3/services/accesskey/service.go b/s3/services/accesskey/service.go index a1c72c5f6..30d01bd57 100644 --- a/s3/services/accesskey/service.go +++ b/s3/services/accesskey/service.go @@ -1,19 +1,21 @@ package accesskey import ( + "context" "errors" + "github.com/bittorrent/go-btfs/s3/ctxmu" "github.com/bittorrent/go-btfs/s3/handlers" "github.com/bittorrent/go-btfs/s3/services" "github.com/bittorrent/go-btfs/transaction/storage" "github.com/bittorrent/go-btfs/utils" "github.com/google/uuid" - "sync" "time" ) const ( - defaultSecretLength = 32 - defaultStoreKeyPrefix = "access-keys:" + defaultSecretLength = 32 + defaultStoreKeyPrefix = "access-keys:" + defaultUpdateTimeoutMS 
= 200 ) var _ handlers.AccessKeyService = (*Service)(nil) @@ -22,7 +24,8 @@ type Service struct { providers services.Providerser secretLength int storeKeyPrefix string - locks sync.Map + locks *ctxmu.MultiCtxRWMutex + updateTimeout time.Duration } func NewService(providers services.Providerser, options ...Option) (svc *Service) { @@ -30,7 +33,8 @@ func NewService(providers services.Providerser, options ...Option) (svc *Service providers: providers, secretLength: defaultSecretLength, storeKeyPrefix: defaultStoreKeyPrefix, - locks: sync.Map{}, + locks: ctxmu.NewDefaultMultiCtxRWMutex(), + updateTimeout: time.Duration(defaultUpdateTimeoutMS) * time.Millisecond, } for _, option := range options { option(svc) @@ -127,18 +131,6 @@ func (svc *Service) getStoreKey(key string) (storeKey string) { return } -func (svc *Service) lock(key string) (unlock func()) { - loaded := true - for loaded { - _, loaded = svc.locks.LoadOrStore(key, nil) - time.Sleep(10 * time.Millisecond) - } - unlock = func() { - svc.locks.Delete(key) - } - return -} - type updateArgs struct { Enable *bool Secret *string @@ -146,8 +138,14 @@ type updateArgs struct { } func (svc *Service) update(key string, args *updateArgs) (err error) { - unlock := svc.lock(key) - defer unlock() + ctx, cancel := context.WithTimeout(context.Background(), svc.updateTimeout) + defer cancel() + + err = svc.locks.Lock(ctx, key) + if err != nil { + return + } + defer svc.locks.Unlock(key) record := &handlers.AccessKeyRecord{} stk := svc.getStoreKey(key) From ee3e5f8bef9a70f089d2491d7e0f8ea19a7c5e3b Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Mon, 14 Aug 2023 18:30:45 +0800 Subject: [PATCH 024/139] feat: check auth --- s3/action/action.go | 36 ++++++------ s3/handlers/services.go | 4 +- s3/services/auth/auth_type.go | 2 +- s3/services/auth/check_handler_auth.go | 74 ++++++------------------- s3/services/auth/cred_temp.go | 69 ----------------------- s3/services/auth/service.go | 22 ++++++-- 
s3/services/auth/signature-v4-parser.go | 37 +++++-------- s3/services/auth/signature-v4-utils.go | 6 +- s3/services/auth/signature-v4.go | 41 +++++++------- s3/services/auth/signature.go | 37 ------------- s3/services/cors/service.go | 2 +- s3/set/match_test.go | 42 +++++++------- s3/utils/signature.go | 2 +- 13 files changed, 121 insertions(+), 253 deletions(-) delete mode 100644 s3/services/auth/cred_temp.go delete mode 100644 s3/services/auth/signature.go diff --git a/s3/action/action.go b/s3/action/action.go index ec7d088fe..8a13e97db 100644 --- a/s3/action/action.go +++ b/s3/action/action.go @@ -12,62 +12,62 @@ const ( //--- bucket // CreateBucketAction - CreateBucket Rest API action. - CreateBucketAction = "s3d:CreateBucket" + CreateBucketAction = "s3:CreateBucket" // HeadBucketAction - HeadBucket Rest API action. - HeadBucketAction = "s3d:HeadBucket" + HeadBucketAction = "s3:HeadBucket" // ListBucketAction - ListBucket Rest API action. - ListBucketAction = "s3d:ListBucket" + ListBucketAction = "s3:ListBucket" // DeleteBucketAction - DeleteBucket Rest API action. - DeleteBucketAction = "s3d:DeleteBucket" + DeleteBucketAction = "s3:DeleteBucket" // PutBucketAclAction - PutBucketAcl Rest API action. - PutBucketAclAction = "s3d:PutBucketAcl" + PutBucketAclAction = "s3:PutBucketAcl" // GetBucketAclAction - GetBucketAcl Rest API action. - GetBucketAclAction = "s3d:GetBucketAcl" + GetBucketAclAction = "s3:GetBucketAcl" //--- object // ListObjectsAction - ListObjects Rest API action. - ListObjectsAction = "s3d:ListObjects" + ListObjectsAction = "s3:ListObjects" // ListObjectsV2Action - ListObjectsV2 Rest API action. - ListObjectsV2Action = "s3d:ListObjectsV2" + ListObjectsV2Action = "s3:ListObjectsV2" // HeadObjectAction - HeadObject Rest API action. - HeadObjectAction = "s3d:HeadObject" + HeadObjectAction = "s3:HeadObject" // PutObjectAction - PutObject Rest API action. 
- PutObjectAction = "s3d:PutObject" + PutObjectAction = "s3:PutObject" // GetObjectAction - GetObject Rest API action. - GetObjectAction = "s3d:GetObject" + GetObjectAction = "s3:GetObject" // CopyObjectAction - CopyObject Rest API action. - CopyObjectAction = "s3d:CopyObject" + CopyObjectAction = "s3:CopyObject" // DeleteObjectAction - DeleteObject Rest API action. - DeleteObjectAction = "s3d:DeleteObject" + DeleteObjectAction = "s3:DeleteObject" // DeleteObjectsAction - DeleteObjects Rest API action. - DeleteObjectsAction = "s3d:DeleteObjects" + DeleteObjectsAction = "s3:DeleteObjects" //--- multipart upload // CreateMultipartUploadAction - CreateMultipartUpload Rest API action. - CreateMultipartUploadAction Action = "s3d:CreateMultipartUpload" + CreateMultipartUploadAction Action = "s3:CreateMultipartUpload" // AbortMultipartUploadAction - AbortMultipartUpload Rest API action. - AbortMultipartUploadAction Action = "s3d:AbortMultipartUpload" + AbortMultipartUploadAction Action = "s3:AbortMultipartUpload" // CompleteMultipartUploadAction - CompleteMultipartUpload Rest API action. - CompleteMultipartUploadAction Action = "s3d:CompleteMultipartUpload" + CompleteMultipartUploadAction Action = "s3:CompleteMultipartUpload" // UploadPartAction - UploadPartUpload Rest API action. - UploadPartAction Action = "s3d:UploadPartUpload" + UploadPartAction Action = "s3:UploadPartUpload" ) // SupportedActions List of all supported actions. 
diff --git a/s3/handlers/services.go b/s3/handlers/services.go index 3612f90d2..edede1b9c 100644 --- a/s3/handlers/services.go +++ b/s3/handlers/services.go @@ -1,7 +1,9 @@ package handlers import ( + "context" "github.com/bittorrent/go-btfs/s3/action" + "github.com/bittorrent/go-btfs/s3/apierrors" "net/http" ) @@ -22,7 +24,7 @@ type AccessKeyService interface { } type AuthService interface { - VerifySignature(r *http.Request) (accessKeyRecord *AccessKeyRecord, err error) + VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *AccessKeyRecord, err apierrors.ErrorCode) CheckACL(accessKeyRecord *AccessKeyRecord, bucketMeta *BucketMeta, action action.Action) (err error) } diff --git a/s3/services/auth/auth_type.go b/s3/services/auth/auth_type.go index 74ce2e913..572fb19b6 100644 --- a/s3/services/auth/auth_type.go +++ b/s3/services/auth/auth_type.go @@ -5,7 +5,7 @@ import ( "net/url" "strings" - "github.com/bittorrent/go-btfs/s3d/consts" + "github.com/bittorrent/go-btfs/s3/consts" ) // Verify if request has JWT. 
diff --git a/s3/services/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go index e87d694d8..77ec0dec4 100644 --- a/s3/services/auth/check_handler_auth.go +++ b/s3/services/auth/check_handler_auth.go @@ -1,29 +1,24 @@ package auth import ( - "bytes" "context" "encoding/hex" - s3action "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/handlers" - "github.com/bittorrent/go-btfs/s3/policy" "github.com/bittorrent/go-btfs/s3/utils/hash" - "github.com/bittorrent/go-btfs/s3d/store" - "io" "net/http" - "github.com/bittorrent/go-btfs/s3d/apierrors" - "github.com/bittorrent/go-btfs/s3d/consts" - "github.com/bittorrent/go-btfs/s3d/etag" + "github.com/bittorrent/go-btfs/s3/apierrors" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/etag" ) -// AuthSys auth and sign system -type AuthSys struct{} - -// NewAuthSys new an AuthSys -func NewAuthSys() *AuthSys { - return &AuthSys{} -} +//// AuthSys auth and sign system +//type AuthSys struct{} +// +//// NewAuthSys new an AuthSys +//func NewAuthSys() *AuthSys { +// return &AuthSys{} +//} // CheckRequestAuthTypeCredential Check request auth type verifies the incoming http request // - validates the request signature @@ -32,12 +27,7 @@ func NewAuthSys() *AuthSys { // // returns APIErrorCode if any to be replied to the client. // Additionally, returns the accessKey used in the request, and if this request is by an admin. 
-func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request, action s3action.Action, bucketName string, bmSys *store.BucketMetadataSys) (cred Credentials, err error) { - //todo 是否需要判断 - if bucketName == "" { - return cred, handlers.ErrBucketNotFound - } - +func (s *Service) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request) (cred *handlers.AccessKeyRecord, s3Err apierrors.ErrorCode) { // 1.check signature switch GetRequestAuthType(r) { case AuthTypeUnknown, AuthTypeStreamingSigned: @@ -49,57 +39,29 @@ func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Re if s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3); s3Err != apierrors.ErrNone { return cred, s3Err } - cred, s3Err = GetReqAccessKeyV4(r, region, ServiceS3) + cred, s3Err = GetReqAccessKeyV4(r, region, ServiceS3, s.accessKeySvc) } if s3Err != apierrors.ErrNone { return cred, s3Err } - // CreateBucketAction - if action == action.CreateBucketAction { - // To extract region from XML in request body, get copy of request body. - payload, err := io.ReadAll(io.LimitReader(r.Body, consts.MaxLocationConstraintSize)) - if err != nil { - //log.Errorf("ReadAll err:%v", err) - return cred, apierrors.ErrMalformedXML - } - - // Populate payload to extract location constraint. 
- r.Body = io.NopCloser(bytes.NewReader(payload)) - //todo check HasBucket - if bmSys.HasBucket(ctx, bucketName) { - return cred, apierrors.ErrBucketAlreadyExists - } - } - - // 2.check acl - //todo 获取bucket用户信息:owner, acl - meta, err := bmSys.GetBucketMeta(ctx, bucketName) - if err != nil { - return cred, apierrors.ErrAccessDenied - } - - if policy.IsAllowed(meta.Owner == cred.AccessKey, meta.Acl, action) == false { - return cred, apierrors.ErrAccessDenied - } - return cred, apierrors.ErrNone } -func (s *AuthSys) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { +func (s *Service) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { sha256sum := getContentSha256Cksum(r, stype) switch { case IsRequestSignatureV4(r): - return DoesSignatureMatch(sha256sum, r, region, stype) + return DoesSignatureMatch(sha256sum, r, region, stype, s.accessKeySvc) case isRequestPresignedSignatureV4(r): - return DoesPresignedSignatureMatch(sha256sum, r, region, stype) + return DoesPresignedSignatureMatch(sha256sum, r, region, stype, s.accessKeySvc) default: return apierrors.ErrAccessDenied } } // IsReqAuthenticated Verify if request has valid AWS Signature Version '4'. 
-func (s *AuthSys) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { +func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { if errCode := s.ReqSignatureV4Verify(r, region, stype); errCode != apierrors.ErrNone { return errCode } @@ -136,7 +98,7 @@ func (s *AuthSys) IsReqAuthenticated(ctx context.Context, r *http.Request, regio } //// ValidateAdminSignature validate admin Signature -//func (s *AuthSys) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (Credentials, map[string]interface{}, bool, apierrors.ErrorCode) { +//func (s *Service) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (Credentials, map[string]interface{}, bool, apierrors.ErrorCode) { // var cred Credentials // var owner bool // s3Err := apierrors.ErrAccessDenied @@ -158,7 +120,7 @@ func (s *AuthSys) IsReqAuthenticated(ctx context.Context, r *http.Request, regio // return cred, nil, owner, apierrors.ErrNone //} //// -//func (s *AuthSys) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err apierrors.ErrorCode) { +//func (s *Service) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err apierrors.ErrorCode) { // switch GetRequestAuthType(r) { // case AuthTypeUnknown: // s3Err = apierrors.ErrSignatureVersionNotSupported diff --git a/s3/services/auth/cred_temp.go b/s3/services/auth/cred_temp.go deleted file mode 100644 index 44b488772..000000000 --- a/s3/services/auth/cred_temp.go +++ /dev/null @@ -1,69 +0,0 @@ -package auth - -import ( - "github.com/bittorrent/go-btfs/s3d/apierrors" - "time" -) - -var timeSentinel = time.Unix(0, 0).UTC() - -// Credentials holds access and secret keys. 
-type Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - CreateTime time.Time `xml:"CreateTime" json:"createTime,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken"` - Status string `xml:"-" json:"status,omitempty"` - ParentUser string `xml:"-" json:"parentUser,omitempty"` -} - -// IsValid - returns whether credential is valid or not. -func (cred *Credentials) IsValid() bool { - return true -} - -// IsExpired - returns whether Credential is expired or not. -func (cred *Credentials) IsExpired() bool { - return false -} - -func CheckAccessKeyValid(accessKey string) (*Credentials, apierrors.ErrorCode) { - - ////check it - //cred, bl: = mp[accessKey] - //if bl { - // return cred, nil - //} else { - // return nil, errors.New("node found accessKey! ") - //} - - return &Credentials{AccessKey: accessKey}, apierrors.ErrNone -} - -const ( - // Minimum length for access key. - accessKeyMinLen = 3 - - // Maximum length for access key. - // There is no max length enforcement for access keys - accessKeyMaxLen = 20 - - // Minimum length for secret key for both server and gateway mode. - secretKeyMinLen = 8 - - // Maximum secret key length , this - // is used when autogenerating new credentials. - // There is no max length enforcement for secret keys - secretKeyMaxLen = 40 -) - -// IsAccessKeyValid - validate access key for right length. -func IsAccessKeyValid(accessKey string) bool { - return len(accessKey) >= accessKeyMinLen -} - -// IsSecretKeyValid - validate secret key for right length. 
-func IsSecretKeyValid(secretKey string) bool { - return len(secretKey) >= secretKeyMinLen -} diff --git a/s3/services/auth/service.go b/s3/services/auth/service.go index 44c24af10..79538ff50 100644 --- a/s3/services/auth/service.go +++ b/s3/services/auth/service.go @@ -1,8 +1,11 @@ package auth import ( + "context" "github.com/bittorrent/go-btfs/s3/action" + "github.com/bittorrent/go-btfs/s3/apierrors" "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/policy" "github.com/bittorrent/go-btfs/s3/services" "net/http" ) @@ -11,13 +14,13 @@ var _ handlers.AuthService = (*Service)(nil) type Service struct { providers services.Providerser - accesskeySvc handlers.AccessKeyService + accessKeySvc handlers.AccessKeyService } -func NewService(providers services.Providerser, accesskeySvc handlers.AccessKeyService, options ...Option) (svc *Service) { +func NewService(providers services.Providerser, accessKeySvc handlers.AccessKeyService, options ...Option) (svc *Service) { svc = &Service{ providers: providers, - accesskeySvc: accesskeySvc, + accessKeySvc: accessKeySvc, } for _, option := range options { option(svc) @@ -25,10 +28,21 @@ func NewService(providers services.Providerser, accesskeySvc handlers.AccessKeyS return } -func (svc *Service) VerifySignature(r *http.Request) (accessKeyRecord *handlers.AccessKeyRecord, err error) { +func (s *Service) VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *handlers.AccessKeyRecord, err apierrors.ErrorCode) { + s.CheckRequestAuthTypeCredential(ctx, r) return } func (svc *Service) CheckACL(accessKeyRecord *handlers.AccessKeyRecord, bucketMeta *handlers.BucketMeta, action action.Action) (err error) { + ////todo 是否需要判断原始的 + //if bucketName == "" { + // return cred, handlers.ErrBucketNotFound + //} + + //todo 注意:如果action是CreateBucketAction,HasBucket(ctx, bucketName)进行判断 + + if policy.IsAllowed(bucketMeta.Owner == accessKeyRecord.Key, bucketMeta.Acl, action) == false { + return cred, 
apierrors.ErrAccessDenied + } return } diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go index 2c60d2a09..1e4f0a884 100644 --- a/s3/services/auth/signature-v4-parser.go +++ b/s3/services/auth/signature-v4-parser.go @@ -18,13 +18,14 @@ package auth import ( + "github.com/bittorrent/go-btfs/s3/handlers" "net/http" "net/url" "strings" "time" - "github.com/bittorrent/go-btfs/s3d/apierrors" - "github.com/bittorrent/go-btfs/s3d/consts" + "github.com/bittorrent/go-btfs/s3/apierrors" + "github.com/bittorrent/go-btfs/s3/consts" ) // credentialHeader data type represents structured form of Credential @@ -63,9 +64,9 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) return ch, apierrors.ErrCredMalformed } accessKey := strings.Join(credElements[:len(credElements)-4], consts.SlashSeparator) // The access key may contain one or more `/` - if !IsAccessKeyValid(accessKey) { - return ch, apierrors.ErrInvalidAccessKeyID - } + //if !IsAccessKeyValid(accessKey) { + // return ch, apierrors.ErrInvalidAccessKeyID + //} // Save access key id. cred := credentialHeader{ accessKey: accessKey, @@ -285,33 +286,25 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues return signV4Values, apierrors.ErrNone } -func GetReqAccessKeyV4(r *http.Request, region string, stype serviceType) (Credentials, apierrors.ErrorCode) { +func GetReqAccessKeyV4(r *http.Request, region string, stype serviceType, accessKeySvc handlers.AccessKeyService) (*handlers.AccessKeyRecord, apierrors.ErrorCode) { ch, s3Err := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) if s3Err != apierrors.ErrNone { // Strip off the Algorithm prefix. 
v4Auth := strings.TrimPrefix(r.Header.Get("Authorization"), signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - return Credentials{}, apierrors.ErrMissingFields + return &handlers.AccessKeyRecord{}, apierrors.ErrMissingFields } ch, s3Err = parseCredentialHeader(authFields[0], region, stype) if s3Err != apierrors.ErrNone { - return Credentials{}, s3Err + return &handlers.AccessKeyRecord{}, s3Err } } - // TODO: Why should a temporary user be replaced with the parent user's account name? - //cerd, _ := s.Iam.GetUser(r.Context(), ch.accessKey) - //if cerd.IsTemp() { - // ch.accessKey = cerd.ParentUser - //} - return checkAccessKeyValid(ch.accessKey) -} -// check if the access key is valid and recognized, additionally -func checkAccessKeyValid(accessKey string) (Credentials, apierrors.ErrorCode) { - - //todo 根据accessKey获取accessKey - cred := Credentials{} - - return cred, apierrors.ErrNone + // TODO: Why should a temporary user be replaced with the parent user's account name? 
+ record, err := accessKeySvc.Get(ch.accessKey) + if err != nil { + return &handlers.AccessKeyRecord{}, err + } + return record, apierrors.ErrNone } diff --git a/s3/services/auth/signature-v4-utils.go b/s3/services/auth/signature-v4-utils.go index 3cab7aa58..734dccab9 100644 --- a/s3/services/auth/signature-v4-utils.go +++ b/s3/services/auth/signature-v4-utils.go @@ -23,8 +23,8 @@ import ( "strconv" "strings" - "github.com/bittorrent/go-btfs/s3d/apierrors" - "github.com/bittorrent/go-btfs/s3d/consts" + "github.com/bittorrent/go-btfs/s3/apierrors" + "github.com/bittorrent/go-btfs/s3/consts" ) // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the @@ -39,7 +39,7 @@ func isValidRegion(reqRegion string, confRegion string) bool { if confRegion == "US" { confRegion = consts.DefaultRegion } - // Some older s3d clients set region as "US" instead of + // Some older s3 clients set region as "US" instead of // globalDefaultRegion, handle it. if reqRegion == "US" { reqRegion = consts.DefaultRegion diff --git a/s3/services/auth/signature-v4.go b/s3/services/auth/signature-v4.go index 3bf68335f..eb53d9658 100644 --- a/s3/services/auth/signature-v4.go +++ b/s3/services/auth/signature-v4.go @@ -19,6 +19,7 @@ package auth import ( "crypto/subtle" + "github.com/bittorrent/go-btfs/s3/handlers" "github.com/bittorrent/go-btfs/s3/set" "github.com/bittorrent/go-btfs/s3/utils" "net/http" @@ -26,8 +27,8 @@ import ( "strconv" "time" - "github.com/bittorrent/go-btfs/s3d/apierrors" - "github.com/bittorrent/go-btfs/s3d/consts" + "github.com/bittorrent/go-btfs/s3/apierrors" + "github.com/bittorrent/go-btfs/s3/consts" ) // AWS Signature Version '4' constants. 
@@ -40,7 +41,7 @@ const ( type serviceType string const ( - ServiceS3 serviceType = "s3d" + ServiceS3 serviceType = "s3" ////ServiceSTS STS //ServiceSTS serviceType = "sts" ) @@ -58,7 +59,7 @@ func compareSignatureV4(sig1, sig2 string) bool { // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html // // returns apierrors.ErrNone if the signature matches. -func DoesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) apierrors.ErrorCode { +func DoesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType, accessKeySvc handlers.AccessKeyService) apierrors.ErrorCode { // Copy request req := *r @@ -69,7 +70,7 @@ func DoesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s } // get access_info by accessKey - cred, s3Err := CheckAccessKeyValid(pSignValues.Credential.accessKey) + cred, s3Err := accessKeySvc.Get(pSignValues.Credential.accessKey) if s3Err != apierrors.ErrNone { return s3Err } @@ -101,10 +102,11 @@ func DoesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s query.Set(consts.AmzContentSha256, hashedPayload) } - token := req.Form.Get(consts.AmzSecurityToken) - if token != "" { - query.Set(consts.AmzSecurityToken, cred.SessionToken) - } + // not check token? 
+ //token := req.Form.Get(consts.AmzSecurityToken) + //if token != "" { + // query.Set(consts.AmzSecurityToken, cred.SessionToken) + //} query.Set(consts.AmzAlgorithm, signV4Algorithm) @@ -112,11 +114,11 @@ func DoesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s query.Set(consts.AmzDate, t.Format(iso8601Format)) query.Set(consts.AmzExpires, strconv.Itoa(expireSeconds)) query.Set(consts.AmzSignedHeaders, utils.GetSignedHeaders(extractedSignedHeaders)) - query.Set(consts.AmzCredential, cred.AccessKey+consts.SlashSeparator+pSignValues.Credential.getScope()) + query.Set(consts.AmzCredential, cred.Key+consts.SlashSeparator+pSignValues.Credential.getScope()) defaultSigParams := set.CreateStringSet( consts.AmzContentSha256, - consts.AmzSecurityToken, + //consts.AmzSecurityToken, consts.AmzAlgorithm, consts.AmzDate, consts.AmzExpires, @@ -155,10 +157,11 @@ func DoesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s if clntHashedPayload != "" && clntHashedPayload != query.Get(consts.AmzContentSha256) { return apierrors.ErrContentSHA256Mismatch } - // Verify if security token is correct. - if token != "" && subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 { - return apierrors.ErrInvalidToken - } + // not check SessionToken. + //// Verify if security token is correct. + //if token != "" && subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 { + // return apierrors.ErrInvalidToken + //} // Verify finally if signature is same. @@ -169,7 +172,7 @@ func DoesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s presignedStringToSign := utils.GetStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope()) // Get hmac presigned signing key. 
- presignedSigningKey := utils.GetSigningKey(cred.SecretKey, pSignValues.Credential.scope.date, + presignedSigningKey := utils.GetSigningKey(cred.Secret, pSignValues.Credential.scope.date, pSignValues.Credential.scope.region, string(stype)) // Get new signature. @@ -186,7 +189,7 @@ func DoesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s // - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html // // returns apierrors.ErrNone if signature matches. -func DoesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) apierrors.ErrorCode { +func DoesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType, accessKeySvc handlers.AccessKeyService) apierrors.ErrorCode { // Copy request. req := *r @@ -205,7 +208,7 @@ func DoesSignatureMatch(hashedPayload string, r *http.Request, region string, st return errCode } - cred, s3Err := CheckAccessKeyValid(signV4Values.Credential.accessKey) + cred, s3Err := accessKeySvc.Get(signV4Values.Credential.accessKey) if s3Err != apierrors.ErrNone { return s3Err } @@ -234,7 +237,7 @@ func DoesSignatureMatch(hashedPayload string, r *http.Request, region string, st stringToSign := utils.GetStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) // Get hmac signing key. - signingKey := utils.GetSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, + signingKey := utils.GetSigningKey(cred.Key, signV4Values.Credential.scope.date, signV4Values.Credential.scope.region, string(stype)) // Calculate signature. 
diff --git a/s3/services/auth/signature.go b/s3/services/auth/signature.go deleted file mode 100644 index e10702920..000000000 --- a/s3/services/auth/signature.go +++ /dev/null @@ -1,37 +0,0 @@ -package auth - -import ( - "context" - "github.com/bittorrent/go-btfs/s3/handlers" - "github.com/bittorrent/go-btfs/s3/services" - s3action "github.com/bittorrent/go-btfs/s3d/action" - "github.com/bittorrent/go-btfs/s3d/apierrors" - "github.com/bittorrent/go-btfs/s3d/store" - "net/http" -) - -var _ handlers.SignatureService = (*Signature)(nil) - -type Signature struct { - providers services.Providerser - accesskeySvc handlers.AccessKeyService - au *AuthSys - bmSys *store.BucketMetadataSys -} - -func NewSignature(providers services.Providerser, accesskeySvc handlers.AccessKeyService, options ...Option) (signature *Signature) { - signature = &Signature{ - providers: providers, - accesskeySvc: accesskeySvc, - } - for _, option := range options { - - } - return -} - -func (s *service) CheckSignatureAndAcl(ctx context.Context, r *http.Request, action s3action.Action, bucketName string) ( - cred Credentials, s3Error apierrors.ErrorCode) { - - return s.au.CheckRequestAuthTypeCredential(ctx, r, action, bucketName, s.bmSys) -} diff --git a/s3/services/cors/service.go b/s3/services/cors/service.go index ccb76882e..c61ba0074 100644 --- a/s3/services/cors/service.go +++ b/s3/services/cors/service.go @@ -1,8 +1,8 @@ package cors import ( + "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/handlers" - "github.com/bittorrent/go-btfs/s3d/consts" "net/http" ) diff --git a/s3/set/match_test.go b/s3/set/match_test.go index bd903c55b..eec6df487 100644 --- a/s3/set/match_test.go +++ b/s3/set/match_test.go @@ -18,14 +18,14 @@ func TestMatch(t *testing.T) { // Test case with pattern "*". Expected to match any text. { pattern: "*", - text: "s3d:GetObject", + text: "s3:GetObject", matched: true, }, // Test case - 2. // Test case with empty pattern. 
This only matches empty string. { pattern: "", - text: "s3d:GetObject", + text: "s3:GetObject", matched: false, }, // Test case - 3. @@ -38,29 +38,29 @@ func TestMatch(t *testing.T) { // Test case - 4. // Test case with single "*" at the end. { - pattern: "s3d:*", - text: "s3d:ListMultipartUploadParts", + pattern: "s3:*", + text: "s3:ListMultipartUploadParts", matched: true, }, // Test case - 5. // Test case with a no "*". In this case the pattern and text should be the same. { - pattern: "s3d:ListBucketMultipartUploads", - text: "s3d:ListBucket", + pattern: "s3:ListBucketMultipartUploads", + text: "s3:ListBucket", matched: false, }, // Test case - 6. // Test case with a no "*". In this case the pattern and text should be the same. { - pattern: "s3d:ListBucket", - text: "s3d:ListBucket", + pattern: "s3:ListBucket", + text: "s3:ListBucket", matched: true, }, // Test case - 7. // Test case with a no "*". In this case the pattern and text should be the same. { - pattern: "s3d:ListBucketMultipartUploads", - text: "s3d:ListBucketMultipartUploads", + pattern: "s3:ListBucketMultipartUploads", + text: "s3:ListBucketMultipartUploads", matched: true, }, // Test case - 8. @@ -194,7 +194,7 @@ func TestMatch(t *testing.T) { matched: true, }, // Test case 27-28. - // '?' matches '/' too. (works with s3d). + // '?' matches '/' too. (works with s3). // This is because the namespace is considered flat. // "abc?efg" matches both "abcdefg" and "abc/efg". { @@ -375,14 +375,14 @@ func TestMatchSimple(t *testing.T) { // Test case with pattern "*". Expected to match any text. { pattern: "*", - text: "s3d:GetObject", + text: "s3:GetObject", matched: true, }, // Test case - 2. // Test case with empty pattern. This only matches empty string. { pattern: "", - text: "s3d:GetObject", + text: "s3:GetObject", matched: false, }, // Test case - 3. @@ -395,29 +395,29 @@ func TestMatchSimple(t *testing.T) { // Test case - 4. // Test case with single "*" at the end. 
{ - pattern: "s3d:*", - text: "s3d:ListMultipartUploadParts", + pattern: "s3:*", + text: "s3:ListMultipartUploadParts", matched: true, }, // Test case - 5. // Test case with a no "*". In this case the pattern and text should be the same. { - pattern: "s3d:ListBucketMultipartUploads", - text: "s3d:ListBucket", + pattern: "s3:ListBucketMultipartUploads", + text: "s3:ListBucket", matched: false, }, // Test case - 6. // Test case with a no "*". In this case the pattern and text should be the same. { - pattern: "s3d:ListBucket", - text: "s3d:ListBucket", + pattern: "s3:ListBucket", + text: "s3:ListBucket", matched: true, }, // Test case - 7. // Test case with a no "*". In this case the pattern and text should be the same. { - pattern: "s3d:ListBucketMultipartUploads", - text: "s3d:ListBucketMultipartUploads", + pattern: "s3:ListBucketMultipartUploads", + text: "s3:ListBucketMultipartUploads", matched: true, }, // Test case - 8. diff --git a/s3/utils/signature.go b/s3/utils/signature.go index fe6a99d49..571730535 100644 --- a/s3/utils/signature.go +++ b/s3/utils/signature.go @@ -16,7 +16,7 @@ import ( "time" "unicode/utf8" - "github.com/bittorrent/go-btfs/s3d/consts" + "github.com/bittorrent/go-btfs/s3/consts" ) var ignoredHeaders = map[string]bool{ From 44da3100987a03405e04806ecabfe81174d5920b Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Mon, 14 Aug 2023 18:45:18 +0800 Subject: [PATCH 025/139] chore: --- s3/services/auth/check_handler_auth.go | 6 +++--- s3/services/auth/signature-v4-parser.go | 4 ++-- s3/services/auth/signature-v4.go | 9 ++++----- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/s3/services/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go index 77ec0dec4..4222c886c 100644 --- a/s3/services/auth/check_handler_auth.go +++ b/s3/services/auth/check_handler_auth.go @@ -39,7 +39,7 @@ func (s *Service) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Re if s3Err = s.IsReqAuthenticated(ctx, r, region, 
ServiceS3); s3Err != apierrors.ErrNone { return cred, s3Err } - cred, s3Err = GetReqAccessKeyV4(r, region, ServiceS3, s.accessKeySvc) + cred, s3Err = s.getReqAccessKeyV4(r, region, ServiceS3) } if s3Err != apierrors.ErrNone { return cred, s3Err @@ -52,9 +52,9 @@ func (s *Service) ReqSignatureV4Verify(r *http.Request, region string, stype ser sha256sum := getContentSha256Cksum(r, stype) switch { case IsRequestSignatureV4(r): - return DoesSignatureMatch(sha256sum, r, region, stype, s.accessKeySvc) + return s.doesSignatureMatch(sha256sum, r, region, stype) case isRequestPresignedSignatureV4(r): - return DoesPresignedSignatureMatch(sha256sum, r, region, stype, s.accessKeySvc) + return s.doesPresignedSignatureMatch(sha256sum, r, region, stype) default: return apierrors.ErrAccessDenied } diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go index 1e4f0a884..011659c04 100644 --- a/s3/services/auth/signature-v4-parser.go +++ b/s3/services/auth/signature-v4-parser.go @@ -286,7 +286,7 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues return signV4Values, apierrors.ErrNone } -func GetReqAccessKeyV4(r *http.Request, region string, stype serviceType, accessKeySvc handlers.AccessKeyService) (*handlers.AccessKeyRecord, apierrors.ErrorCode) { +func (s *Service) getReqAccessKeyV4(r *http.Request, region string, stype serviceType) (*handlers.AccessKeyRecord, apierrors.ErrorCode) { ch, s3Err := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) if s3Err != apierrors.ErrNone { // Strip off the Algorithm prefix. @@ -302,7 +302,7 @@ func GetReqAccessKeyV4(r *http.Request, region string, stype serviceType, access } // TODO: Why should a temporary user be replaced with the parent user's account name? 
- record, err := accessKeySvc.Get(ch.accessKey) + record, err := s.accessKeySvc.Get(ch.accessKey) if err != nil { return &handlers.AccessKeyRecord{}, err } diff --git a/s3/services/auth/signature-v4.go b/s3/services/auth/signature-v4.go index eb53d9658..29b98bfd4 100644 --- a/s3/services/auth/signature-v4.go +++ b/s3/services/auth/signature-v4.go @@ -19,7 +19,6 @@ package auth import ( "crypto/subtle" - "github.com/bittorrent/go-btfs/s3/handlers" "github.com/bittorrent/go-btfs/s3/set" "github.com/bittorrent/go-btfs/s3/utils" "net/http" @@ -59,7 +58,7 @@ func compareSignatureV4(sig1, sig2 string) bool { // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html // // returns apierrors.ErrNone if the signature matches. -func DoesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType, accessKeySvc handlers.AccessKeyService) apierrors.ErrorCode { +func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) apierrors.ErrorCode { // Copy request req := *r @@ -70,7 +69,7 @@ func DoesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s } // get access_info by accessKey - cred, s3Err := accessKeySvc.Get(pSignValues.Credential.accessKey) + cred, s3Err := s.accessKeySvc.Get(pSignValues.Credential.accessKey) if s3Err != apierrors.ErrNone { return s3Err } @@ -189,7 +188,7 @@ func DoesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s // - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html // // returns apierrors.ErrNone if signature matches. -func DoesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType, accessKeySvc handlers.AccessKeyService) apierrors.ErrorCode { +func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) apierrors.ErrorCode { // Copy request. 
req := *r @@ -208,7 +207,7 @@ func DoesSignatureMatch(hashedPayload string, r *http.Request, region string, st return errCode } - cred, s3Err := accessKeySvc.Get(signV4Values.Credential.accessKey) + cred, s3Err := s.accessKeySvc.Get(signV4Values.Credential.accessKey) if s3Err != apierrors.ErrNone { return s3Err } From 1784c4945aacfb6630a93b966a017c8a27863ea9 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Mon, 14 Aug 2023 18:47:25 +0800 Subject: [PATCH 026/139] chore: --- s3/services/auth/check_handler_auth.go | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/s3/services/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go index 4222c886c..149968d85 100644 --- a/s3/services/auth/check_handler_auth.go +++ b/s3/services/auth/check_handler_auth.go @@ -12,14 +12,6 @@ import ( "github.com/bittorrent/go-btfs/s3/etag" ) -//// AuthSys auth and sign system -//type AuthSys struct{} -// -//// NewAuthSys new an AuthSys -//func NewAuthSys() *AuthSys { -// return &AuthSys{} -//} - // CheckRequestAuthTypeCredential Check request auth type verifies the incoming http request // - validates the request signature // - validates the policy action if anonymous tests bucket policies if any, @@ -28,18 +20,16 @@ import ( // returns APIErrorCode if any to be replied to the client. // Additionally, returns the accessKey used in the request, and if this request is by an admin. 
func (s *Service) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request) (cred *handlers.AccessKeyRecord, s3Err apierrors.ErrorCode) { - // 1.check signature + // check signature switch GetRequestAuthType(r) { - case AuthTypeUnknown, AuthTypeStreamingSigned: - return cred, apierrors.ErrSignatureVersionNotSupported - case AuthTypePresignedV2, AuthTypeSignedV2: - return cred, apierrors.ErrSignatureVersionNotSupported case AuthTypeSigned, AuthTypePresigned: region := "" if s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3); s3Err != apierrors.ErrNone { return cred, s3Err } cred, s3Err = s.getReqAccessKeyV4(r, region, ServiceS3) + default: + return cred, apierrors.ErrSignatureVersionNotSupported } if s3Err != apierrors.ErrNone { return cred, s3Err From 84dd056c8a6897e9149e49097e23879c0ac3d281 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Tue, 15 Aug 2023 11:52:27 +0800 Subject: [PATCH 027/139] feat: add bucket service --- s3/handlers/services.go | 10 +- s3/handlers/services_errors.go | 1 + s3/handlers/services_types.go | 19 ++- s3/providers/statestore/storage_proxy.go | 5 + s3/services/auth/check_handler_auth.go | 4 +- s3/services/auth/service.go | 3 +- s3/services/auth/signature-v4-parser.go | 2 +- s3/services/auth/signature-v4.go | 4 +- s3/services/bucket/service.go | 174 +++++++++++++++++++++++ s3/services/bucket/service_option.go | 3 + s3/services/providerser.go | 3 + 11 files changed, 220 insertions(+), 8 deletions(-) create mode 100644 s3/services/bucket/service.go create mode 100644 s3/services/bucket/service_option.go diff --git a/s3/handlers/services.go b/s3/handlers/services.go index edede1b9c..7dab817ea 100644 --- a/s3/handlers/services.go +++ b/s3/handlers/services.go @@ -4,6 +4,7 @@ import ( "context" "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/apierrors" + "github.com/bittorrent/go-btfs/s3/lock" "net/http" ) @@ -25,10 +26,17 @@ type AccessKeyService interface { type AuthService interface { 
VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *AccessKeyRecord, err apierrors.ErrorCode) - CheckACL(accessKeyRecord *AccessKeyRecord, bucketMeta *BucketMeta, action action.Action) (err error) + CheckACL(accessKeyRecord *AccessKeyRecord, bucketMeta *BucketMetadata, action action.Action) (err error) } type BucketService interface { + NewNSLock(bucket string) lock.RWLocker + SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) + CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error + GetBucketMeta(ctx context.Context, bucket string) (meta BucketMetadata, err error) + HasBucket(ctx context.Context, bucket string) bool + DeleteBucket(ctx context.Context, bucket string) error + GetAllBucketsOfUser(ctx context.Context, username string) ([]BucketMetadata, error) } type ObjectService interface { diff --git a/s3/handlers/services_errors.go b/s3/handlers/services_errors.go index 057ea6fde..0b2fc79c0 100644 --- a/s3/handlers/services_errors.go +++ b/s3/handlers/services_errors.go @@ -5,4 +5,5 @@ import "errors" var ( ErrBucketNotFound = errors.New("bucket is not found") ErrSginVersionNotSupport = errors.New("sign version is not support") + ErrBucketNotEmpty = errors.New("bucket not empty") ) diff --git a/s3/handlers/services_types.go b/s3/handlers/services_types.go index 3efe138dd..ed0bd2eda 100644 --- a/s3/handlers/services_types.go +++ b/s3/handlers/services_types.go @@ -11,5 +11,22 @@ type AccessKeyRecord struct { UpdatedAt time.Time `json:"updated_at"` } -type BucketMeta struct { +// BucketMetadata contains bucket metadata. +type BucketMetadata struct { + Name string + Region string + Owner string + Acl string + Created time.Time +} + +// NewBucketMetadata creates BucketMetadata with the supplied name and Created to Now. 
+func NewBucketMetadata(name, region, accessKey, acl string) *BucketMetadata { + return &BucketMetadata{ + Name: name, + Region: region, + Owner: accessKey, + Acl: acl, + Created: time.Now().UTC(), + } } diff --git a/s3/providers/statestore/storage_proxy.go b/s3/providers/statestore/storage_proxy.go index 4ef6045b0..ee5d97a6b 100644 --- a/s3/providers/statestore/storage_proxy.go +++ b/s3/providers/statestore/storage_proxy.go @@ -1,6 +1,7 @@ package statestore import ( + "context" "errors" "github.com/bittorrent/go-btfs/s3/services" "github.com/bittorrent/go-btfs/transaction/storage" @@ -41,3 +42,7 @@ func (s *StorageProxy) Delete(key string) (err error) { func (s *StorageProxy) Iterate(prefix string, iterFunc services.StateStoreIterFunc) (err error) { return s.proxy.Iterate(prefix, storage.StateIterFunc(iterFunc)) } + +func (s *StorageProxy) ReadAllChan(ctx context.Context, prefix string, seekKey string) (<-chan *storage.Entry, error) { + return s.proxy.ReadAllChan(ctx, prefix, seekKey) +} diff --git a/s3/services/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go index 149968d85..0808fff4e 100644 --- a/s3/services/auth/check_handler_auth.go +++ b/s3/services/auth/check_handler_auth.go @@ -3,13 +3,13 @@ package auth import ( "context" "encoding/hex" - "github.com/bittorrent/go-btfs/s3/handlers" - "github.com/bittorrent/go-btfs/s3/utils/hash" "net/http" "github.com/bittorrent/go-btfs/s3/apierrors" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/etag" + "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/utils/hash" ) // CheckRequestAuthTypeCredential Check request auth type verifies the incoming http request diff --git a/s3/services/auth/service.go b/s3/services/auth/service.go index 79538ff50..a7befa14d 100644 --- a/s3/services/auth/service.go +++ b/s3/services/auth/service.go @@ -2,12 +2,13 @@ package auth import ( "context" + "net/http" + "github.com/bittorrent/go-btfs/s3/action" 
"github.com/bittorrent/go-btfs/s3/apierrors" "github.com/bittorrent/go-btfs/s3/handlers" "github.com/bittorrent/go-btfs/s3/policy" "github.com/bittorrent/go-btfs/s3/services" - "net/http" ) var _ handlers.AuthService = (*Service)(nil) diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go index 011659c04..2c967f03e 100644 --- a/s3/services/auth/signature-v4-parser.go +++ b/s3/services/auth/signature-v4-parser.go @@ -18,7 +18,6 @@ package auth import ( - "github.com/bittorrent/go-btfs/s3/handlers" "net/http" "net/url" "strings" @@ -26,6 +25,7 @@ import ( "github.com/bittorrent/go-btfs/s3/apierrors" "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/handlers" ) // credentialHeader data type represents structured form of Credential diff --git a/s3/services/auth/signature-v4.go b/s3/services/auth/signature-v4.go index 29b98bfd4..26856cf38 100644 --- a/s3/services/auth/signature-v4.go +++ b/s3/services/auth/signature-v4.go @@ -19,8 +19,6 @@ package auth import ( "crypto/subtle" - "github.com/bittorrent/go-btfs/s3/set" - "github.com/bittorrent/go-btfs/s3/utils" "net/http" "net/url" "strconv" @@ -28,6 +26,8 @@ import ( "github.com/bittorrent/go-btfs/s3/apierrors" "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/set" + "github.com/bittorrent/go-btfs/s3/utils" ) // AWS Signature Version '4' constants. 
diff --git a/s3/services/bucket/service.go b/s3/services/bucket/service.go new file mode 100644 index 000000000..018f96e33 --- /dev/null +++ b/s3/services/bucket/service.go @@ -0,0 +1,174 @@ +package bucket + +import ( + "context" + "time" + + "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/lock" + "github.com/bittorrent/go-btfs/s3/services" + "github.com/syndtr/goleveldb/leveldb" +) + +const ( + bucketPrefix = "bkt/" + globalOperationTimeout = 5 * time.Minute + deleteOperationTimeout = 1 * time.Minute +) + +var _ handlers.BucketService = (*Service)(nil) + +// Service captures all bucket metadata for a given cluster. +type Service struct { + providers services.Providerser + nsLock *lock.NsLockMap + emptyBucket func(ctx context.Context, bucket string) (bool, error) +} + +// NewService - creates new policy system. +func NewService(providers services.Providerser, options ...Option) (s *Service) { + s = &Service{ + providers: providers, + nsLock: lock.NewNSLock(), + } + for _, option := range options { + option(s) + } + return s +} + +// NewBucketMetadata creates handlers.BucketMetadata with the supplied name and Created to Now. +func (s *Service) NewBucketMetadata(name, region, accessKey, acl string) *handlers.BucketMetadata { + return &handlers.BucketMetadata{ + Name: name, + Region: region, + Owner: accessKey, + Acl: acl, + Created: time.Now().UTC(), + } +} + +// NewNSLock - initialize a new namespace RWLocker instance. 
+func (s *Service) NewNSLock(bucket string) lock.RWLocker { + return s.nsLock.NewNSLock("meta", bucket) +} + +func (s *Service) SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) { + s.emptyBucket = emptyBucket +} + +// setBucketMeta - sets a new metadata in-db +func (s *Service) setBucketMeta(bucket string, meta *handlers.BucketMetadata) error { + return s.providers.GetStateStore().Put(bucketPrefix+bucket, meta) +} + +// CreateBucket - create a new Bucket +func (s *Service) CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error { + lk := s.NewNSLock(bucket) + lkctx, err := lk.GetLock(ctx, globalOperationTimeout) + if err != nil { + return err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx.Cancel) + + return s.setBucketMeta(bucket, s.NewBucketMetadata(bucket, region, accessKey, acl)) +} + +func (s *Service) getBucketMeta(bucket string) (meta handlers.BucketMetadata, err error) { + err = s.providers.GetStateStore().Get(bucketPrefix+bucket, &meta) + if err == leveldb.ErrNotFound { + err = handlers.ErrBucketNotFound + } + return meta, err +} + +// GetBucketMeta metadata for a bucket. +func (s *Service) GetBucketMeta(ctx context.Context, bucket string) (meta handlers.BucketMetadata, err error) { + lk := s.NewNSLock(bucket) + lkctx, err := lk.GetRLock(ctx, globalOperationTimeout) + if err != nil { + return handlers.BucketMetadata{}, err + } + ctx = lkctx.Context() + defer lk.RUnlock(lkctx.Cancel) + + return s.getBucketMeta(bucket) +} + +// HasBucket metadata for a bucket. +func (s *Service) HasBucket(ctx context.Context, bucket string) bool { + _, err := s.GetBucketMeta(ctx, bucket) + return err == nil +} + +// DeleteBucket bucket. 
+func (s *Service) DeleteBucket(ctx context.Context, bucket string) error { + lk := s.NewNSLock(bucket) + lkctx, err := lk.GetLock(ctx, deleteOperationTimeout) + if err != nil { + return err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx.Cancel) + + if _, err = s.getBucketMeta(bucket); err != nil { + return err + } + + if empty, err := s.emptyBucket(ctx, bucket); err != nil { + return err + } else if !empty { + return handlers.ErrBucketNotEmpty + } + + return s.providers.GetStateStore().Delete(bucketPrefix + bucket) +} + +// GetAllBucketsOfUser metadata for all bucket. +func (s *Service) GetAllBucketsOfUser(ctx context.Context, username string) ([]handlers.BucketMetadata, error) { + var m []handlers.BucketMetadata + all, err := s.providers.GetStateStore().ReadAllChan(ctx, bucketPrefix, "") + if err != nil { + return nil, err + } + for entry := range all { + data := handlers.BucketMetadata{} + if err = entry.UnmarshalValue(&data); err != nil { + continue + } + if data.Owner != username { + continue + } + m = append(m, data) + } + return m, nil +} + +// UpdateBucketAcl . +func (s *Service) UpdateBucketAcl(ctx context.Context, bucket, acl, accessKey string) error { + lk := s.NewNSLock(bucket) + lkctx, err := lk.GetLock(ctx, globalOperationTimeout) + if err != nil { + return err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx.Cancel) + + meta, err := s.getBucketMeta(bucket) + if err != nil { + return err + } + + meta.Acl = acl + return s.setBucketMeta(bucket, &meta) +} + +// GetBucketAcl . 
+func (s *Service) GetBucketAcl(ctx context.Context, bucket string) (string, error) { + meta, err := s.GetBucketMeta(ctx, bucket) + if err != nil { + return "", err + } + return meta.Acl, nil +} diff --git a/s3/services/bucket/service_option.go b/s3/services/bucket/service_option.go new file mode 100644 index 000000000..0b648a3a5 --- /dev/null +++ b/s3/services/bucket/service_option.go @@ -0,0 +1,3 @@ +package bucket + +type Option func(svc *Service) diff --git a/s3/services/providerser.go b/s3/services/providerser.go index 796291d97..cbb508ca9 100644 --- a/s3/services/providerser.go +++ b/s3/services/providerser.go @@ -1,6 +1,8 @@ package services import ( + "context" + "github.com/bittorrent/go-btfs/transaction/storage" "io" ) @@ -21,4 +23,5 @@ type StateStorer interface { Put(key string, i interface{}) (err error) Delete(key string) (err error) Iterate(prefix string, iterFunc StateStoreIterFunc) (err error) + ReadAllChan(ctx context.Context, prefix string, seekKey string) (<-chan *storage.Entry, error) } From bdc1837a938fdcc266a503fcc78a24d0a32b4734 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Tue, 15 Aug 2023 12:03:44 +0800 Subject: [PATCH 028/139] chore: --- s3/handlers/services.go | 2 +- s3/handlers/services_errors.go | 1 + s3/services/auth/service.go | 16 ---------------- s3/services/bucket/service.go | 21 +++++++++++++++++++++ 4 files changed, 23 insertions(+), 17 deletions(-) diff --git a/s3/handlers/services.go b/s3/handlers/services.go index 7dab817ea..2bfcc0dc0 100644 --- a/s3/handlers/services.go +++ b/s3/handlers/services.go @@ -26,10 +26,10 @@ type AccessKeyService interface { type AuthService interface { VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *AccessKeyRecord, err apierrors.ErrorCode) - CheckACL(accessKeyRecord *AccessKeyRecord, bucketMeta *BucketMetadata, action action.Action) (err error) } type BucketService interface { + CheckACL(accessKeyRecord *AccessKeyRecord, bucketName string, action 
action.Action) (err error) NewNSLock(bucket string) lock.RWLocker SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error diff --git a/s3/handlers/services_errors.go b/s3/handlers/services_errors.go index 0b2fc79c0..abac6a647 100644 --- a/s3/handlers/services_errors.go +++ b/s3/handlers/services_errors.go @@ -6,4 +6,5 @@ var ( ErrBucketNotFound = errors.New("bucket is not found") ErrSginVersionNotSupport = errors.New("sign version is not support") ErrBucketNotEmpty = errors.New("bucket not empty") + ErrBucketAccessDenied = errors.New("bucket access denied. ") ) diff --git a/s3/services/auth/service.go b/s3/services/auth/service.go index a7befa14d..25d2be2bc 100644 --- a/s3/services/auth/service.go +++ b/s3/services/auth/service.go @@ -4,10 +4,8 @@ import ( "context" "net/http" - "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/apierrors" "github.com/bittorrent/go-btfs/s3/handlers" - "github.com/bittorrent/go-btfs/s3/policy" "github.com/bittorrent/go-btfs/s3/services" ) @@ -33,17 +31,3 @@ func (s *Service) VerifySignature(ctx context.Context, r *http.Request) (accessK s.CheckRequestAuthTypeCredential(ctx, r) return } - -func (svc *Service) CheckACL(accessKeyRecord *handlers.AccessKeyRecord, bucketMeta *handlers.BucketMeta, action action.Action) (err error) { - ////todo 是否需要判断原始的 - //if bucketName == "" { - // return cred, handlers.ErrBucketNotFound - //} - - //todo 注意:如果action是CreateBucketAction,HasBucket(ctx, bucketName)进行判断 - - if policy.IsAllowed(bucketMeta.Owner == accessKeyRecord.Key, bucketMeta.Acl, action) == false { - return cred, apierrors.ErrAccessDenied - } - return -} diff --git a/s3/services/bucket/service.go b/s3/services/bucket/service.go index 018f96e33..ab439af36 100644 --- a/s3/services/bucket/service.go +++ b/s3/services/bucket/service.go @@ -4,8 +4,10 @@ import ( "context" "time" + 
"github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/handlers" "github.com/bittorrent/go-btfs/s3/lock" + "github.com/bittorrent/go-btfs/s3/policy" "github.com/bittorrent/go-btfs/s3/services" "github.com/syndtr/goleveldb/leveldb" ) @@ -37,6 +39,25 @@ func NewService(providers services.Providerser, options ...Option) (s *Service) return s } +func (s *Service) CheckACL(accessKeyRecord *handlers.AccessKeyRecord, bucketName string, action action.Action) (err error) { + //todo 是否需要判断原始的 + if bucketName == "" { + return handlers.ErrBucketNotFound + } + + bucketMeta, err := s.GetBucketMeta(context.Background(), bucketName) + if err != nil { + return err + } + + //todo 注意:如果action是CreateBucketAction,HasBucket(ctx, bucketName)进行判断 + + if policy.IsAllowed(bucketMeta.Owner == accessKeyRecord.Key, bucketMeta.Acl, action) == false { + return handlers.ErrBucketAccessDenied + } + return +} + // NewBucketMetadata creates handlers.BucketMetadata with the supplied name and Created to Now. 
func (s *Service) NewBucketMetadata(name, region, accessKey, acl string) *handlers.BucketMetadata { return &handlers.BucketMetadata{ From d30af9e15d3109e77e6f4f96edb7030069bb78b9 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Tue, 15 Aug 2023 12:36:24 +0800 Subject: [PATCH 029/139] mod: update bucket lock --- s3/handlers/services.go | 2 - s3/services/bucket/service.go | 76 +++++++++++++++++------------------ 2 files changed, 38 insertions(+), 40 deletions(-) diff --git a/s3/handlers/services.go b/s3/handlers/services.go index 2bfcc0dc0..4eaf5556f 100644 --- a/s3/handlers/services.go +++ b/s3/handlers/services.go @@ -4,7 +4,6 @@ import ( "context" "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/apierrors" - "github.com/bittorrent/go-btfs/s3/lock" "net/http" ) @@ -30,7 +29,6 @@ type AuthService interface { type BucketService interface { CheckACL(accessKeyRecord *AccessKeyRecord, bucketName string, action action.Action) (err error) - NewNSLock(bucket string) lock.RWLocker SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error GetBucketMeta(ctx context.Context, bucket string) (meta BucketMetadata, err error) diff --git a/s3/services/bucket/service.go b/s3/services/bucket/service.go index ab439af36..eb775cc5e 100644 --- a/s3/services/bucket/service.go +++ b/s3/services/bucket/service.go @@ -5,8 +5,8 @@ import ( "time" "github.com/bittorrent/go-btfs/s3/action" + "github.com/bittorrent/go-btfs/s3/ctxmu" "github.com/bittorrent/go-btfs/s3/handlers" - "github.com/bittorrent/go-btfs/s3/lock" "github.com/bittorrent/go-btfs/s3/policy" "github.com/bittorrent/go-btfs/s3/services" "github.com/syndtr/goleveldb/leveldb" @@ -14,24 +14,25 @@ import ( const ( bucketPrefix = "bkt/" - globalOperationTimeout = 5 * time.Minute - deleteOperationTimeout = 1 * time.Minute + defaultUpdateTimeoutMS = 200 ) var _ handlers.BucketService = 
(*Service)(nil) // Service captures all bucket metadata for a given cluster. type Service struct { - providers services.Providerser - nsLock *lock.NsLockMap - emptyBucket func(ctx context.Context, bucket string) (bool, error) + providers services.Providerser + emptyBucket func(ctx context.Context, bucket string) (bool, error) + locks *ctxmu.MultiCtxRWMutex + updateTimeout time.Duration } // NewService - creates new policy system. func NewService(providers services.Providerser, options ...Option) (s *Service) { s = &Service{ - providers: providers, - nsLock: lock.NewNSLock(), + providers: providers, + locks: ctxmu.NewDefaultMultiCtxRWMutex(), + updateTimeout: time.Duration(defaultUpdateTimeoutMS) * time.Millisecond, } for _, option := range options { option(s) @@ -69,34 +70,30 @@ func (s *Service) NewBucketMetadata(name, region, accessKey, acl string) *handle } } -// NewNSLock - initialize a new namespace RWLocker instance. -func (s *Service) NewNSLock(bucket string) lock.RWLocker { - return s.nsLock.NewNSLock("meta", bucket) -} - func (s *Service) SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) { s.emptyBucket = emptyBucket } -// setBucketMeta - sets a new metadata in-db -func (s *Service) setBucketMeta(bucket string, meta *handlers.BucketMetadata) error { +// lockSetBucketMeta - sets a new metadata in-db +func (s *Service) lockSetBucketMeta(bucket string, meta *handlers.BucketMetadata) error { return s.providers.GetStateStore().Put(bucketPrefix+bucket, meta) } // CreateBucket - create a new Bucket func (s *Service) CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error { - lk := s.NewNSLock(bucket) - lkctx, err := lk.GetLock(ctx, globalOperationTimeout) + ctx, cancel := context.WithTimeout(context.Background(), s.updateTimeout) + defer cancel() + + err := s.locks.Lock(ctx, bucket) if err != nil { return err } - ctx = lkctx.Context() - defer lk.Unlock(lkctx.Cancel) + defer s.locks.Unlock(bucket) - return 
s.setBucketMeta(bucket, s.NewBucketMetadata(bucket, region, accessKey, acl)) + return s.lockSetBucketMeta(bucket, s.NewBucketMetadata(bucket, region, accessKey, acl)) } -func (s *Service) getBucketMeta(bucket string) (meta handlers.BucketMetadata, err error) { +func (s *Service) lockGetBucketMeta(bucket string) (meta handlers.BucketMetadata, err error) { err = s.providers.GetStateStore().Get(bucketPrefix+bucket, &meta) if err == leveldb.ErrNotFound { err = handlers.ErrBucketNotFound @@ -106,15 +103,16 @@ func (s *Service) getBucketMeta(bucket string) (meta handlers.BucketMetadata, er // GetBucketMeta metadata for a bucket. func (s *Service) GetBucketMeta(ctx context.Context, bucket string) (meta handlers.BucketMetadata, err error) { - lk := s.NewNSLock(bucket) - lkctx, err := lk.GetRLock(ctx, globalOperationTimeout) + ctx, cancel := context.WithTimeout(context.Background(), s.updateTimeout) + defer cancel() + + err = s.locks.Lock(ctx, bucket) if err != nil { - return handlers.BucketMetadata{}, err + return handlers.BucketMetadata{Name: bucket}, err } - ctx = lkctx.Context() - defer lk.RUnlock(lkctx.Cancel) + defer s.locks.Unlock(bucket) - return s.getBucketMeta(bucket) + return s.lockGetBucketMeta(bucket) } // HasBucket metadata for a bucket. @@ -125,15 +123,16 @@ func (s *Service) HasBucket(ctx context.Context, bucket string) bool { // DeleteBucket bucket. 
func (s *Service) DeleteBucket(ctx context.Context, bucket string) error { - lk := s.NewNSLock(bucket) - lkctx, err := lk.GetLock(ctx, deleteOperationTimeout) + ctx, cancel := context.WithTimeout(context.Background(), s.updateTimeout) + defer cancel() + + err := s.locks.Lock(ctx, bucket) if err != nil { return err } - ctx = lkctx.Context() - defer lk.Unlock(lkctx.Cancel) + defer s.locks.Unlock(bucket) - if _, err = s.getBucketMeta(bucket); err != nil { + if _, err = s.lockGetBucketMeta(bucket); err != nil { return err } @@ -168,21 +167,22 @@ func (s *Service) GetAllBucketsOfUser(ctx context.Context, username string) ([]h // UpdateBucketAcl . func (s *Service) UpdateBucketAcl(ctx context.Context, bucket, acl, accessKey string) error { - lk := s.NewNSLock(bucket) - lkctx, err := lk.GetLock(ctx, globalOperationTimeout) + ctx, cancel := context.WithTimeout(context.Background(), s.updateTimeout) + defer cancel() + + err := s.locks.Lock(ctx, bucket) if err != nil { return err } - ctx = lkctx.Context() - defer lk.Unlock(lkctx.Cancel) + defer s.locks.Unlock(bucket) - meta, err := s.getBucketMeta(bucket) + meta, err := s.lockGetBucketMeta(bucket) if err != nil { return err } meta.Acl = acl - return s.setBucketMeta(bucket, &meta) + return s.lockSetBucketMeta(bucket, &meta) } // GetBucketAcl . 
From e4f6109b85600fdd9dfe1302506b4c096a6b81d4 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Tue, 15 Aug 2023 16:07:15 +0800 Subject: [PATCH 030/139] chore: --- s3/handlers/services.go | 2 +- s3/services/bucket/service.go | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/s3/handlers/services.go b/s3/handlers/services.go index 4eaf5556f..218f208de 100644 --- a/s3/handlers/services.go +++ b/s3/handlers/services.go @@ -29,10 +29,10 @@ type AuthService interface { type BucketService interface { CheckACL(accessKeyRecord *AccessKeyRecord, bucketName string, action action.Action) (err error) - SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error GetBucketMeta(ctx context.Context, bucket string) (meta BucketMetadata, err error) HasBucket(ctx context.Context, bucket string) bool + SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) DeleteBucket(ctx context.Context, bucket string) error GetAllBucketsOfUser(ctx context.Context, username string) ([]BucketMetadata, error) } diff --git a/s3/services/bucket/service.go b/s3/services/bucket/service.go index eb775cc5e..945edcd33 100644 --- a/s3/services/bucket/service.go +++ b/s3/services/bucket/service.go @@ -70,10 +70,6 @@ func (s *Service) NewBucketMetadata(name, region, accessKey, acl string) *handle } } -func (s *Service) SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) { - s.emptyBucket = emptyBucket -} - // lockSetBucketMeta - sets a new metadata in-db func (s *Service) lockSetBucketMeta(bucket string, meta *handlers.BucketMetadata) error { return s.providers.GetStateStore().Put(bucketPrefix+bucket, meta) @@ -106,11 +102,11 @@ func (s *Service) GetBucketMeta(ctx context.Context, bucket string) (meta handle ctx, cancel := context.WithTimeout(context.Background(), s.updateTimeout) defer cancel() - err = 
s.locks.Lock(ctx, bucket) + err = s.locks.RLock(ctx, bucket) if err != nil { return handlers.BucketMetadata{Name: bucket}, err } - defer s.locks.Unlock(bucket) + defer s.locks.RUnlock(bucket) return s.lockGetBucketMeta(bucket) } @@ -145,6 +141,10 @@ func (s *Service) DeleteBucket(ctx context.Context, bucket string) error { return s.providers.GetStateStore().Delete(bucketPrefix + bucket) } +func (s *Service) SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) { + s.emptyBucket = emptyBucket +} + // GetAllBucketsOfUser metadata for all bucket. func (s *Service) GetAllBucketsOfUser(ctx context.Context, username string) ([]handlers.BucketMetadata, error) { var m []handlers.BucketMetadata From 5e4a4275a14f2cca16ce9e59b2935a377584589c Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Tue, 15 Aug 2023 16:15:00 +0800 Subject: [PATCH 031/139] chore: --- s3/policy/policy.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/s3/policy/policy.go b/s3/policy/policy.go index 0061c6e6c..c761fe8c9 100644 --- a/s3/policy/policy.go +++ b/s3/policy/policy.go @@ -53,15 +53,13 @@ func checkActionInPublicRead(action s3action.Action) bool { } func IsAllowed(own bool, acl string, action s3action.Action) (allow bool) { - a := action.Action(action) - // 1.if bucket - if a.IsBucketAction() { + if action.IsBucketAction() { return own } // 2.if object - if a.IsObjectAction() { + if action.IsObjectAction() { switch acl { case Private: return own From 6b3c0c11c60d024090f3019db426794695fbfff9 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Tue, 15 Aug 2023 16:25:36 +0800 Subject: [PATCH 032/139] chore: --- s3/services/auth/auth_type.go | 62 +------------------------ s3/services/auth/signature-v4-parser.go | 2 +- 2 files changed, 2 insertions(+), 62 deletions(-) diff --git a/s3/services/auth/auth_type.go b/s3/services/auth/auth_type.go index 572fb19b6..5fb74bce4 100644 --- a/s3/services/auth/auth_type.go +++ 
b/s3/services/auth/auth_type.go @@ -4,55 +4,13 @@ import ( "net/http" "net/url" "strings" - - "github.com/bittorrent/go-btfs/s3/consts" ) -// Verify if request has JWT. -func isRequestJWT(r *http.Request) bool { - return strings.HasPrefix(r.Header.Get("Authorization"), "Bearer") -} - // IsRequestSignatureV4 Verify if request has AWS Signature Version '4'. func IsRequestSignatureV4(r *http.Request) bool { return strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) } -// Signature and API related constants. -const ( - signV2Algorithm = "AWS" -) - -// Verify if request has AWS Signature Version '2'. -func isRequestSignatureV2(r *http.Request) bool { - return !strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) && - strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm) -} - -// Verify if request has AWS PreSign Version '4'. already exist in signature-v4-utils -//func isRequestPresignedSignatureV4(r *http.Request) bool { -// _, ok := r.URL.Query()["X-Amz-Credential"] -// return ok -//} - -// Verify request has AWS PreSign Version '2'. -func isRequestPresignedSignatureV2(r *http.Request) bool { - _, ok := r.URL.Query()["AWSAccessKeyId"] - return ok -} - -// Verify if request has AWS Post policy Signature Version '4'. -func isRequestPostPolicySignatureV4(r *http.Request) bool { - return strings.Contains(r.Header.Get("Content-Type"), "multipart/form-data") && - r.Method == http.MethodPost -} - -// Verify if the request has AWS Streaming Signature Version '4'. This is only valid for 'PUT' operation. -func isRequestSignStreamingV4(r *http.Request) bool { - return r.Header.Get("x-amz-content-sha256") == consts.StreamingContentSHA256 && - r.Method == http.MethodPut -} - // AuthType Authorization type. 
type AuthType int @@ -80,28 +38,10 @@ func GetRequestAuthType(r *http.Request) AuthType { return AuthTypeUnknown } } - if isRequestSignatureV2(r) { - return AuthTypeSignedV2 - } else if isRequestPresignedSignatureV2(r) { - return AuthTypePresignedV2 - } else if isRequestSignStreamingV4(r) { - return AuthTypeStreamingSigned - } else if IsRequestSignatureV4(r) { + if IsRequestSignatureV4(r) { return AuthTypeSigned } else if isRequestPresignedSignatureV4(r) { return AuthTypePresigned - } else if isRequestJWT(r) { - return AuthTypeJWT - } else if isRequestPostPolicySignatureV4(r) { - return AuthTypePostPolicy - } else if _, ok := r.Form[consts.StsAction]; ok { - return AuthTypeSTS - } else if _, ok := r.Header[consts.Authorization]; !ok { - return AuthTypeAnonymous } return AuthTypeUnknown } - -func IsAuthTypeStreamingSigned(atype AuthType) bool { - return atype == AuthTypeStreamingSigned -} diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go index 2c967f03e..8099fcead 100644 --- a/s3/services/auth/signature-v4-parser.go +++ b/s3/services/auth/signature-v4-parser.go @@ -301,7 +301,7 @@ func (s *Service) getReqAccessKeyV4(r *http.Request, region string, stype servic } } - // TODO: Why should a temporary user be replaced with the parent user's account name? + // check accessKey. 
record, err := s.accessKeySvc.Get(ch.accessKey) if err != nil { return &handlers.AccessKeyRecord{}, err From 4faa010b4e0d6307aa5a3aeeab1bcd77885b11b9 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Tue, 15 Aug 2023 16:27:46 +0800 Subject: [PATCH 033/139] del s3d --- s3d/consts/consts.go | 183 --------------------------------- s3d/store/bucket_acl.go | 34 ------ s3d/store/err.go | 34 ------ s3d/store/service.go | 1 - s3d/store/service_instance.go | 151 --------------------------- s3d/store/service_interface.go | 16 --- s3d/store/service_test.go | 1 - s3d/uleveldb/leveldb.go | 117 --------------------- s3d/uleveldb/uleveldb_test.go | 24 ----- 9 files changed, 561 deletions(-) delete mode 100644 s3d/consts/consts.go delete mode 100644 s3d/store/bucket_acl.go delete mode 100644 s3d/store/err.go delete mode 100644 s3d/store/service.go delete mode 100644 s3d/store/service_instance.go delete mode 100644 s3d/store/service_interface.go delete mode 100644 s3d/store/service_test.go delete mode 100644 s3d/uleveldb/leveldb.go delete mode 100644 s3d/uleveldb/uleveldb_test.go diff --git a/s3d/consts/consts.go b/s3d/consts/consts.go deleted file mode 100644 index 2bb2d09a8..000000000 --- a/s3d/consts/consts.go +++ /dev/null @@ -1,183 +0,0 @@ -package consts - -import ( - "github.com/dustin/go-humanize" - "time" -) - -//some const -const ( - // Iso8601TimeFormat RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z - Iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision. - - StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" - - // MaxLocationConstraintSize Limit of location constraint XML for unauthenticated PUT bucket operations. 
- MaxLocationConstraintSize = 3 * humanize.MiByte - EmptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - StsRequestBodyLimit = 10 * (1 << 20) // 10 MiB - DefaultRegion = "" - SlashSeparator = "/" - - MaxSkewTime = 15 * time.Minute // 15 minutes skew allowed. - - // STS API version. - StsAPIVersion = "2011-06-15" - StsVersion = "Version" - StsAction = "Action" - AssumeRole = "AssumeRole" - SignV4Algorithm = "AWS4-HMAC-SHA256" - - DefaultOwnerID = "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4" - DisplayName = "FileDagStorage" - DefaultStorageClass = "DAGSTORE" -) - -// Standard S3 HTTP request constants -const ( - IfModifiedSince = "If-Modified-Since" - IfUnmodifiedSince = "If-Unmodified-Since" - IfMatch = "If-Match" - IfNoneMatch = "If-None-Match" - - // S3 storage class - AmzStorageClass = "x-amz-storage-class" - - // S3 object version ID - AmzVersionID = "x-amz-version-id" - AmzDeleteMarker = "x-amz-delete-marker" - - // S3 object tagging - AmzObjectTagging = "X-Amz-Tagging" - AmzTagCount = "x-amz-tagging-count" - AmzTagDirective = "X-Amz-Tagging-Directive" - - // S3 transition restore - AmzRestore = "x-amz-restore" - AmzRestoreExpiryDays = "X-Amz-Restore-Expiry-Days" - AmzRestoreRequestDate = "X-Amz-Restore-Request-Date" - AmzRestoreOutputPath = "x-amz-restore-output-path" - - // S3 extensions - AmzCopySourceIfModifiedSince = "x-amz-copy-source-if-modified-since" - AmzCopySourceIfUnmodifiedSince = "x-amz-copy-source-if-unmodified-since" - - AmzCopySourceIfNoneMatch = "x-amz-copy-source-if-none-match" - AmzCopySourceIfMatch = "x-amz-copy-source-if-match" - - AmzCopySource = "X-Amz-Copy-Source" - AmzCopySourceVersionID = "X-Amz-Copy-Source-Version-Id" - AmzCopySourceRange = "X-Amz-Copy-Source-Range" - AmzMetadataDirective = "X-Amz-Metadata-Directive" - AmzObjectLockMode = "X-Amz-Object-Lock-Mode" - AmzObjectLockRetainUntilDate = "X-Amz-Object-Lock-Retain-Until-Date" - AmzObjectLockLegalHold = 
"X-Amz-Object-Lock-Legal-Hold" - AmzObjectLockBypassGovernance = "X-Amz-Bypass-Governance-Retention" - AmzBucketReplicationStatus = "X-Amz-Replication-Status" - AmzSnowballExtract = "X-Amz-Meta-Snowball-Auto-Extract" - - // Multipart parts count - AmzMpPartsCount = "x-amz-mp-parts-count" - - // Object date/time of expiration - AmzExpiration = "x-amz-expiration" - - // Dummy putBucketACL - AmzACL = "x-amz-acl" - - // Signature V4 related contants. - AmzContentSha256 = "X-Amz-Content-Sha256" - AmzDate = "X-Amz-Date" - AmzAlgorithm = "X-Amz-Algorithm" - AmzExpires = "X-Amz-Expires" - AmzSignedHeaders = "X-Amz-SignedHeaders" - AmzSignature = "X-Amz-Signature" - AmzCredential = "X-Amz-Credential" - AmzSecurityToken = "X-Amz-Security-Token" - AmzDecodedContentLength = "X-Amz-Decoded-Content-Length" - - AmzMetaUnencryptedContentLength = "X-Amz-Meta-X-Amz-Unencrypted-Content-Length" - AmzMetaUnencryptedContentMD5 = "X-Amz-Meta-X-Amz-Unencrypted-Content-Md5" - - // AWS server-side encryption headers for SSE-S3, SSE-KMS and SSE-C. 
- AmzServerSideEncryption = "X-Amz-Server-Side-Encryption" - AmzServerSideEncryptionKmsID = AmzServerSideEncryption + "-Aws-Kms-Key-Id" - AmzServerSideEncryptionKmsContext = AmzServerSideEncryption + "-Context" - AmzServerSideEncryptionCustomerAlgorithm = AmzServerSideEncryption + "-Customer-Algorithm" - AmzServerSideEncryptionCustomerKey = AmzServerSideEncryption + "-Customer-Key" - AmzServerSideEncryptionCustomerKeyMD5 = AmzServerSideEncryption + "-Customer-Key-Md5" - AmzServerSideEncryptionCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" - AmzServerSideEncryptionCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" - AmzServerSideEncryptionCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" - - AmzEncryptionAES = "AES256" - AmzEncryptionKMS = "aws:kms" - - // Signature v2 related constants - AmzSignatureV2 = "Signature" - AmzAccessKeyID = "AWSAccessKeyId" - - // Response request id. - AmzRequestID = "x-amz-request-id" -) - -// Standard S3 HTTP response constants -const ( - LastModified = "Last-Modified" - Date = "Date" - ETag = "ETag" - ContentType = "Content-Type" - ContentMD5 = "Content-Md5" - ContentEncoding = "Content-Encoding" - Expires = "Expires" - ContentLength = "Content-Length" - ContentLanguage = "Content-Language" - ContentRange = "Content-Range" - Connection = "Connection" - AcceptRanges = "Accept-Ranges" - AmzBucketRegion = "X-Amz-Bucket-Region" - ServerInfo = "Server" - RetryAfter = "Retry-After" - Location = "Location" - CacheControl = "Cache-Control" - ContentDisposition = "Content-Disposition" - Authorization = "Authorization" - Action = "Action" - Range = "Range" -) - -//object const -const ( - MaxObjectSize = 5 * humanize.TiByte - - // Minimum Part size for multipart upload is 5MiB - MinPartSize = 5 * humanize.MiByte - - // Maximum Part size for multipart upload is 5GiB - MaxPartSize = 5 * humanize.GiByte - - // Maximum Part ID for multipart upload is 10000 
- // (Acceptable values range from 1 to 10000 inclusive) - MaxPartID = 10000 - - MaxObjectList = 1000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse. - MaxDeleteList = 1000 // Limit number of objects deleted in a delete call. - MaxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse. - MaxPartsList = 10000 // Limit number of parts in a listPartsResponse. -) - -// Common http query params S3 API -const ( - VersionID = "versionId" - - PartNumber = "partNumber" - - UploadID = "uploadId" -) - -// limit -const ( - // The maximum allowed time difference between the incoming request - // date and server date during signature verification. - GlobalMaxSkewTime = 15 * time.Minute // 15 minutes skew allowed. -) diff --git a/s3d/store/bucket_acl.go b/s3d/store/bucket_acl.go deleted file mode 100644 index cbecf0376..000000000 --- a/s3d/store/bucket_acl.go +++ /dev/null @@ -1,34 +0,0 @@ -package store - -import ( - "context" -) - -func (sys *BucketMetadataSys) UpdateBucketAcl(ctx context.Context, bucket, acl, accessKey string) error { - lk := sys.NewNSLock(bucket) - lkctx, err := lk.GetLock(ctx, globalOperationTimeout) - if err != nil { - return err - } - ctx = lkctx.Context() - defer lk.Unlock(lkctx.Cancel) - - meta, err := sys.getBucketMeta(bucket) - if err != nil { - return err - } - - meta.Acl = acl - return sys.setBucketMeta(bucket, &meta) -} -func (sys *BucketMetadataSys) GetBucketAcl(ctx context.Context, bucket string) (string, error) { - meta, err := sys.GetBucketMeta(ctx, bucket) - if err != nil { - switch err.(type) { - case BucketNotFound: - return "", BucketTaggingNotFound{Bucket: bucket} - } - return "", err - } - return meta.Acl, nil -} diff --git a/s3d/store/err.go b/s3d/store/err.go deleted file mode 100644 index 5cc5ebeb2..000000000 --- a/s3d/store/err.go +++ /dev/null @@ -1,34 +0,0 @@ -package store - -import "errors" - -var ErrBucketNotEmpty = errors.New("bucket not empty") - -// BucketPolicyNotFound - no 
bucket policy found. -type BucketPolicyNotFound struct { - Bucket string - Err error -} - -func (e BucketPolicyNotFound) Error() string { - return "No bucket policy configuration found for bucket: " + e.Bucket -} - -// BucketNotFound - no bucket found. -type BucketNotFound struct { - Bucket string - Err error -} - -func (e BucketNotFound) Error() string { - return "Not found for bucket: " + e.Bucket -} - -type BucketTaggingNotFound struct { - Bucket string - Err error -} - -func (e BucketTaggingNotFound) Error() string { - return "No bucket tagging configuration found for bucket: " + e.Bucket -} diff --git a/s3d/store/service.go b/s3d/store/service.go deleted file mode 100644 index 72440ea2a..000000000 --- a/s3d/store/service.go +++ /dev/null @@ -1 +0,0 @@ -package store diff --git a/s3d/store/service_instance.go b/s3d/store/service_instance.go deleted file mode 100644 index 97348a5fa..000000000 --- a/s3d/store/service_instance.go +++ /dev/null @@ -1,151 +0,0 @@ -package store - -import ( - "context" - "github.com/bittorrent/go-btfs/s3/lock" - "time" - - "github.com/bittorrent/go-btfs/transaction/storage" - "github.com/syndtr/goleveldb/leveldb" -) - -const ( - bucketPrefix = "bkt/" -) - -const ( - globalOperationTimeout = 5 * time.Minute - deleteOperationTimeout = 1 * time.Minute -) - -// BucketMetadata contains bucket metadata. -type BucketMetadata struct { - Name string - Region string - Owner string - Acl string - Created time.Time -} - -// NewBucketMetadata creates BucketMetadata with the supplied name and Created to Now. -func NewBucketMetadata(name, region, accessKey, acl string) *BucketMetadata { - return &BucketMetadata{ - Name: name, - Region: region, - Owner: accessKey, - Acl: acl, - Created: time.Now().UTC(), - } -} - -// BucketMetadataSys captures all bucket metadata for a given cluster. 
-type BucketMetadataSys struct { - db storage.StateStorer - nsLock *lock.NsLockMap - emptyBucket func(ctx context.Context, bucket string) (bool, error) -} - -// NewBucketMetadataSys - creates new policy system. -func NewBucketMetadataSys(db storage.StateStorer) *BucketMetadataSys { - return &BucketMetadataSys{ - db: db, - nsLock: lock.NewNSLock(), - } -} - -// NewNSLock - initialize a new namespace RWLocker instance. -func (sys *BucketMetadataSys) NewNSLock(bucket string) lock.RWLocker { - return sys.nsLock.NewNSLock("meta", bucket) -} - -func (sys *BucketMetadataSys) SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) { - sys.emptyBucket = emptyBucket -} - -// setBucketMeta - sets a new metadata in-db -func (sys *BucketMetadataSys) setBucketMeta(bucket string, meta *BucketMetadata) error { - return sys.db.Put(bucketPrefix+bucket, meta) -} - -// CreateBucket - create a new Bucket -func (sys *BucketMetadataSys) CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error { - lk := sys.NewNSLock(bucket) - lkctx, err := lk.GetLock(ctx, globalOperationTimeout) - if err != nil { - return err - } - ctx = lkctx.Context() - defer lk.Unlock(lkctx.Cancel) - - return sys.setBucketMeta(bucket, NewBucketMetadata(bucket, region, accessKey, acl)) -} - -func (sys *BucketMetadataSys) getBucketMeta(bucket string) (meta BucketMetadata, err error) { - err = sys.db.Get(bucketPrefix+bucket, &meta) - if err == leveldb.ErrNotFound { - err = BucketNotFound{Bucket: bucket, Err: err} - } - return meta, err -} - -// GetBucketMeta metadata for a bucket. -func (sys *BucketMetadataSys) GetBucketMeta(ctx context.Context, bucket string) (meta BucketMetadata, err error) { - lk := sys.NewNSLock(bucket) - lkctx, err := lk.GetRLock(ctx, globalOperationTimeout) - if err != nil { - return BucketMetadata{}, err - } - ctx = lkctx.Context() - defer lk.RUnlock(lkctx.Cancel) - - return sys.getBucketMeta(bucket) -} - -// HasBucket metadata for a bucket. 
-func (sys *BucketMetadataSys) HasBucket(ctx context.Context, bucket string) bool { - _, err := sys.GetBucketMeta(ctx, bucket) - return err == nil -} - -// DeleteBucket bucket. -func (sys *BucketMetadataSys) DeleteBucket(ctx context.Context, bucket string) error { - lk := sys.NewNSLock(bucket) - lkctx, err := lk.GetLock(ctx, deleteOperationTimeout) - if err != nil { - return err - } - ctx = lkctx.Context() - defer lk.Unlock(lkctx.Cancel) - - if _, err = sys.getBucketMeta(bucket); err != nil { - return err - } - - if empty, err := sys.emptyBucket(ctx, bucket); err != nil { - return err - } else if !empty { - return ErrBucketNotEmpty - } - - return sys.db.Delete(bucketPrefix + bucket) -} - -// GetAllBucketsOfUser metadata for all bucket. -func (sys *BucketMetadataSys) GetAllBucketsOfUser(ctx context.Context, username string) ([]BucketMetadata, error) { - var m []BucketMetadata - all, err := sys.db.ReadAllChan(ctx, bucketPrefix, "") - if err != nil { - return nil, err - } - for entry := range all { - data := BucketMetadata{} - if err = entry.UnmarshalValue(&data); err != nil { - continue - } - if data.Owner != username { - continue - } - m = append(m, data) - } - return m, nil -} diff --git a/s3d/store/service_interface.go b/s3d/store/service_interface.go deleted file mode 100644 index ebc6ae526..000000000 --- a/s3d/store/service_interface.go +++ /dev/null @@ -1,16 +0,0 @@ -package store - -import ( - "context" - "github.com/bittorrent/go-btfs/s3/lock" -) - -type Service interface { - NewNSLock(bucket string) lock.RWLocker - SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) - CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error - GetBucketMeta(ctx context.Context, bucket string) (meta BucketMetadata, err error) - HasBucket(ctx context.Context, bucket string) bool - DeleteBucket(ctx context.Context, bucket string) error - GetAllBucketsOfUser(ctx context.Context, username string) ([]BucketMetadata, error) -} 
diff --git a/s3d/store/service_test.go b/s3d/store/service_test.go deleted file mode 100644 index 72440ea2a..000000000 --- a/s3d/store/service_test.go +++ /dev/null @@ -1 +0,0 @@ -package store diff --git a/s3d/uleveldb/leveldb.go b/s3d/uleveldb/leveldb.go deleted file mode 100644 index 781625528..000000000 --- a/s3d/uleveldb/leveldb.go +++ /dev/null @@ -1,117 +0,0 @@ -package uleveldb - -import ( - "context" - logging "github.com/ipfs/go-log/v2" - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" - "github.com/vmihailenco/msgpack/v4" - "go.uber.org/zap/buffer" -) - -var log = logging.Logger("leveldb") - -//ULevelDB level db store key-struct -type ULevelDB struct { - DB *leveldb.DB -} - -// OpenDb open a db client -func OpenDb(path string) (*ULevelDB, error) { - newDb, err := leveldb.OpenFile(path, nil) - if _, corrupted := err.(*errors.ErrCorrupted); corrupted { - newDb, err = leveldb.RecoverFile(path, nil) - } - if err != nil { - log.Errorf("Open Db path: %v,err:%v,", path, err) - return nil, err - } - return &ULevelDB{ - DB: newDb, - }, nil -} - -//Close db close -func (l *ULevelDB) Close() error { - return l.DB.Close() -} - -// Put -// * @param {string} key -// * @param {interface{}} value -func (l *ULevelDB) Put(key string, value interface{}) error { - result, err := msgpack.Marshal(value) - if err != nil { - log.Errorf("marshal error%v", err) - return err - } - return l.DB.Put([]byte(key), result, nil) -} - -// Get -// * @param {string} key -// * @param {interface{}} value -func (l *ULevelDB) Get(key string, value interface{}) error { - get, err := l.DB.Get([]byte(key), nil) - if err != nil { - return err - } - return msgpack.Unmarshal(get, value) -} - -// Delete -// * @param {string} key -// * @param {interface{}} value -func (l *ULevelDB) Delete(key string) error { - return 
l.DB.Delete([]byte(key), nil) -} - -// NewIterator /** -func (l *ULevelDB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - return l.DB.NewIterator(slice, ro) -} - -type entry struct { - Key string - Value []byte -} - -func (e *entry) UnmarshalValue(value interface{}) error { - return msgpack.Unmarshal(e.Value, value) -} - -//ReadAllChan read all key value -func (l *ULevelDB) ReadAllChan(ctx context.Context, prefix string, seekKey string) (<-chan *entry, error) { - ch := make(chan *entry) - var slice *util.Range - if prefix != "" { - slice = util.BytesPrefix([]byte(prefix)) - } - iter := l.NewIterator(slice, nil) - if seekKey != "" { - iter.Seek([]byte(seekKey)) - } - go func() { - defer func() { - iter.Release() - close(ch) - }() - for iter.Next() { - key := string(iter.Key()) - buf := buffer.Buffer{} - buf.Write(iter.Value()) - select { - case <-ctx.Done(): - return - case ch <- &entry{ - Key: key, - Value: buf.Bytes(), - }: - } - } - }() - return ch, nil -} diff --git a/s3d/uleveldb/uleveldb_test.go b/s3d/uleveldb/uleveldb_test.go deleted file mode 100644 index e758e6d98..000000000 --- a/s3d/uleveldb/uleveldb_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package uleveldb - -import ( - "fmt" - "testing" -) - -func TestULeveldb(t *testing.T) { - db, err := OpenDb(t.TempDir()) - if err != nil { - t.Fatal(err) - } - err = db.Put("a", 10) - if err != nil { - return - } - var a int - err = db.Get("a", &a) - db.Close() - if err != nil { - return - } - fmt.Println(a) -} From 8176acc48a8e5a27174f95f5519e9cd375fccd4a Mon Sep 17 00:00:00 2001 From: Steve Date: Tue, 15 Aug 2023 17:55:21 +0800 Subject: [PATCH 034/139] chore: s3 req & rsp structure --- s3/handlers/errors.go | 80 ++ s3/handlers/handlers.go | 13 + s3/handlers/request.go | 20 + s3/handlers/response.go | 7 + s3/handlers/s3_error.go | 44 ++ s3/handlers/s3api_errors.go | 1310 +++++++++++++++++++++++++++++++++ s3/handlers/services_types.go | 10 +- 7 files changed, 1475 insertions(+), 9 
deletions(-) create mode 100644 s3/handlers/errors.go create mode 100644 s3/handlers/request.go create mode 100644 s3/handlers/response.go create mode 100644 s3/handlers/s3_error.go create mode 100644 s3/handlers/s3api_errors.go diff --git a/s3/handlers/errors.go b/s3/handlers/errors.go new file mode 100644 index 000000000..e9981add7 --- /dev/null +++ b/s3/handlers/errors.go @@ -0,0 +1,80 @@ +package handlers + +import ( + "context" + "github.com/yann-y/fds/internal/lock" + "github.com/yann-y/fds/internal/store" + "github.com/yann-y/fds/internal/utils/hash" + "github.com/yann-y/fds/pkg/s3utils" + "golang.org/x/xerrors" + "net/url" +) + +// NotImplemented If a feature is not implemented +type NotImplemented struct { + Message string +} + +// ContextCanceled returns whether a context is canceled. +func ContextCanceled(ctx context.Context) bool { + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +func ToApiError(ctx context.Context, err error) ErrorCode { + if ContextCanceled(ctx) { + if ctx.Err() == context.Canceled { + return ErrClientDisconnected + } + } + errCode := ErrInternalError + switch err.(type) { + case lock.OperationTimedOut: + errCode = ErrOperationTimedOut + case hash.SHA256Mismatch: + errCode = ErrContentSHA256Mismatch + case hash.BadDigest: + errCode = ErrBadDigest + case store.BucketNotFound: + errCode = ErrNoSuchBucket + case store.BucketPolicyNotFound: + errCode = ErrNoSuchBucketPolicy + case store.BucketTaggingNotFound: + errCode = ErrBucketTaggingNotFound + case s3utils.BucketNameInvalid: + errCode = ErrInvalidBucketName + case s3utils.ObjectNameInvalid: + errCode = ErrInvalidObjectName + case s3utils.ObjectNameTooLong: + errCode = ErrKeyTooLongError + case s3utils.ObjectNamePrefixAsSlash: + errCode = ErrInvalidObjectNamePrefixSlash + case s3utils.InvalidUploadIDKeyCombination: + errCode = ErrNotImplemented + case s3utils.InvalidMarkerPrefixCombination: + errCode = ErrNotImplemented + case s3utils.MalformedUploadID: 
+ errCode = ErrNoSuchUpload + case s3utils.InvalidUploadID: + errCode = ErrNoSuchUpload + case s3utils.InvalidPart: + errCode = ErrInvalidPart + case s3utils.PartTooSmall: + errCode = ErrEntityTooSmall + case s3utils.PartTooBig: + errCode = ErrEntityTooLarge + case url.EscapeError: + errCode = ErrInvalidObjectName + default: + if xerrors.Is(err, store.ErrObjectNotFound) { + errCode = ErrNoSuchKey + } else if xerrors.Is(err, store.ErrBucketNotEmpty) { + errCode = ErrBucketNotEmpty + } + } + return errCode +} diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 5b5e4f520..143295d72 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -52,6 +52,19 @@ func (handlers *Handlers) Sign(handler http.Handler) http.Handler { return nil } +func (handlers *Handlers) parsePutObjectReq(r *http.Request) (arg *PutObjectReq, err error) { + return +} + func (handlers *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { + req := &PutObjectRequest{} + err := req.Bind(r) + if err != nil { + return + } + //.... 
+ + WritePutObjectResponse(w, object) + return } diff --git a/s3/handlers/request.go b/s3/handlers/request.go new file mode 100644 index 000000000..383f09633 --- /dev/null +++ b/s3/handlers/request.go @@ -0,0 +1,20 @@ +package handlers + +import ( + "io" + "net/http" +) + +type RequestBinder interface { + Bind(r *http.Request) (err error) +} + +type PutObjectRequest struct { + Bucket string + Object string + Body io.Reader +} + +func (req *PutObjectRequest) Bind(r *http.Request) (err error) { + return +} diff --git a/s3/handlers/response.go b/s3/handlers/response.go new file mode 100644 index 000000000..610876f48 --- /dev/null +++ b/s3/handlers/response.go @@ -0,0 +1,7 @@ +package handlers + +import "net/http" + +func WritePutObjectResponse(w http.ResponseWriter, objectMeta *ObjectMetadata) { + return +} diff --git a/s3/handlers/s3_error.go b/s3/handlers/s3_error.go new file mode 100644 index 000000000..b577e5127 --- /dev/null +++ b/s3/handlers/s3_error.go @@ -0,0 +1,44 @@ +package handlers + +// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +var s3ErrorResponseMap = map[string]string{ + "AccessDenied": "Access Denied.", + "BadDigest": "The Content-Md5 you specified did not match what we received.", + "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", + "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", + "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", + "InternalError": "We encountered an internal error, please try again.", + "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", + "InvalidBucketName": "The specified bucket is not valid.", + "InvalidDigest": "The Content-Md5 you specified is not valid.", + "InvalidRange": "The requested range is not satisfiable", + "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", + 
"MissingContentLength": "You must provide the Content-Length HTTP header.", + "MissingContentMD5": "Missing required header for this request: Content-Md5.", + "MissingRequestBodyError": "Request body is empty.", + "NoSuchBucket": "The specified bucket does not exist.", + "NoSuchBucketPolicy": "The bucket policy does not exist", + "NoSuchKey": "The specified key does not exist.", + "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", + "NotImplemented": "A header you provided implies functionality that is not implemented", + "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", + "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", + "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.", + "MethodNotAllowed": "The specified method is not allowed against this resource.", + "InvalidPart": "One or more of the specified parts could not be found.", + "InvalidPartOrder": "The list of parts was not in ascending order. 
The parts list must be specified in order by part number.", + "InvalidObjectState": "The operation is not valid for the current state of the object.", + "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", + "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", + "BucketNotEmpty": "The bucket you tried to delete is not empty", + "AllAccessDisabled": "All access to this bucket has been disabled.", + "MalformedPolicy": "Policy has invalid resource.", + "MissingFields": "Missing fields in request.", + "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", + "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", + "InvalidDuration": "Duration provided in the request is invalid.", + "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", + // Add new API errors here. 
+ "NoSuchCORSConfiguration": "The CORS configuration does not exist", +} diff --git a/s3/handlers/s3api_errors.go b/s3/handlers/s3api_errors.go new file mode 100644 index 000000000..d990e85d7 --- /dev/null +++ b/s3/handlers/s3api_errors.go @@ -0,0 +1,1310 @@ +package handlers + +import ( + "encoding/xml" + "fmt" + "net/http" +) + +// APIError structure +type APIError struct { + Code string + Description string + HTTPStatusCode int +} + +// RESTErrorResponse - error response format +type RESTErrorResponse struct { + XMLName xml.Name `xml:"Error" json:"-"` + Code string `xml:"Code" json:"Code"` + Message string `xml:"Message" json:"Message"` + Resource string `xml:"Resource" json:"Resource"` + RequestID string `xml:"RequestId" json:"RequestId"` + Key string `xml:"Key,omitempty" json:"Key,omitempty"` + BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"` +} + +// Error - Returns S3 error string. +func (e RESTErrorResponse) Error() string { + if e.Message == "" { + msg, ok := s3ErrorResponseMap[e.Code] + if !ok { + msg = fmt.Sprintf("Error response code %s.", e.Code) + } + return msg + } + return e.Message +} + +// ErrorCode type of error status. 
+type ErrorCode int + +// Error codes, non exhaustive list - http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +const ( + ErrNone ErrorCode = iota + ErrAccessDenied + ErrBadDigest + ErrEntityTooSmall + ErrEntityTooLarge + ErrIncompleteBody + ErrInternalError + ErrInvalidAccessKeyID + ErrAccessKeyDisabled + ErrInvalidBucketName + ErrInvalidDigest + ErrInvalidRange + ErrInvalidRangePartNumber + ErrInvalidCopyPartRange + ErrInvalidCopyPartRangeSource + ErrInvalidMaxKeys + ErrInvalidEncodingMethod + ErrInvalidMaxUploads + ErrInvalidMaxParts + ErrInvalidPartNumberMarker + ErrInvalidRequestBody + ErrInvalidCopySource + ErrInvalidMetadataDirective + ErrInvalidCopyDest + ErrInvalidPolicyDocument + ErrInvalidObjectState + ErrMalformedXML + ErrMissingContentLength + ErrMissingContentMD5 + ErrMissingRequestBodyError + ErrMissingSecurityHeader + ErrNoSuchUser + ErrUserAlreadyExists + ErrNoSuchUserPolicy + ErrUserPolicyAlreadyExists + ErrNoSuchBucket + ErrNoSuchBucketPolicy + ErrNoSuchLifecycleConfiguration + ErrNoSuchCORSConfiguration + ErrNoSuchWebsiteConfiguration + ErrReplicationConfigurationNotFoundError + ErrReplicationNeedsVersioningError + ErrReplicationBucketNeedsVersioningError + ErrObjectRestoreAlreadyInProgress + ErrNoSuchKey + ErrNoSuchUpload + ErrInvalidVersionID + ErrNoSuchVersion + ErrNotImplemented + ErrPreconditionFailed + ErrRequestTimeTooSkewed + ErrSignatureDoesNotMatch + ErrMethodNotAllowed + ErrInvalidPart + ErrInvalidPartOrder + ErrAuthorizationHeaderMalformed + ErrMalformedDate + ErrMalformedPOSTRequest + ErrPOSTFileRequired + ErrSignatureVersionNotSupported + ErrBucketNotEmpty + ErrAllAccessDisabled + ErrMalformedPolicy + ErrMissingFields + ErrMissingCredTag + ErrCredMalformed + ErrInvalidRegion + + ErrMissingSignTag + ErrMissingSignHeadersTag + + ErrAuthHeaderEmpty + ErrExpiredPresignRequest + ErrRequestNotReadyYet + ErrUnsignedHeaders + ErrMissingDateHeader + + ErrBucketAlreadyOwnedByYou + ErrInvalidDuration + ErrBucketAlreadyExists 
+ ErrMetadataTooLarge + ErrUnsupportedMetadata + + ErrSlowDown + ErrBadRequest + ErrKeyTooLongError + ErrInvalidBucketObjectLockConfiguration + ErrObjectLockConfigurationNotAllowed + ErrNoSuchObjectLockConfiguration + ErrObjectLocked + ErrInvalidRetentionDate + ErrPastObjectLockRetainDate + ErrUnknownWORMModeDirective + ErrBucketTaggingNotFound + ErrObjectLockInvalidHeaders + ErrInvalidTagDirective + // Add new error codes here. + + // SSE-S3 related API errors + ErrInvalidEncryptionMethod + ErrInvalidQueryParams + ErrNoAccessKey + ErrInvalidToken + + // Bucket notification related errors. + ErrEventNotification + ErrARNNotification + ErrRegionNotification + ErrOverlappingFilterNotification + ErrFilterNameInvalid + ErrFilterNamePrefix + ErrFilterNameSuffix + ErrFilterValueInvalid + ErrOverlappingConfigs + + // S3 extended errors. + ErrContentSHA256Mismatch + + // Add new extended error codes here. + ErrInvalidObjectName + ErrInvalidObjectNamePrefixSlash + ErrClientDisconnected + ErrOperationTimedOut + ErrOperationMaxedOut + ErrInvalidRequest + ErrIncorrectContinuationToken + ErrInvalidFormatAccessKey + + // S3 Select Errors + ErrEmptyRequestBody + ErrUnsupportedFunction + ErrInvalidExpressionType + ErrBusy + ErrUnauthorizedAccess + ErrExpressionTooLong + ErrIllegalSQLFunctionArgument + ErrInvalidKeyPath + ErrInvalidCompressionFormat + ErrInvalidFileHeaderInfo + ErrInvalidJSONType + ErrInvalidQuoteFields + ErrInvalidRequestParameter + ErrInvalidDataType + ErrInvalidTextEncoding + ErrInvalidDataSource + ErrInvalidTableAlias + ErrMissingRequiredParameter + ErrObjectSerializationConflict + ErrUnsupportedSQLOperation + ErrUnsupportedSQLStructure + ErrUnsupportedSyntax + ErrUnsupportedRangeHeader + ErrLexerInvalidChar + ErrLexerInvalidOperator + ErrLexerInvalidLiteral + ErrLexerInvalidIONLiteral + ErrParseExpectedDatePart + ErrParseExpectedKeyword + ErrParseExpectedTokenType + ErrParseExpected2TokenTypes + ErrParseExpectedNumber + 
ErrParseExpectedRightParenBuiltinFunctionCall + ErrParseExpectedTypeName + ErrParseExpectedWhenClause + ErrParseUnsupportedToken + ErrParseUnsupportedLiteralsGroupBy + ErrParseExpectedMember + ErrParseUnsupportedSelect + ErrParseUnsupportedCase + ErrParseUnsupportedCaseClause + ErrParseUnsupportedAlias + ErrParseUnsupportedSyntax + ErrParseUnknownOperator + ErrParseMissingIdentAfterAt + ErrParseUnexpectedOperator + ErrParseUnexpectedTerm + ErrParseUnexpectedToken + ErrParseUnexpectedKeyword + ErrParseExpectedExpression + ErrParseExpectedLeftParenAfterCast + ErrParseExpectedLeftParenValueConstructor + ErrParseExpectedLeftParenBuiltinFunctionCall + ErrParseExpectedArgumentDelimiter + ErrParseCastArity + ErrParseInvalidTypeParam + ErrParseEmptySelect + ErrParseSelectMissingFrom + ErrParseExpectedIdentForGroupName + ErrParseExpectedIdentForAlias + ErrParseUnsupportedCallWithStar + ErrParseNonUnaryAgregateFunctionCall + ErrParseMalformedJoin + ErrParseExpectedIdentForAt + ErrParseAsteriskIsNotAloneInSelectList + ErrParseCannotMixSqbAndWildcardInSelectList + ErrParseInvalidContextForWildcardInSelectList + ErrIncorrectSQLFunctionArgumentType + ErrValueParseFailure + ErrEvaluatorInvalidArguments + ErrIntegerOverflow + ErrLikeInvalidInputs + ErrCastFailed + ErrInvalidCast + ErrEvaluatorInvalidTimestampFormatPattern + ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing + ErrEvaluatorTimestampFormatPatternDuplicateFields + ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch + ErrEvaluatorUnterminatedTimestampFormatPatternToken + ErrEvaluatorInvalidTimestampFormatPatternToken + ErrEvaluatorInvalidTimestampFormatPatternSymbol + ErrEvaluatorBindingDoesNotExist + ErrMissingHeaders + ErrInvalidColumnIndex + ErrPostPolicyConditionInvalidFormat + + ErrMalformedJSON +) + +// error code to APIError structure, these fields carry respective +// descriptions for all the error responses. 
+var errorCodeResponse = map[ErrorCode]APIError{ + ErrInvalidCopyDest: { + Code: "InvalidRequest", + Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidCopySource: { + Code: "InvalidArgument", + Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidMetadataDirective: { + Code: "InvalidArgument", + Description: "Unknown metadata directive.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidRequestBody: { + Code: "InvalidArgument", + Description: "Body shouldn't be set for this request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidMaxUploads: { + Code: "InvalidArgument", + Description: "Argument max-uploads must be an integer between 0 and 2147483647", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidMaxKeys: { + Code: "InvalidArgument", + Description: "Argument maxKeys must be an integer between 0 and 2147483647", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidEncodingMethod: { + Code: "InvalidArgument", + Description: "Invalid Encoding Method specified in Request", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidMaxParts: { + Code: "InvalidArgument", + Description: "Part number must be an integer between 1 and 10000, inclusive", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidPartNumberMarker: { + Code: "InvalidArgument", + Description: "Argument partNumberMarker must be an integer.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidPolicyDocument: { + Code: "InvalidPolicyDocument", + Description: "The content of the form does not meet the conditions specified in the policy document.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrAccessDenied: { + Code: "AccessDenied", + Description: 
"Access Denied.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrBadDigest: { + Code: "BadDigest", + Description: "The Content-Md5 you specified did not match what we received.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEntityTooSmall: { + Code: "EntityTooSmall", + Description: "Your proposed upload is smaller than the minimum allowed object size.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEntityTooLarge: { + Code: "EntityTooLarge", + Description: "Your proposed upload exceeds the maximum allowed object size.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrIncompleteBody: { + Code: "IncompleteBody", + Description: "You did not provide the number of bytes specified by the Content-Length HTTP header.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInternalError: { + Code: "InternalError", + Description: "We encountered an internal error, please try again.", + HTTPStatusCode: http.StatusInternalServerError, + }, + ErrInvalidAccessKeyID: { + Code: "InvalidAccessKeyId", + Description: "The Access Key Id you provided does not exist in our records.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrAccessKeyDisabled: { + Code: "InvalidAccessKeyId", + Description: "Your account is disabled; please contact your administrator.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrInvalidBucketName: { + Code: "InvalidBucketName", + Description: "The specified bucket is not valid.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidDigest: { + Code: "InvalidDigest", + Description: "The Content-Md5 you specified is not valid.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidRange: { + Code: "InvalidRange", + Description: "The requested range is not satisfiable", + HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + ErrInvalidRangePartNumber: { + Code: "InvalidRequest", + Description: "Cannot specify both Range header and partNumber query parameter", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedXML: { 
+ Code: "MalformedXML", + Description: "The XML you provided was not well-formed or did not validate against our published schema.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingContentLength: { + Code: "MissingContentLength", + Description: "You must provide the Content-Length HTTP header.", + HTTPStatusCode: http.StatusLengthRequired, + }, + ErrMissingContentMD5: { + Code: "MissingContentMD5", + Description: "Missing required header for this request: Content-Md5.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingSecurityHeader: { + Code: "MissingSecurityHeader", + Description: "Your request was missing a required header", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingRequestBodyError: { + Code: "MissingRequestBodyError", + Description: "Request body is empty.", + HTTPStatusCode: http.StatusLengthRequired, + }, + ErrNoSuchBucket: { + Code: "NoSuchBucket", + Description: "The specified bucket does not exist", + HTTPStatusCode: http.StatusNotFound, + }, + ErrNoSuchBucketPolicy: { + Code: "NoSuchBucketPolicy", + Description: "The bucket policy does not exist", + HTTPStatusCode: http.StatusNotFound, + }, + ErrNoSuchLifecycleConfiguration: { + Code: "NoSuchLifecycleConfiguration", + Description: "The lifecycle configuration does not exist", + HTTPStatusCode: http.StatusNotFound, + }, + ErrNoSuchUser: { + Code: "NoSuchUser", + Description: "The specified user does not exist", + HTTPStatusCode: http.StatusConflict, + }, + ErrUserAlreadyExists: { + Code: "UserAlreadyExists", + Description: "The request was rejected because it attempted to create a resource that already exists .", + HTTPStatusCode: http.StatusConflict, + }, + ErrNoSuchUserPolicy: { + Code: "NoSuchUserPolicy", + Description: "The specified user policy does not exist", + HTTPStatusCode: http.StatusConflict, + }, + ErrUserPolicyAlreadyExists: { + Code: "UserPolicyAlreadyExists", + Description: "The same user policy already exists .", + HTTPStatusCode: http.StatusConflict, + 
}, + ErrNoSuchKey: { + Code: "NoSuchKey", + Description: "The specified key does not exist.", + HTTPStatusCode: http.StatusNotFound, + }, + ErrNoSuchUpload: { + Code: "NoSuchUpload", + Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", + HTTPStatusCode: http.StatusNotFound, + }, + ErrInvalidVersionID: { + Code: "InvalidArgument", + Description: "Invalid version id specified", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNoSuchVersion: { + Code: "NoSuchVersion", + Description: "The specified version does not exist.", + HTTPStatusCode: http.StatusNotFound, + }, + ErrNotImplemented: { + Code: "NotImplemented", + Description: "A header you provided implies functionality that is not implemented", + HTTPStatusCode: http.StatusNotImplemented, + }, + ErrPreconditionFailed: { + Code: "PreconditionFailed", + Description: "At least one of the pre-conditions you specified did not hold", + HTTPStatusCode: http.StatusPreconditionFailed, + }, + ErrRequestTimeTooSkewed: { + Code: "RequestTimeTooSkewed", + Description: "The difference between the request time and the server's time is too large.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrSignatureDoesNotMatch: { + Code: "SignatureDoesNotMatch", + Description: "The request signature we calculated does not match the signature you provided. Check your key and signing method.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrMethodNotAllowed: { + Code: "MethodNotAllowed", + Description: "The specified method is not allowed against this resource.", + HTTPStatusCode: http.StatusMethodNotAllowed, + }, + ErrInvalidPart: { + Code: "InvalidPart", + Description: "One or more of the specified parts could not be found. 
The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidPartOrder: { + Code: "InvalidPartOrder", + Description: "The list of parts was not in ascending order. The parts list must be specified in order by part number.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidObjectState: { + Code: "InvalidObjectState", + Description: "The operation is not valid for the current state of the object.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrAuthorizationHeaderMalformed: { + Code: "AuthorizationHeaderMalformed", + Description: "The authorization header is malformed; the region is wrong; expecting 'us-east-1'.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMalformedPOSTRequest: { + Code: "MalformedPOSTRequest", + Description: "The body of your POST request is not well-formed multipart/form-data.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrPOSTFileRequired: { + Code: "InvalidArgument", + Description: "POST requires exactly one file upload per request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrSignatureVersionNotSupported: { + Code: "InvalidRequest", + Description: "The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrBucketNotEmpty: { + Code: "BucketNotEmpty", + Description: "The bucket you tried to delete is not empty", + HTTPStatusCode: http.StatusConflict, + }, + ErrBucketAlreadyExists: { + Code: "BucketAlreadyExists", + Description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. 
Please select a different name and try again.", + HTTPStatusCode: http.StatusConflict, + }, + ErrAllAccessDisabled: { + Code: "AllAccessDisabled", + Description: "All access to this resource has been disabled.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrMalformedPolicy: { + Code: "MalformedPolicy", + Description: "Policy has invalid resource.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingCredTag: { + Code: "InvalidRequest", + Description: "Missing Credential field for this request.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidRegion: { + Code: "InvalidRegion", + Description: "Region does not match.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingSignTag: { + Code: "AccessDenied", + Description: "Signature header missing Signature field.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingSignHeadersTag: { + Code: "InvalidArgument", + Description: "Signature header missing SignedHeaders field.", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrAuthHeaderEmpty: { + Code: "InvalidArgument", + Description: "Authorization header is invalid -- one and only one ' ' (space) required.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingDateHeader: { + Code: "AccessDenied", + Description: "AWS authentication requires a valid Date or x-amz-date header", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrExpiredPresignRequest: { + Code: "AccessDenied", + Description: "Request has expired", + HTTPStatusCode: http.StatusForbidden, + }, + ErrRequestNotReadyYet: { + Code: "AccessDenied", + Description: "Request is not valid yet", + HTTPStatusCode: http.StatusForbidden, + }, + ErrSlowDown: { + Code: "SlowDown", + Description: "Resource requested is unreadable, please reduce your request rate", + HTTPStatusCode: http.StatusServiceUnavailable, + }, + ErrBadRequest: { + Code: "BadRequest", + Description: "400 BadRequest", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrKeyTooLongError: { + Code: "KeyTooLongError", 
+ Description: "Your key is too long", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsignedHeaders: { + Code: "AccessDenied", + Description: "There were headers present in the request which were not signed", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrBucketAlreadyOwnedByYou: { + Code: "BucketAlreadyOwnedByYou", + Description: "Your previous request to create the named bucket succeeded and you already own it.", + HTTPStatusCode: http.StatusConflict, + }, + ErrInvalidDuration: { + Code: "InvalidDuration", + Description: "Duration provided in the request is invalid.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidBucketObjectLockConfiguration: { + Code: "InvalidRequest", + Description: "Bucket is missing ObjectLockConfiguration", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrBucketTaggingNotFound: { + Code: "NoSuchTagSet", + Description: "The TagSet does not exist", + HTTPStatusCode: http.StatusNotFound, + }, + ErrObjectLockConfigurationNotAllowed: { + Code: "InvalidBucketState", + Description: "Object Lock configuration cannot be enabled on existing buckets", + HTTPStatusCode: http.StatusConflict, + }, + ErrNoSuchCORSConfiguration: { + Code: "NoSuchCORSConfiguration", + Description: "The CORS configuration does not exist", + HTTPStatusCode: http.StatusNotFound, + }, + ErrNoSuchWebsiteConfiguration: { + Code: "NoSuchWebsiteConfiguration", + Description: "The specified bucket does not have a website configuration", + HTTPStatusCode: http.StatusNotFound, + }, + ErrReplicationConfigurationNotFoundError: { + Code: "ReplicationConfigurationNotFoundError", + Description: "The replication configuration was not found", + HTTPStatusCode: http.StatusNotFound, + }, + ErrReplicationNeedsVersioningError: { + Code: "InvalidRequest", + Description: "Versioning must be 'Enabled' on the bucket to apply a replication configuration", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrReplicationBucketNeedsVersioningError: { + Code: "InvalidRequest", + 
Description: "Versioning must be 'Enabled' on the bucket to add a replication target", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNoSuchObjectLockConfiguration: { + Code: "NoSuchObjectLockConfiguration", + Description: "The specified object does not have a ObjectLock configuration", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrObjectLocked: { + Code: "InvalidRequest", + Description: "Object is WORM protected and cannot be overwritten", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidRetentionDate: { + Code: "InvalidRequest", + Description: "Date must be provided in ISO 8601 format", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrPastObjectLockRetainDate: { + Code: "InvalidRequest", + Description: "the retain until date must be in the future", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnknownWORMModeDirective: { + Code: "InvalidRequest", + Description: "unknown wormMode directive", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrObjectLockInvalidHeaders: { + Code: "InvalidRequest", + Description: "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrObjectRestoreAlreadyInProgress: { + Code: "RestoreAlreadyInProgress", + Description: "Object restore is already in progress", + HTTPStatusCode: http.StatusConflict, + }, + // Bucket notification related errors. + ErrEventNotification: { + Code: "InvalidArgument", + Description: "A specified event is not supported for notifications.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrARNNotification: { + Code: "InvalidArgument", + Description: "A specified destination ARN does not exist or is not well-formed. Verify the destination ARN.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrRegionNotification: { + Code: "InvalidArgument", + Description: "A specified destination is in a different region than the bucket. 
You must use a destination that resides in the same region as the bucket.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrOverlappingFilterNotification: { + Code: "InvalidArgument", + Description: "An object key name filtering rule defined with overlapping prefixes, overlapping suffixes, or overlapping combinations of prefixes and suffixes for the same event types.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrFilterNameInvalid: { + Code: "InvalidArgument", + Description: "filter rule name must be either prefix or suffix", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrFilterNamePrefix: { + Code: "InvalidArgument", + Description: "Cannot specify more than one prefix rule in a filter.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrFilterNameSuffix: { + Code: "InvalidArgument", + Description: "Cannot specify more than one suffix rule in a filter.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrFilterValueInvalid: { + Code: "InvalidArgument", + Description: "Size of filter rule value cannot exceed 1024 bytes in UTF-8 representation", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrOverlappingConfigs: { + Code: "InvalidArgument", + Description: "Configurations overlap. 
Configurations on the same bucket cannot share a common event type.", + HTTPStatusCode: http.StatusBadRequest, + }, + + ErrInvalidCopyPartRange: { + Code: "InvalidArgument", + Description: "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidCopyPartRangeSource: { + Code: "InvalidArgument", + Description: "Range specified is not valid for source object", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMetadataTooLarge: { + Code: "MetadataTooLarge", + Description: "Your metadata headers exceed the maximum allowed metadata size.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidTagDirective: { + Code: "InvalidArgument", + Description: "Unknown tag directive.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidEncryptionMethod: { + Code: "InvalidRequest", + Description: "The encryption method specified is not supported", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidQueryParams: { + Code: "AuthorizationQueryParametersError", + Description: "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrNoAccessKey: { + Code: "AccessDenied", + Description: "No AWSAccessKey was presented", + HTTPStatusCode: http.StatusForbidden, + }, + ErrInvalidToken: { + Code: "InvalidTokenId", + Description: "The security token included in the request is invalid", + HTTPStatusCode: http.StatusForbidden, + }, + + // S3 extensions. 
+ ErrInvalidObjectName: { + Code: "InvalidObjectName", + Description: "Object name contains unsupported characters.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidObjectNamePrefixSlash: { + Code: "InvalidObjectName", + Description: "Object name contains a leading slash.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrClientDisconnected: { + Code: "ClientDisconnected", + Description: "Client disconnected before response was ready", + HTTPStatusCode: 499, // No official code, use nginx value. + }, + ErrOperationTimedOut: { + Code: "RequestTimeout", + Description: "A timeout occurred while trying to lock a resource, please reduce your request rate", + HTTPStatusCode: http.StatusServiceUnavailable, + }, + ErrOperationMaxedOut: { + Code: "SlowDown", + Description: "A timeout exceeded while waiting to proceed with the request, please reduce your request rate", + HTTPStatusCode: http.StatusServiceUnavailable, + }, + ErrUnsupportedMetadata: { + Code: "InvalidArgument", + Description: "Your metadata headers are not supported.", + HTTPStatusCode: http.StatusBadRequest, + }, + // Generic Invalid-Request error. Should be used for response errors only for unlikely + // corner case errors for which introducing new APIErrorCode is not worth it. LogIf() + // should be used to log the error at the source of the error for debugging purposes. 
+ ErrInvalidRequest: { + Code: "InvalidRequest", + Description: "Invalid Request", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrIncorrectContinuationToken: { + Code: "InvalidArgument", + Description: "The continuation token provided is incorrect", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidFormatAccessKey: { + Code: "InvalidAccessKeyId", + Description: "The Access Key Id you provided contains invalid characters.", + HTTPStatusCode: http.StatusBadRequest, + }, + // S3 Select API Errors + ErrEmptyRequestBody: { + Code: "EmptyRequestBody", + Description: "Request body cannot be empty.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsupportedFunction: { + Code: "UnsupportedFunction", + Description: "Encountered an unsupported SQL function.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidDataSource: { + Code: "InvalidDataSource", + Description: "Invalid data source type. Only CSV and JSON are supported at this time.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidExpressionType: { + Code: "InvalidExpressionType", + Description: "The ExpressionType is invalid. Only SQL expressions are supported at this time.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrBusy: { + Code: "Busy", + Description: "The service is unavailable. 
Please retry.", + HTTPStatusCode: http.StatusServiceUnavailable, + }, + ErrUnauthorizedAccess: { + Code: "UnauthorizedAccess", + Description: "You are not authorized to perform this operation", + HTTPStatusCode: http.StatusUnauthorized, + }, + ErrExpressionTooLong: { + Code: "ExpressionTooLong", + Description: "The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrIllegalSQLFunctionArgument: { + Code: "IllegalSqlFunctionArgument", + Description: "Illegal argument was used in the SQL function.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidKeyPath: { + Code: "InvalidKeyPath", + Description: "Key path in the SQL expression is invalid.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidCompressionFormat: { + Code: "InvalidCompressionFormat", + Description: "The file is not in a supported compression format. Only GZIP is supported at this time.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidFileHeaderInfo: { + Code: "InvalidFileHeaderInfo", + Description: "The FileHeaderInfo is invalid. Only NONE, USE, and IGNORE are supported.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidJSONType: { + Code: "InvalidJsonType", + Description: "The JsonType is invalid. Only DOCUMENT and LINES are supported at this time.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidQuoteFields: { + Code: "InvalidQuoteFields", + Description: "The QuoteFields is invalid. Only ALWAYS and ASNEEDED are supported.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidRequestParameter: { + Code: "InvalidRequestParameter", + Description: "The value of a parameter in SelectRequest element is invalid. 
Check the service API documentation and try again.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidDataType: { + Code: "InvalidDataType", + Description: "The SQL expression contains an invalid data type.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidTextEncoding: { + Code: "InvalidTextEncoding", + Description: "Invalid encoding type. Only UTF-8 encoding is supported at this time.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidTableAlias: { + Code: "InvalidTableAlias", + Description: "The SQL expression contains an invalid table alias.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingRequiredParameter: { + Code: "MissingRequiredParameter", + Description: "The SelectRequest entity is missing a required parameter. Check the service documentation and try again.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrObjectSerializationConflict: { + Code: "ObjectSerializationConflict", + Description: "The SelectRequest entity can only contain one of CSV or JSON. Check the service documentation and try again.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsupportedSQLOperation: { + Code: "UnsupportedSqlOperation", + Description: "Encountered an unsupported SQL operation.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsupportedSQLStructure: { + Code: "UnsupportedSqlStructure", + Description: "Encountered an unsupported SQL structure. 
Check the SQL Reference.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsupportedSyntax: { + Code: "UnsupportedSyntax", + Description: "Encountered invalid syntax.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsupportedRangeHeader: { + Code: "UnsupportedRangeHeader", + Description: "Range header is not supported for this operation.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrLexerInvalidChar: { + Code: "LexerInvalidChar", + Description: "The SQL expression contains an invalid character.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrLexerInvalidOperator: { + Code: "LexerInvalidOperator", + Description: "The SQL expression contains an invalid literal.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrLexerInvalidLiteral: { + Code: "LexerInvalidLiteral", + Description: "The SQL expression contains an invalid operator.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrLexerInvalidIONLiteral: { + Code: "LexerInvalidIONLiteral", + Description: "The SQL expression contains an invalid operator.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedDatePart: { + Code: "ParseExpectedDatePart", + Description: "Did not find the expected date part in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedKeyword: { + Code: "ParseExpectedKeyword", + Description: "Did not find the expected keyword in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedTokenType: { + Code: "ParseExpectedTokenType", + Description: "Did not find the expected token in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpected2TokenTypes: { + Code: "ParseExpected2TokenTypes", + Description: "Did not find the expected token in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedNumber: { + Code: "ParseExpectedNumber", + Description: "Did not find the expected number in the SQL expression.", + HTTPStatusCode: 
http.StatusBadRequest, + }, + ErrParseExpectedRightParenBuiltinFunctionCall: { + Code: "ParseExpectedRightParenBuiltinFunctionCall", + Description: "Did not find the expected right parenthesis character in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedTypeName: { + Code: "ParseExpectedTypeName", + Description: "Did not find the expected type name in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedWhenClause: { + Code: "ParseExpectedWhenClause", + Description: "Did not find the expected WHEN clause in the SQL expression. CASE is not supported.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedToken: { + Code: "ParseUnsupportedToken", + Description: "The SQL expression contains an unsupported token.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedLiteralsGroupBy: { + Code: "ParseUnsupportedLiteralsGroupBy", + Description: "The SQL expression contains an unsupported use of GROUP BY.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedMember: { + Code: "ParseExpectedMember", + Description: "The SQL expression contains an unsupported use of MEMBER.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedSelect: { + Code: "ParseUnsupportedSelect", + Description: "The SQL expression contains an unsupported use of SELECT.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedCase: { + Code: "ParseUnsupportedCase", + Description: "The SQL expression contains an unsupported use of CASE.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedCaseClause: { + Code: "ParseUnsupportedCaseClause", + Description: "The SQL expression contains an unsupported use of CASE.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedAlias: { + Code: "ParseUnsupportedAlias", + Description: "The SQL expression contains an unsupported use of ALIAS.", + HTTPStatusCode: http.StatusBadRequest, + }, + 
ErrParseUnsupportedSyntax: { + Code: "ParseUnsupportedSyntax", + Description: "The SQL expression contains unsupported syntax.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnknownOperator: { + Code: "ParseUnknownOperator", + Description: "The SQL expression contains an invalid operator.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseMissingIdentAfterAt: { + Code: "ParseMissingIdentAfterAt", + Description: "Did not find the expected identifier after the @ symbol in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnexpectedOperator: { + Code: "ParseUnexpectedOperator", + Description: "The SQL expression contains an unexpected operator.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnexpectedTerm: { + Code: "ParseUnexpectedTerm", + Description: "The SQL expression contains an unexpected term.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnexpectedToken: { + Code: "ParseUnexpectedToken", + Description: "The SQL expression contains an unexpected token.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnexpectedKeyword: { + Code: "ParseUnexpectedKeyword", + Description: "The SQL expression contains an unexpected keyword.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedExpression: { + Code: "ParseExpectedExpression", + Description: "Did not find the expected SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedLeftParenAfterCast: { + Code: "ParseExpectedLeftParenAfterCast", + Description: "Did not find expected the left parenthesis in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedLeftParenValueConstructor: { + Code: "ParseExpectedLeftParenValueConstructor", + Description: "Did not find expected the left parenthesis in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedLeftParenBuiltinFunctionCall: { + Code: "ParseExpectedLeftParenBuiltinFunctionCall", + Description: 
"Did not find the expected left parenthesis in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedArgumentDelimiter: { + Code: "ParseExpectedArgumentDelimiter", + Description: "Did not find the expected argument delimiter in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseCastArity: { + Code: "ParseCastArity", + Description: "The SQL expression CAST has incorrect arity.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseInvalidTypeParam: { + Code: "ParseInvalidTypeParam", + Description: "The SQL expression contains an invalid parameter value.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseEmptySelect: { + Code: "ParseEmptySelect", + Description: "The SQL expression contains an empty SELECT.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseSelectMissingFrom: { + Code: "ParseSelectMissingFrom", + Description: "GROUP is not supported in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedIdentForGroupName: { + Code: "ParseExpectedIdentForGroupName", + Description: "GROUP is not supported in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedIdentForAlias: { + Code: "ParseExpectedIdentForAlias", + Description: "Did not find the expected identifier for the alias in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedCallWithStar: { + Code: "ParseUnsupportedCallWithStar", + Description: "Only COUNT with (*) as a parameter is supported in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseNonUnaryAgregateFunctionCall: { + Code: "ParseNonUnaryAgregateFunctionCall", + Description: "Only one argument is supported for aggregate functions in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseMalformedJoin: { + Code: "ParseMalformedJoin", + Description: "JOIN is not supported in the SQL expression.", + HTTPStatusCode: 
http.StatusBadRequest, + }, + ErrParseExpectedIdentForAt: { + Code: "ParseExpectedIdentForAt", + Description: "Did not find the expected identifier for AT name in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseAsteriskIsNotAloneInSelectList: { + Code: "ParseAsteriskIsNotAloneInSelectList", + Description: "Other expressions are not allowed in the SELECT list when '*' is used without dot notation in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseCannotMixSqbAndWildcardInSelectList: { + Code: "ParseCannotMixSqbAndWildcardInSelectList", + Description: "Cannot mix [] and * in the same expression in a SELECT list in SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseInvalidContextForWildcardInSelectList: { + Code: "ParseInvalidContextForWildcardInSelectList", + Description: "Invalid use of * in SELECT list in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrIncorrectSQLFunctionArgumentType: { + Code: "IncorrectSqlFunctionArgumentType", + Description: "Incorrect type of arguments in function call in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrValueParseFailure: { + Code: "ValueParseFailure", + Description: "Time stamp parse failure in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorInvalidArguments: { + Code: "EvaluatorInvalidArguments", + Description: "Incorrect number of arguments in the function call in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrIntegerOverflow: { + Code: "IntegerOverflow", + Description: "Int overflow or underflow in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrLikeInvalidInputs: { + Code: "LikeInvalidInputs", + Description: "Invalid argument given to the LIKE clause in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrCastFailed: { + Code: "CastFailed", + Description: "Attempt to convert from one data 
type to another using CAST failed in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidCast: { + Code: "InvalidCast", + Description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorInvalidTimestampFormatPattern: { + Code: "EvaluatorInvalidTimestampFormatPattern", + Description: "Time stamp format pattern requires additional fields in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing: { + Code: "EvaluatorInvalidTimestampFormatPatternSymbolForParsing", + Description: "Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorTimestampFormatPatternDuplicateFields: { + Code: "EvaluatorTimestampFormatPatternDuplicateFields", + Description: "Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch: { + Code: "EvaluatorUnterminatedTimestampFormatPatternToken", + Description: "Time stamp format pattern contains unterminated token in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorUnterminatedTimestampFormatPatternToken: { + Code: "EvaluatorInvalidTimestampFormatPatternToken", + Description: "Time stamp format pattern contains an invalid token in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorInvalidTimestampFormatPatternToken: { + Code: "EvaluatorInvalidTimestampFormatPatternToken", + Description: "Time stamp format pattern contains an invalid token in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorInvalidTimestampFormatPatternSymbol: { + Code: 
"EvaluatorInvalidTimestampFormatPatternSymbol", + Description: "Time stamp format pattern contains an invalid symbol in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorBindingDoesNotExist: { + Code: "ErrEvaluatorBindingDoesNotExist", + Description: "A column name or a path provided does not exist in the SQL expression", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingHeaders: { + Code: "MissingHeaders", + Description: "Some headers in the query are missing from the file. Check the file and try again.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidColumnIndex: { + Code: "InvalidColumnIndex", + Description: "The column index is invalid. Please check the service documentation and try again.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrPostPolicyConditionInvalidFormat: { + Code: "PostPolicyInvalidKeyName", + Description: "Invalid according to Policy: Policy Conditions failed", + HTTPStatusCode: http.StatusForbidden, + }, + // Add your error structure here. + ErrMalformedJSON: { + Code: "MalformedJSON", + Description: "The JSON was not well-formed or did not validate against our published format.", + HTTPStatusCode: http.StatusBadRequest, + }, +} + +// GetAPIError provides API Error for input API error code. +func GetAPIError(code ErrorCode) APIError { + return errorCodeResponse[code] +} + +// STSErrorCode type of error status. 
+type STSErrorCode int + +// STSError structure +type STSError struct { + Code string + Description string + HTTPStatusCode int +} + +// Error codes,list - http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithSAML.html +const ( + ErrSTSNone STSErrorCode = iota + ErrSTSAccessDenied + ErrSTSMissingParameter + ErrSTSInvalidParameterValue + ErrSTSInternalError +) + +type stsErrorCodeMap map[STSErrorCode]STSError + +//ToSTSErr code to err +func (e stsErrorCodeMap) ToSTSErr(errCode STSErrorCode) STSError { + apiErr, ok := e[errCode] + if !ok { + return e[ErrSTSInternalError] + } + return apiErr +} + +// StsErrCodes error code to STSError structure, these fields carry respective +// descriptions for all the error responses. +var StsErrCodes = stsErrorCodeMap{ + ErrSTSAccessDenied: { + Code: "AccessDenied", + Description: "Generating temporary credentials not allowed for this request.", + HTTPStatusCode: http.StatusForbidden, + }, + ErrSTSMissingParameter: { + Code: "MissingParameter", + Description: "A required parameter for the specified action is not supplied.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrSTSInvalidParameterValue: { + Code: "InvalidParameterValue", + Description: "An invalid or out-of-range value was supplied for the input parameter.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrSTSInternalError: { + Code: "InternalError", + Description: "We encountered an internal error generating credentials, please try again.", + HTTPStatusCode: http.StatusInternalServerError, + }, +} diff --git a/s3/handlers/services_types.go b/s3/handlers/services_types.go index ed0bd2eda..30e1d9ba1 100644 --- a/s3/handlers/services_types.go +++ b/s3/handlers/services_types.go @@ -20,13 +20,5 @@ type BucketMetadata struct { Created time.Time } -// NewBucketMetadata creates BucketMetadata with the supplied name and Created to Now. 
-func NewBucketMetadata(name, region, accessKey, acl string) *BucketMetadata { - return &BucketMetadata{ - Name: name, - Region: region, - Owner: accessKey, - Acl: acl, - Created: time.Now().UTC(), - } +type ObjectMetadata struct { } From 68747537cd8d10fa1a6ba7503305ab8517616941 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Wed, 16 Aug 2023 11:59:35 +0800 Subject: [PATCH 035/139] chore: --- s3/handlers/services.go | 8 +- s3/handlers/services_errors.go | 8 +- s3/services/auth/check_handler_auth.go | 43 +++++----- s3/services/auth/service.go | 3 +- s3/services/auth/signature-v4-parser.go | 101 ++++++++++++------------ s3/services/auth/signature-v4-utils.go | 10 +-- s3/services/auth/signature-v4.go | 66 ++++++++-------- s3/services/bucket/service.go | 2 +- 8 files changed, 120 insertions(+), 121 deletions(-) diff --git a/s3/handlers/services.go b/s3/handlers/services.go index 218f208de..8c4c2c6dc 100644 --- a/s3/handlers/services.go +++ b/s3/handlers/services.go @@ -2,9 +2,9 @@ package handlers import ( "context" - "github.com/bittorrent/go-btfs/s3/action" - "github.com/bittorrent/go-btfs/s3/apierrors" "net/http" + + "github.com/bittorrent/go-btfs/s3/action" ) type CorsService interface { @@ -24,7 +24,7 @@ type AccessKeyService interface { } type AuthService interface { - VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *AccessKeyRecord, err apierrors.ErrorCode) + VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *AccessKeyRecord, err ErrorCode) } type BucketService interface { @@ -34,7 +34,7 @@ type BucketService interface { HasBucket(ctx context.Context, bucket string) bool SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) DeleteBucket(ctx context.Context, bucket string) error - GetAllBucketsOfUser(ctx context.Context, username string) ([]BucketMetadata, error) + GetAllBucketsOfUser(ctx context.Context, accessKey string) ([]BucketMetadata, error) } type ObjectService 
interface { diff --git a/s3/handlers/services_errors.go b/s3/handlers/services_errors.go index abac6a647..4f8e2c5be 100644 --- a/s3/handlers/services_errors.go +++ b/s3/handlers/services_errors.go @@ -3,8 +3,10 @@ package handlers import "errors" var ( - ErrBucketNotFound = errors.New("bucket is not found") ErrSginVersionNotSupport = errors.New("sign version is not support") - ErrBucketNotEmpty = errors.New("bucket not empty") - ErrBucketAccessDenied = errors.New("bucket access denied. ") + + // bucket + ErrBucketNotFound = errors.New("bucket is not found") + ErrBucketAccessDenied = errors.New("bucket access denied. ") + ErrSetBucketEmptyFailed = errors.New("set bucket empty failed. ") ) diff --git a/s3/services/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go index 0808fff4e..319c2eb44 100644 --- a/s3/services/auth/check_handler_auth.go +++ b/s3/services/auth/check_handler_auth.go @@ -5,7 +5,6 @@ import ( "encoding/hex" "net/http" - "github.com/bittorrent/go-btfs/s3/apierrors" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/etag" "github.com/bittorrent/go-btfs/s3/handlers" @@ -19,26 +18,26 @@ import ( // // returns APIErrorCode if any to be replied to the client. // Additionally, returns the accessKey used in the request, and if this request is by an admin. 
-func (s *Service) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request) (cred *handlers.AccessKeyRecord, s3Err apierrors.ErrorCode) { +func (s *Service) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request) (cred *handlers.AccessKeyRecord, s3Err handlers.ErrorCode) { // check signature switch GetRequestAuthType(r) { case AuthTypeSigned, AuthTypePresigned: region := "" - if s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3); s3Err != apierrors.ErrNone { + if s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3); s3Err != handlers.ErrNone { return cred, s3Err } cred, s3Err = s.getReqAccessKeyV4(r, region, ServiceS3) default: - return cred, apierrors.ErrSignatureVersionNotSupported + return cred, handlers.ErrSignatureVersionNotSupported } - if s3Err != apierrors.ErrNone { + if s3Err != handlers.ErrNone { return cred, s3Err } - return cred, apierrors.ErrNone + return cred, handlers.ErrNone } -func (s *Service) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { +func (s *Service) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error handlers.ErrorCode) { sha256sum := getContentSha256Cksum(r, stype) switch { case IsRequestSignatureV4(r): @@ -46,18 +45,18 @@ func (s *Service) ReqSignatureV4Verify(r *http.Request, region string, stype ser case isRequestPresignedSignatureV4(r): return s.doesPresignedSignatureMatch(sha256sum, r, region, stype) default: - return apierrors.ErrAccessDenied + return handlers.ErrAccessDenied } } // IsReqAuthenticated Verify if request has valid AWS Signature Version '4'. 
-func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { - if errCode := s.ReqSignatureV4Verify(r, region, stype); errCode != apierrors.ErrNone { +func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error handlers.ErrorCode) { + if errCode := s.ReqSignatureV4Verify(r, region, stype); errCode != handlers.ErrNone { return errCode } clientETag, err := etag.FromContentMD5(r.Header) if err != nil { - return apierrors.ErrInvalidDigest + return handlers.ErrInvalidDigest } // Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned) @@ -67,13 +66,13 @@ func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio if sha256Sum, ok := r.Form[consts.AmzContentSha256]; ok && len(sha256Sum) > 0 { contentSHA256, err = hex.DecodeString(sha256Sum[0]) if err != nil { - return apierrors.ErrContentSHA256Mismatch + return handlers.ErrContentSHA256Mismatch } } } else if _, ok := r.Header[consts.AmzContentSha256]; !skipSHA256 && ok { contentSHA256, err = hex.DecodeString(r.Header.Get(consts.AmzContentSha256)) if err != nil || len(contentSHA256) == 0 { - return apierrors.ErrContentSHA256Mismatch + return handlers.ErrContentSHA256Mismatch } } @@ -81,39 +80,39 @@ func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio // The verification happens implicit during reading. 
reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1) if err != nil { - return apierrors.ErrInternalError + return handlers.ErrInternalError } r.Body = reader - return apierrors.ErrNone + return handlers.ErrNone } //// ValidateAdminSignature validate admin Signature -//func (s *Service) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (Credentials, map[string]interface{}, bool, apierrors.ErrorCode) { +//func (s *Service) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (Credentials, map[string]interface{}, bool, handlers.ErrorCode) { // var cred Credentials // var owner bool -// s3Err := apierrors.ErrAccessDenied +// s3Err := handlers.ErrAccessDenied // if _, ok := r.Header[consts.AmzContentSha256]; ok && // GetRequestAuthType(r) == AuthTypeSigned { // // We only support admin credentials to access admin APIs. // cred, s3Err = GetReqAccessKeyV4(r, region, ServiceS3) -// if s3Err != apierrors.ErrNone { +// if s3Err != handlers.ErrNone { // return cred, nil, owner, s3Err // } // // // we only support V4 (no presign) with auth body // s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3) // } -// if s3Err != apierrors.ErrNone { +// if s3Err != handlers.ErrNone { // return cred, nil, owner, s3Err // } // -// return cred, nil, owner, apierrors.ErrNone +// return cred, nil, owner, handlers.ErrNone //} //// -//func (s *Service) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err apierrors.ErrorCode) { +//func (s *Service) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err handlers.ErrorCode) { // switch GetRequestAuthType(r) { // case AuthTypeUnknown: -// s3Err = apierrors.ErrSignatureVersionNotSupported +// s3Err = handlers.ErrSignatureVersionNotSupported // case AuthTypeSignedV2, AuthTypePresignedV2: // cred, owner, s3Err = s.getReqAccessKeyV2(r) // case AuthTypeStreamingSigned, AuthTypePresigned, AuthTypeSigned: diff --git 
a/s3/services/auth/service.go b/s3/services/auth/service.go index 25d2be2bc..ed48c84be 100644 --- a/s3/services/auth/service.go +++ b/s3/services/auth/service.go @@ -4,7 +4,6 @@ import ( "context" "net/http" - "github.com/bittorrent/go-btfs/s3/apierrors" "github.com/bittorrent/go-btfs/s3/handlers" "github.com/bittorrent/go-btfs/s3/services" ) @@ -27,7 +26,7 @@ func NewService(providers services.Providerser, accessKeySvc handlers.AccessKeyS return } -func (s *Service) VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *handlers.AccessKeyRecord, err apierrors.ErrorCode) { +func (s *Service) VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *handlers.AccessKeyRecord, err handlers.ErrorCode) { s.CheckRequestAuthTypeCredential(ctx, r) return } diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go index 8099fcead..ae57b6dd9 100644 --- a/s3/services/auth/signature-v4-parser.go +++ b/s3/services/auth/signature-v4-parser.go @@ -23,7 +23,6 @@ import ( "strings" "time" - "github.com/bittorrent/go-btfs/s3/apierrors" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/handlers" ) @@ -51,21 +50,21 @@ func (c credentialHeader) getScope() string { } // parse credentialHeader string into its structured form. 
-func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, aec apierrors.ErrorCode) { +func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, aec handlers.ErrorCode) { creds := strings.SplitN(strings.TrimSpace(credElement), "=", 2) if len(creds) != 2 { - return ch, apierrors.ErrMissingFields + return ch, handlers.ErrMissingFields } if creds[0] != "Credential" { - return ch, apierrors.ErrMissingCredTag + return ch, handlers.ErrMissingCredTag } credElements := strings.Split(strings.TrimSpace(creds[1]), consts.SlashSeparator) if len(credElements) < 5 { - return ch, apierrors.ErrCredMalformed + return ch, handlers.ErrCredMalformed } accessKey := strings.Join(credElements[:len(credElements)-4], consts.SlashSeparator) // The access key may contain one or more `/` //if !IsAccessKeyValid(accessKey) { - // return ch, apierrors.ErrInvalidAccessKeyID + // return ch, handlers.ErrInvalidAccessKeyID //} // Save access key id. cred := credentialHeader{ @@ -75,7 +74,7 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) var e error cred.scope.date, e = time.Parse(yyyymmdd, credElements[0]) if e != nil { - return ch, apierrors.ErrAuthorizationHeaderMalformed + return ch, handlers.ErrAuthorizationHeaderMalformed } cred.scope.region = credElements[1] @@ -90,53 +89,53 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) } // Should validate region, only if region is set. 
if !isValidRegion(sRegion, region) { - return ch, apierrors.ErrAuthorizationHeaderMalformed + return ch, handlers.ErrAuthorizationHeaderMalformed } if credElements[2] != string(stype) { //switch stype { //case ServiceSTS: - // return ch, apierrors.ErrAuthorizationHeaderMalformed + // return ch, handlers.ErrAuthorizationHeaderMalformed //} - return ch, apierrors.ErrAuthorizationHeaderMalformed + return ch, handlers.ErrAuthorizationHeaderMalformed } cred.scope.service = credElements[2] if credElements[3] != "aws4_request" { - return ch, apierrors.ErrAuthorizationHeaderMalformed + return ch, handlers.ErrAuthorizationHeaderMalformed } cred.scope.request = credElements[3] - return cred, apierrors.ErrNone + return cred, handlers.ErrNone } // Parse signature from signature tag. -func parseSignature(signElement string) (string, apierrors.ErrorCode) { +func parseSignature(signElement string) (string, handlers.ErrorCode) { signFields := strings.Split(strings.TrimSpace(signElement), "=") if len(signFields) != 2 { - return "", apierrors.ErrMissingFields + return "", handlers.ErrMissingFields } if signFields[0] != "Signature" { - return "", apierrors.ErrMissingSignTag + return "", handlers.ErrMissingSignTag } if signFields[1] == "" { - return "", apierrors.ErrMissingFields + return "", handlers.ErrMissingFields } signature := signFields[1] - return signature, apierrors.ErrNone + return signature, handlers.ErrNone } // Parse slice of signed headers from signed headers tag. 
-func parseSignedHeader(signedHdrElement string) ([]string, apierrors.ErrorCode) { +func parseSignedHeader(signedHdrElement string) ([]string, handlers.ErrorCode) { signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") if len(signedHdrFields) != 2 { - return nil, apierrors.ErrMissingFields + return nil, handlers.ErrMissingFields } if signedHdrFields[0] != "SignedHeaders" { - return nil, apierrors.ErrMissingSignHeadersTag + return nil, handlers.ErrMissingSignHeadersTag } if signedHdrFields[1] == "" { - return nil, apierrors.ErrMissingFields + return nil, handlers.ErrMissingFields } signedHeaders := strings.Split(signedHdrFields[1], ";") - return signedHeaders, apierrors.ErrNone + return signedHeaders, handlers.ErrNone } // signValues data type represents structured form of AWS Signature V4 header. @@ -163,27 +162,27 @@ type preSignValues struct { // querystring += &X-Amz-Signature=signature // // verifies if any of the necessary query params are missing in the presigned request. -func doesV4PresignParamsExist(query url.Values) apierrors.ErrorCode { +func doesV4PresignParamsExist(query url.Values) handlers.ErrorCode { v4PresignQueryParams := []string{consts.AmzAlgorithm, consts.AmzCredential, consts.AmzSignature, consts.AmzDate, consts.AmzSignedHeaders, consts.AmzExpires} for _, v4PresignQueryParam := range v4PresignQueryParams { if _, ok := query[v4PresignQueryParam]; !ok { - return apierrors.ErrInvalidQueryParams + return handlers.ErrInvalidQueryParams } } - return apierrors.ErrNone + return handlers.ErrNone } // Parses all the presigned signature values into separate elements. -func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, aec apierrors.ErrorCode) { +func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, aec handlers.ErrorCode) { // verify whether the required query params exist. 
aec = doesV4PresignParamsExist(query) - if aec != apierrors.ErrNone { + if aec != handlers.ErrNone { return psv, aec } // Verify if the query algorithm is supported or not. if query.Get(consts.AmzAlgorithm) != signV4Algorithm { - return psv, apierrors.ErrAuthorizationHeaderMalformed + return psv, handlers.ErrAuthorizationHeaderMalformed } // Initialize signature version '4' structured header. @@ -191,7 +190,7 @@ func parsePreSignV4(query url.Values, region string, stype serviceType) (psv pre // Save credential. preSignV4Values.Credential, aec = parseCredentialHeader("Credential="+query.Get(consts.AmzCredential), region, stype) - if aec != apierrors.ErrNone { + if aec != handlers.ErrNone { return psv, aec } @@ -199,45 +198,45 @@ func parsePreSignV4(query url.Values, region string, stype serviceType) (psv pre // Save date in native time.Time. preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get(consts.AmzDate)) if e != nil { - return psv, apierrors.ErrAuthorizationHeaderMalformed + return psv, handlers.ErrAuthorizationHeaderMalformed } // Save expires in native time.Duration. preSignV4Values.Expires, e = time.ParseDuration(query.Get(consts.AmzExpires) + "s") if e != nil { - return psv, apierrors.ErrAuthorizationHeaderMalformed + return psv, handlers.ErrAuthorizationHeaderMalformed } if preSignV4Values.Expires < 0 { - return psv, apierrors.ErrAuthorizationHeaderMalformed + return psv, handlers.ErrAuthorizationHeaderMalformed } // Check if Expiry time is less than 7 days (value in seconds). if preSignV4Values.Expires.Seconds() > 604800 { - return psv, apierrors.ErrAuthorizationHeaderMalformed + return psv, handlers.ErrAuthorizationHeaderMalformed } // Save signed headers. preSignV4Values.SignedHeaders, aec = parseSignedHeader("SignedHeaders=" + query.Get(consts.AmzSignedHeaders)) - if aec != apierrors.ErrNone { + if aec != handlers.ErrNone { return psv, aec } // Save signature. 
preSignV4Values.Signature, aec = parseSignature("Signature=" + query.Get(consts.AmzSignature)) - if aec != apierrors.ErrNone { + if aec != handlers.ErrNone { return psv, aec } // Return structed form of signature query string. - return preSignV4Values, apierrors.ErrNone + return preSignV4Values, handlers.ErrNone } // Parses signature version '4' header of the following form. // // Authorization: algorithm Credential=accessKeyID/credScope, \ // SignedHeaders=signedHeaders, Signature=signature -func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, aec apierrors.ErrorCode) { +func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, aec handlers.ErrorCode) { // credElement is fetched first to skip replacing the space in access key. credElement := strings.TrimPrefix(strings.Split(strings.TrimSpace(v4Auth), ",")[0], signV4Algorithm) // Replace all spaced strings, some clients can send spaced @@ -245,58 +244,58 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues // to make parsing easier. v4Auth = strings.ReplaceAll(v4Auth, " ", "") if v4Auth == "" { - return sv, apierrors.ErrAuthHeaderEmpty + return sv, handlers.ErrAuthHeaderEmpty } // Verify if the header algorithm is supported or not. if !strings.HasPrefix(v4Auth, signV4Algorithm) { - return sv, apierrors.ErrSignatureVersionNotSupported + return sv, handlers.ErrSignatureVersionNotSupported } // Strip off the Algorithm prefix. v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - return sv, apierrors.ErrMissingFields + return sv, handlers.ErrMissingFields } // Initialize signature version '4' structured header. signV4Values := signValues{} - var s3Err apierrors.ErrorCode + var s3Err handlers.ErrorCode // Save credentail values. 
signV4Values.Credential, s3Err = parseCredentialHeader(strings.TrimSpace(credElement), region, stype) - if s3Err != apierrors.ErrNone { + if s3Err != handlers.ErrNone { return sv, s3Err } // Save signed headers. signV4Values.SignedHeaders, s3Err = parseSignedHeader(authFields[1]) - if s3Err != apierrors.ErrNone { + if s3Err != handlers.ErrNone { return sv, s3Err } // Save signature. signV4Values.Signature, s3Err = parseSignature(authFields[2]) - if s3Err != apierrors.ErrNone { + if s3Err != handlers.ErrNone { return sv, s3Err } // Return the structure here. - return signV4Values, apierrors.ErrNone + return signV4Values, handlers.ErrNone } -func (s *Service) getReqAccessKeyV4(r *http.Request, region string, stype serviceType) (*handlers.AccessKeyRecord, apierrors.ErrorCode) { +func (s *Service) getReqAccessKeyV4(r *http.Request, region string, stype serviceType) (*handlers.AccessKeyRecord, handlers.ErrorCode) { ch, s3Err := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) - if s3Err != apierrors.ErrNone { + if s3Err != handlers.ErrNone { // Strip off the Algorithm prefix. v4Auth := strings.TrimPrefix(r.Header.Get("Authorization"), signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - return &handlers.AccessKeyRecord{}, apierrors.ErrMissingFields + return &handlers.AccessKeyRecord{}, handlers.ErrMissingFields } ch, s3Err = parseCredentialHeader(authFields[0], region, stype) - if s3Err != apierrors.ErrNone { + if s3Err != handlers.ErrNone { return &handlers.AccessKeyRecord{}, s3Err } } @@ -304,7 +303,7 @@ func (s *Service) getReqAccessKeyV4(r *http.Request, region string, stype servic // check accessKey. 
record, err := s.accessKeySvc.Get(ch.accessKey) if err != nil { - return &handlers.AccessKeyRecord{}, err + return &handlers.AccessKeyRecord{}, handlers.ErrNoSuchUserPolicy } - return record, apierrors.ErrNone + return record, handlers.ErrNone } diff --git a/s3/services/auth/signature-v4-utils.go b/s3/services/auth/signature-v4-utils.go index 734dccab9..25b8b48c5 100644 --- a/s3/services/auth/signature-v4-utils.go +++ b/s3/services/auth/signature-v4-utils.go @@ -23,8 +23,8 @@ import ( "strconv" "strings" - "github.com/bittorrent/go-btfs/s3/apierrors" "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/handlers" ) // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the @@ -60,13 +60,13 @@ func contains(slice interface{}, elem interface{}) bool { } // extractSignedHeaders extract signed headers from Authorization header -func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, apierrors.ErrorCode) { +func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, handlers.ErrorCode) { reqHeaders := r.Header reqQueries := r.Form // find whether "host" is part of list of signed headers. // if not return ErrUnsignedHeaders. "host" is mandatory. if !contains(signedHeaders, "host") { - return nil, apierrors.ErrUnsignedHeaders + return nil, handlers.ErrUnsignedHeaders } extractedSignedHeaders := make(http.Header) for _, header := range signedHeaders { @@ -116,10 +116,10 @@ func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, // calculation to be compatible with such clients. extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) default: - return nil, apierrors.ErrUnsignedHeaders + return nil, handlers.ErrUnsignedHeaders } } - return extractedSignedHeaders, apierrors.ErrNone + return extractedSignedHeaders, handlers.ErrNone } // Returns SHA256 for calculating canonical-request. 
diff --git a/s3/services/auth/signature-v4.go b/s3/services/auth/signature-v4.go index 26856cf38..4b06b49f8 100644 --- a/s3/services/auth/signature-v4.go +++ b/s3/services/auth/signature-v4.go @@ -24,8 +24,8 @@ import ( "strconv" "time" - "github.com/bittorrent/go-btfs/s3/apierrors" "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/handlers" "github.com/bittorrent/go-btfs/s3/set" "github.com/bittorrent/go-btfs/s3/utils" ) @@ -57,37 +57,37 @@ func compareSignatureV4(sig1, sig2 string) bool { // DoesPresignedSignatureMatch - Verify queryString headers with presigned signature // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html // -// returns apierrors.ErrNone if the signature matches. -func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) apierrors.ErrorCode { +// returns handlers.ErrNone if the signature matches. +func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) handlers.ErrorCode { // Copy request req := *r // Parse request query string. - pSignValues, err := parsePreSignV4(req.Form, region, stype) - if err != apierrors.ErrNone { - return err + pSignValues, errCode := parsePreSignV4(req.Form, region, stype) + if errCode != handlers.ErrNone { + return errCode } // get access_info by accessKey - cred, s3Err := s.accessKeySvc.Get(pSignValues.Credential.accessKey) - if s3Err != apierrors.ErrNone { - return s3Err + cred, err := s.accessKeySvc.Get(pSignValues.Credential.accessKey) + if err != nil { + return handlers.ErrNoSuchUserPolicy } // Extract all the signed headers along with its values. 
extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r) - if errCode != apierrors.ErrNone { + if errCode != handlers.ErrNone { return errCode } // If the host which signed the request is slightly ahead in time (by less than MaxSkewTime) the // request should still be allowed. if pSignValues.Date.After(time.Now().UTC().Add(consts.MaxSkewTime)) { - return apierrors.ErrRequestNotReadyYet + return handlers.ErrRequestNotReadyYet } if time.Now().UTC().Sub(pSignValues.Date) > pSignValues.Expires { - return apierrors.ErrExpiredPresignRequest + return handlers.ErrExpiredPresignRequest } // Save the date and expires. @@ -138,28 +138,28 @@ func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Verify if date query is same. if req.Form.Get(consts.AmzDate) != query.Get(consts.AmzDate) { - return apierrors.ErrSignatureDoesNotMatch + return handlers.ErrSignatureDoesNotMatch } // Verify if expires query is same. if req.Form.Get(consts.AmzExpires) != query.Get(consts.AmzExpires) { - return apierrors.ErrSignatureDoesNotMatch + return handlers.ErrSignatureDoesNotMatch } // Verify if signed headers query is same. if req.Form.Get(consts.AmzSignedHeaders) != query.Get(consts.AmzSignedHeaders) { - return apierrors.ErrSignatureDoesNotMatch + return handlers.ErrSignatureDoesNotMatch } // Verify if credential query is same. if req.Form.Get(consts.AmzCredential) != query.Get(consts.AmzCredential) { - return apierrors.ErrSignatureDoesNotMatch + return handlers.ErrSignatureDoesNotMatch } // Verify if sha256 payload query is same. if clntHashedPayload != "" && clntHashedPayload != query.Get(consts.AmzContentSha256) { - return apierrors.ErrContentSHA256Mismatch + return handlers.ErrContentSHA256Mismatch } // not check SessionToken. //// Verify if security token is correct. 
//if token != "" && subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 { - // return apierrors.ErrInvalidToken + // return handlers.ErrInvalidToken //} // Verify finally if signature is same. @@ -179,16 +179,16 @@ func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Verify signature. if !compareSignatureV4(req.Form.Get(consts.AmzSignature), newSignature) { - return apierrors.ErrSignatureDoesNotMatch + return handlers.ErrSignatureDoesNotMatch } - return apierrors.ErrNone + return handlers.ErrNone } // DoesSignatureMatch - Verify authorization header with calculated header in accordance with // - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html // -// returns apierrors.ErrNone if signature matches. -func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) apierrors.ErrorCode { +// returns handlers.ErrNone if signature matches. +func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) handlers.ErrorCode { // Copy request. req := *r @@ -196,34 +196,34 @@ func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, regi v4Auth := req.Header.Get(consts.Authorization) // Parse signature version '4' header. - signV4Values, err := parseSignV4(v4Auth, region, stype) - if err != apierrors.ErrNone { - return err + signV4Values, errCode := parseSignV4(v4Auth, region, stype) + if errCode != handlers.ErrNone { + return errCode } // Extract all the signed headers along with its values. 
extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) - if errCode != apierrors.ErrNone { + if errCode != handlers.ErrNone { return errCode } - cred, s3Err := s.accessKeySvc.Get(signV4Values.Credential.accessKey) - if s3Err != apierrors.ErrNone { - return s3Err + cred, err := s.accessKeySvc.Get(signV4Values.Credential.accessKey) + if err != nil { + return handlers.ErrNoSuchUserPolicy } // Extract date, if not present throw error. var date string if date = req.Header.Get(consts.AmzDate); date == "" { if date = r.Header.Get(consts.Date); date == "" { - return apierrors.ErrMissingDateHeader + return handlers.ErrMissingDateHeader } } // Parse date header. t, e := time.Parse(iso8601Format, date) if e != nil { - return apierrors.ErrAuthorizationHeaderMalformed + return handlers.ErrAuthorizationHeaderMalformed } // Query string. @@ -244,11 +244,11 @@ func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, regi // Verify if signature match. if !compareSignatureV4(newSignature, signV4Values.Signature) { - return apierrors.ErrSignatureDoesNotMatch + return handlers.ErrSignatureDoesNotMatch } // Return error none. - return apierrors.ErrNone + return handlers.ErrNone } //// getScope generate a string of a specific date, an AWS region, and a service. 
diff --git a/s3/services/bucket/service.go b/s3/services/bucket/service.go index 945edcd33..9214f839c 100644 --- a/s3/services/bucket/service.go +++ b/s3/services/bucket/service.go @@ -135,7 +135,7 @@ func (s *Service) DeleteBucket(ctx context.Context, bucket string) error { if empty, err := s.emptyBucket(ctx, bucket); err != nil { return err } else if !empty { - return handlers.ErrBucketNotEmpty + return handlers.ErrSetBucketEmptyFailed } return s.providers.GetStateStore().Delete(bucketPrefix + bucket) From 0b4e5965b96a4ebb69e4d7e1c4fbd9565e7c66a4 Mon Sep 17 00:00:00 2001 From: Shawn-Huang-Tron <107823650+Shawn-Huang-Tron@users.noreply.github.com> Date: Wed, 16 Aug 2023 14:19:34 +0800 Subject: [PATCH 036/139] feat: add multibase commands (#342) --- core/commands/commands_test.go | 5 + core/commands/multibase.go | 171 +++++++++++++++++++++++++++++++++ core/commands/root.go | 1 + 3 files changed, 177 insertions(+) create mode 100644 core/commands/multibase.go diff --git a/core/commands/commands_test.go b/core/commands/commands_test.go index 1982d51b9..1366d2963 100644 --- a/core/commands/commands_test.go +++ b/core/commands/commands_test.go @@ -342,6 +342,11 @@ func TestCommands(t *testing.T) { "/bittorrent/scrape", "/bittorrent/metainfo", "/bittorrent/bencode", + "/multibase", + "/multibase/encode", + "/multibase/decode", + "/multibase/transcode", + "/multibase/list", } cmdSet := make(map[string]struct{}) diff --git a/core/commands/multibase.go b/core/commands/multibase.go new file mode 100644 index 000000000..bdba34b0d --- /dev/null +++ b/core/commands/multibase.go @@ -0,0 +1,171 @@ +package commands + +import ( + "bytes" + "fmt" + "io" + "strings" + + cmds "github.com/bittorrent/go-btfs-cmds" + cmdenv "github.com/bittorrent/go-btfs/core/commands/cmdenv" + mbase "github.com/multiformats/go-multibase" +) + +var MbaseCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Encode and decode files or stdin with multibase format", + }, + Subcommands: 
map[string]*cmds.Command{ + "encode": mbaseEncodeCmd, + "decode": mbaseDecodeCmd, + "transcode": mbaseTranscodeCmd, + "list": basesCmd, + }, + Extra: CreateCmdExtras(SetDoesNotUseRepo(true)), +} + +const ( + mbaseOptionName = "b" +) + +var mbaseEncodeCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Encode data into multibase string", + LongDescription: ` +This command expects a file name or data provided via stdin. + +By default it will use URL-safe base64url encoding, +but one can customize used base with -b: + + > echo hello | btfs multibase encode -b base16 > output_file + > cat output_file + f68656c6c6f0a + + > echo hello > input_file + > btfs multibase encode -b base16 input_file + f68656c6c6f0a + `, + }, + Arguments: []cmds.Argument{ + cmds.FileArg("file", true, false, "data to encode").EnableStdin(), + }, + Options: []cmds.Option{ + cmds.StringOption(mbaseOptionName, "multibase encoding").WithDefault("base64url"), + }, + Run: func(req *cmds.Request, resp cmds.ResponseEmitter, env cmds.Environment) error { + if err := req.ParseBodyArgs(); err != nil { + return err + } + encoderName, _ := req.Options[mbaseOptionName].(string) + encoder, err := mbase.EncoderByName(encoderName) + if err != nil { + return err + } + files := req.Files.Entries() + file, err := cmdenv.GetFileArg(files) + if err != nil { + return fmt.Errorf("failed to access file: %w", err) + } + buf, err := io.ReadAll(file) + if err != nil { + return fmt.Errorf("failed to read file contents: %w", err) + } + encoded := encoder.Encode(buf) + reader := strings.NewReader(encoded) + return resp.Emit(reader) + }, +} + +var mbaseDecodeCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Decode multibase string", + LongDescription: ` +This command expects multibase inside of a file or via stdin: + + > echo -n hello | btfs multibase encode -b base16 > file + > cat file + f68656c6c6f + + > btfs multibase decode file + hello + + > cat file | btfs multibase decode + hello +`, + }, + Arguments: 
[]cmds.Argument{ + cmds.FileArg("encoded_file", true, false, "encoded data to decode").EnableStdin(), + }, + Run: func(req *cmds.Request, resp cmds.ResponseEmitter, env cmds.Environment) error { + if err := req.ParseBodyArgs(); err != nil { + return err + } + files := req.Files.Entries() + file, err := cmdenv.GetFileArg(files) + if err != nil { + return fmt.Errorf("failed to access file: %w", err) + } + encodedData, err := io.ReadAll(file) + if err != nil { + return fmt.Errorf("failed to read file contents: %w", err) + } + _, data, err := mbase.Decode(string(encodedData)) + if err != nil { + return fmt.Errorf("failed to decode multibase: %w", err) + } + reader := bytes.NewReader(data) + return resp.Emit(reader) + }, +} + +var mbaseTranscodeCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Transcode multibase string between bases", + LongDescription: ` +This command expects multibase inside of a file or via stdin. + +By default it will use URL-safe base64url encoding, +but one can customize used base with -b: + + > echo -n hello | btfs multibase encode > file + > cat file + uaGVsbG8 + + > btfs multibase transcode file -b base16 > transcoded_file + > cat transcoded_file + f68656c6c6f +`, + }, + Arguments: []cmds.Argument{ + cmds.FileArg("encoded_file", true, false, "encoded data to decode").EnableStdin(), + }, + Options: []cmds.Option{ + cmds.StringOption(mbaseOptionName, "multibase encoding").WithDefault("base64url"), + }, + Run: func(req *cmds.Request, resp cmds.ResponseEmitter, env cmds.Environment) error { + if err := req.ParseBodyArgs(); err != nil { + return err + } + encoderName, _ := req.Options[mbaseOptionName].(string) + encoder, err := mbase.EncoderByName(encoderName) + if err != nil { + return err + } + files := req.Files.Entries() + file, err := cmdenv.GetFileArg(files) + if err != nil { + return fmt.Errorf("failed to access file: %w", err) + } + encodedData, err := io.ReadAll(file) + if err != nil { + return fmt.Errorf("failed to read file 
contents: %w", err) + } + _, data, err := mbase.Decode(string(encodedData)) + if err != nil { + return fmt.Errorf("failed to decode multibase: %w", err) + } + encoded := encoder.Encode(data) + reader := strings.NewReader(encoded) + return resp.Emit(reader) + }, +} diff --git a/core/commands/root.go b/core/commands/root.go index 7a9c9ed21..22508bece 100644 --- a/core/commands/root.go +++ b/core/commands/root.go @@ -179,6 +179,7 @@ var rootSubcommands = map[string]*cmds.Command{ "network": NetworkCmd, "statuscontract": StatusContractCmd, "bittorrent": bittorrentCmd, + "multibase": MbaseCmd, } // RootRO is the readonly version of Root From fbbb5e17eaa5bdde9904c526af2efc01e0b0a89a Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Thu, 17 Aug 2023 13:00:38 +0800 Subject: [PATCH 037/139] feat: add pubBucket api --- s3/handlers/errors.go | 63 +- s3/handlers/handlers.go | 61 +- s3/handlers/request.go | 97 ++- s3/handlers/response.go | 4 + s3/handlers/response_comm.go | 584 +++++++++++++++ s3/handlers/response_error.go | 38 + s3/handlers/s3api_errors.go | 904 ++++++++++++------------ s3/handlers/services_errors.go | 5 +- s3/routers/handlerser.go | 5 +- s3/routers/routers.go | 5 +- s3/s3utils/utils.go | 381 ++++++++++ s3/services/auth/check_handler_auth.go | 32 +- s3/services/auth/signature-v4-parser.go | 82 +-- s3/services/auth/signature-v4-utils.go | 8 +- s3/services/auth/signature-v4.go | 42 +- 15 files changed, 1741 insertions(+), 570 deletions(-) create mode 100644 s3/handlers/response_comm.go create mode 100644 s3/handlers/response_error.go create mode 100644 s3/s3utils/utils.go diff --git a/s3/handlers/errors.go b/s3/handlers/errors.go index e9981add7..4785f1638 100644 --- a/s3/handlers/errors.go +++ b/s3/handlers/errors.go @@ -25,55 +25,72 @@ func ContextCanceled(ctx context.Context) bool { } } +//ErrInvalidBucketName = errors.New("bucket name is invalid") +//ErrBucketNotFound = errors.New("bucket is not found") +//ErrBucketAccessDenied = 
errors.New("bucket access denied. ") +//ErrSetBucketEmptyFailed = errors.New("set bucket empty failed. ") +//ErrCreateBucket = errors.New("create bucket failed") +//) + func ToApiError(ctx context.Context, err error) ErrorCode { if ContextCanceled(ctx) { if ctx.Err() == context.Canceled { - return ErrClientDisconnected + return ErrCodeClientDisconnected } } - errCode := ErrInternalError - switch err.(type) { + errCode := ErrCodeInternalError + switch err { + case ErrInvalidArgument: + errCode = ErrCodeInvalidRequestBody //实际是request请求信息, header or query uri 信息。 + case ErrInvalidBucketName: + errCode = ErrCodeInvalidBucketName + case ErrBucketNotFound: + errCode = ErrCodeNoSuchBucket + case ErrBucketAccessDenied: + errCode = ErrCodeAccessDenied + case ErrSetBucketEmptyFailed: + case ErrCreateBucket: + errCode = ErrCodeInternalError + case lock.OperationTimedOut: - errCode = ErrOperationTimedOut + errCode = ErrCodeOperationTimedOut case hash.SHA256Mismatch: - errCode = ErrContentSHA256Mismatch + errCode = ErrCodeContentSHA256Mismatch case hash.BadDigest: - errCode = ErrBadDigest - case store.BucketNotFound: - errCode = ErrNoSuchBucket + errCode = ErrCodeBadDigest case store.BucketPolicyNotFound: - errCode = ErrNoSuchBucketPolicy + errCode = ErrCodeNoSuchBucketPolicy case store.BucketTaggingNotFound: errCode = ErrBucketTaggingNotFound case s3utils.BucketNameInvalid: - errCode = ErrInvalidBucketName + errCode = ErrCodeInvalidBucketName case s3utils.ObjectNameInvalid: - errCode = ErrInvalidObjectName + errCode = ErrCodeInvalidObjectName case s3utils.ObjectNameTooLong: - errCode = ErrKeyTooLongError + errCode = ErrCodeKeyTooLongError case s3utils.ObjectNamePrefixAsSlash: - errCode = ErrInvalidObjectNamePrefixSlash + errCode = ErrCodeInvalidObjectNamePrefixSlash case s3utils.InvalidUploadIDKeyCombination: - errCode = ErrNotImplemented + errCode = ErrCodeNotImplemented case s3utils.InvalidMarkerPrefixCombination: - errCode = ErrNotImplemented + errCode = 
ErrCodeNotImplemented case s3utils.MalformedUploadID: - errCode = ErrNoSuchUpload + errCode = ErrCodeNoSuchUpload case s3utils.InvalidUploadID: - errCode = ErrNoSuchUpload + errCode = ErrCodeNoSuchUpload case s3utils.InvalidPart: - errCode = ErrInvalidPart + errCode = ErrCodeInvalidPart case s3utils.PartTooSmall: - errCode = ErrEntityTooSmall + errCode = ErrCodeEntityTooSmall case s3utils.PartTooBig: - errCode = ErrEntityTooLarge + errCode = ErrCodeEntityTooLarge case url.EscapeError: - errCode = ErrInvalidObjectName + errCode = ErrCodeInvalidObjectName default: if xerrors.Is(err, store.ErrObjectNotFound) { - errCode = ErrNoSuchKey + errCode = ErrCodeNoSuchKey } else if xerrors.Is(err, store.ErrBucketNotEmpty) { - errCode = ErrBucketNotEmpty + errCode = ErrCodeBucketNotEmpty } } return errCode diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 143295d72..6a4b0eb77 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -2,9 +2,13 @@ package handlers import ( + "net/http" + + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/policy" "github.com/bittorrent/go-btfs/s3/routers" + "github.com/bittorrent/go-btfs/s3/s3utils" "github.com/rs/cors" - "net/http" ) var _ routers.Handlerser = (*Handlers)(nil) @@ -52,19 +56,60 @@ func (handlers *Handlers) Sign(handler http.Handler) http.Handler { return nil } -func (handlers *Handlers) parsePutObjectReq(r *http.Request) (arg *PutObjectReq, err error) { - return -} +//func (handlers *Handlers) parsePutObjectReq(r *http.Request) (arg *PutObjectReq, err error) { +// return +//} +// +//func (handlers *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { +// req := &PutObjectRequest{} +// err := req.Bind(r) +// if err != nil { +// return +// } +// //.... 
+// +// WritePutObjectResponse(w, object) +// +// return +//} -func (handlers *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { - req := &PutObjectRequest{} +func (handlers *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := &PutBucketRequest{} err := req.Bind(r) if err != nil { + WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) return } - //.... - WritePutObjectResponse(w, object) + accessKeyRecord, errCode := handlers.authSvc.VerifySignature(ctx, r) + if errCode != ErrCodeNone { + WriteErrorResponse(w, r, errCode) + return + } + + if err := s3utils.CheckValidBucketNameStrict(req.Bucket); err != nil { + WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidBucketName)) + return + } + + if !checkPermissionType(req.ACL) { + req.ACL = policy.Private + } + + err = handlers.bucketSvc.CreateBucket(ctx, req.Bucket, req.Region, accessKeyRecord.Key, req.ACL) + if err != nil { + log.Errorf("PutBucketHandler create bucket error:%v", err) + WriteErrorResponse(w, r, ToApiError(ctx, ErrCreateBucket)) + return + } + + // Make sure to add Location information here only for bucket + if cp := pathClean(r.URL.Path); cp != "" { + w.Header().Set(consts.Location, cp) // Clean any trailing slashes. 
+ } + + WriteSuccessResponseHeadersOnly(w, r) return } diff --git a/s3/handlers/request.go b/s3/handlers/request.go index 383f09633..1e43cad2a 100644 --- a/s3/handlers/request.go +++ b/s3/handlers/request.go @@ -1,20 +1,111 @@ package handlers import ( + "encoding/xml" + "github.com/bittorrent/go-btfs/s3/apierrors" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/policy" + "github.com/bittorrent/go-btfs/s3/utils" + "github.com/gorilla/mux" "io" "net/http" + "path" ) type RequestBinder interface { Bind(r *http.Request) (err error) } -type PutObjectRequest struct { +//type PutObjectRequest struct { +// Bucket string +// Object string +// Body io.Reader +//} +// +//func (req *PutObjectRequest) Bind(r *http.Request) (err error) { +// return +//} + +func checkPermissionType(s string) bool { + switch s { + case policy.PublicRead: + return true + case policy.PublicReadWrite: + return true + case policy.Private: + return true + } + return false +} + +type PutBucketRequest struct { Bucket string - Object string + ACL string + Region string Body io.Reader } -func (req *PutObjectRequest) Bind(r *http.Request) (err error) { +func (req *PutBucketRequest) Bind(r *http.Request) (err error) { + vars := mux.Vars(r) + bucket := vars["bucket"] + + region, _ := parseLocationConstraint(r) + + acl := r.Header.Get(consts.AmzACL) + + //set request + req.Bucket = bucket + req.ACL = acl + req.Region = region return } + +// Parses location constraint from the incoming reader. +func parseLocationConstraint(r *http.Request) (location string, s3Error apierrors.ErrorCode) { + // If the request has no body with content-length set to 0, + // we do not have to validate location constraint. Bucket will + // be created at default region. + locationConstraint := createBucketLocationConfiguration{} + err := utils.XmlDecoder(r.Body, &locationConstraint, r.ContentLength) + if err != nil && r.ContentLength != 0 { + // Treat all other failures as XML parsing errors. 
+ return "", apierrors.ErrMalformedXML + } // else for both err as nil or io.EOF + location = locationConstraint.Location + if location == "" { + location = consts.DefaultRegion + } + return location, apierrors.ErrNone +} + +// createBucketConfiguration container for bucket configuration request from client. +// Used for parsing the location from the request body for Makebucket. +type createBucketLocationConfiguration struct { + XMLName xml.Name `xml:"CreateBucketConfiguration" json:"-"` + Location string `xml:"LocationConstraint"` +} + +// pathClean is like path.Clean but does not return "." for +// empty inputs, instead returns "empty" as is. +func pathClean(p string) string { + cp := path.Clean(p) + if cp == "." { + return "" + } + return cp +} + +func unmarshalXML(reader io.Reader, isObject bool) (*store.Tags, error) { + tagging := &store.Tags{ + TagSet: &store.TagSet{ + TagMap: make(map[string]string), + IsObject: isObject, + }, + } + + if err := xml.NewDecoder(reader).Decode(tagging); err != nil { + return nil, err + } + + return tagging, nil +} diff --git a/s3/handlers/response.go b/s3/handlers/response.go index 610876f48..491e0b4af 100644 --- a/s3/handlers/response.go +++ b/s3/handlers/response.go @@ -2,6 +2,10 @@ package handlers import "net/http" +//func WritePutObjectResponse(w http.ResponseWriter, objectMeta *ObjectMetadata) { +// return +//} + func WritePutObjectResponse(w http.ResponseWriter, objectMeta *ObjectMetadata) { return } diff --git a/s3/handlers/response_comm.go b/s3/handlers/response_comm.go new file mode 100644 index 000000000..31d1d1cff --- /dev/null +++ b/s3/handlers/response_comm.go @@ -0,0 +1,584 @@ +package handlers + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/consts" + logging "github.com/ipfs/go-log/v2" + "net/http" + "net/url" + "strconv" + "time" +) + +var log = logging.Logger("resp") + +type mimeType string + +const ( + mimeNone 
 mimeType = "" + mimeJSON mimeType = "application/json" + //mimeXML application/xml UTF-8 + mimeXML mimeType = "application/xml" +) + +// APIErrorResponse - error response format +type APIErrorResponse struct { + XMLName xml.Name `xml:"Error" json:"-"` + Code string + Message string + Resource string + RequestID string `xml:"RequestId" json:"RequestId"` + HostID string `xml:"HostId" json:"HostId"` +} + +// WriteSuccessResponseXML Write Success Response XML +func WriteSuccessResponseXML(w http.ResponseWriter, r *http.Request, response interface{}) { + WriteXMLResponse(w, r, http.StatusOK, response) +} + +// WriteXMLResponse Write XMLResponse +func WriteXMLResponse(w http.ResponseWriter, r *http.Request, statusCode int, response interface{}) { + writeResponse(w, r, statusCode, encodeXMLResponse(response), mimeXML) +} + +func writeResponse(w http.ResponseWriter, r *http.Request, statusCode int, response []byte, mType mimeType) { + setCommonHeaders(w, r) + if response != nil { + w.Header().Set(consts.ContentLength, strconv.Itoa(len(response))) + } + if mType != mimeNone { + w.Header().Set(consts.ContentType, string(mType)) + } + w.WriteHeader(statusCode) + if response != nil { + log.Debugf("status %d %s: %s", statusCode, mType, string(response)) + _, err := w.Write(response) + if err != nil { + log.Errorf("write err: %v", err) + } + w.(http.Flusher).Flush() + } +} + +func setCommonHeaders(w http.ResponseWriter, r *http.Request) { + w.Header().Set(consts.ServerInfo, "FDS") + w.Header().Set(consts.AmzRequestID, fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set(consts.AcceptRanges, "bytes") + if r.Header.Get("Origin") != "" { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Credentials", "true") + } +} + +// encodeXMLResponse Encodes the response headers into XML format. 
+func encodeXMLResponse(response interface{}) []byte { + var bytesBuffer bytes.Buffer + bytesBuffer.WriteString(xml.Header) + e := xml.NewEncoder(&bytesBuffer) + e.Encode(response) + return bytesBuffer.Bytes() +} + +// WriteErrorResponseJSON - writes error response in JSON format; +// useful for admin APIs. +func WriteErrorResponseJSON(w http.ResponseWriter, err APIError, reqURL *url.URL, host string) { + // Generate error response. + errorResponse := getAPIErrorResponse(err, reqURL.Path, w.Header().Get(consts.AmzRequestID), host) + encodedErrorResponse := encodeResponseJSON(errorResponse) + writeResponseSimple(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON) +} + +// getErrorResponse gets in standard error and resource value and +// provides a encodable populated response values +func getAPIErrorResponse(err APIError, resource, requestID, hostID string) APIErrorResponse { + return APIErrorResponse{ + Code: err.Code, + Message: err.Description, + Resource: resource, + RequestID: requestID, + HostID: hostID, + } +} + +// Encodes the response headers into JSON format. +func encodeResponseJSON(response interface{}) []byte { + var bytesBuffer bytes.Buffer + e := json.NewEncoder(&bytesBuffer) + e.Encode(response) + return bytesBuffer.Bytes() +} + +// WriteSuccessResponseJSON writes success headers and response if any, +// with content-type set to `application/json`. 
+func WriteSuccessResponseJSON(w http.ResponseWriter, response []byte) { + writeResponseSimple(w, http.StatusOK, response, mimeJSON) +} + +func writeResponseSimple(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) { + if mType != mimeNone { + w.Header().Set(consts.ContentType, string(mType)) + } + w.Header().Set(consts.ContentLength, strconv.Itoa(len(response))) + w.WriteHeader(statusCode) + if response != nil { + w.Write(response) + } +} + +// WriteSuccessNoContent writes success headers with http status 204 +func WriteSuccessNoContent(w http.ResponseWriter) { + writeResponseSimple(w, http.StatusNoContent, nil, mimeNone) +} + +// ListAllMyBucketsResult List All Buckets Result +type ListAllMyBucketsResult struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult"` + Owner *s3.Owner + Buckets []*s3.Bucket `xml:"Buckets>Bucket"` +} + +// WriteSuccessResponseHeadersOnly write SuccessResponseHeadersOnly +func WriteSuccessResponseHeadersOnly(w http.ResponseWriter, r *http.Request) { + writeResponse(w, r, http.StatusOK, nil, mimeNone) +} + +type CopyObjectResponse struct { + CopyObjectResult CopyObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult"` +} + +type CopyObjectResult struct { + LastModified string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` + ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"` +} + +// LocationResponse - format for location response. +type LocationResponse struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint" json:"-"` + Location string `xml:",chardata"` +} + +// ListObjectsResponse - format for list objects response. 
+type ListObjectsResponse struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"` + + Name string + Prefix string + Marker string + + // When response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as marker in the subsequent + // request to get next set of objects. Server lists objects in alphabetical + // order Note: This element is returned only if you have delimiter request parameter + // specified. If response does not include the NextMaker and it is truncated, + // you can use the value of the last Key in the response as the marker in the + // subsequent request to get the next set of object keys. + NextMarker string `xml:"NextMarker,omitempty"` + + MaxKeys int + Delimiter string + // A flag that indicates whether or not ListObjects returned all of the results + // that satisfied the search criteria. + IsTruncated bool + + Contents []Object + CommonPrefixes []CommonPrefix + + // Encoding type used to encode object keys in the response. + EncodingType string `xml:"EncodingType,omitempty"` +} + +// ListObjectsV2Response - format for list objects response. +type ListObjectsV2Response struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"` + + Name string + Prefix string + StartAfter string `xml:"StartAfter,omitempty"` + // When response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as marker in the subsequent + // request to get next set of objects. Server lists objects in alphabetical + // order Note: This element is returned only if you have delimiter request parameter + // specified. If response does not include the NextMaker and it is truncated, + // you can use the value of the last Key in the response as the marker in the + // subsequent request to get the next set of object keys. 
+ ContinuationToken string `xml:"ContinuationToken,omitempty"` + NextContinuationToken string `xml:"NextContinuationToken,omitempty"` + + KeyCount int + MaxKeys int + Delimiter string + // A flag that indicates whether or not ListObjects returned all of the results + // that satisfied the search criteria. + IsTruncated bool + + Contents []Object + CommonPrefixes []CommonPrefix + + // Encoding type used to encode object keys in the response. + EncodingType string `xml:"EncodingType,omitempty"` +} + +// Object container for object metadata +type Object struct { + Key string + LastModified string // time string of format "2006-01-02T15:04:05.000Z" + ETag string + Size int64 + + // Owner of the object. + Owner s3.Owner + + // The class of storage used to store the object. + StorageClass string + + // UserMetadata user-defined metadata + UserMetadata StringMap `xml:"UserMetadata,omitempty"` +} + +// StringMap is a map[string]string +type StringMap map[string]string + +// MarshalXML - StringMap marshals into XML. +func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + tokens := []xml.Token{start} + + for key, value := range s { + t := xml.StartElement{} + t.Name = xml.Name{ + Space: "", + Local: key, + } + tokens = append(tokens, t, xml.CharData(value), xml.EndElement{Name: t.Name}) + } + + tokens = append(tokens, xml.EndElement{ + Name: start.Name, + }) + + for _, t := range tokens { + if err := e.EncodeToken(t); err != nil { + return err + } + } + + // flush to ensure tokens are written + return e.Flush() +} + +// CommonPrefix container for prefix response in ListObjectsResponse +type CommonPrefix struct { + Prefix string +} + +// +//// DeleteError structure. +//type DeleteError struct { +// Code string +// Message string +// Key string +// VersionID string `xml:"VersionId"` +//} +// +//// DeleteObjectsResponse container for multiple object deletes. 
+//type DeleteObjectsResponse struct { +// XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"` +// +// // Collection of all deleted objects +// DeletedObjects []datatypes.DeletedObject `xml:"Deleted,omitempty"` +// +// // Collection of errors deleting certain objects. +// Errors []DeleteError `xml:"Error,omitempty"` +//} +// +//// GenerateListObjectsV2Response Generates an ListObjectsV2 response for the said bucket with other enumerated options. +//func GenerateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, isTruncated bool, maxKeys int, objects []store.ObjectInfo, prefixes []string) ListObjectsV2Response { +// contents := make([]Object, 0, len(objects)) +// id := consts.DefaultOwnerID +// name := consts.DisplayName +// owner := s3.Owner{ +// ID: &id, +// DisplayName: &name, +// } +// data := ListObjectsV2Response{} +// +// for _, object := range objects { +// content := Object{} +// if object.Name == "" { +// continue +// } +// content.Key = utils.S3EncodeName(object.Name, encodingType) +// content.LastModified = object.ModTime.UTC().Format(consts.Iso8601TimeFormat) +// if object.ETag != "" { +// content.ETag = "\"" + object.ETag + "\"" +// } +// content.Size = object.Size +// content.Owner = owner +// contents = append(contents, content) +// } +// data.Name = bucket +// data.Contents = contents +// +// data.EncodingType = encodingType +// data.StartAfter = utils.S3EncodeName(startAfter, encodingType) +// data.Delimiter = utils.S3EncodeName(delimiter, encodingType) +// data.Prefix = utils.S3EncodeName(prefix, encodingType) +// data.MaxKeys = maxKeys +// data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token)) +// data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken)) +// data.IsTruncated = isTruncated +// +// commonPrefixes := make([]CommonPrefix, 0, len(prefixes)) +// for _, prefix := range prefixes { +// prefixItem := CommonPrefix{} 
+// prefixItem.Prefix = utils.S3EncodeName(prefix, encodingType) +// commonPrefixes = append(commonPrefixes, prefixItem) +// } +// data.CommonPrefixes = commonPrefixes +// data.KeyCount = len(data.Contents) + len(data.CommonPrefixes) +// return data +//} +// +//// generates an ListObjectsV1 response for the said bucket with other enumerated options. +//func GenerateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp store.ListObjectsInfo) ListObjectsResponse { +// contents := make([]Object, 0, len(resp.Objects)) +// id := consts.DefaultOwnerID +// name := consts.DisplayName +// owner := s3.Owner{ +// ID: &id, +// DisplayName: &name, +// } +// data := ListObjectsResponse{} +// +// for _, object := range resp.Objects { +// content := Object{} +// if object.Name == "" { +// continue +// } +// content.Key = utils.S3EncodeName(object.Name, encodingType) +// content.LastModified = object.ModTime.UTC().Format(consts.Iso8601TimeFormat) +// if object.ETag != "" { +// content.ETag = "\"" + object.ETag + "\"" +// } +// content.Size = object.Size +// content.StorageClass = "" +// content.Owner = owner +// contents = append(contents, content) +// } +// data.Name = bucket +// data.Contents = contents +// +// data.EncodingType = encodingType +// data.Prefix = utils.S3EncodeName(prefix, encodingType) +// data.Marker = utils.S3EncodeName(marker, encodingType) +// data.Delimiter = utils.S3EncodeName(delimiter, encodingType) +// data.MaxKeys = maxKeys +// data.NextMarker = utils.S3EncodeName(resp.NextMarker, encodingType) +// data.IsTruncated = resp.IsTruncated +// +// prefixes := make([]CommonPrefix, 0, len(resp.Prefixes)) +// for _, prefix := range resp.Prefixes { +// prefixItem := CommonPrefix{} +// prefixItem.Prefix = utils.S3EncodeName(prefix, encodingType) +// prefixes = append(prefixes, prefixItem) +// } +// data.CommonPrefixes = prefixes +// return data +//} +// +//// generate multi objects delete response. 
+//func GenerateMultiDeleteResponse(quiet bool, deletedObjects []datatypes.DeletedObject, errs []DeleteError) DeleteObjectsResponse { +// deleteResp := DeleteObjectsResponse{} +// if !quiet { +// deleteResp.DeletedObjects = deletedObjects +// } +// deleteResp.Errors = errs +// return deleteResp +//} +// +//// InitiateMultipartUploadResponse container for InitiateMultiPartUpload response, provides uploadID to start MultiPart upload +//type InitiateMultipartUploadResponse struct { +// XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult" json:"-"` +// +// Bucket string +// Key string +// UploadID string `xml:"UploadId"` +//} +// +//// CompleteMultipartUploadResponse container for completed multipart upload response +//type CompleteMultipartUploadResponse struct { +// XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult" json:"-"` +// +// Location string +// Bucket string +// Key string +// ETag string +// +// ChecksumCRC32 string +// ChecksumCRC32C string +// ChecksumSHA1 string +// ChecksumSHA256 string +//} +// +//// Part container for part metadata. +//type Part struct { +// PartNumber int +// LastModified string +// ETag string +// Size int64 +// +// // Checksum values +// ChecksumCRC32 string +// ChecksumCRC32C string +// ChecksumSHA1 string +// ChecksumSHA256 string +//} +// +//// Initiator inherit from Owner struct, fields are same +//type Initiator s3.Owner +// +//// ListPartsResponse - format for list parts response. +//type ListPartsResponse struct { +// XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult" json:"-"` +// +// Bucket string +// Key string +// UploadID string `xml:"UploadId"` +// +// Initiator Initiator +// Owner s3.Owner +// +// // The class of storage used to store the object. 
+// StorageClass string +// +// PartNumberMarker int +// NextPartNumberMarker int +// MaxParts int +// IsTruncated bool +// +// ChecksumAlgorithm string +// // List of parts. +// Parts []Part `xml:"Part"` +//} +// +//// ListMultipartUploadsResponse - format for list multipart uploads response. +//type ListMultipartUploadsResponse struct { +// XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult" json:"-"` +// +// Bucket string +// KeyMarker string +// UploadIDMarker string `xml:"UploadIdMarker"` +// NextKeyMarker string +// NextUploadIDMarker string `xml:"NextUploadIdMarker"` +// Delimiter string +// Prefix string +// EncodingType string `xml:"EncodingType,omitempty"` +// MaxUploads int +// IsTruncated bool +// +// // List of pending uploads. +// Uploads []Upload `xml:"Upload"` +// +// // Delimed common prefixes. +// CommonPrefixes []CommonPrefix +//} +// +//// Upload container for in progress multipart upload +//type Upload struct { +// Key string +// UploadID string `xml:"UploadId"` +// Initiator Initiator +// Owner s3.Owner +// StorageClass string +// Initiated string +//} +// +//// generates InitiateMultipartUploadResponse for given bucket, key and uploadID. +//func GenerateInitiateMultipartUploadResponse(bucket, key, uploadID string) InitiateMultipartUploadResponse { +// return InitiateMultipartUploadResponse{ +// Bucket: bucket, +// Key: key, +// UploadID: uploadID, +// } +//} +// +//// generates CompleteMultipartUploadResponse for given bucket, key, location and ETag. +//func GenerateCompleteMultpartUploadResponse(bucket, key, location string, oi store.ObjectInfo) CompleteMultipartUploadResponse { +// c := CompleteMultipartUploadResponse{ +// Location: location, +// Bucket: bucket, +// Key: key, +// // AWS S3 quotes the ETag in XML, make sure we are compatible here. +// ETag: "\"" + oi.ETag + "\"", +// } +// return c +//} +// +//// generates ListPartsResponse from ListPartsInfo. 
+//func GenerateListPartsResponse(partsInfo store.ListPartsInfo, encodingType string) ListPartsResponse { +// resp := ListPartsResponse{} +// resp.Bucket = partsInfo.Bucket +// resp.Key = utils.S3EncodeName(partsInfo.Object, encodingType) +// resp.UploadID = partsInfo.UploadID +// resp.StorageClass = consts.DefaultStorageClass +// +// // Dumb values not meaningful +// resp.Initiator = Initiator{ +// ID: aws.String(consts.DefaultOwnerID), +// DisplayName: aws.String(consts.DisplayName), +// } +// resp.Owner = s3.Owner{ +// ID: aws.String(consts.DefaultOwnerID), +// DisplayName: aws.String(consts.DisplayName), +// } +// +// resp.MaxParts = partsInfo.MaxParts +// resp.PartNumberMarker = partsInfo.PartNumberMarker +// resp.IsTruncated = partsInfo.IsTruncated +// resp.NextPartNumberMarker = partsInfo.NextPartNumberMarker +// resp.ChecksumAlgorithm = partsInfo.ChecksumAlgorithm +// +// resp.Parts = make([]Part, len(partsInfo.Parts)) +// for index, part := range partsInfo.Parts { +// newPart := Part{} +// newPart.PartNumber = part.Number +// newPart.ETag = "\"" + part.ETag + "\"" +// newPart.Size = part.Size +// newPart.LastModified = part.ModTime.UTC().Format(consts.Iso8601TimeFormat) +// resp.Parts[index] = newPart +// } +// return resp +//} +// +//// generates ListMultipartUploadsResponse for given bucket and ListMultipartsInfo. 
+//func GenerateListMultipartUploadsResponse(bucket string, multipartsInfo store.ListMultipartsInfo, encodingType string) ListMultipartUploadsResponse { +// resp := ListMultipartUploadsResponse{} +// resp.Bucket = bucket +// resp.Delimiter = utils.S3EncodeName(multipartsInfo.Delimiter, encodingType) +// resp.IsTruncated = multipartsInfo.IsTruncated +// resp.EncodingType = encodingType +// resp.Prefix = utils.S3EncodeName(multipartsInfo.Prefix, encodingType) +// resp.KeyMarker = utils.S3EncodeName(multipartsInfo.KeyMarker, encodingType) +// resp.NextKeyMarker = utils.S3EncodeName(multipartsInfo.NextKeyMarker, encodingType) +// resp.MaxUploads = multipartsInfo.MaxUploads +// resp.NextUploadIDMarker = multipartsInfo.NextUploadIDMarker +// resp.UploadIDMarker = multipartsInfo.UploadIDMarker +// resp.CommonPrefixes = make([]CommonPrefix, len(multipartsInfo.CommonPrefixes)) +// for index, commonPrefix := range multipartsInfo.CommonPrefixes { +// resp.CommonPrefixes[index] = CommonPrefix{ +// Prefix: utils.S3EncodeName(commonPrefix, encodingType), +// } +// } +// resp.Uploads = make([]Upload, len(multipartsInfo.Uploads)) +// for index, upload := range multipartsInfo.Uploads { +// newUpload := Upload{} +// newUpload.UploadID = upload.UploadID +// newUpload.Key = utils.S3EncodeName(upload.Object, encodingType) +// newUpload.Initiated = upload.Initiated.UTC().Format(consts.Iso8601TimeFormat) +// resp.Uploads[index] = newUpload +// } +// return resp +//} diff --git a/s3/handlers/response_error.go b/s3/handlers/response_error.go new file mode 100644 index 000000000..ac3cb001c --- /dev/null +++ b/s3/handlers/response_error.go @@ -0,0 +1,38 @@ +package handlers + +import ( + "fmt" + "github.com/gorilla/mux" + "net/http" + "time" +) + +func WriteErrorResponseHeadersOnly(w http.ResponseWriter, r *http.Request, err ErrorCode) { + writeResponse(w, r, GetAPIError(err).HTTPStatusCode, nil, mimeNone) +} + +// WriteErrorResponse write ErrorResponse +func WriteErrorResponse(w 
http.ResponseWriter, r *http.Request, errorCode ErrorCode) { + vars := mux.Vars(r) + bucket := vars["bucket"] + object := vars["object"] + + apiError := GetAPIError(errorCode) + errorResponse := getRESTErrorResponse(apiError, r.URL.Path, bucket, object) + WriteXMLResponse(w, r, apiError.HTTPStatusCode, errorResponse) +} + +func getRESTErrorResponse(err APIError, resource string, bucket, object string) RESTErrorResponse { + return RESTErrorResponse{ + Code: err.Code, + BucketName: bucket, + Key: object, + Message: err.Description, + Resource: resource, + RequestID: fmt.Sprintf("%d", time.Now().UnixNano()), + } +} + +// NotFoundHandler If none of the http routes match respond with MethodNotAllowed +func NotFoundHandler(w http.ResponseWriter, r *http.Request) { +} diff --git a/s3/handlers/s3api_errors.go b/s3/handlers/s3api_errors.go index d990e85d7..8ca8ba72b 100644 --- a/s3/handlers/s3api_errors.go +++ b/s3/handlers/s3api_errors.go @@ -41,1208 +41,1209 @@ type ErrorCode int // Error codes, non exhaustive list - http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html const ( - ErrNone ErrorCode = iota - ErrAccessDenied - ErrBadDigest - ErrEntityTooSmall - ErrEntityTooLarge - ErrIncompleteBody - ErrInternalError - ErrInvalidAccessKeyID - ErrAccessKeyDisabled - ErrInvalidBucketName - ErrInvalidDigest - ErrInvalidRange - ErrInvalidRangePartNumber - ErrInvalidCopyPartRange - ErrInvalidCopyPartRangeSource - ErrInvalidMaxKeys - ErrInvalidEncodingMethod - ErrInvalidMaxUploads - ErrInvalidMaxParts - ErrInvalidPartNumberMarker - ErrInvalidRequestBody - ErrInvalidCopySource - ErrInvalidMetadataDirective - ErrInvalidCopyDest - ErrInvalidPolicyDocument - ErrInvalidObjectState - ErrMalformedXML - ErrMissingContentLength - ErrMissingContentMD5 - ErrMissingRequestBodyError - ErrMissingSecurityHeader - ErrNoSuchUser - ErrUserAlreadyExists - ErrNoSuchUserPolicy - ErrUserPolicyAlreadyExists - ErrNoSuchBucket - ErrNoSuchBucketPolicy - ErrNoSuchLifecycleConfiguration - 
ErrNoSuchCORSConfiguration - ErrNoSuchWebsiteConfiguration - ErrReplicationConfigurationNotFoundError - ErrReplicationNeedsVersioningError - ErrReplicationBucketNeedsVersioningError - ErrObjectRestoreAlreadyInProgress - ErrNoSuchKey - ErrNoSuchUpload - ErrInvalidVersionID - ErrNoSuchVersion - ErrNotImplemented - ErrPreconditionFailed - ErrRequestTimeTooSkewed - ErrSignatureDoesNotMatch - ErrMethodNotAllowed - ErrInvalidPart - ErrInvalidPartOrder - ErrAuthorizationHeaderMalformed - ErrMalformedDate - ErrMalformedPOSTRequest - ErrPOSTFileRequired - ErrSignatureVersionNotSupported - ErrBucketNotEmpty - ErrAllAccessDisabled - ErrMalformedPolicy - ErrMissingFields - ErrMissingCredTag - ErrCredMalformed - ErrInvalidRegion + ErrCodeNone ErrorCode = iota + ErrCodeAccessDenied + ErrCodeBadDigest + ErrCodeEntityTooSmall + ErrCodeEntityTooLarge + ErrCodeIncompleteBody + ErrCodeInternalError + ErrCodeInvalidAccessKeyID + ErrCodeAccessKeyDisabled + ErrCodeInvalidBucketName + ErrCodeInvalidDigest + ErrCodeInvalidRange + ErrCodeInvalidRangePartNumber + ErrCodeInvalidCopyPartRange + ErrCodeInvalidCopyPartRangeSource + ErrCodeInvalidMaxKeys + ErrCodeInvalidEncodingMethod + ErrCodeInvalidMaxUploads + ErrCodeInvalidMaxParts + ErrCodeInvalidPartNumberMarker + ErrCodeInvalidRequestBody + ErrCodeInvalidCopySource + ErrCodeInvalidMetadataDirective + ErrCodeCodeInvalidCopyDest + ErrCodeInvalidPolicyDocument + ErrCodeInvalidObjectState + ErrCodeMalformedXML + ErrCodeMissingContentLength + ErrCodeMissingContentMD5 + ErrCodeMissingRequestBodyError + ErrCodeMissingSecurityHeader + ErrCodeNoSuchUser + ErrCodeUserAlreadyExists + ErrCodeNoSuchUserPolicy + ErrCodeUserPolicyAlreadyExists + ErrCodeNoSuchBucket + ErrCodeNoSuchBucketPolicy + ErrCodeNoSuchLifecycleConfiguration + ErrCodeNoSuchCORSConfiguration + ErrCodeNoSuchWebsiteConfiguration + ErrCodeReplicationConfigurationNotFoundError + ErrCodeReplicationNeedsVersioningError + ErrCodeReplicationBucketNeedsVersioningError + 
ErrCodeObjectRestoreAlreadyInProgress + ErrCodeNoSuchKey + ErrCodeNoSuchUpload + ErrCodeInvalidVersionID + ErrCodeNoSuchVersion + ErrCodeNotImplemented + ErrCodePreconditionFailed + ErrCodeRequestTimeTooSkewed + ErrCodeSignatureDoesNotMatch + ErrCodeMethodNotAllowed + ErrCodeInvalidPart + ErrCodeInvalidPartOrder + ErrCodeAuthorizationHeaderMalformed + ErrCodeMalformedDate + ErrCodeMalformedPOSTRequest + ErrCodePOSTFileRequired + ErrCodeSignatureVersionNotSupported + ErrCodeBucketNotEmpty + ErrCodeAllAccessDisabled + ErrCodeMalformedPolicy + ErrCodeMissingFields + ErrCodeMissingCredTag + ErrCodeCredMalformed + ErrCodeInvalidRegion - ErrMissingSignTag - ErrMissingSignHeadersTag + ErrCodeMissingSignTag + ErrCodeMissingSignHeadersTag - ErrAuthHeaderEmpty - ErrExpiredPresignRequest - ErrRequestNotReadyYet - ErrUnsignedHeaders - ErrMissingDateHeader + ErrCodeAuthHeaderEmpty + ErrCodeExpiredPresignRequest + ErrCodeRequestNotReadyYet + ErrCodeUnsignedHeaders + ErrCodeMissingDateHeader - ErrBucketAlreadyOwnedByYou - ErrInvalidDuration - ErrBucketAlreadyExists - ErrMetadataTooLarge - ErrUnsupportedMetadata + ErrCodeBucketAlreadyOwnedByYou + ErrCodeInvalidDuration + ErrCodeBucketAlreadyExists + ErrCodeMetadataTooLarge + ErrCodeUnsupportedMetadata - ErrSlowDown - ErrBadRequest - ErrKeyTooLongError - ErrInvalidBucketObjectLockConfiguration - ErrObjectLockConfigurationNotAllowed - ErrNoSuchObjectLockConfiguration - ErrObjectLocked - ErrInvalidRetentionDate - ErrPastObjectLockRetainDate - ErrUnknownWORMModeDirective - ErrBucketTaggingNotFound - ErrObjectLockInvalidHeaders - ErrInvalidTagDirective + ErrCodeSlowDown + ErrCodeBadRequest + ErrCodeKeyTooLongError + ErrCodeInvalidBucketObjectLockConfiguration + ErrCodeObjectLockConfigurationNotAllowed + ErrCodeNoSuchObjectLockConfiguration + ErrCodeObjectLocked + ErrCodeInvalidRetentionDate + ErrCodePastObjectLockRetainDate + ErrCodeUnknownWORMModeDirective + ErrCodeBucketTaggingNotFound + ErrCodeObjectLockInvalidHeaders + 
ErrCodeInvalidTagDirective // Add new error codes here. // SSE-S3 related API errors - ErrInvalidEncryptionMethod - ErrInvalidQueryParams - ErrNoAccessKey - ErrInvalidToken + ErrCodeInvalidEncryptionMethod + ErrCodeInvalidQueryParams + ErrCodeNoAccessKey + ErrCodeInvalidToken // Bucket notification related errors. - ErrEventNotification - ErrARNNotification - ErrRegionNotification - ErrOverlappingFilterNotification - ErrFilterNameInvalid - ErrFilterNamePrefix - ErrFilterNameSuffix - ErrFilterValueInvalid - ErrOverlappingConfigs + ErrCodeEventNotification + ErrCodeARNNotification + ErrCodeRegionNotification + ErrCodeOverlappingFilterNotification + ErrCodeFilterNameInvalid + ErrCodeFilterNamePrefix + ErrCodeFilterNameSuffix + ErrCodeFilterValueInvalid + ErrCodeOverlappingConfigs // S3 extended errors. - ErrContentSHA256Mismatch + ErrCodeContentSHA256Mismatch // Add new extended error codes here. - ErrInvalidObjectName - ErrInvalidObjectNamePrefixSlash - ErrClientDisconnected - ErrOperationTimedOut - ErrOperationMaxedOut - ErrInvalidRequest - ErrIncorrectContinuationToken - ErrInvalidFormatAccessKey + ErrCodeInvalidObjectName + ErrCodeInvalidObjectNamePrefixSlash + ErrCodeClientDisconnected + ErrCodeOperationTimedOut + ErrCodeOperationMaxedOut + ErrCodeInvalidRequest + ErrCodeIncorrectContinuationToken + ErrCodeInvalidFormatAccessKey // S3 Select Errors - ErrEmptyRequestBody - ErrUnsupportedFunction - ErrInvalidExpressionType - ErrBusy - ErrUnauthorizedAccess - ErrExpressionTooLong - ErrIllegalSQLFunctionArgument - ErrInvalidKeyPath - ErrInvalidCompressionFormat - ErrInvalidFileHeaderInfo - ErrInvalidJSONType - ErrInvalidQuoteFields - ErrInvalidRequestParameter - ErrInvalidDataType - ErrInvalidTextEncoding - ErrInvalidDataSource - ErrInvalidTableAlias - ErrMissingRequiredParameter - ErrObjectSerializationConflict - ErrUnsupportedSQLOperation - ErrUnsupportedSQLStructure - ErrUnsupportedSyntax - ErrUnsupportedRangeHeader - ErrLexerInvalidChar - ErrLexerInvalidOperator 
- ErrLexerInvalidLiteral - ErrLexerInvalidIONLiteral - ErrParseExpectedDatePart - ErrParseExpectedKeyword - ErrParseExpectedTokenType - ErrParseExpected2TokenTypes - ErrParseExpectedNumber - ErrParseExpectedRightParenBuiltinFunctionCall - ErrParseExpectedTypeName - ErrParseExpectedWhenClause - ErrParseUnsupportedToken - ErrParseUnsupportedLiteralsGroupBy - ErrParseExpectedMember - ErrParseUnsupportedSelect - ErrParseUnsupportedCase - ErrParseUnsupportedCaseClause - ErrParseUnsupportedAlias - ErrParseUnsupportedSyntax - ErrParseUnknownOperator - ErrParseMissingIdentAfterAt - ErrParseUnexpectedOperator - ErrParseUnexpectedTerm - ErrParseUnexpectedToken - ErrParseUnexpectedKeyword - ErrParseExpectedExpression - ErrParseExpectedLeftParenAfterCast - ErrParseExpectedLeftParenValueConstructor - ErrParseExpectedLeftParenBuiltinFunctionCall - ErrParseExpectedArgumentDelimiter - ErrParseCastArity - ErrParseInvalidTypeParam - ErrParseEmptySelect - ErrParseSelectMissingFrom - ErrParseExpectedIdentForGroupName - ErrParseExpectedIdentForAlias - ErrParseUnsupportedCallWithStar - ErrParseNonUnaryAgregateFunctionCall - ErrParseMalformedJoin - ErrParseExpectedIdentForAt - ErrParseAsteriskIsNotAloneInSelectList - ErrParseCannotMixSqbAndWildcardInSelectList - ErrParseInvalidContextForWildcardInSelectList - ErrIncorrectSQLFunctionArgumentType - ErrValueParseFailure - ErrEvaluatorInvalidArguments - ErrIntegerOverflow - ErrLikeInvalidInputs - ErrCastFailed - ErrInvalidCast - ErrEvaluatorInvalidTimestampFormatPattern - ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing - ErrEvaluatorTimestampFormatPatternDuplicateFields - ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch - ErrEvaluatorUnterminatedTimestampFormatPatternToken - ErrEvaluatorInvalidTimestampFormatPatternToken - ErrEvaluatorInvalidTimestampFormatPatternSymbol - ErrEvaluatorBindingDoesNotExist - ErrMissingHeaders - ErrInvalidColumnIndex - ErrPostPolicyConditionInvalidFormat + ErrCodeEmptyRequestBody + 
ErrCodeUnsupportedFunction + ErrCodeInvalidExpressionType + ErrCodeBusy + ErrCodeUnauthorizedAccess + ErrCodeExpressionTooLong + ErrCodeIllegalSQLFunctionArgument + ErrCodeInvalidKeyPath + ErrCodeInvalidCompressionFormat + ErrCodeInvalidFileHeaderInfo + ErrCodeInvalidJSONType + ErrCodeInvalidQuoteFields + ErrCodeInvalidRequestParameter + ErrCodeInvalidDataType + ErrCodeInvalidTextEncoding + ErrCodeInvalidDataSource + ErrCodeInvalidTableAlias + ErrCodeMissingRequiredParameter + ErrCodeObjectSerializationConflict + ErrCodeUnsupportedSQLOperation + ErrCodeUnsupportedSQLStructure + ErrCodeUnsupportedSyntax + ErrCodeUnsupportedRangeHeader + ErrCodeLexerInvalidChar + ErrCodeLexerInvalidOperator + ErrCodeLexerInvalidLiteral + ErrCodeLexerInvalidIONLiteral + ErrCodeParseExpectedDatePart + ErrCodeParseExpectedKeyword + ErrCodeParseExpectedTokenType + ErrCodeParseExpected2TokenTypes + ErrCodeParseExpectedNumber + ErrCodeParseExpectedRightParenBuiltinFunctionCall + ErrCodeParseExpectedTypeName + ErrCodeParseExpectedWhenClause + ErrCodeParseUnsupportedToken + ErrCodeParseUnsupportedLiteralsGroupBy + ErrCodeParseExpectedMember + ErrCodeParseUnsupportedSelect + ErrCodeParseUnsupportedCase + ErrCodeParseUnsupportedCaseClause + ErrCodeParseUnsupportedAlias + ErrCodeParseUnsupportedSyntax + ErrCodeParseUnknownOperator + ErrCodeParseMissingIdentAfterAt + ErrCodeParseUnexpectedOperator + ErrCodeParseUnexpectedTerm + ErrCodeParseUnexpectedToken + ErrCodeParseUnexpectedKeyword + ErrCodeParseExpectedExpression + ErrCodeParseExpectedLeftParenAfterCast + ErrCodeParseExpectedLeftParenValueConstructor + ErrCodeParseExpectedLeftParenBuiltinFunctionCall + ErrCodeParseExpectedArgumentDelimiter + ErrCodeParseCastArity + ErrCodeParseInvalidTypeParam + ErrCodeParseEmptySelect + ErrCodeParseSelectMissingFrom + ErrCodeParseExpectedIdentForGroupName + ErrCodeParseExpectedIdentForAlias + ErrCodeParseUnsupportedCallWithStar + ErrCodeParseNonUnaryAgregateFunctionCall + ErrCodeParseMalformedJoin + 
ErrCodeParseExpectedIdentForAt + ErrCodeParseAsteriskIsNotAloneInSelectList + ErrCodeParseCannotMixSqbAndWildcardInSelectList + ErrCodeParseInvalidContextForWildcardInSelectList + ErrCodeIncorrectSQLFunctionArgumentType + ErrCodeValueParseFailure + ErrCodeEvaluatorInvalidArguments + ErrCodeIntegerOverflow + ErrCodeLikeInvalidInputs + ErrCodeCastFailed + ErrCodeInvalidCast + ErrCodeEvaluatorInvalidTimestampFormatPattern + ErrCodeEvaluatorInvalidTimestampFormatPatternSymbolForParsing + ErrCodeEvaluatorTimestampFormatPatternDuplicateFields + ErrCodeEvaluatorTimestampFormatPatternHourClockAmPmMismatch + ErrCodeEvaluatorUnterminatedTimestampFormatPatternToken + ErrCodeEvaluatorInvalidTimestampFormatPatternToken + ErrCodeEvaluatorInvalidTimestampFormatPatternSymbol + ErrCodeEvaluatorBindingDoesNotExist + ErrCodeMissingHeaders + ErrCodeInvalidColumnIndex - ErrMalformedJSON + ErrCodePostPolicyConditionInvalidFormat + + ErrCodeMalformedJSON ) // error code to APIError structure, these fields carry respective // descriptions for all the error responses. 
var errorCodeResponse = map[ErrorCode]APIError{ - ErrInvalidCopyDest: { + ErrCodeInvalidCopyDest: { Code: "InvalidRequest", Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidCopySource: { + ErrCodeInvalidCopySource: { Code: "InvalidArgument", Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidMetadataDirective: { + ErrCodeInvalidMetadataDirective: { Code: "InvalidArgument", Description: "Unknown metadata directive.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidRequestBody: { + ErrCodeInvalidRequestBody: { Code: "InvalidArgument", Description: "Body shouldn't be set for this request.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidMaxUploads: { + ErrCodeInvalidMaxUploads: { Code: "InvalidArgument", Description: "Argument max-uploads must be an integer between 0 and 2147483647", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidMaxKeys: { + ErrCodeInvalidMaxKeys: { Code: "InvalidArgument", Description: "Argument maxKeys must be an integer between 0 and 2147483647", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidEncodingMethod: { + ErrCodeInvalidEncodingMethod: { Code: "InvalidArgument", Description: "Invalid Encoding Method specified in Request", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidMaxParts: { + ErrCodeInvalidMaxParts: { Code: "InvalidArgument", Description: "Part number must be an integer between 1 and 10000, inclusive", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidPartNumberMarker: { + ErrCodeInvalidPartNumberMarker: { Code: "InvalidArgument", Description: "Argument partNumberMarker must be an integer.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidPolicyDocument: { + ErrCodeInvalidPolicyDocument: {
Code: "InvalidPolicyDocument", Description: "The content of the form does not meet the conditions specified in the policy document.", HTTPStatusCode: http.StatusBadRequest, }, - ErrAccessDenied: { + ErrCodeAccessDenied: { Code: "AccessDenied", Description: "Access Denied.", HTTPStatusCode: http.StatusForbidden, }, - ErrBadDigest: { + ErrCodeBadDigest: { Code: "BadDigest", Description: "The Content-Md5 you specified did not match what we received.", HTTPStatusCode: http.StatusBadRequest, }, - ErrEntityTooSmall: { + ErrCodeEntityTooSmall: { Code: "EntityTooSmall", Description: "Your proposed upload is smaller than the minimum allowed object size.", HTTPStatusCode: http.StatusBadRequest, }, - ErrEntityTooLarge: { + ErrCodeEntityTooLarge: { Code: "EntityTooLarge", Description: "Your proposed upload exceeds the maximum allowed object size.", HTTPStatusCode: http.StatusBadRequest, }, - ErrIncompleteBody: { + ErrCodeIncompleteBody: { Code: "IncompleteBody", Description: "You did not provide the number of bytes specified by the Content-Length HTTP header.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInternalError: { + ErrCodeInternalError: { Code: "InternalError", Description: "We encountered an internal error, please try again.", HTTPStatusCode: http.StatusInternalServerError, }, - ErrInvalidAccessKeyID: { + ErrCodeInvalidAccessKeyID: { Code: "InvalidAccessKeyId", Description: "The Access Key Id you provided does not exist in our records.", HTTPStatusCode: http.StatusForbidden, }, - ErrAccessKeyDisabled: { + ErrCodeAccessKeyDisabled: { Code: "InvalidAccessKeyId", Description: "Your account is disabled; please contact your administrator.", HTTPStatusCode: http.StatusForbidden, }, - ErrInvalidBucketName: { + ErrCodeInvalidBucketName: { Code: "InvalidBucketName", Description: "The specified bucket is not valid.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidDigest: { + ErrCodeInvalidDigest: { Code: "InvalidDigest", Description: "The Content-Md5 you specified is 
not valid.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidRange: { + ErrCodeInvalidRange: { Code: "InvalidRange", Description: "The requested range is not satisfiable", HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable, }, - ErrInvalidRangePartNumber: { + ErrCodeInvalidRangePartNumber: { Code: "InvalidRequest", Description: "Cannot specify both Range header and partNumber query parameter", HTTPStatusCode: http.StatusBadRequest, }, - ErrMalformedXML: { + ErrCodeMalformedXML: { Code: "MalformedXML", Description: "The XML you provided was not well-formed or did not validate against our published schema.", HTTPStatusCode: http.StatusBadRequest, }, - ErrMissingContentLength: { + ErrCodeMissingContentLength: { Code: "MissingContentLength", Description: "You must provide the Content-Length HTTP header.", HTTPStatusCode: http.StatusLengthRequired, }, - ErrMissingContentMD5: { + ErrCodeMissingContentMD5: { Code: "MissingContentMD5", Description: "Missing required header for this request: Content-Md5.", HTTPStatusCode: http.StatusBadRequest, }, - ErrMissingSecurityHeader: { + ErrCodeMissingSecurityHeader: { Code: "MissingSecurityHeader", Description: "Your request was missing a required header", HTTPStatusCode: http.StatusBadRequest, }, - ErrMissingRequestBodyError: { + ErrCodeMissingRequestBodyError: { Code: "MissingRequestBodyError", Description: "Request body is empty.", HTTPStatusCode: http.StatusLengthRequired, }, - ErrNoSuchBucket: { + ErrCodeNoSuchBucket: { Code: "NoSuchBucket", Description: "The specified bucket does not exist", HTTPStatusCode: http.StatusNotFound, }, - ErrNoSuchBucketPolicy: { + ErrCodeNoSuchBucketPolicy: { Code: "NoSuchBucketPolicy", Description: "The bucket policy does not exist", HTTPStatusCode: http.StatusNotFound, }, - ErrNoSuchLifecycleConfiguration: { + ErrCodeNoSuchLifecycleConfiguration: { Code: "NoSuchLifecycleConfiguration", Description: "The lifecycle configuration does not exist", HTTPStatusCode: http.StatusNotFound, }, - 
ErrNoSuchUser: { + ErrCodeNoSuchUser: { Code: "NoSuchUser", Description: "The specified user does not exist", HTTPStatusCode: http.StatusConflict, }, - ErrUserAlreadyExists: { + ErrCodeUserAlreadyExists: { Code: "UserAlreadyExists", Description: "The request was rejected because it attempted to create a resource that already exists .", HTTPStatusCode: http.StatusConflict, }, - ErrNoSuchUserPolicy: { + ErrCodeNoSuchUserPolicy: { Code: "NoSuchUserPolicy", Description: "The specified user policy does not exist", HTTPStatusCode: http.StatusConflict, }, - ErrUserPolicyAlreadyExists: { + ErrCodeUserPolicyAlreadyExists: { Code: "UserPolicyAlreadyExists", Description: "The same user policy already exists .", HTTPStatusCode: http.StatusConflict, }, - ErrNoSuchKey: { + ErrCodeNoSuchKey: { Code: "NoSuchKey", Description: "The specified key does not exist.", HTTPStatusCode: http.StatusNotFound, }, - ErrNoSuchUpload: { + ErrCodeNoSuchUpload: { Code: "NoSuchUpload", Description: "The specified multipart upload does not exist. 
The upload ID may be invalid, or the upload may have been aborted or completed.", HTTPStatusCode: http.StatusNotFound, }, - ErrInvalidVersionID: { + ErrCodeInvalidVersionID: { Code: "InvalidArgument", Description: "Invalid version id specified", HTTPStatusCode: http.StatusBadRequest, }, - ErrNoSuchVersion: { + ErrCodeNoSuchVersion: { Code: "NoSuchVersion", Description: "The specified version does not exist.", HTTPStatusCode: http.StatusNotFound, }, - ErrNotImplemented: { + ErrCodeNotImplemented: { Code: "NotImplemented", Description: "A header you provided implies functionality that is not implemented", HTTPStatusCode: http.StatusNotImplemented, }, - ErrPreconditionFailed: { + ErrCodePreconditionFailed: { Code: "PreconditionFailed", Description: "At least one of the pre-conditions you specified did not hold", HTTPStatusCode: http.StatusPreconditionFailed, }, - ErrRequestTimeTooSkewed: { + ErrCodeRequestTimeTooSkewed: { Code: "RequestTimeTooSkewed", Description: "The difference between the request time and the server's time is too large.", HTTPStatusCode: http.StatusForbidden, }, - ErrSignatureDoesNotMatch: { + ErrCodeSignatureDoesNotMatch: { Code: "SignatureDoesNotMatch", Description: "The request signature we calculated does not match the signature you provided. Check your key and signing method.", HTTPStatusCode: http.StatusForbidden, }, - ErrMethodNotAllowed: { + ErrCodeMethodNotAllowed: { Code: "MethodNotAllowed", Description: "The specified method is not allowed against this resource.", HTTPStatusCode: http.StatusMethodNotAllowed, }, - ErrInvalidPart: { + ErrCodeInvalidPart: { Code: "InvalidPart", Description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidPartOrder: { + ErrCodeInvalidPartOrder: { Code: "InvalidPartOrder", Description: "The list of parts was not in ascending order. 
The parts list must be specified in order by part number.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidObjectState: { + ErrCodeInvalidObjectState: { Code: "InvalidObjectState", Description: "The operation is not valid for the current state of the object.", HTTPStatusCode: http.StatusForbidden, }, - ErrAuthorizationHeaderMalformed: { + ErrCodeAuthorizationHeaderMalformed: { Code: "AuthorizationHeaderMalformed", Description: "The authorization header is malformed; the region is wrong; expecting 'us-east-1'.", HTTPStatusCode: http.StatusBadRequest, }, - ErrMalformedPOSTRequest: { + ErrCodeMalformedPOSTRequest: { Code: "MalformedPOSTRequest", Description: "The body of your POST request is not well-formed multipart/form-data.", HTTPStatusCode: http.StatusBadRequest, }, - ErrPOSTFileRequired: { + ErrCodePOSTFileRequired: { Code: "InvalidArgument", Description: "POST requires exactly one file upload per request.", HTTPStatusCode: http.StatusBadRequest, }, - ErrSignatureVersionNotSupported: { + ErrCodeSignatureVersionNotSupported: { Code: "InvalidRequest", Description: "The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.", HTTPStatusCode: http.StatusBadRequest, }, - ErrBucketNotEmpty: { + ErrCodeBucketNotEmpty: { Code: "BucketNotEmpty", Description: "The bucket you tried to delete is not empty", HTTPStatusCode: http.StatusConflict, }, - ErrBucketAlreadyExists: { + ErrCodeBucketAlreadyExists: { Code: "BucketAlreadyExists", Description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. 
Please select a different name and try again.", HTTPStatusCode: http.StatusConflict, }, - ErrAllAccessDisabled: { + ErrCodeAllAccessDisabled: { Code: "AllAccessDisabled", Description: "All access to this resource has been disabled.", HTTPStatusCode: http.StatusForbidden, }, - ErrMalformedPolicy: { + ErrCodeMalformedPolicy: { Code: "MalformedPolicy", Description: "Policy has invalid resource.", HTTPStatusCode: http.StatusBadRequest, }, - ErrMissingCredTag: { + ErrCodeMissingCredTag: { Code: "InvalidRequest", Description: "Missing Credential field for this request.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidRegion: { + ErrCodeInvalidRegion: { Code: "InvalidRegion", Description: "Region does not match.", HTTPStatusCode: http.StatusBadRequest, }, - ErrMissingSignTag: { + ErrCodeMissingSignTag: { Code: "AccessDenied", Description: "Signature header missing Signature field.", HTTPStatusCode: http.StatusBadRequest, }, - ErrMissingSignHeadersTag: { + ErrCodeMissingSignHeadersTag: { Code: "InvalidArgument", Description: "Signature header missing SignedHeaders field.", HTTPStatusCode: http.StatusBadRequest, }, - ErrAuthHeaderEmpty: { + ErrCodeAuthHeaderEmpty: { Code: "InvalidArgument", Description: "Authorization header is invalid -- one and only one ' ' (space) required.", HTTPStatusCode: http.StatusBadRequest, }, - ErrMissingDateHeader: { + ErrCodeMissingDateHeader: { Code: "AccessDenied", Description: "AWS authentication requires a valid Date or x-amz-date header", HTTPStatusCode: http.StatusBadRequest, }, - ErrExpiredPresignRequest: { + ErrCodeExpiredPresignRequest: { Code: "AccessDenied", Description: "Request has expired", HTTPStatusCode: http.StatusForbidden, }, - ErrRequestNotReadyYet: { + ErrCodeRequestNotReadyYet: { Code: "AccessDenied", Description: "Request is not valid yet", HTTPStatusCode: http.StatusForbidden, }, - ErrSlowDown: { + ErrCodeSlowDown: { Code: "SlowDown", Description: "Resource requested is unreadable, please reduce your request 
rate", HTTPStatusCode: http.StatusServiceUnavailable, }, - ErrBadRequest: { + ErrCodeBadRequest: { Code: "BadRequest", Description: "400 BadRequest", HTTPStatusCode: http.StatusBadRequest, }, - ErrKeyTooLongError: { + ErrCodeKeyTooLongError: { Code: "KeyTooLongError", Description: "Your key is too long", HTTPStatusCode: http.StatusBadRequest, }, - ErrUnsignedHeaders: { + ErrCodeUnsignedHeaders: { Code: "AccessDenied", Description: "There were headers present in the request which were not signed", HTTPStatusCode: http.StatusBadRequest, }, - ErrBucketAlreadyOwnedByYou: { + ErrCodeBucketAlreadyOwnedByYou: { Code: "BucketAlreadyOwnedByYou", Description: "Your previous request to create the named bucket succeeded and you already own it.", HTTPStatusCode: http.StatusConflict, }, - ErrInvalidDuration: { + ErrCodeInvalidDuration: { Code: "InvalidDuration", Description: "Duration provided in the request is invalid.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidBucketObjectLockConfiguration: { + ErrCodeInvalidBucketObjectLockConfiguration: { Code: "InvalidRequest", Description: "Bucket is missing ObjectLockConfiguration", HTTPStatusCode: http.StatusBadRequest, }, - ErrBucketTaggingNotFound: { + ErrCodeBucketTaggingNotFound: { Code: "NoSuchTagSet", Description: "The TagSet does not exist", HTTPStatusCode: http.StatusNotFound, }, - ErrObjectLockConfigurationNotAllowed: { + ErrCodeObjectLockConfigurationNotAllowed: { Code: "InvalidBucketState", Description: "Object Lock configuration cannot be enabled on existing buckets", HTTPStatusCode: http.StatusConflict, }, - ErrNoSuchCORSConfiguration: { + ErrCodeNoSuchCORSConfiguration: { Code: "NoSuchCORSConfiguration", Description: "The CORS configuration does not exist", HTTPStatusCode: http.StatusNotFound, }, - ErrNoSuchWebsiteConfiguration: { + ErrCodeNoSuchWebsiteConfiguration: { Code: "NoSuchWebsiteConfiguration", Description: "The specified bucket does not have a website configuration", HTTPStatusCode: 
http.StatusNotFound, }, - ErrReplicationConfigurationNotFoundError: { + ErrCodeReplicationConfigurationNotFoundError: { Code: "ReplicationConfigurationNotFoundError", Description: "The replication configuration was not found", HTTPStatusCode: http.StatusNotFound, }, - ErrReplicationNeedsVersioningError: { + ErrCodeReplicationNeedsVersioningError: { Code: "InvalidRequest", Description: "Versioning must be 'Enabled' on the bucket to apply a replication configuration", HTTPStatusCode: http.StatusBadRequest, }, - ErrReplicationBucketNeedsVersioningError: { + ErrCodeReplicationBucketNeedsVersioningError: { Code: "InvalidRequest", Description: "Versioning must be 'Enabled' on the bucket to add a replication target", HTTPStatusCode: http.StatusBadRequest, }, - ErrNoSuchObjectLockConfiguration: { + ErrCodeNoSuchObjectLockConfiguration: { Code: "NoSuchObjectLockConfiguration", Description: "The specified object does not have a ObjectLock configuration", HTTPStatusCode: http.StatusBadRequest, }, - ErrObjectLocked: { + ErrCodeObjectLocked: { Code: "InvalidRequest", Description: "Object is WORM protected and cannot be overwritten", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidRetentionDate: { + ErrCodeInvalidRetentionDate: { Code: "InvalidRequest", Description: "Date must be provided in ISO 8601 format", HTTPStatusCode: http.StatusBadRequest, }, - ErrPastObjectLockRetainDate: { + ErrCodePastObjectLockRetainDate: { Code: "InvalidRequest", Description: "the retain until date must be in the future", HTTPStatusCode: http.StatusBadRequest, }, - ErrUnknownWORMModeDirective: { + ErrCodeUnknownWORMModeDirective: { Code: "InvalidRequest", Description: "unknown wormMode directive", HTTPStatusCode: http.StatusBadRequest, }, - ErrObjectLockInvalidHeaders: { + ErrCodeObjectLockInvalidHeaders: { Code: "InvalidRequest", Description: "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied", HTTPStatusCode: http.StatusBadRequest, }, - 
ErrObjectRestoreAlreadyInProgress: { + ErrCodeObjectRestoreAlreadyInProgress: { Code: "RestoreAlreadyInProgress", Description: "Object restore is already in progress", HTTPStatusCode: http.StatusConflict, }, // Bucket notification related errors. - ErrEventNotification: { + ErrCodeEventNotification: { Code: "InvalidArgument", Description: "A specified event is not supported for notifications.", HTTPStatusCode: http.StatusBadRequest, }, - ErrARNNotification: { + ErrCodeARNNotification: { Code: "InvalidArgument", Description: "A specified destination ARN does not exist or is not well-formed. Verify the destination ARN.", HTTPStatusCode: http.StatusBadRequest, }, - ErrRegionNotification: { + ErrCodeRegionNotification: { Code: "InvalidArgument", Description: "A specified destination is in a different region than the bucket. You must use a destination that resides in the same region as the bucket.", HTTPStatusCode: http.StatusBadRequest, }, - ErrOverlappingFilterNotification: { + ErrCodeOverlappingFilterNotification: { Code: "InvalidArgument", Description: "An object key name filtering rule defined with overlapping prefixes, overlapping suffixes, or overlapping combinations of prefixes and suffixes for the same event types.", HTTPStatusCode: http.StatusBadRequest, }, - ErrFilterNameInvalid: { + ErrCodeFilterNameInvalid: { Code: "InvalidArgument", Description: "filter rule name must be either prefix or suffix", HTTPStatusCode: http.StatusBadRequest, }, - ErrFilterNamePrefix: { + ErrCodeFilterNamePrefix: { Code: "InvalidArgument", Description: "Cannot specify more than one prefix rule in a filter.", HTTPStatusCode: http.StatusBadRequest, }, - ErrFilterNameSuffix: { + ErrCodeFilterNameSuffix: { Code: "InvalidArgument", Description: "Cannot specify more than one suffix rule in a filter.", HTTPStatusCode: http.StatusBadRequest, }, - ErrFilterValueInvalid: { + ErrCodeFilterValueInvalid: { Code: "InvalidArgument", Description: "Size of filter rule value cannot exceed 1024 
bytes in UTF-8 representation", HTTPStatusCode: http.StatusBadRequest, }, - ErrOverlappingConfigs: { + ErrCodeOverlappingConfigs: { Code: "InvalidArgument", Description: "Configurations overlap. Configurations on the same bucket cannot share a common event type.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidCopyPartRange: { + ErrCodeInvalidCopyPartRange: { Code: "InvalidArgument", Description: "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidCopyPartRangeSource: { + ErrCodeInvalidCopyPartRangeSource: { Code: "InvalidArgument", Description: "Range specified is not valid for source object", HTTPStatusCode: http.StatusBadRequest, }, - ErrMetadataTooLarge: { + ErrCodeMetadataTooLarge: { Code: "MetadataTooLarge", Description: "Your metadata headers exceed the maximum allowed metadata size.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidTagDirective: { + ErrCodeInvalidTagDirective: { Code: "InvalidArgument", Description: "Unknown tag directive.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidEncryptionMethod: { + ErrCodeInvalidEncryptionMethod: { Code: "InvalidRequest", Description: "The encryption method specified is not supported", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidQueryParams: { + ErrCodeInvalidQueryParams: { Code: "AuthorizationQueryParametersError", Description: "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.", HTTPStatusCode: http.StatusBadRequest, }, - ErrNoAccessKey: { + ErrCodeNoAccessKey: { Code: "AccessDenied", Description: "No AWSAccessKey was presented", HTTPStatusCode: http.StatusForbidden, }, - ErrInvalidToken: { + ErrCodeInvalidToken: { Code: "InvalidTokenId", Description: "The security token included in the request is invalid", 
HTTPStatusCode: http.StatusForbidden, }, // S3 extensions. - ErrInvalidObjectName: { + ErrCodeInvalidObjectName: { Code: "InvalidObjectName", Description: "Object name contains unsupported characters.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidObjectNamePrefixSlash: { + ErrCodeInvalidObjectNamePrefixSlash: { Code: "InvalidObjectName", Description: "Object name contains a leading slash.", HTTPStatusCode: http.StatusBadRequest, }, - ErrClientDisconnected: { + ErrCodeClientDisconnected: { Code: "ClientDisconnected", Description: "Client disconnected before response was ready", HTTPStatusCode: 499, // No official code, use nginx value. }, - ErrOperationTimedOut: { + ErrCodeOperationTimedOut: { Code: "RequestTimeout", Description: "A timeout occurred while trying to lock a resource, please reduce your request rate", HTTPStatusCode: http.StatusServiceUnavailable, }, - ErrOperationMaxedOut: { + ErrCodeOperationMaxedOut: { Code: "SlowDown", Description: "A timeout exceeded while waiting to proceed with the request, please reduce your request rate", HTTPStatusCode: http.StatusServiceUnavailable, }, - ErrUnsupportedMetadata: { + ErrCodeUnsupportedMetadata: { Code: "InvalidArgument", Description: "Your metadata headers are not supported.", HTTPStatusCode: http.StatusBadRequest, }, // Generic Invalid-Request error. Should be used for response errors only for unlikely - // corner case errors for which introducing new APIErrorCode is not worth it. LogIf() + // corner case errors for which introducing new APIErrorCode is not worth it. LogIf() // should be used to log the error at the source of the error for debugging purposes. 
- ErrInvalidRequest: { + ErrCodeInvalidRequest: { Code: "InvalidRequest", Description: "Invalid Request", HTTPStatusCode: http.StatusBadRequest, }, - ErrIncorrectContinuationToken: { + ErrCodeIncorrectContinuationToken: { Code: "InvalidArgument", Description: "The continuation token provided is incorrect", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidFormatAccessKey: { + ErrCodeInvalidFormatAccessKey: { Code: "InvalidAccessKeyId", Description: "The Access Key Id you provided contains invalid characters.", HTTPStatusCode: http.StatusBadRequest, }, // S3 Select API Errors - ErrEmptyRequestBody: { + ErrCodeEmptyRequestBody: { Code: "EmptyRequestBody", Description: "Request body cannot be empty.", HTTPStatusCode: http.StatusBadRequest, }, - ErrUnsupportedFunction: { + ErrCodeUnsupportedFunction: { Code: "UnsupportedFunction", Description: "Encountered an unsupported SQL function.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidDataSource: { + ErrCodeInvalidDataSource: { Code: "InvalidDataSource", Description: "Invalid data source type. Only CSV and JSON are supported at this time.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidExpressionType: { + ErrCodeInvalidExpressionType: { Code: "InvalidExpressionType", Description: "The ExpressionType is invalid. Only SQL expressions are supported at this time.", HTTPStatusCode: http.StatusBadRequest, }, - ErrBusy: { + ErrCodeBusy: { Code: "Busy", Description: "The service is unavailable. 
Please retry.", HTTPStatusCode: http.StatusServiceUnavailable, }, - ErrUnauthorizedAccess: { + ErrCodeUnauthorizedAccess: { Code: "UnauthorizedAccess", Description: "You are not authorized to perform this operation", HTTPStatusCode: http.StatusUnauthorized, }, - ErrExpressionTooLong: { + ErrCodeExpressionTooLong: { Code: "ExpressionTooLong", Description: "The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB.", HTTPStatusCode: http.StatusBadRequest, }, - ErrIllegalSQLFunctionArgument: { + ErrCodeIllegalSQLFunctionArgument: { Code: "IllegalSqlFunctionArgument", Description: "Illegal argument was used in the SQL function.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidKeyPath: { + ErrCodeInvalidKeyPath: { Code: "InvalidKeyPath", Description: "Key path in the SQL expression is invalid.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidCompressionFormat: { + ErrCodeInvalidCompressionFormat: { Code: "InvalidCompressionFormat", Description: "The file is not in a supported compression format. Only GZIP is supported at this time.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidFileHeaderInfo: { + ErrCodeInvalidFileHeaderInfo: { Code: "InvalidFileHeaderInfo", Description: "The FileHeaderInfo is invalid. Only NONE, USE, and IGNORE are supported.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidJSONType: { + ErrCodeInvalidJSONType: { Code: "InvalidJsonType", Description: "The JsonType is invalid. Only DOCUMENT and LINES are supported at this time.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidQuoteFields: { + ErrCodeInvalidQuoteFields: { Code: "InvalidQuoteFields", Description: "The QuoteFields is invalid. Only ALWAYS and ASNEEDED are supported.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidRequestParameter: { + ErrCodeInvalidRequestParameter: { Code: "InvalidRequestParameter", Description: "The value of a parameter in SelectRequest element is invalid. 
Check the service API documentation and try again.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidDataType: { + ErrCodeInvalidDataType: { Code: "InvalidDataType", Description: "The SQL expression contains an invalid data type.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidTextEncoding: { + ErrCodeInvalidTextEncoding: { Code: "InvalidTextEncoding", Description: "Invalid encoding type. Only UTF-8 encoding is supported at this time.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidTableAlias: { + ErrCodeInvalidTableAlias: { Code: "InvalidTableAlias", Description: "The SQL expression contains an invalid table alias.", HTTPStatusCode: http.StatusBadRequest, }, - ErrMissingRequiredParameter: { + ErrCodeMissingRequiredParameter: { Code: "MissingRequiredParameter", Description: "The SelectRequest entity is missing a required parameter. Check the service documentation and try again.", HTTPStatusCode: http.StatusBadRequest, }, - ErrObjectSerializationConflict: { + ErrCodeObjectSerializationConflict: { Code: "ObjectSerializationConflict", Description: "The SelectRequest entity can only contain one of CSV or JSON. Check the service documentation and try again.", HTTPStatusCode: http.StatusBadRequest, }, - ErrUnsupportedSQLOperation: { + ErrCodeUnsupportedSQLOperation: { Code: "UnsupportedSqlOperation", Description: "Encountered an unsupported SQL operation.", HTTPStatusCode: http.StatusBadRequest, }, - ErrUnsupportedSQLStructure: { + ErrCodeUnsupportedSQLStructure: { Code: "UnsupportedSqlStructure", Description: "Encountered an unsupported SQL structure. 
Check the SQL Reference.", HTTPStatusCode: http.StatusBadRequest, }, - ErrUnsupportedSyntax: { + ErrCodeUnsupportedSyntax: { Code: "UnsupportedSyntax", Description: "Encountered invalid syntax.", HTTPStatusCode: http.StatusBadRequest, }, - ErrUnsupportedRangeHeader: { + ErrCodeUnsupportedRangeHeader: { Code: "UnsupportedRangeHeader", Description: "Range header is not supported for this operation.", HTTPStatusCode: http.StatusBadRequest, }, - ErrLexerInvalidChar: { + ErrCodeLexerInvalidChar: { Code: "LexerInvalidChar", Description: "The SQL expression contains an invalid character.", HTTPStatusCode: http.StatusBadRequest, }, - ErrLexerInvalidOperator: { + ErrCodeLexerInvalidOperator: { Code: "LexerInvalidOperator", Description: "The SQL expression contains an invalid literal.", HTTPStatusCode: http.StatusBadRequest, }, - ErrLexerInvalidLiteral: { + ErrCodeLexerInvalidLiteral: { Code: "LexerInvalidLiteral", Description: "The SQL expression contains an invalid operator.", HTTPStatusCode: http.StatusBadRequest, }, - ErrLexerInvalidIONLiteral: { + ErrCodeLexerInvalidIONLiteral: { Code: "LexerInvalidIONLiteral", Description: "The SQL expression contains an invalid operator.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseExpectedDatePart: { + ErrCodeParseExpectedDatePart: { Code: "ParseExpectedDatePart", Description: "Did not find the expected date part in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseExpectedKeyword: { + ErrCodeParseExpectedKeyword: { Code: "ParseExpectedKeyword", Description: "Did not find the expected keyword in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseExpectedTokenType: { + ErrCodeParseExpectedTokenType: { Code: "ParseExpectedTokenType", Description: "Did not find the expected token in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseExpected2TokenTypes: { + ErrCodeParseExpected2TokenTypes: { Code: "ParseExpected2TokenTypes", Description: "Did not find the 
expected token in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseExpectedNumber: { + ErrCodeParseExpectedNumber: { Code: "ParseExpectedNumber", Description: "Did not find the expected number in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseExpectedRightParenBuiltinFunctionCall: { + ErrCodeParseExpectedRightParenBuiltinFunctionCall: { Code: "ParseExpectedRightParenBuiltinFunctionCall", Description: "Did not find the expected right parenthesis character in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseExpectedTypeName: { + ErrCodeParseExpectedTypeName: { Code: "ParseExpectedTypeName", Description: "Did not find the expected type name in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseExpectedWhenClause: { + ErrCodeParseExpectedWhenClause: { Code: "ParseExpectedWhenClause", Description: "Did not find the expected WHEN clause in the SQL expression. CASE is not supported.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseUnsupportedToken: { + ErrCodeParseUnsupportedToken: { Code: "ParseUnsupportedToken", Description: "The SQL expression contains an unsupported token.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseUnsupportedLiteralsGroupBy: { + ErrCodeParseUnsupportedLiteralsGroupBy: { Code: "ParseUnsupportedLiteralsGroupBy", Description: "The SQL expression contains an unsupported use of GROUP BY.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseExpectedMember: { + ErrCodeParseExpectedMember: { Code: "ParseExpectedMember", Description: "The SQL expression contains an unsupported use of MEMBER.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseUnsupportedSelect: { + ErrCodeParseUnsupportedSelect: { Code: "ParseUnsupportedSelect", Description: "The SQL expression contains an unsupported use of SELECT.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseUnsupportedCase: { + ErrCodeParseUnsupportedCase: { Code: "ParseUnsupportedCase", 
Description: "The SQL expression contains an unsupported use of CASE.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseUnsupportedCaseClause: { + ErrCodeParseUnsupportedCaseClause: { Code: "ParseUnsupportedCaseClause", Description: "The SQL expression contains an unsupported use of CASE.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseUnsupportedAlias: { + ErrCodeParseUnsupportedAlias: { Code: "ParseUnsupportedAlias", Description: "The SQL expression contains an unsupported use of ALIAS.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseUnsupportedSyntax: { + ErrCodeParseUnsupportedSyntax: { Code: "ParseUnsupportedSyntax", Description: "The SQL expression contains unsupported syntax.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseUnknownOperator: { + ErrCodeParseUnknownOperator: { Code: "ParseUnknownOperator", Description: "The SQL expression contains an invalid operator.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseMissingIdentAfterAt: { + ErrCodeParseMissingIdentAfterAt: { Code: "ParseMissingIdentAfterAt", Description: "Did not find the expected identifier after the @ symbol in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseUnexpectedOperator: { + ErrCodeParseUnexpectedOperator: { Code: "ParseUnexpectedOperator", Description: "The SQL expression contains an unexpected operator.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseUnexpectedTerm: { + ErrCodeParseUnexpectedTerm: { Code: "ParseUnexpectedTerm", Description: "The SQL expression contains an unexpected term.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseUnexpectedToken: { + ErrCodeParseUnexpectedToken: { Code: "ParseUnexpectedToken", Description: "The SQL expression contains an unexpected token.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseUnexpectedKeyword: { + ErrCodeParseUnexpectedKeyword: { Code: "ParseUnexpectedKeyword", Description: "The SQL expression contains an unexpected keyword.", HTTPStatusCode: 
http.StatusBadRequest, }, - ErrParseExpectedExpression: { + ErrCodeParseExpectedExpression: { Code: "ParseExpectedExpression", Description: "Did not find the expected SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseExpectedLeftParenAfterCast: { + ErrCodeParseExpectedLeftParenAfterCast: { Code: "ParseExpectedLeftParenAfterCast", Description: "Did not find expected the left parenthesis in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseExpectedLeftParenValueConstructor: { + ErrCodeParseExpectedLeftParenValueConstructor: { Code: "ParseExpectedLeftParenValueConstructor", Description: "Did not find expected the left parenthesis in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseExpectedLeftParenBuiltinFunctionCall: { + ErrCodeParseExpectedLeftParenBuiltinFunctionCall: { Code: "ParseExpectedLeftParenBuiltinFunctionCall", Description: "Did not find the expected left parenthesis in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseExpectedArgumentDelimiter: { + ErrCodeParseExpectedArgumentDelimiter: { Code: "ParseExpectedArgumentDelimiter", Description: "Did not find the expected argument delimiter in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseCastArity: { + ErrCodeParseCastArity: { Code: "ParseCastArity", Description: "The SQL expression CAST has incorrect arity.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseInvalidTypeParam: { + ErrCodeParseInvalidTypeParam: { Code: "ParseInvalidTypeParam", Description: "The SQL expression contains an invalid parameter value.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseEmptySelect: { + ErrCodeParseEmptySelect: { Code: "ParseEmptySelect", Description: "The SQL expression contains an empty SELECT.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseSelectMissingFrom: { + ErrCodeParseSelectMissingFrom: { Code: "ParseSelectMissingFrom", Description: "GROUP is not supported in the SQL 
expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseExpectedIdentForGroupName: { + ErrCodeParseExpectedIdentForGroupName: { Code: "ParseExpectedIdentForGroupName", Description: "GROUP is not supported in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseExpectedIdentForAlias: { + ErrCodeParseExpectedIdentForAlias: { Code: "ParseExpectedIdentForAlias", Description: "Did not find the expected identifier for the alias in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseUnsupportedCallWithStar: { + ErrCodeParseUnsupportedCallWithStar: { Code: "ParseUnsupportedCallWithStar", Description: "Only COUNT with (*) as a parameter is supported in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseNonUnaryAgregateFunctionCall: { + ErrCodeParseNonUnaryAgregateFunctionCall: { Code: "ParseNonUnaryAgregateFunctionCall", Description: "Only one argument is supported for aggregate functions in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseMalformedJoin: { + ErrCodeParseMalformedJoin: { Code: "ParseMalformedJoin", Description: "JOIN is not supported in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseExpectedIdentForAt: { + ErrCodeParseExpectedIdentForAt: { Code: "ParseExpectedIdentForAt", Description: "Did not find the expected identifier for AT name in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseAsteriskIsNotAloneInSelectList: { + ErrCodeParseAsteriskIsNotAloneInSelectList: { Code: "ParseAsteriskIsNotAloneInSelectList", Description: "Other expressions are not allowed in the SELECT list when '*' is used without dot notation in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseCannotMixSqbAndWildcardInSelectList: { + ErrCodeParseCannotMixSqbAndWildcardInSelectList: { Code: "ParseCannotMixSqbAndWildcardInSelectList", Description: "Cannot mix [] and * in the same expression in a SELECT list in SQL 
expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrParseInvalidContextForWildcardInSelectList: { + ErrCodeParseInvalidContextForWildcardInSelectList: { Code: "ParseInvalidContextForWildcardInSelectList", Description: "Invalid use of * in SELECT list in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrIncorrectSQLFunctionArgumentType: { + ErrCodeIncorrectSQLFunctionArgumentType: { Code: "IncorrectSqlFunctionArgumentType", Description: "Incorrect type of arguments in function call in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrValueParseFailure: { + ErrCodeValueParseFailure: { Code: "ValueParseFailure", Description: "Time stamp parse failure in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrEvaluatorInvalidArguments: { + ErrCodeEvaluatorInvalidArguments: { Code: "EvaluatorInvalidArguments", Description: "Incorrect number of arguments in the function call in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrIntegerOverflow: { + ErrCodeIntegerOverflow: { Code: "IntegerOverflow", Description: "Int overflow or underflow in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrLikeInvalidInputs: { + ErrCodeLikeInvalidInputs: { Code: "LikeInvalidInputs", Description: "Invalid argument given to the LIKE clause in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrCastFailed: { + ErrCodeCastFailed: { Code: "CastFailed", Description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidCast: { + ErrCodeInvalidCast: { Code: "InvalidCast", Description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrEvaluatorInvalidTimestampFormatPattern: { + ErrCodeEvaluatorInvalidTimestampFormatPattern: { Code: "EvaluatorInvalidTimestampFormatPattern", Description: "Time stamp format 
pattern requires additional fields in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing: { + ErrCodeEvaluatorInvalidTimestampFormatPatternSymbolForParsing: { Code: "EvaluatorInvalidTimestampFormatPatternSymbolForParsing", Description: "Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrEvaluatorTimestampFormatPatternDuplicateFields: { + ErrCodeEvaluatorTimestampFormatPatternDuplicateFields: { Code: "EvaluatorTimestampFormatPatternDuplicateFields", Description: "Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch: { + ErrCodeEvaluatorTimestampFormatPatternHourClockAmPmMismatch: { Code: "EvaluatorUnterminatedTimestampFormatPatternToken", Description: "Time stamp format pattern contains unterminated token in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrEvaluatorUnterminatedTimestampFormatPatternToken: { + ErrCodeEvaluatorUnterminatedTimestampFormatPatternToken: { Code: "EvaluatorInvalidTimestampFormatPatternToken", Description: "Time stamp format pattern contains an invalid token in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrEvaluatorInvalidTimestampFormatPatternToken: { + ErrCodeEvaluatorInvalidTimestampFormatPatternToken: { Code: "EvaluatorInvalidTimestampFormatPatternToken", Description: "Time stamp format pattern contains an invalid token in the SQL expression.", HTTPStatusCode: http.StatusBadRequest, }, - ErrEvaluatorInvalidTimestampFormatPatternSymbol: { + ErrCodeEvaluatorInvalidTimestampFormatPatternSymbol: { Code: "EvaluatorInvalidTimestampFormatPatternSymbol", Description: "Time stamp format pattern contains an invalid symbol in the SQL expression.", 
HTTPStatusCode: http.StatusBadRequest, }, - ErrEvaluatorBindingDoesNotExist: { - Code: "ErrEvaluatorBindingDoesNotExist", + ErrCodeEvaluatorBindingDoesNotExist: { + Code: "ErrCodeEvaluatorBindingDoesNotExist", Description: "A column name or a path provided does not exist in the SQL expression", HTTPStatusCode: http.StatusBadRequest, }, - ErrMissingHeaders: { + ErrCodeMissingHeaders: { Code: "MissingHeaders", Description: "Some headers in the query are missing from the file. Check the file and try again.", HTTPStatusCode: http.StatusBadRequest, }, - ErrInvalidColumnIndex: { + ErrCodeInvalidColumnIndex: { Code: "InvalidColumnIndex", Description: "The column index is invalid. Please check the service documentation and try again.", HTTPStatusCode: http.StatusBadRequest, }, - ErrPostPolicyConditionInvalidFormat: { + ErrCodePostPolicyConditionInvalidFormat: { Code: "PostPolicyInvalidKeyName", Description: "Invalid according to Policy: Policy Conditions failed", HTTPStatusCode: http.StatusForbidden, }, // Add your error structure here. - ErrMalformedJSON: { + ErrCodeMalformedJSON: { Code: "MalformedJSON", Description: "The JSON was not well-formed or did not validate against our published format.", HTTPStatusCode: http.StatusBadRequest, @@ -1254,57 +1255,58 @@ func GetAPIError(code ErrorCode) APIError { return errorCodeResponse[code] } -// STSErrorCode type of error status. 
-type STSErrorCode int - -// STSError structure -type STSError struct { - Code string - Description string - HTTPStatusCode int -} - -// Error codes,list - http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithSAML.html -const ( - ErrSTSNone STSErrorCode = iota - ErrSTSAccessDenied - ErrSTSMissingParameter - ErrSTSInvalidParameterValue - ErrSTSInternalError -) - -type stsErrorCodeMap map[STSErrorCode]STSError - -//ToSTSErr code to err -func (e stsErrorCodeMap) ToSTSErr(errCode STSErrorCode) STSError { - apiErr, ok := e[errCode] - if !ok { - return e[ErrSTSInternalError] - } - return apiErr -} - -// StsErrCodes error code to STSError structure, these fields carry respective -// descriptions for all the error responses. -var StsErrCodes = stsErrorCodeMap{ - ErrSTSAccessDenied: { - Code: "AccessDenied", - Description: "Generating temporary credentials not allowed for this request.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrSTSMissingParameter: { - Code: "MissingParameter", - Description: "A required parameter for the specified action is not supplied.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrSTSInvalidParameterValue: { - Code: "InvalidParameterValue", - Description: "An invalid or out-of-range value was supplied for the input parameter.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrSTSInternalError: { - Code: "InternalError", - Description: "We encountered an internal error generating credentials, please try again.", - HTTPStatusCode: http.StatusInternalServerError, - }, -} +// +//// STSErrorCode type of error status. 
+//type STSErrorCode int +// +//// STSError structure +//type STSError struct { +// Code string +// Description string +// HTTPStatusCode int +//} +// +//// Error codes,list - http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithSAML.html +//const ( +// ErrSTSNone STSErrorCode = iota +// ErrSTSAccessDenied +// ErrSTSMissingParameter +// ErrSTSInvalidParameterValue +// ErrSTSInternalError +//) +// +//type stsErrorCodeMap map[STSErrorCode]STSError +// +////ToSTSErr code to err +//func (e stsErrorCodeMap) ToSTSErr(errCode STSErrorCode) STSError { +// apiErr, ok := e[errCode] +// if !ok { +// return e[ErrSTSInternalError] +// } +// return apiErr +//} +// +//// StsErrCodes error code to STSError structure, these fields carry respective +//// descriptions for all the error responses. +//var StsErrCodes = stsErrorCodeMap{ +// ErrSTSAccessDenied: { +// Code: "AccessDenied", +// Description: "Generating temporary credentials not allowed for this request.", +// HTTPStatusCode: http.StatusForbidden, +// }, +// ErrSTSMissingParameter: { +// Code: "MissingParameter", +// Description: "A required parameter for the specified action is not supplied.", +// HTTPStatusCode: http.StatusBadRequest, +// }, +// ErrSTSInvalidParameterValue: { +// Code: "InvalidParameterValue", +// Description: "An invalid or out-of-range value was supplied for the input parameter.", +// HTTPStatusCode: http.StatusBadRequest, +// }, +// ErrSTSInternalError: { +// Code: "InternalError", +// Description: "We encountered an internal error generating credentials, please try again.", +// HTTPStatusCode: http.StatusInternalServerError, +// }, +//} diff --git a/s3/handlers/services_errors.go b/s3/handlers/services_errors.go index 4f8e2c5be..95fdb2cd4 100644 --- a/s3/handlers/services_errors.go +++ b/s3/handlers/services_errors.go @@ -5,8 +5,11 @@ import "errors" var ( ErrSginVersionNotSupport = errors.New("sign version is not support") - // bucket + ErrInvalidArgument = errors.New("invalid 
argument") + + ErrInvalidBucketName = errors.New("bucket name is invalid") ErrBucketNotFound = errors.New("bucket is not found") ErrBucketAccessDenied = errors.New("bucket access denied. ") ErrSetBucketEmptyFailed = errors.New("set bucket empty failed. ") + ErrCreateBucket = errors.New("create bucket failed") ) diff --git a/s3/routers/handlerser.go b/s3/routers/handlerser.go index d4daef440..b41e8e7c6 100644 --- a/s3/routers/handlerser.go +++ b/s3/routers/handlerser.go @@ -7,5 +7,8 @@ import ( type Handlerser interface { Cors(handler http.Handler) http.Handler Sign(handler http.Handler) http.Handler - PutObjectHandler(w http.ResponseWriter, r *http.Request) + + PutBucketHandler(w http.ResponseWriter, r *http.Request) + + //PutObjectHandler(w http.ResponseWriter, r *http.Request) } diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 36fe43ced..0beae03d4 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -25,7 +25,10 @@ func (routers *Routers) Register() http.Handler { root.Use(routers.handlers.Cors, routers.handlers.Sign) bucket := root.PathPrefix("/{bucket}").Subrouter() - bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(routers.handlers.PutObjectHandler) + bucket.Methods(http.MethodPut).Path("/{bucket:.+}").HandlerFunc(routers.handlers.PutBucketHandler) + + //object + //bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(routers.handlers.PutObjectHandler) return root } diff --git a/s3/s3utils/utils.go b/s3/s3utils/utils.go new file mode 100644 index 000000000..532954cca --- /dev/null +++ b/s3/s3utils/utils.go @@ -0,0 +1,381 @@ +package s3utils + +import ( + "context" + "errors" + "fmt" + "github.com/google/uuid" + "regexp" + "strings" + "unicode/utf8" +) + +// GenericError - generic object layer error. +type GenericError struct { + Bucket string + Object string + VersionID string + Err error +} + +// Bucket related errors. + +// BucketNameInvalid - bucketname provided is invalid. 
+type BucketNameInvalid GenericError + +// Error returns string an error formatted as the given text. +func (e BucketNameInvalid) Error() string { + return "Bucket name invalid: " + e.Bucket +} + +// Object related errors. + +// ObjectNameInvalid - object name provided is invalid. +type ObjectNameInvalid GenericError + +// ObjectNameTooLong - object name too long. +type ObjectNameTooLong GenericError + +// ObjectNamePrefixAsSlash - object name has a slash as prefix. +type ObjectNamePrefixAsSlash GenericError + +// Error returns string an error formatted as the given text. +func (e ObjectNameInvalid) Error() string { + return "Object name invalid: " + e.Bucket + "/" + e.Object +} + +// Error returns string an error formatted as the given text. +func (e ObjectNameTooLong) Error() string { + return "Object name too long: " + e.Bucket + "/" + e.Object +} + +// Error returns string an error formatted as the given text. +func (e ObjectNamePrefixAsSlash) Error() string { + return "Object name contains forward slash as pefix: " + e.Bucket + "/" + e.Object +} + +// InvalidUploadIDKeyCombination - invalid upload id and key marker combination. +type InvalidUploadIDKeyCombination struct { + UploadIDMarker, KeyMarker string +} + +func (e InvalidUploadIDKeyCombination) Error() string { + return fmt.Sprintf("Invalid combination of uploadID marker '%s' and marker '%s'", e.UploadIDMarker, e.KeyMarker) +} + +// InvalidMarkerPrefixCombination - invalid marker and prefix combination. +type InvalidMarkerPrefixCombination struct { + Marker, Prefix string +} + +func (e InvalidMarkerPrefixCombination) Error() string { + return fmt.Sprintf("Invalid combination of marker '%s' and prefix '%s'", e.Marker, e.Prefix) +} + +// Multipart related errors. + +// MalformedUploadID malformed upload id. +type MalformedUploadID struct { + UploadID string +} + +func (e MalformedUploadID) Error() string { + return "Malformed upload id " + e.UploadID +} + +// InvalidUploadID invalid upload id. 
+type InvalidUploadID struct { + Bucket string + Object string + UploadID string +} + +func (e InvalidUploadID) Error() string { + return "Invalid upload id " + e.UploadID +} + +// InvalidPart One or more of the specified parts could not be found +type InvalidPart struct { + PartNumber int + ExpETag string + GotETag string +} + +func (e InvalidPart) Error() string { + return fmt.Sprintf("Specified part could not be found. PartNumber %d, Expected %s, got %s", + e.PartNumber, e.ExpETag, e.GotETag) +} + +// PartTooSmall - error if part size is less than 5MB. +type PartTooSmall struct { + PartSize int64 + PartNumber int + PartETag string +} + +func (e PartTooSmall) Error() string { + return fmt.Sprintf("Part size for %d should be at least 5MB", e.PartNumber) +} + +// PartTooBig returned if size of part is bigger than the allowed limit. +type PartTooBig struct{} + +func (e PartTooBig) Error() string { + return "Part size bigger than the allowed limit" +} + +// We support '.' with bucket names but we fallback to using path +// style requests instead for such buckets. +var ( + validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`) + validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) + ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) +) + +// Common checker for both stricter and basic validation. 
+func checkBucketNameCommon(bucketName string, strict bool) (err error) { + if strings.TrimSpace(bucketName) == "" { + return errors.New("Bucket name cannot be empty") + } + if len(bucketName) < 3 { + return errors.New("Bucket name cannot be shorter than 3 characters") + } + if len(bucketName) > 63 { + return errors.New("Bucket name cannot be longer than 63 characters") + } + if ipAddress.MatchString(bucketName) { + return errors.New("Bucket name cannot be an ip address") + } + if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") { + return errors.New("Bucket name contains invalid characters") + } + if strict { + if !validBucketNameStrict.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err + } + if !validBucketName.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err +} + +// CheckValidBucketName - checks if we have a valid input bucket name. +func CheckValidBucketName(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, false) +} + +// CheckValidBucketNameStrict - checks if we have a valid input bucket name. +// This is a stricter version. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html +func CheckValidBucketNameStrict(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, true) +} + +// Checks on GetObject arguments, bucket and object. +func CheckGetObjArgs(ctx context.Context, bucket, object string) error { + return checkBucketAndObjectNames(ctx, bucket, object) +} + +// Checks on DeleteObject arguments, bucket and object. +func CheckDelObjArgs(ctx context.Context, bucket, object string) error { + return checkBucketAndObjectNames(ctx, bucket, object) +} + +// Checks bucket and object name validity, returns nil if both are valid. 
+func checkBucketAndObjectNames(ctx context.Context, bucket, object string) error { + // Verify if bucket is valid. + if CheckValidBucketName(bucket) != nil { + return BucketNameInvalid{Bucket: bucket} + } + // Verify if object is valid. + if len(object) == 0 { + return ObjectNameInvalid{Bucket: bucket, Object: object} + } + if !IsValidObjectPrefix(object) { + return ObjectNameInvalid{Bucket: bucket, Object: object} + } + return nil +} + +// Checks for all ListObjects arguments validity. +func CheckListObjsArgs(ctx context.Context, bucket, prefix, marker string) error { + // Validates object prefix validity after bucket exists. + if !IsValidObjectPrefix(prefix) { + return ObjectNameInvalid{ + Bucket: bucket, + Object: prefix, + } + } + // Verify if marker has prefix. + if marker != "" && !strings.HasPrefix(marker, prefix) { + return InvalidMarkerPrefixCombination{ + Marker: marker, + Prefix: prefix, + } + } + return nil +} + +// Checks for all ListMultipartUploads arguments validity. +func CheckListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string) error { + if err := CheckListObjsArgs(ctx, bucket, prefix, keyMarker); err != nil { + return err + } + if uploadIDMarker != "" { + if strings.HasSuffix(keyMarker, SlashSeparator) { + return InvalidUploadIDKeyCombination{ + UploadIDMarker: uploadIDMarker, + KeyMarker: keyMarker, + } + } + if _, err := uuid.Parse(uploadIDMarker); err != nil { + return MalformedUploadID{ + UploadID: uploadIDMarker, + } + } + } + return nil +} + +// Checks for NewMultipartUpload arguments validity, also validates if bucket exists. +func CheckNewMultipartArgs(ctx context.Context, bucket, object string) error { + return checkObjectArgs(ctx, bucket, object) +} + +// Checks for PutObjectPart arguments validity, also validates if bucket exists. 
+func CheckPutObjectPartArgs(ctx context.Context, bucket, object string) error { + return checkObjectArgs(ctx, bucket, object) +} + +// Checks for ListParts arguments validity, also validates if bucket exists. +func CheckListPartsArgs(ctx context.Context, bucket, object string) error { + return checkObjectArgs(ctx, bucket, object) +} + +// Checks for CompleteMultipartUpload arguments validity, also validates if bucket exists. +func CheckCompleteMultipartArgs(ctx context.Context, bucket, object string) error { + return checkObjectArgs(ctx, bucket, object) +} + +// Checks for AbortMultipartUpload arguments validity, also validates if bucket exists. +func CheckAbortMultipartArgs(ctx context.Context, bucket, object string) error { + return checkObjectArgs(ctx, bucket, object) +} + +// Checks Object arguments validity, also validates if bucket exists. +func checkObjectArgs(ctx context.Context, bucket, object string) error { + if err := checkObjectNameForLengthAndSlash(bucket, object); err != nil { + return err + } + + // Validates object name validity after bucket exists. + if !IsValidObjectName(object) { + return ObjectNameInvalid{ + Bucket: bucket, + Object: object, + } + } + + return nil +} + +// Checks for PutObject arguments validity, also validates if bucket exists. +func CheckPutObjectArgs(ctx context.Context, bucket, object string) error { + if err := checkObjectNameForLengthAndSlash(bucket, object); err != nil { + return err + } + if len(object) == 0 || + !IsValidObjectPrefix(object) { + return ObjectNameInvalid{ + Bucket: bucket, + Object: object, + } + } + return nil +} + +// SlashSeparator - slash separator. +const SlashSeparator = "/" + +// IsValidObjectName verifies an object name in accordance with Amazon's +// requirements. It cannot exceed 1024 characters and must be a valid UTF8 +// string. 
+// +// See: +// http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +// +// You should avoid the following characters in a key name because of +// significant special handling for consistency across all +// applications. +// +// Rejects strings with following characters. +// +// - Backslash ("\") +// +// additionally minio does not support object names with trailing SlashSeparator. +func IsValidObjectName(object string) bool { + if len(object) == 0 { + return false + } + if strings.HasSuffix(object, SlashSeparator) { + return false + } + return IsValidObjectPrefix(object) +} + +// IsValidObjectPrefix verifies whether the prefix is a valid object name. +// Its valid to have a empty prefix. +func IsValidObjectPrefix(object string) bool { + if hasBadPathComponent(object) { + return false + } + if !utf8.ValidString(object) { + return false + } + if strings.Contains(object, `//`) { + return false + } + return true +} + +// checkObjectNameForLengthAndSlash -check for the validity of object name length and prefis as slash +func checkObjectNameForLengthAndSlash(bucket, object string) error { + // Check for the length of object name + if len(object) > 1024 { + return ObjectNameTooLong{ + Bucket: bucket, + Object: object, + } + } + // Check for slash as prefix in object name + if strings.HasPrefix(object, SlashSeparator) { + return ObjectNamePrefixAsSlash{ + Bucket: bucket, + Object: object, + } + } + return nil +} + +// Bad path components to be rejected by the path validity handler. +const ( + dotdotComponent = ".." + dotComponent = "." +) + +// Check if the incoming path has bad path components, +// such as ".." and "." 
+func hasBadPathComponent(path string) bool { + path = strings.TrimSpace(path) + for _, p := range strings.Split(path, SlashSeparator) { + switch strings.TrimSpace(p) { + case dotdotComponent: + return true + case dotComponent: + return true + } + } + return false +} diff --git a/s3/services/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go index 319c2eb44..948c99149 100644 --- a/s3/services/auth/check_handler_auth.go +++ b/s3/services/auth/check_handler_auth.go @@ -23,18 +23,18 @@ func (s *Service) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Re switch GetRequestAuthType(r) { case AuthTypeSigned, AuthTypePresigned: region := "" - if s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3); s3Err != handlers.ErrNone { + if s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3); s3Err != handlers.ErrCodeNone { return cred, s3Err } cred, s3Err = s.getReqAccessKeyV4(r, region, ServiceS3) default: - return cred, handlers.ErrSignatureVersionNotSupported + return cred, handlers.ErrCodeSignatureVersionNotSupported } - if s3Err != handlers.ErrNone { + if s3Err != handlers.ErrCodeNone { return cred, s3Err } - return cred, handlers.ErrNone + return cred, handlers.ErrCodeNone } func (s *Service) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error handlers.ErrorCode) { @@ -45,18 +45,18 @@ func (s *Service) ReqSignatureV4Verify(r *http.Request, region string, stype ser case isRequestPresignedSignatureV4(r): return s.doesPresignedSignatureMatch(sha256sum, r, region, stype) default: - return handlers.ErrAccessDenied + return handlers.ErrCodeAccessDenied } } // IsReqAuthenticated Verify if request has valid AWS Signature Version '4'. 
func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error handlers.ErrorCode) { - if errCode := s.ReqSignatureV4Verify(r, region, stype); errCode != handlers.ErrNone { + if errCode := s.ReqSignatureV4Verify(r, region, stype); errCode != handlers.ErrCodeNone { return errCode } clientETag, err := etag.FromContentMD5(r.Header) if err != nil { - return handlers.ErrInvalidDigest + return handlers.ErrCodeInvalidDigest } // Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned) @@ -66,13 +66,13 @@ func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio if sha256Sum, ok := r.Form[consts.AmzContentSha256]; ok && len(sha256Sum) > 0 { contentSHA256, err = hex.DecodeString(sha256Sum[0]) if err != nil { - return handlers.ErrContentSHA256Mismatch + return handlers.ErrCodeContentSHA256Mismatch } } } else if _, ok := r.Header[consts.AmzContentSha256]; !skipSHA256 && ok { contentSHA256, err = hex.DecodeString(r.Header.Get(consts.AmzContentSha256)) if err != nil || len(contentSHA256) == 0 { - return handlers.ErrContentSHA256Mismatch + return handlers.ErrCodeContentSHA256Mismatch } } @@ -80,39 +80,39 @@ func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio // The verification happens implicit during reading. 
reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1) if err != nil { - return handlers.ErrInternalError + return handlers.ErrCodeInternalError } r.Body = reader - return handlers.ErrNone + return handlers.ErrCodeNone } //// ValidateAdminSignature validate admin Signature //func (s *Service) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (Credentials, map[string]interface{}, bool, handlers.ErrorCode) { // var cred Credentials // var owner bool -// s3Err := handlers.ErrAccessDenied +// s3Err := handlers.ErrCodeAccessDenied // if _, ok := r.Header[consts.AmzContentSha256]; ok && // GetRequestAuthType(r) == AuthTypeSigned { // // We only support admin credentials to access admin APIs. // cred, s3Err = GetReqAccessKeyV4(r, region, ServiceS3) -// if s3Err != handlers.ErrNone { +// if s3Err != handlers.ErrCodeNone { // return cred, nil, owner, s3Err // } // // // we only support V4 (no presign) with auth body // s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3) // } -// if s3Err != handlers.ErrNone { +// if s3Err != handlers.ErrCodeNone { // return cred, nil, owner, s3Err // } // -// return cred, nil, owner, handlers.ErrNone +// return cred, nil, owner, handlers.ErrCodeNone //} //// //func (s *Service) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err handlers.ErrorCode) { // switch GetRequestAuthType(r) { // case AuthTypeUnknown: -// s3Err = handlers.ErrSignatureVersionNotSupported +// s3Err = handlers.ErrCodeSignatureVersionNotSupported // case AuthTypeSignedV2, AuthTypePresignedV2: // cred, owner, s3Err = s.getReqAccessKeyV2(r) // case AuthTypeStreamingSigned, AuthTypePresigned, AuthTypeSigned: diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go index ae57b6dd9..32ed43211 100644 --- a/s3/services/auth/signature-v4-parser.go +++ b/s3/services/auth/signature-v4-parser.go @@ -53,10 +53,10 @@ func (c credentialHeader) 
getScope() string { func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, aec handlers.ErrorCode) { creds := strings.SplitN(strings.TrimSpace(credElement), "=", 2) if len(creds) != 2 { - return ch, handlers.ErrMissingFields + return ch, handlers.ErrCodeMissingFields } if creds[0] != "Credential" { - return ch, handlers.ErrMissingCredTag + return ch, handlers.ErrCodeMissingCredTag } credElements := strings.Split(strings.TrimSpace(creds[1]), consts.SlashSeparator) if len(credElements) < 5 { @@ -64,7 +64,7 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) } accessKey := strings.Join(credElements[:len(credElements)-4], consts.SlashSeparator) // The access key may contain one or more `/` //if !IsAccessKeyValid(accessKey) { - // return ch, handlers.ErrInvalidAccessKeyID + // return ch, handlers.ErrCodeInvalidAccessKeyID //} // Save access key id. cred := credentialHeader{ @@ -74,7 +74,7 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) var e error cred.scope.date, e = time.Parse(yyyymmdd, credElements[0]) if e != nil { - return ch, handlers.ErrAuthorizationHeaderMalformed + return ch, handlers.ErrCodeAuthorizationHeaderMalformed } cred.scope.region = credElements[1] @@ -89,53 +89,53 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) } // Should validate region, only if region is set. 
if !isValidRegion(sRegion, region) { - return ch, handlers.ErrAuthorizationHeaderMalformed + return ch, handlers.ErrCodeAuthorizationHeaderMalformed } if credElements[2] != string(stype) { //switch stype { //case ServiceSTS: - // return ch, handlers.ErrAuthorizationHeaderMalformed + // return ch, handlers.ErrCodeAuthorizationHeaderMalformed //} - return ch, handlers.ErrAuthorizationHeaderMalformed + return ch, handlers.ErrCodeAuthorizationHeaderMalformed } cred.scope.service = credElements[2] if credElements[3] != "aws4_request" { - return ch, handlers.ErrAuthorizationHeaderMalformed + return ch, handlers.ErrCodeAuthorizationHeaderMalformed } cred.scope.request = credElements[3] - return cred, handlers.ErrNone + return cred, handlers.ErrCodeNone } // Parse signature from signature tag. func parseSignature(signElement string) (string, handlers.ErrorCode) { signFields := strings.Split(strings.TrimSpace(signElement), "=") if len(signFields) != 2 { - return "", handlers.ErrMissingFields + return "", handlers.ErrCodeMissingFields } if signFields[0] != "Signature" { - return "", handlers.ErrMissingSignTag + return "", handlers.ErrCodeMissingSignTag } if signFields[1] == "" { - return "", handlers.ErrMissingFields + return "", handlers.ErrCodeMissingFields } signature := signFields[1] - return signature, handlers.ErrNone + return signature, handlers.ErrCodeNone } // Parse slice of signed headers from signed headers tag. 
func parseSignedHeader(signedHdrElement string) ([]string, handlers.ErrorCode) { signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") if len(signedHdrFields) != 2 { - return nil, handlers.ErrMissingFields + return nil, handlers.ErrCodeMissingFields } if signedHdrFields[0] != "SignedHeaders" { - return nil, handlers.ErrMissingSignHeadersTag + return nil, handlers.ErrCodeMissingSignHeadersTag } if signedHdrFields[1] == "" { - return nil, handlers.ErrMissingFields + return nil, handlers.ErrCodeMissingFields } signedHeaders := strings.Split(signedHdrFields[1], ";") - return signedHeaders, handlers.ErrNone + return signedHeaders, handlers.ErrCodeNone } // signValues data type represents structured form of AWS Signature V4 header. @@ -166,23 +166,23 @@ func doesV4PresignParamsExist(query url.Values) handlers.ErrorCode { v4PresignQueryParams := []string{consts.AmzAlgorithm, consts.AmzCredential, consts.AmzSignature, consts.AmzDate, consts.AmzSignedHeaders, consts.AmzExpires} for _, v4PresignQueryParam := range v4PresignQueryParams { if _, ok := query[v4PresignQueryParam]; !ok { - return handlers.ErrInvalidQueryParams + return handlers.ErrCodeInvalidQueryParams } } - return handlers.ErrNone + return handlers.ErrCodeNone } // Parses all the presigned signature values into separate elements. func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, aec handlers.ErrorCode) { // verify whether the required query params exist. aec = doesV4PresignParamsExist(query) - if aec != handlers.ErrNone { + if aec != handlers.ErrCodeNone { return psv, aec } // Verify if the query algorithm is supported or not. if query.Get(consts.AmzAlgorithm) != signV4Algorithm { - return psv, handlers.ErrAuthorizationHeaderMalformed + return psv, handlers.ErrCodeAuthorizationHeaderMalformed } // Initialize signature version '4' structured header. 
@@ -190,7 +190,7 @@ func parsePreSignV4(query url.Values, region string, stype serviceType) (psv pre // Save credential. preSignV4Values.Credential, aec = parseCredentialHeader("Credential="+query.Get(consts.AmzCredential), region, stype) - if aec != handlers.ErrNone { + if aec != handlers.ErrCodeNone { return psv, aec } @@ -198,38 +198,38 @@ func parsePreSignV4(query url.Values, region string, stype serviceType) (psv pre // Save date in native time.Time. preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get(consts.AmzDate)) if e != nil { - return psv, handlers.ErrAuthorizationHeaderMalformed + return psv, handlers.ErrCodeAuthorizationHeaderMalformed } // Save expires in native time.Duration. preSignV4Values.Expires, e = time.ParseDuration(query.Get(consts.AmzExpires) + "s") if e != nil { - return psv, handlers.ErrAuthorizationHeaderMalformed + return psv, handlers.ErrCodeAuthorizationHeaderMalformed } if preSignV4Values.Expires < 0 { - return psv, handlers.ErrAuthorizationHeaderMalformed + return psv, handlers.ErrCodeAuthorizationHeaderMalformed } // Check if Expiry time is less than 7 days (value in seconds). if preSignV4Values.Expires.Seconds() > 604800 { - return psv, handlers.ErrAuthorizationHeaderMalformed + return psv, handlers.ErrCodeAuthorizationHeaderMalformed } // Save signed headers. preSignV4Values.SignedHeaders, aec = parseSignedHeader("SignedHeaders=" + query.Get(consts.AmzSignedHeaders)) - if aec != handlers.ErrNone { + if aec != handlers.ErrCodeNone { return psv, aec } // Save signature. preSignV4Values.Signature, aec = parseSignature("Signature=" + query.Get(consts.AmzSignature)) - if aec != handlers.ErrNone { + if aec != handlers.ErrCodeNone { return psv, aec } // Return structed form of signature query string. - return preSignV4Values, handlers.ErrNone + return preSignV4Values, handlers.ErrCodeNone } // Parses signature version '4' header of the following form. 
@@ -244,19 +244,19 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues // to make parsing easier. v4Auth = strings.ReplaceAll(v4Auth, " ", "") if v4Auth == "" { - return sv, handlers.ErrAuthHeaderEmpty + return sv, handlers.ErrCodeAuthHeaderEmpty } // Verify if the header algorithm is supported or not. if !strings.HasPrefix(v4Auth, signV4Algorithm) { - return sv, handlers.ErrSignatureVersionNotSupported + return sv, handlers.ErrCodeSignatureVersionNotSupported } // Strip off the Algorithm prefix. v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - return sv, handlers.ErrMissingFields + return sv, handlers.ErrCodeMissingFields } // Initialize signature version '4' structured header. @@ -265,37 +265,37 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues var s3Err handlers.ErrorCode // Save credentail values. signV4Values.Credential, s3Err = parseCredentialHeader(strings.TrimSpace(credElement), region, stype) - if s3Err != handlers.ErrNone { + if s3Err != handlers.ErrCodeNone { return sv, s3Err } // Save signed headers. signV4Values.SignedHeaders, s3Err = parseSignedHeader(authFields[1]) - if s3Err != handlers.ErrNone { + if s3Err != handlers.ErrCodeNone { return sv, s3Err } // Save signature. signV4Values.Signature, s3Err = parseSignature(authFields[2]) - if s3Err != handlers.ErrNone { + if s3Err != handlers.ErrCodeNone { return sv, s3Err } // Return the structure here. - return signV4Values, handlers.ErrNone + return signV4Values, handlers.ErrCodeNone } func (s *Service) getReqAccessKeyV4(r *http.Request, region string, stype serviceType) (*handlers.AccessKeyRecord, handlers.ErrorCode) { ch, s3Err := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) - if s3Err != handlers.ErrNone { + if s3Err != handlers.ErrCodeNone { // Strip off the Algorithm prefix. 
v4Auth := strings.TrimPrefix(r.Header.Get("Authorization"), signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - return &handlers.AccessKeyRecord{}, handlers.ErrMissingFields + return &handlers.AccessKeyRecord{}, handlers.ErrCodeMissingFields } ch, s3Err = parseCredentialHeader(authFields[0], region, stype) - if s3Err != handlers.ErrNone { + if s3Err != handlers.ErrCodeNone { return &handlers.AccessKeyRecord{}, s3Err } } @@ -303,7 +303,7 @@ func (s *Service) getReqAccessKeyV4(r *http.Request, region string, stype servic // check accessKey. record, err := s.accessKeySvc.Get(ch.accessKey) if err != nil { - return &handlers.AccessKeyRecord{}, handlers.ErrNoSuchUserPolicy + return &handlers.AccessKeyRecord{}, handlers.ErrCodeNoSuchUserPolicy } - return record, handlers.ErrNone + return record, handlers.ErrCodeNone } diff --git a/s3/services/auth/signature-v4-utils.go b/s3/services/auth/signature-v4-utils.go index 25b8b48c5..6128cc233 100644 --- a/s3/services/auth/signature-v4-utils.go +++ b/s3/services/auth/signature-v4-utils.go @@ -64,9 +64,9 @@ func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, reqHeaders := r.Header reqQueries := r.Form // find whether "host" is part of list of signed headers. - // if not return ErrUnsignedHeaders. "host" is mandatory. + // if not return ErrCodeUnsignedHeaders. "host" is mandatory. if !contains(signedHeaders, "host") { - return nil, handlers.ErrUnsignedHeaders + return nil, handlers.ErrCodeUnsignedHeaders } extractedSignedHeaders := make(http.Header) for _, header := range signedHeaders { @@ -116,10 +116,10 @@ func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, // calculation to be compatible with such clients. 
extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) default: - return nil, handlers.ErrUnsignedHeaders + return nil, handlers.ErrCodeUnsignedHeaders } } - return extractedSignedHeaders, handlers.ErrNone + return extractedSignedHeaders, handlers.ErrCodeNone } // Returns SHA256 for calculating canonical-request. diff --git a/s3/services/auth/signature-v4.go b/s3/services/auth/signature-v4.go index 4b06b49f8..56ec46442 100644 --- a/s3/services/auth/signature-v4.go +++ b/s3/services/auth/signature-v4.go @@ -57,37 +57,37 @@ func compareSignatureV4(sig1, sig2 string) bool { // DoesPresignedSignatureMatch - Verify queryString headers with presigned signature // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html // -// returns handlers.ErrNone if the signature matches. +// returns handlers.ErrCodeNone if the signature matches. func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) handlers.ErrorCode { // Copy request req := *r // Parse request query string. pSignValues, errCode := parsePreSignV4(req.Form, region, stype) - if errCode != handlers.ErrNone { + if errCode != handlers.ErrCodeNone { return errCode } // get access_info by accessKey cred, err := s.accessKeySvc.Get(pSignValues.Credential.accessKey) if err != nil { - return handlers.ErrNoSuchUserPolicy + return handlers.ErrCodeNoSuchUserPolicy } // Extract all the signed headers along with its values. extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r) - if errCode != handlers.ErrNone { + if errCode != handlers.ErrCodeNone { return errCode } // If the host which signed the request is slightly ahead in time (by less than MaxSkewTime) the // request should still be allowed. 
if pSignValues.Date.After(time.Now().UTC().Add(consts.MaxSkewTime)) { - return handlers.ErrRequestNotReadyYet + return handlers.ErrCodeRequestNotReadyYet } if time.Now().UTC().Sub(pSignValues.Date) > pSignValues.Expires { - return handlers.ErrExpiredPresignRequest + return handlers.ErrCodeExpiredPresignRequest } // Save the date and expires. @@ -138,23 +138,23 @@ func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Verify if date query is same. if req.Form.Get(consts.AmzDate) != query.Get(consts.AmzDate) { - return handlers.ErrSignatureDoesNotMatch + return handlers.ErrCodeSignatureDoesNotMatch } // Verify if expires query is same. if req.Form.Get(consts.AmzExpires) != query.Get(consts.AmzExpires) { - return handlers.ErrSignatureDoesNotMatch + return handlers.ErrCodeSignatureDoesNotMatch } // Verify if signed headers query is same. if req.Form.Get(consts.AmzSignedHeaders) != query.Get(consts.AmzSignedHeaders) { - return handlers.ErrSignatureDoesNotMatch + return handlers.ErrCodeSignatureDoesNotMatch } // Verify if credential query is same. if req.Form.Get(consts.AmzCredential) != query.Get(consts.AmzCredential) { - return handlers.ErrSignatureDoesNotMatch + return handlers.ErrCodeSignatureDoesNotMatch } // Verify if sha256 payload query is same. if clntHashedPayload != "" && clntHashedPayload != query.Get(consts.AmzContentSha256) { - return handlers.ErrContentSHA256Mismatch + return handlers.ErrCodeContentSHA256Mismatch } // not check SessionToken. //// Verify if security token is correct. @@ -179,15 +179,15 @@ func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Verify signature. 
if !compareSignatureV4(req.Form.Get(consts.AmzSignature), newSignature) { - return handlers.ErrSignatureDoesNotMatch + return handlers.ErrCodeSignatureDoesNotMatch } - return handlers.ErrNone + return handlers.ErrCodeNone } // DoesSignatureMatch - Verify authorization header with calculated header in accordance with // - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html // -// returns handlers.ErrNone if signature matches. +// returns handlers.ErrCodeNone if signature matches. func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) handlers.ErrorCode { // Copy request. req := *r @@ -197,33 +197,33 @@ func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, regi // Parse signature version '4' header. signV4Values, errCode := parseSignV4(v4Auth, region, stype) - if errCode != handlers.ErrNone { + if errCode != handlers.ErrCodeNone { return errCode } // Extract all the signed headers along with its values. extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) - if errCode != handlers.ErrNone { + if errCode != handlers.ErrCodeNone { return errCode } cred, err := s.accessKeySvc.Get(signV4Values.Credential.accessKey) if err != nil { - return handlers.ErrNoSuchUserPolicy + return handlers.ErrCodeNoSuchUserPolicy } // Extract date, if not present throw error. var date string if date = req.Header.Get(consts.AmzDate); date == "" { if date = r.Header.Get(consts.Date); date == "" { - return handlers.ErrMissingDateHeader + return handlers.ErrCodeMissingDateHeader } } // Parse date header. t, e := time.Parse(iso8601Format, date) if e != nil { - return handlers.ErrAuthorizationHeaderMalformed + return handlers.ErrCodeAuthorizationHeaderMalformed } // Query string. @@ -244,11 +244,11 @@ func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, regi // Verify if signature match. 
if !compareSignatureV4(newSignature, signV4Values.Signature) { - return handlers.ErrSignatureDoesNotMatch + return handlers.ErrCodeSignatureDoesNotMatch } // Return error none. - return handlers.ErrNone + return handlers.ErrCodeNone } //// getScope generate a string of a specific date, an AWS region, and a service. From 3a572cb4964979febf7a4f77dd8dea0a01b4bbe1 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Thu, 17 Aug 2023 17:03:47 +0800 Subject: [PATCH 038/139] feat: add more bucket api --- s3/apierrors/errors.go | 80 -- s3/apierrors/s3_error.go | 44 -- s3/apierrors/s3api_errors.go | 1310 -------------------------------- s3/handlers/errors.go | 95 +-- s3/handlers/handlers.go | 220 +++++- s3/handlers/request.go | 42 +- s3/handlers/response_acl.go | 49 ++ s3/handlers/services.go | 2 + s3/handlers/services_errors.go | 7 +- s3/routers/handlerser.go | 5 + s3/routers/routers.go | 5 + s3/services/bucket/service.go | 6 +- 12 files changed, 346 insertions(+), 1519 deletions(-) delete mode 100644 s3/apierrors/errors.go delete mode 100644 s3/apierrors/s3_error.go delete mode 100644 s3/apierrors/s3api_errors.go create mode 100644 s3/handlers/response_acl.go diff --git a/s3/apierrors/errors.go b/s3/apierrors/errors.go deleted file mode 100644 index e8870e73d..000000000 --- a/s3/apierrors/errors.go +++ /dev/null @@ -1,80 +0,0 @@ -package apierrors - -//import ( -// "context" -// "github.com/yann-y/fds/internal/lock" -// "github.com/yann-y/fds/internal/store" -// "github.com/yann-y/fds/internal/utils/hash" -// "github.com/yann-y/fds/pkg/s3utils" -// "golang.org/x/xerrors" -// "net/url" -//) -// -//// NotImplemented If a feature is not implemented -//type NotImplemented struct { -// Message string -//} -// -//// ContextCanceled returns whether a context is canceled. 
-//func ContextCanceled(ctx context.Context) bool { -// select { -// case <-ctx.Done(): -// return true -// default: -// return false -// } -//} -// -//func ToApiError(ctx context.Context, err error) ErrorCode { -// if ContextCanceled(ctx) { -// if ctx.Err() == context.Canceled { -// return ErrClientDisconnected -// } -// } -// errCode := ErrInternalError -// switch err.(type) { -// case lock.OperationTimedOut: -// errCode = ErrOperationTimedOut -// case hash.SHA256Mismatch: -// errCode = ErrContentSHA256Mismatch -// case hash.BadDigest: -// errCode = ErrBadDigest -// case store.BucketNotFound: -// errCode = ErrNoSuchBucket -// case store.BucketPolicyNotFound: -// errCode = ErrNoSuchBucketPolicy -// case store.BucketTaggingNotFound: -// errCode = ErrBucketTaggingNotFound -// case s3utils.BucketNameInvalid: -// errCode = ErrInvalidBucketName -// case s3utils.ObjectNameInvalid: -// errCode = ErrInvalidObjectName -// case s3utils.ObjectNameTooLong: -// errCode = ErrKeyTooLongError -// case s3utils.ObjectNamePrefixAsSlash: -// errCode = ErrInvalidObjectNamePrefixSlash -// case s3utils.InvalidUploadIDKeyCombination: -// errCode = ErrNotImplemented -// case s3utils.InvalidMarkerPrefixCombination: -// errCode = ErrNotImplemented -// case s3utils.MalformedUploadID: -// errCode = ErrNoSuchUpload -// case s3utils.InvalidUploadID: -// errCode = ErrNoSuchUpload -// case s3utils.InvalidPart: -// errCode = ErrInvalidPart -// case s3utils.PartTooSmall: -// errCode = ErrEntityTooSmall -// case s3utils.PartTooBig: -// errCode = ErrEntityTooLarge -// case url.EscapeError: -// errCode = ErrInvalidObjectName -// default: -// if xerrors.Is(err, store.ErrObjectNotFound) { -// errCode = ErrNoSuchKey -// } else if xerrors.Is(err, store.ErrBucketNotEmpty) { -// errCode = ErrBucketNotEmpty -// } -// } -// return errCode -//} diff --git a/s3/apierrors/s3_error.go b/s3/apierrors/s3_error.go deleted file mode 100644 index c440daa24..000000000 --- a/s3/apierrors/s3_error.go +++ /dev/null @@ 
-1,44 +0,0 @@ -package apierrors - -// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html -var s3ErrorResponseMap = map[string]string{ - "AccessDenied": "Access Denied.", - "BadDigest": "The Content-Md5 you specified did not match what we received.", - "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", - "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", - "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", - "InternalError": "We encountered an internal error, please try again.", - "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", - "InvalidBucketName": "The specified bucket is not valid.", - "InvalidDigest": "The Content-Md5 you specified is not valid.", - "InvalidRange": "The requested range is not satisfiable", - "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", - "MissingContentLength": "You must provide the Content-Length HTTP header.", - "MissingContentMD5": "Missing required header for this request: Content-Md5.", - "MissingRequestBodyError": "Request body is empty.", - "NoSuchBucket": "The specified bucket does not exist.", - "NoSuchBucketPolicy": "The bucket policy does not exist", - "NoSuchKey": "The specified key does not exist.", - "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", - "NotImplemented": "A header you provided implies functionality that is not implemented", - "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", - "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", - "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. 
Check your key and signing method.", - "MethodNotAllowed": "The specified method is not allowed against this resource.", - "InvalidPart": "One or more of the specified parts could not be found.", - "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.", - "InvalidObjectState": "The operation is not valid for the current state of the object.", - "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", - "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", - "BucketNotEmpty": "The bucket you tried to delete is not empty", - "AllAccessDisabled": "All access to this bucket has been disabled.", - "MalformedPolicy": "Policy has invalid resource.", - "MissingFields": "Missing fields in request.", - "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", - "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", - "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", - "InvalidDuration": "Duration provided in the request is invalid.", - "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", - // Add new API errors here. 
- "NoSuchCORSConfiguration": "The CORS configuration does not exist", -} diff --git a/s3/apierrors/s3api_errors.go b/s3/apierrors/s3api_errors.go deleted file mode 100644 index 39505c5f5..000000000 --- a/s3/apierrors/s3api_errors.go +++ /dev/null @@ -1,1310 +0,0 @@ -package apierrors - -import ( - "encoding/xml" - "fmt" - "net/http" -) - -// APIError structure -type APIError struct { - Code string - Description string - HTTPStatusCode int -} - -// RESTErrorResponse - error response format -type RESTErrorResponse struct { - XMLName xml.Name `xml:"Error" json:"-"` - Code string `xml:"Code" json:"Code"` - Message string `xml:"Message" json:"Message"` - Resource string `xml:"Resource" json:"Resource"` - RequestID string `xml:"RequestId" json:"RequestId"` - Key string `xml:"Key,omitempty" json:"Key,omitempty"` - BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"` -} - -// Error - Returns S3 error string. -func (e RESTErrorResponse) Error() string { - if e.Message == "" { - msg, ok := s3ErrorResponseMap[e.Code] - if !ok { - msg = fmt.Sprintf("Error response code %s.", e.Code) - } - return msg - } - return e.Message -} - -// ErrorCode type of error status. 
-type ErrorCode int - -// Error codes, non exhaustive list - http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html -const ( - ErrNone ErrorCode = iota - ErrAccessDenied - ErrBadDigest - ErrEntityTooSmall - ErrEntityTooLarge - ErrIncompleteBody - ErrInternalError - ErrInvalidAccessKeyID - ErrAccessKeyDisabled - ErrInvalidBucketName - ErrInvalidDigest - ErrInvalidRange - ErrInvalidRangePartNumber - ErrInvalidCopyPartRange - ErrInvalidCopyPartRangeSource - ErrInvalidMaxKeys - ErrInvalidEncodingMethod - ErrInvalidMaxUploads - ErrInvalidMaxParts - ErrInvalidPartNumberMarker - ErrInvalidRequestBody - ErrInvalidCopySource - ErrInvalidMetadataDirective - ErrInvalidCopyDest - ErrInvalidPolicyDocument - ErrInvalidObjectState - ErrMalformedXML - ErrMissingContentLength - ErrMissingContentMD5 - ErrMissingRequestBodyError - ErrMissingSecurityHeader - ErrNoSuchUser - ErrUserAlreadyExists - ErrNoSuchUserPolicy - ErrUserPolicyAlreadyExists - ErrNoSuchBucket - ErrNoSuchBucketPolicy - ErrNoSuchLifecycleConfiguration - ErrNoSuchCORSConfiguration - ErrNoSuchWebsiteConfiguration - ErrReplicationConfigurationNotFoundError - ErrReplicationNeedsVersioningError - ErrReplicationBucketNeedsVersioningError - ErrObjectRestoreAlreadyInProgress - ErrNoSuchKey - ErrNoSuchUpload - ErrInvalidVersionID - ErrNoSuchVersion - ErrNotImplemented - ErrPreconditionFailed - ErrRequestTimeTooSkewed - ErrSignatureDoesNotMatch - ErrMethodNotAllowed - ErrInvalidPart - ErrInvalidPartOrder - ErrAuthorizationHeaderMalformed - ErrMalformedDate - ErrMalformedPOSTRequest - ErrPOSTFileRequired - ErrSignatureVersionNotSupported - ErrBucketNotEmpty - ErrAllAccessDisabled - ErrMalformedPolicy - ErrMissingFields - ErrMissingCredTag - ErrCredMalformed - ErrInvalidRegion - - ErrMissingSignTag - ErrMissingSignHeadersTag - - ErrAuthHeaderEmpty - ErrExpiredPresignRequest - ErrRequestNotReadyYet - ErrUnsignedHeaders - ErrMissingDateHeader - - ErrBucketAlreadyOwnedByYou - ErrInvalidDuration - ErrBucketAlreadyExists 
- ErrMetadataTooLarge - ErrUnsupportedMetadata - - ErrSlowDown - ErrBadRequest - ErrKeyTooLongError - ErrInvalidBucketObjectLockConfiguration - ErrObjectLockConfigurationNotAllowed - ErrNoSuchObjectLockConfiguration - ErrObjectLocked - ErrInvalidRetentionDate - ErrPastObjectLockRetainDate - ErrUnknownWORMModeDirective - ErrBucketTaggingNotFound - ErrObjectLockInvalidHeaders - ErrInvalidTagDirective - // Add new error codes here. - - // SSE-S3 related API errors - ErrInvalidEncryptionMethod - ErrInvalidQueryParams - ErrNoAccessKey - ErrInvalidToken - - // Bucket notification related errors. - ErrEventNotification - ErrARNNotification - ErrRegionNotification - ErrOverlappingFilterNotification - ErrFilterNameInvalid - ErrFilterNamePrefix - ErrFilterNameSuffix - ErrFilterValueInvalid - ErrOverlappingConfigs - - // S3 extended errors. - ErrContentSHA256Mismatch - - // Add new extended error codes here. - ErrInvalidObjectName - ErrInvalidObjectNamePrefixSlash - ErrClientDisconnected - ErrOperationTimedOut - ErrOperationMaxedOut - ErrInvalidRequest - ErrIncorrectContinuationToken - ErrInvalidFormatAccessKey - - // S3 Select Errors - ErrEmptyRequestBody - ErrUnsupportedFunction - ErrInvalidExpressionType - ErrBusy - ErrUnauthorizedAccess - ErrExpressionTooLong - ErrIllegalSQLFunctionArgument - ErrInvalidKeyPath - ErrInvalidCompressionFormat - ErrInvalidFileHeaderInfo - ErrInvalidJSONType - ErrInvalidQuoteFields - ErrInvalidRequestParameter - ErrInvalidDataType - ErrInvalidTextEncoding - ErrInvalidDataSource - ErrInvalidTableAlias - ErrMissingRequiredParameter - ErrObjectSerializationConflict - ErrUnsupportedSQLOperation - ErrUnsupportedSQLStructure - ErrUnsupportedSyntax - ErrUnsupportedRangeHeader - ErrLexerInvalidChar - ErrLexerInvalidOperator - ErrLexerInvalidLiteral - ErrLexerInvalidIONLiteral - ErrParseExpectedDatePart - ErrParseExpectedKeyword - ErrParseExpectedTokenType - ErrParseExpected2TokenTypes - ErrParseExpectedNumber - 
ErrParseExpectedRightParenBuiltinFunctionCall - ErrParseExpectedTypeName - ErrParseExpectedWhenClause - ErrParseUnsupportedToken - ErrParseUnsupportedLiteralsGroupBy - ErrParseExpectedMember - ErrParseUnsupportedSelect - ErrParseUnsupportedCase - ErrParseUnsupportedCaseClause - ErrParseUnsupportedAlias - ErrParseUnsupportedSyntax - ErrParseUnknownOperator - ErrParseMissingIdentAfterAt - ErrParseUnexpectedOperator - ErrParseUnexpectedTerm - ErrParseUnexpectedToken - ErrParseUnexpectedKeyword - ErrParseExpectedExpression - ErrParseExpectedLeftParenAfterCast - ErrParseExpectedLeftParenValueConstructor - ErrParseExpectedLeftParenBuiltinFunctionCall - ErrParseExpectedArgumentDelimiter - ErrParseCastArity - ErrParseInvalidTypeParam - ErrParseEmptySelect - ErrParseSelectMissingFrom - ErrParseExpectedIdentForGroupName - ErrParseExpectedIdentForAlias - ErrParseUnsupportedCallWithStar - ErrParseNonUnaryAgregateFunctionCall - ErrParseMalformedJoin - ErrParseExpectedIdentForAt - ErrParseAsteriskIsNotAloneInSelectList - ErrParseCannotMixSqbAndWildcardInSelectList - ErrParseInvalidContextForWildcardInSelectList - ErrIncorrectSQLFunctionArgumentType - ErrValueParseFailure - ErrEvaluatorInvalidArguments - ErrIntegerOverflow - ErrLikeInvalidInputs - ErrCastFailed - ErrInvalidCast - ErrEvaluatorInvalidTimestampFormatPattern - ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing - ErrEvaluatorTimestampFormatPatternDuplicateFields - ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch - ErrEvaluatorUnterminatedTimestampFormatPatternToken - ErrEvaluatorInvalidTimestampFormatPatternToken - ErrEvaluatorInvalidTimestampFormatPatternSymbol - ErrEvaluatorBindingDoesNotExist - ErrMissingHeaders - ErrInvalidColumnIndex - ErrPostPolicyConditionInvalidFormat - - ErrMalformedJSON -) - -// error code to APIError structure, these fields carry respective -// descriptions for all the error responses. 
-var errorCodeResponse = map[ErrorCode]APIError{ - ErrInvalidCopyDest: { - Code: "InvalidRequest", - Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidCopySource: { - Code: "InvalidArgument", - Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidMetadataDirective: { - Code: "InvalidArgument", - Description: "Unknown metadata directive.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidRequestBody: { - Code: "InvalidArgument", - Description: "Body shouldn't be set for this request.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidMaxUploads: { - Code: "InvalidArgument", - Description: "Argument max-uploads must be an integer between 0 and 2147483647", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidMaxKeys: { - Code: "InvalidArgument", - Description: "Argument maxKeys must be an integer between 0 and 2147483647", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidEncodingMethod: { - Code: "InvalidArgument", - Description: "Invalid Encoding Method specified in Request", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidMaxParts: { - Code: "InvalidArgument", - Description: "Part number must be an integer between 1 and 10000, inclusive", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidPartNumberMarker: { - Code: "InvalidArgument", - Description: "Argument partNumberMarker must be an integer.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidPolicyDocument: { - Code: "InvalidPolicyDocument", - Description: "The content of the form does not meet the conditions specified in the policy document.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrAccessDenied: { - Code: "AccessDenied", - Description: 
"Access Denied.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrBadDigest: { - Code: "BadDigest", - Description: "The Content-Md5 you specified did not match what we received.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrEntityTooSmall: { - Code: "EntityTooSmall", - Description: "Your proposed upload is smaller than the minimum allowed object size.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrEntityTooLarge: { - Code: "EntityTooLarge", - Description: "Your proposed upload exceeds the maximum allowed object size.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrIncompleteBody: { - Code: "IncompleteBody", - Description: "You did not provide the number of bytes specified by the Content-Length HTTP header.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInternalError: { - Code: "InternalError", - Description: "We encountered an internal error, please try again.", - HTTPStatusCode: http.StatusInternalServerError, - }, - ErrInvalidAccessKeyID: { - Code: "InvalidAccessKeyId", - Description: "The Access Key Id you provided does not exist in our records.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrAccessKeyDisabled: { - Code: "InvalidAccessKeyId", - Description: "Your account is disabled; please contact your administrator.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrInvalidBucketName: { - Code: "InvalidBucketName", - Description: "The specified bucket is not valid.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidDigest: { - Code: "InvalidDigest", - Description: "The Content-Md5 you specified is not valid.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidRange: { - Code: "InvalidRange", - Description: "The requested range is not satisfiable", - HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - ErrInvalidRangePartNumber: { - Code: "InvalidRequest", - Description: "Cannot specify both Range header and partNumber query parameter", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrMalformedXML: { 
- Code: "MalformedXML", - Description: "The XML you provided was not well-formed or did not validate against our published schema.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrMissingContentLength: { - Code: "MissingContentLength", - Description: "You must provide the Content-Length HTTP header.", - HTTPStatusCode: http.StatusLengthRequired, - }, - ErrMissingContentMD5: { - Code: "MissingContentMD5", - Description: "Missing required header for this request: Content-Md5.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrMissingSecurityHeader: { - Code: "MissingSecurityHeader", - Description: "Your request was missing a required header", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrMissingRequestBodyError: { - Code: "MissingRequestBodyError", - Description: "Request body is empty.", - HTTPStatusCode: http.StatusLengthRequired, - }, - ErrNoSuchBucket: { - Code: "NoSuchBucket", - Description: "The specified bucket does not exist", - HTTPStatusCode: http.StatusNotFound, - }, - ErrNoSuchBucketPolicy: { - Code: "NoSuchBucketPolicy", - Description: "The bucket policy does not exist", - HTTPStatusCode: http.StatusNotFound, - }, - ErrNoSuchLifecycleConfiguration: { - Code: "NoSuchLifecycleConfiguration", - Description: "The lifecycle configuration does not exist", - HTTPStatusCode: http.StatusNotFound, - }, - ErrNoSuchUser: { - Code: "NoSuchUser", - Description: "The specified user does not exist", - HTTPStatusCode: http.StatusConflict, - }, - ErrUserAlreadyExists: { - Code: "UserAlreadyExists", - Description: "The request was rejected because it attempted to create a resource that already exists .", - HTTPStatusCode: http.StatusConflict, - }, - ErrNoSuchUserPolicy: { - Code: "NoSuchUserPolicy", - Description: "The specified user policy does not exist", - HTTPStatusCode: http.StatusConflict, - }, - ErrUserPolicyAlreadyExists: { - Code: "UserPolicyAlreadyExists", - Description: "The same user policy already exists .", - HTTPStatusCode: http.StatusConflict, - 
}, - ErrNoSuchKey: { - Code: "NoSuchKey", - Description: "The specified key does not exist.", - HTTPStatusCode: http.StatusNotFound, - }, - ErrNoSuchUpload: { - Code: "NoSuchUpload", - Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", - HTTPStatusCode: http.StatusNotFound, - }, - ErrInvalidVersionID: { - Code: "InvalidArgument", - Description: "Invalid version id specified", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrNoSuchVersion: { - Code: "NoSuchVersion", - Description: "The specified version does not exist.", - HTTPStatusCode: http.StatusNotFound, - }, - ErrNotImplemented: { - Code: "NotImplemented", - Description: "A header you provided implies functionality that is not implemented", - HTTPStatusCode: http.StatusNotImplemented, - }, - ErrPreconditionFailed: { - Code: "PreconditionFailed", - Description: "At least one of the pre-conditions you specified did not hold", - HTTPStatusCode: http.StatusPreconditionFailed, - }, - ErrRequestTimeTooSkewed: { - Code: "RequestTimeTooSkewed", - Description: "The difference between the request time and the server's time is too large.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrSignatureDoesNotMatch: { - Code: "SignatureDoesNotMatch", - Description: "The request signature we calculated does not match the signature you provided. Check your key and signing method.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrMethodNotAllowed: { - Code: "MethodNotAllowed", - Description: "The specified method is not allowed against this resource.", - HTTPStatusCode: http.StatusMethodNotAllowed, - }, - ErrInvalidPart: { - Code: "InvalidPart", - Description: "One or more of the specified parts could not be found. 
The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidPartOrder: { - Code: "InvalidPartOrder", - Description: "The list of parts was not in ascending order. The parts list must be specified in order by part number.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidObjectState: { - Code: "InvalidObjectState", - Description: "The operation is not valid for the current state of the object.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrAuthorizationHeaderMalformed: { - Code: "AuthorizationHeaderMalformed", - Description: "The authorization header is malformed; the region is wrong; expecting 'us-east-1'.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrMalformedPOSTRequest: { - Code: "MalformedPOSTRequest", - Description: "The body of your POST request is not well-formed multipart/form-data.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrPOSTFileRequired: { - Code: "InvalidArgument", - Description: "POST requires exactly one file upload per request.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrSignatureVersionNotSupported: { - Code: "InvalidRequest", - Description: "The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrBucketNotEmpty: { - Code: "BucketNotEmpty", - Description: "The bucket you tried to delete is not empty", - HTTPStatusCode: http.StatusConflict, - }, - ErrBucketAlreadyExists: { - Code: "BucketAlreadyExists", - Description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. 
Please select a different name and try again.", - HTTPStatusCode: http.StatusConflict, - }, - ErrAllAccessDisabled: { - Code: "AllAccessDisabled", - Description: "All access to this resource has been disabled.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrMalformedPolicy: { - Code: "MalformedPolicy", - Description: "Policy has invalid resource.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrMissingCredTag: { - Code: "InvalidRequest", - Description: "Missing Credential field for this request.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidRegion: { - Code: "InvalidRegion", - Description: "Region does not match.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrMissingSignTag: { - Code: "AccessDenied", - Description: "Signature header missing Signature field.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrMissingSignHeadersTag: { - Code: "InvalidArgument", - Description: "Signature header missing SignedHeaders field.", - HTTPStatusCode: http.StatusBadRequest, - }, - - ErrAuthHeaderEmpty: { - Code: "InvalidArgument", - Description: "Authorization header is invalid -- one and only one ' ' (space) required.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrMissingDateHeader: { - Code: "AccessDenied", - Description: "AWS authentication requires a valid Date or x-amz-date header", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrExpiredPresignRequest: { - Code: "AccessDenied", - Description: "Request has expired", - HTTPStatusCode: http.StatusForbidden, - }, - ErrRequestNotReadyYet: { - Code: "AccessDenied", - Description: "Request is not valid yet", - HTTPStatusCode: http.StatusForbidden, - }, - ErrSlowDown: { - Code: "SlowDown", - Description: "Resource requested is unreadable, please reduce your request rate", - HTTPStatusCode: http.StatusServiceUnavailable, - }, - ErrBadRequest: { - Code: "BadRequest", - Description: "400 BadRequest", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrKeyTooLongError: { - Code: "KeyTooLongError", 
- Description: "Your key is too long", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrUnsignedHeaders: { - Code: "AccessDenied", - Description: "There were headers present in the request which were not signed", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrBucketAlreadyOwnedByYou: { - Code: "BucketAlreadyOwnedByYou", - Description: "Your previous request to create the named bucket succeeded and you already own it.", - HTTPStatusCode: http.StatusConflict, - }, - ErrInvalidDuration: { - Code: "InvalidDuration", - Description: "Duration provided in the request is invalid.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidBucketObjectLockConfiguration: { - Code: "InvalidRequest", - Description: "Bucket is missing ObjectLockConfiguration", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrBucketTaggingNotFound: { - Code: "NoSuchTagSet", - Description: "The TagSet does not exist", - HTTPStatusCode: http.StatusNotFound, - }, - ErrObjectLockConfigurationNotAllowed: { - Code: "InvalidBucketState", - Description: "Object Lock configuration cannot be enabled on existing buckets", - HTTPStatusCode: http.StatusConflict, - }, - ErrNoSuchCORSConfiguration: { - Code: "NoSuchCORSConfiguration", - Description: "The CORS configuration does not exist", - HTTPStatusCode: http.StatusNotFound, - }, - ErrNoSuchWebsiteConfiguration: { - Code: "NoSuchWebsiteConfiguration", - Description: "The specified bucket does not have a website configuration", - HTTPStatusCode: http.StatusNotFound, - }, - ErrReplicationConfigurationNotFoundError: { - Code: "ReplicationConfigurationNotFoundError", - Description: "The replication configuration was not found", - HTTPStatusCode: http.StatusNotFound, - }, - ErrReplicationNeedsVersioningError: { - Code: "InvalidRequest", - Description: "Versioning must be 'Enabled' on the bucket to apply a replication configuration", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrReplicationBucketNeedsVersioningError: { - Code: "InvalidRequest", - 
Description: "Versioning must be 'Enabled' on the bucket to add a replication target", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrNoSuchObjectLockConfiguration: { - Code: "NoSuchObjectLockConfiguration", - Description: "The specified object does not have a ObjectLock configuration", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrObjectLocked: { - Code: "InvalidRequest", - Description: "Object is WORM protected and cannot be overwritten", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidRetentionDate: { - Code: "InvalidRequest", - Description: "Date must be provided in ISO 8601 format", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrPastObjectLockRetainDate: { - Code: "InvalidRequest", - Description: "the retain until date must be in the future", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrUnknownWORMModeDirective: { - Code: "InvalidRequest", - Description: "unknown wormMode directive", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrObjectLockInvalidHeaders: { - Code: "InvalidRequest", - Description: "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrObjectRestoreAlreadyInProgress: { - Code: "RestoreAlreadyInProgress", - Description: "Object restore is already in progress", - HTTPStatusCode: http.StatusConflict, - }, - // Bucket notification related errors. - ErrEventNotification: { - Code: "InvalidArgument", - Description: "A specified event is not supported for notifications.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrARNNotification: { - Code: "InvalidArgument", - Description: "A specified destination ARN does not exist or is not well-formed. Verify the destination ARN.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrRegionNotification: { - Code: "InvalidArgument", - Description: "A specified destination is in a different region than the bucket. 
You must use a destination that resides in the same region as the bucket.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrOverlappingFilterNotification: { - Code: "InvalidArgument", - Description: "An object key name filtering rule defined with overlapping prefixes, overlapping suffixes, or overlapping combinations of prefixes and suffixes for the same event types.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrFilterNameInvalid: { - Code: "InvalidArgument", - Description: "filter rule name must be either prefix or suffix", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrFilterNamePrefix: { - Code: "InvalidArgument", - Description: "Cannot specify more than one prefix rule in a filter.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrFilterNameSuffix: { - Code: "InvalidArgument", - Description: "Cannot specify more than one suffix rule in a filter.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrFilterValueInvalid: { - Code: "InvalidArgument", - Description: "Size of filter rule value cannot exceed 1024 bytes in UTF-8 representation", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrOverlappingConfigs: { - Code: "InvalidArgument", - Description: "Configurations overlap. 
Configurations on the same bucket cannot share a common event type.", - HTTPStatusCode: http.StatusBadRequest, - }, - - ErrInvalidCopyPartRange: { - Code: "InvalidArgument", - Description: "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidCopyPartRangeSource: { - Code: "InvalidArgument", - Description: "Range specified is not valid for source object", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrMetadataTooLarge: { - Code: "MetadataTooLarge", - Description: "Your metadata headers exceed the maximum allowed metadata size.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidTagDirective: { - Code: "InvalidArgument", - Description: "Unknown tag directive.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidEncryptionMethod: { - Code: "InvalidRequest", - Description: "The encryption method specified is not supported", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidQueryParams: { - Code: "AuthorizationQueryParametersError", - Description: "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrNoAccessKey: { - Code: "AccessDenied", - Description: "No AWSAccessKey was presented", - HTTPStatusCode: http.StatusForbidden, - }, - ErrInvalidToken: { - Code: "InvalidTokenId", - Description: "The security token included in the request is invalid", - HTTPStatusCode: http.StatusForbidden, - }, - - // S3 extensions. 
- ErrInvalidObjectName: { - Code: "InvalidObjectName", - Description: "Object name contains unsupported characters.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidObjectNamePrefixSlash: { - Code: "InvalidObjectName", - Description: "Object name contains a leading slash.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrClientDisconnected: { - Code: "ClientDisconnected", - Description: "Client disconnected before response was ready", - HTTPStatusCode: 499, // No official code, use nginx value. - }, - ErrOperationTimedOut: { - Code: "RequestTimeout", - Description: "A timeout occurred while trying to lock a resource, please reduce your request rate", - HTTPStatusCode: http.StatusServiceUnavailable, - }, - ErrOperationMaxedOut: { - Code: "SlowDown", - Description: "A timeout exceeded while waiting to proceed with the request, please reduce your request rate", - HTTPStatusCode: http.StatusServiceUnavailable, - }, - ErrUnsupportedMetadata: { - Code: "InvalidArgument", - Description: "Your metadata headers are not supported.", - HTTPStatusCode: http.StatusBadRequest, - }, - // Generic Invalid-Request error. Should be used for response errors only for unlikely - // corner case errors for which introducing new APIErrorCode is not worth it. LogIf() - // should be used to log the error at the source of the error for debugging purposes. 
- ErrInvalidRequest: { - Code: "InvalidRequest", - Description: "Invalid Request", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrIncorrectContinuationToken: { - Code: "InvalidArgument", - Description: "The continuation token provided is incorrect", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidFormatAccessKey: { - Code: "InvalidAccessKeyId", - Description: "The Access Key Id you provided contains invalid characters.", - HTTPStatusCode: http.StatusBadRequest, - }, - // S3 Select API Errors - ErrEmptyRequestBody: { - Code: "EmptyRequestBody", - Description: "Request body cannot be empty.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrUnsupportedFunction: { - Code: "UnsupportedFunction", - Description: "Encountered an unsupported SQL function.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidDataSource: { - Code: "InvalidDataSource", - Description: "Invalid data source type. Only CSV and JSON are supported at this time.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidExpressionType: { - Code: "InvalidExpressionType", - Description: "The ExpressionType is invalid. Only SQL expressions are supported at this time.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrBusy: { - Code: "Busy", - Description: "The service is unavailable. 
Please retry.", - HTTPStatusCode: http.StatusServiceUnavailable, - }, - ErrUnauthorizedAccess: { - Code: "UnauthorizedAccess", - Description: "You are not authorized to perform this operation", - HTTPStatusCode: http.StatusUnauthorized, - }, - ErrExpressionTooLong: { - Code: "ExpressionTooLong", - Description: "The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrIllegalSQLFunctionArgument: { - Code: "IllegalSqlFunctionArgument", - Description: "Illegal argument was used in the SQL function.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidKeyPath: { - Code: "InvalidKeyPath", - Description: "Key path in the SQL expression is invalid.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidCompressionFormat: { - Code: "InvalidCompressionFormat", - Description: "The file is not in a supported compression format. Only GZIP is supported at this time.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidFileHeaderInfo: { - Code: "InvalidFileHeaderInfo", - Description: "The FileHeaderInfo is invalid. Only NONE, USE, and IGNORE are supported.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidJSONType: { - Code: "InvalidJsonType", - Description: "The JsonType is invalid. Only DOCUMENT and LINES are supported at this time.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidQuoteFields: { - Code: "InvalidQuoteFields", - Description: "The QuoteFields is invalid. Only ALWAYS and ASNEEDED are supported.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidRequestParameter: { - Code: "InvalidRequestParameter", - Description: "The value of a parameter in SelectRequest element is invalid. 
Check the service API documentation and try again.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidDataType: { - Code: "InvalidDataType", - Description: "The SQL expression contains an invalid data type.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidTextEncoding: { - Code: "InvalidTextEncoding", - Description: "Invalid encoding type. Only UTF-8 encoding is supported at this time.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidTableAlias: { - Code: "InvalidTableAlias", - Description: "The SQL expression contains an invalid table alias.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrMissingRequiredParameter: { - Code: "MissingRequiredParameter", - Description: "The SelectRequest entity is missing a required parameter. Check the service documentation and try again.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrObjectSerializationConflict: { - Code: "ObjectSerializationConflict", - Description: "The SelectRequest entity can only contain one of CSV or JSON. Check the service documentation and try again.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrUnsupportedSQLOperation: { - Code: "UnsupportedSqlOperation", - Description: "Encountered an unsupported SQL operation.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrUnsupportedSQLStructure: { - Code: "UnsupportedSqlStructure", - Description: "Encountered an unsupported SQL structure. 
Check the SQL Reference.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrUnsupportedSyntax: { - Code: "UnsupportedSyntax", - Description: "Encountered invalid syntax.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrUnsupportedRangeHeader: { - Code: "UnsupportedRangeHeader", - Description: "Range header is not supported for this operation.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrLexerInvalidChar: { - Code: "LexerInvalidChar", - Description: "The SQL expression contains an invalid character.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrLexerInvalidOperator: { - Code: "LexerInvalidOperator", - Description: "The SQL expression contains an invalid literal.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrLexerInvalidLiteral: { - Code: "LexerInvalidLiteral", - Description: "The SQL expression contains an invalid operator.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrLexerInvalidIONLiteral: { - Code: "LexerInvalidIONLiteral", - Description: "The SQL expression contains an invalid operator.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseExpectedDatePart: { - Code: "ParseExpectedDatePart", - Description: "Did not find the expected date part in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseExpectedKeyword: { - Code: "ParseExpectedKeyword", - Description: "Did not find the expected keyword in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseExpectedTokenType: { - Code: "ParseExpectedTokenType", - Description: "Did not find the expected token in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseExpected2TokenTypes: { - Code: "ParseExpected2TokenTypes", - Description: "Did not find the expected token in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseExpectedNumber: { - Code: "ParseExpectedNumber", - Description: "Did not find the expected number in the SQL expression.", - HTTPStatusCode: 
http.StatusBadRequest, - }, - ErrParseExpectedRightParenBuiltinFunctionCall: { - Code: "ParseExpectedRightParenBuiltinFunctionCall", - Description: "Did not find the expected right parenthesis character in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseExpectedTypeName: { - Code: "ParseExpectedTypeName", - Description: "Did not find the expected type name in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseExpectedWhenClause: { - Code: "ParseExpectedWhenClause", - Description: "Did not find the expected WHEN clause in the SQL expression. CASE is not supported.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseUnsupportedToken: { - Code: "ParseUnsupportedToken", - Description: "The SQL expression contains an unsupported token.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseUnsupportedLiteralsGroupBy: { - Code: "ParseUnsupportedLiteralsGroupBy", - Description: "The SQL expression contains an unsupported use of GROUP BY.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseExpectedMember: { - Code: "ParseExpectedMember", - Description: "The SQL expression contains an unsupported use of MEMBER.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseUnsupportedSelect: { - Code: "ParseUnsupportedSelect", - Description: "The SQL expression contains an unsupported use of SELECT.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseUnsupportedCase: { - Code: "ParseUnsupportedCase", - Description: "The SQL expression contains an unsupported use of CASE.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseUnsupportedCaseClause: { - Code: "ParseUnsupportedCaseClause", - Description: "The SQL expression contains an unsupported use of CASE.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseUnsupportedAlias: { - Code: "ParseUnsupportedAlias", - Description: "The SQL expression contains an unsupported use of ALIAS.", - HTTPStatusCode: http.StatusBadRequest, - }, - 
ErrParseUnsupportedSyntax: { - Code: "ParseUnsupportedSyntax", - Description: "The SQL expression contains unsupported syntax.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseUnknownOperator: { - Code: "ParseUnknownOperator", - Description: "The SQL expression contains an invalid operator.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseMissingIdentAfterAt: { - Code: "ParseMissingIdentAfterAt", - Description: "Did not find the expected identifier after the @ symbol in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseUnexpectedOperator: { - Code: "ParseUnexpectedOperator", - Description: "The SQL expression contains an unexpected operator.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseUnexpectedTerm: { - Code: "ParseUnexpectedTerm", - Description: "The SQL expression contains an unexpected term.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseUnexpectedToken: { - Code: "ParseUnexpectedToken", - Description: "The SQL expression contains an unexpected token.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseUnexpectedKeyword: { - Code: "ParseUnexpectedKeyword", - Description: "The SQL expression contains an unexpected keyword.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseExpectedExpression: { - Code: "ParseExpectedExpression", - Description: "Did not find the expected SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseExpectedLeftParenAfterCast: { - Code: "ParseExpectedLeftParenAfterCast", - Description: "Did not find expected the left parenthesis in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseExpectedLeftParenValueConstructor: { - Code: "ParseExpectedLeftParenValueConstructor", - Description: "Did not find expected the left parenthesis in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseExpectedLeftParenBuiltinFunctionCall: { - Code: "ParseExpectedLeftParenBuiltinFunctionCall", - Description: 
"Did not find the expected left parenthesis in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseExpectedArgumentDelimiter: { - Code: "ParseExpectedArgumentDelimiter", - Description: "Did not find the expected argument delimiter in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseCastArity: { - Code: "ParseCastArity", - Description: "The SQL expression CAST has incorrect arity.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseInvalidTypeParam: { - Code: "ParseInvalidTypeParam", - Description: "The SQL expression contains an invalid parameter value.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseEmptySelect: { - Code: "ParseEmptySelect", - Description: "The SQL expression contains an empty SELECT.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseSelectMissingFrom: { - Code: "ParseSelectMissingFrom", - Description: "GROUP is not supported in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseExpectedIdentForGroupName: { - Code: "ParseExpectedIdentForGroupName", - Description: "GROUP is not supported in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseExpectedIdentForAlias: { - Code: "ParseExpectedIdentForAlias", - Description: "Did not find the expected identifier for the alias in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseUnsupportedCallWithStar: { - Code: "ParseUnsupportedCallWithStar", - Description: "Only COUNT with (*) as a parameter is supported in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseNonUnaryAgregateFunctionCall: { - Code: "ParseNonUnaryAgregateFunctionCall", - Description: "Only one argument is supported for aggregate functions in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseMalformedJoin: { - Code: "ParseMalformedJoin", - Description: "JOIN is not supported in the SQL expression.", - HTTPStatusCode: 
http.StatusBadRequest, - }, - ErrParseExpectedIdentForAt: { - Code: "ParseExpectedIdentForAt", - Description: "Did not find the expected identifier for AT name in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseAsteriskIsNotAloneInSelectList: { - Code: "ParseAsteriskIsNotAloneInSelectList", - Description: "Other expressions are not allowed in the SELECT list when '*' is used without dot notation in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseCannotMixSqbAndWildcardInSelectList: { - Code: "ParseCannotMixSqbAndWildcardInSelectList", - Description: "Cannot mix [] and * in the same expression in a SELECT list in SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrParseInvalidContextForWildcardInSelectList: { - Code: "ParseInvalidContextForWildcardInSelectList", - Description: "Invalid use of * in SELECT list in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrIncorrectSQLFunctionArgumentType: { - Code: "IncorrectSqlFunctionArgumentType", - Description: "Incorrect type of arguments in function call in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrValueParseFailure: { - Code: "ValueParseFailure", - Description: "Time stamp parse failure in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrEvaluatorInvalidArguments: { - Code: "EvaluatorInvalidArguments", - Description: "Incorrect number of arguments in the function call in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrIntegerOverflow: { - Code: "IntegerOverflow", - Description: "Int overflow or underflow in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrLikeInvalidInputs: { - Code: "LikeInvalidInputs", - Description: "Invalid argument given to the LIKE clause in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCastFailed: { - Code: "CastFailed", - Description: "Attempt to convert from one data 
type to another using CAST failed in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidCast: { - Code: "InvalidCast", - Description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrEvaluatorInvalidTimestampFormatPattern: { - Code: "EvaluatorInvalidTimestampFormatPattern", - Description: "Time stamp format pattern requires additional fields in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing: { - Code: "EvaluatorInvalidTimestampFormatPatternSymbolForParsing", - Description: "Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrEvaluatorTimestampFormatPatternDuplicateFields: { - Code: "EvaluatorTimestampFormatPatternDuplicateFields", - Description: "Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch: { - Code: "EvaluatorUnterminatedTimestampFormatPatternToken", - Description: "Time stamp format pattern contains unterminated token in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrEvaluatorUnterminatedTimestampFormatPatternToken: { - Code: "EvaluatorInvalidTimestampFormatPatternToken", - Description: "Time stamp format pattern contains an invalid token in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrEvaluatorInvalidTimestampFormatPatternToken: { - Code: "EvaluatorInvalidTimestampFormatPatternToken", - Description: "Time stamp format pattern contains an invalid token in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrEvaluatorInvalidTimestampFormatPatternSymbol: { - Code: 
"EvaluatorInvalidTimestampFormatPatternSymbol", - Description: "Time stamp format pattern contains an invalid symbol in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrEvaluatorBindingDoesNotExist: { - Code: "ErrEvaluatorBindingDoesNotExist", - Description: "A column name or a path provided does not exist in the SQL expression", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrMissingHeaders: { - Code: "MissingHeaders", - Description: "Some headers in the query are missing from the file. Check the file and try again.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrInvalidColumnIndex: { - Code: "InvalidColumnIndex", - Description: "The column index is invalid. Please check the service documentation and try again.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrPostPolicyConditionInvalidFormat: { - Code: "PostPolicyInvalidKeyName", - Description: "Invalid according to Policy: Policy Conditions failed", - HTTPStatusCode: http.StatusForbidden, - }, - // Add your error structure here. - ErrMalformedJSON: { - Code: "MalformedJSON", - Description: "The JSON was not well-formed or did not validate against our published format.", - HTTPStatusCode: http.StatusBadRequest, - }, -} - -// GetAPIError provides API Error for input API error code. -func GetAPIError(code ErrorCode) APIError { - return errorCodeResponse[code] -} - -// STSErrorCode type of error status. 
-type STSErrorCode int - -// STSError structure -type STSError struct { - Code string - Description string - HTTPStatusCode int -} - -// Error codes,list - http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithSAML.html -const ( - ErrSTSNone STSErrorCode = iota - ErrSTSAccessDenied - ErrSTSMissingParameter - ErrSTSInvalidParameterValue - ErrSTSInternalError -) - -type stsErrorCodeMap map[STSErrorCode]STSError - -//ToSTSErr code to err -func (e stsErrorCodeMap) ToSTSErr(errCode STSErrorCode) STSError { - apiErr, ok := e[errCode] - if !ok { - return e[ErrSTSInternalError] - } - return apiErr -} - -// StsErrCodes error code to STSError structure, these fields carry respective -// descriptions for all the error responses. -var StsErrCodes = stsErrorCodeMap{ - ErrSTSAccessDenied: { - Code: "AccessDenied", - Description: "Generating temporary credentials not allowed for this request.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrSTSMissingParameter: { - Code: "MissingParameter", - Description: "A required parameter for the specified action is not supplied.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrSTSInvalidParameterValue: { - Code: "InvalidParameterValue", - Description: "An invalid or out-of-range value was supplied for the input parameter.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrSTSInternalError: { - Code: "InternalError", - Description: "We encountered an internal error generating credentials, please try again.", - HTTPStatusCode: http.StatusInternalServerError, - }, -} diff --git a/s3/handlers/errors.go b/s3/handlers/errors.go index 4785f1638..f069c0528 100644 --- a/s3/handlers/errors.go +++ b/s3/handlers/errors.go @@ -2,12 +2,6 @@ package handlers import ( "context" - "github.com/yann-y/fds/internal/lock" - "github.com/yann-y/fds/internal/store" - "github.com/yann-y/fds/internal/utils/hash" - "github.com/yann-y/fds/pkg/s3utils" - "golang.org/x/xerrors" - "net/url" ) // NotImplemented If a feature is not implemented @@ 
-25,13 +19,6 @@ func ContextCanceled(ctx context.Context) bool { } } -//ErrInvalidBucketName = errors.New("bucket name is invalid") -//ErrBucketNotFound = errors.New("bucket is not found") -//ErrBucketAccessDenied = errors.New("bucket access denied. ") -//ErrSetBucketEmptyFailed = errors.New("set bucket empty failed. ") -//ErrCreateBucket = errors.New("create bucket failed") -//) - func ToApiError(ctx context.Context, err error) ErrorCode { if ContextCanceled(ctx) { if ctx.Err() == context.Canceled { @@ -51,47 +38,49 @@ func ToApiError(ctx context.Context, err error) ErrorCode { case ErrSetBucketEmptyFailed: case ErrCreateBucket: errCode = ErrCodeInternalError - - case lock.OperationTimedOut: - errCode = ErrCodeOperationTimedOut - case hash.SHA256Mismatch: - errCode = ErrCodeContentSHA256Mismatch - case hash.BadDigest: - errCode = ErrCodeBadDigest - case store.BucketPolicyNotFound: - errCode = ErrCodeNoSuchBucketPolicy - case store.BucketTaggingNotFound: - errCode = ErrBucketTaggingNotFound - case s3utils.BucketNameInvalid: - errCode = ErrCodeInvalidBucketName - case s3utils.ObjectNameInvalid: - errCode = ErrCodeInvalidObjectName - case s3utils.ObjectNameTooLong: - errCode = ErrCodeKeyTooLongError - case s3utils.ObjectNamePrefixAsSlash: - errCode = ErrCodeInvalidObjectNamePrefixSlash - case s3utils.InvalidUploadIDKeyCombination: - errCode = ErrCodeNotImplemented - case s3utils.InvalidMarkerPrefixCombination: + case ErrNotImplemented: errCode = ErrCodeNotImplemented - case s3utils.MalformedUploadID: - errCode = ErrCodeNoSuchUpload - case s3utils.InvalidUploadID: - errCode = ErrCodeNoSuchUpload - case s3utils.InvalidPart: - errCode = ErrCodeInvalidPart - case s3utils.PartTooSmall: - errCode = ErrCodeEntityTooSmall - case s3utils.PartTooBig: - errCode = ErrCodeEntityTooLarge - case url.EscapeError: - errCode = ErrCodeInvalidObjectName - default: - if xerrors.Is(err, store.ErrObjectNotFound) { - errCode = ErrCodeNoSuchKey - } else if xerrors.Is(err, 
store.ErrBucketNotEmpty) { - errCode = ErrCodeBucketNotEmpty - } + + //case lock.OperationTimedOut: + // errCode = ErrCodeOperationTimedOut + //case hash.SHA256Mismatch: + // errCode = ErrCodeContentSHA256Mismatch + //case hash.BadDigest: + // errCode = ErrCodeBadDigest + //case store.BucketPolicyNotFound: + // errCode = ErrCodeNoSuchBucketPolicy + //case store.BucketTaggingNotFound: + // errCode = ErrBucketTaggingNotFound + //case s3utils.BucketNameInvalid: + // errCode = ErrCodeInvalidBucketName + //case s3utils.ObjectNameInvalid: + // errCode = ErrCodeInvalidObjectName + //case s3utils.ObjectNameTooLong: + // errCode = ErrCodeKeyTooLongError + //case s3utils.ObjectNamePrefixAsSlash: + // errCode = ErrCodeInvalidObjectNamePrefixSlash + //case s3utils.InvalidUploadIDKeyCombination: + // errCode = ErrCodeNotImplemented + //case s3utils.InvalidMarkerPrefixCombination: + // errCode = ErrCodeNotImplemented + //case s3utils.MalformedUploadID: + // errCode = ErrCodeNoSuchUpload + //case s3utils.InvalidUploadID: + // errCode = ErrCodeNoSuchUpload + //case s3utils.InvalidPart: + // errCode = ErrCodeInvalidPart + //case s3utils.PartTooSmall: + // errCode = ErrCodeEntityTooSmall + //case s3utils.PartTooBig: + // errCode = ErrCodeEntityTooLarge + //case url.EscapeError: + // errCode = ErrCodeInvalidObjectName + //default: + // if xerrors.Is(err, store.ErrObjectNotFound) { + // errCode = ErrCodeNoSuchKey + // } else if xerrors.Is(err, store.ErrBucketNotEmpty) { + // errCode = ErrCodeBucketNotEmpty + // } } return errCode } diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 6a4b0eb77..0d133beae 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -4,8 +4,10 @@ package handlers import ( "net/http" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + s3action "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/policy" 
"github.com/bittorrent/go-btfs/s3/routers" "github.com/bittorrent/go-btfs/s3/s3utils" "github.com/rs/cors" @@ -73,7 +75,7 @@ func (handlers *Handlers) Sign(handler http.Handler) http.Handler { // return //} -func (handlers *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { +func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() req := &PutBucketRequest{} err := req.Bind(r) @@ -82,22 +84,33 @@ func (handlers *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Reques return } - accessKeyRecord, errCode := handlers.authSvc.VerifySignature(ctx, r) + accessKeyRecord, errCode := h.authSvc.VerifySignature(ctx, r) if errCode != ErrCodeNone { WriteErrorResponse(w, r, errCode) return } + err = h.bucketSvc.CheckACL(accessKeyRecord, req.Bucket, s3action.CreateBucketAction) + if err != nil { + WriteErrorResponse(w, r, ToApiError(ctx, ErrNoSuchUserPolicy)) + return + } + if err := s3utils.CheckValidBucketNameStrict(req.Bucket); err != nil { WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidBucketName)) return } if !checkPermissionType(req.ACL) { - req.ACL = policy.Private + WriteErrorResponse(w, r, ToApiError(ctx, ErrNotImplemented)) } - err = handlers.bucketSvc.CreateBucket(ctx, req.Bucket, req.Region, accessKeyRecord.Key, req.ACL) + if ok := h.bucketSvc.HasBucket(r.Context(), req.Bucket); !ok { + WriteErrorResponseHeadersOnly(w, r, ToApiError(ctx, ErrBucketNotFound)) + return + } + + err = h.bucketSvc.CreateBucket(ctx, req.Bucket, req.Region, accessKeyRecord.Key, req.ACL) if err != nil { log.Errorf("PutBucketHandler create bucket error:%v", err) WriteErrorResponse(w, r, ToApiError(ctx, ErrCreateBucket)) @@ -113,3 +126,200 @@ func (handlers *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Reques return } + +func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := &PutBucketRequest{} + err := req.Bind(r) + if err != nil { + WriteErrorResponse(w, r, 
ToApiError(ctx, ErrInvalidArgument)) + return + } + + accessKeyRecord, errCode := h.authSvc.VerifySignature(ctx, r) + if errCode != ErrCodeNone { + WriteErrorResponse(w, r, errCode) + return + } + + err = h.bucketSvc.CheckACL(accessKeyRecord, req.Bucket, s3action.HeadBucketAction) + if err != nil { + WriteErrorResponse(w, r, ToApiError(ctx, ErrNoSuchUserPolicy)) + return + } + + if ok := h.bucketSvc.HasBucket(r.Context(), req.Bucket); !ok { + WriteErrorResponseHeadersOnly(w, r, ToApiError(ctx, ErrBucketNotFound)) + return + } + + WriteSuccessResponseHeadersOnly(w, r) +} + +func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := &PutBucketRequest{} + err := req.Bind(r) + if err != nil { + WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) + return + } + + accessKeyRecord, errCode := h.authSvc.VerifySignature(ctx, r) + if errCode != ErrCodeNone { + WriteErrorResponse(w, r, errCode) + return + } + + err = h.bucketSvc.CheckACL(accessKeyRecord, req.Bucket, s3action.HeadBucketAction) + if err != nil { + WriteErrorResponse(w, r, ToApiError(ctx, ErrNoSuchUserPolicy)) + return + } + + //todo check all errors. 
+ err = h.bucketSvc.DeleteBucket(ctx, req.Bucket) + if err != nil { + WriteErrorResponse(w, r, ToApiError(ctx, err)) + return + } + WriteSuccessNoContent(w) +} + +func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := &PutBucketRequest{} + err := req.Bind(r) + if err != nil { + WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) + return + } + + accessKeyRecord, errCode := h.authSvc.VerifySignature(ctx, r) + if errCode != ErrCodeNone { + WriteErrorResponse(w, r, errCode) + return + } + + err = h.bucketSvc.CheckACL(accessKeyRecord, req.Bucket, s3action.ListBucketAction) + if err != nil { + WriteErrorResponse(w, r, ToApiError(ctx, ErrNoSuchUserPolicy)) + return + } + + //todo check all errors + bucketMetas, err := h.bucketSvc.GetAllBucketsOfUser(ctx, accessKeyRecord.Key) + if err != nil { + WriteErrorResponse(w, r, ToApiError(ctx, err)) + return + } + var buckets []*s3.Bucket + for _, b := range bucketMetas { + buckets = append(buckets, &s3.Bucket{ + Name: aws.String(b.Name), + CreationDate: aws.Time(b.Created), + }) + } + + resp := ListAllMyBucketsResult{ + Owner: &s3.Owner{ + ID: aws.String(consts.DefaultOwnerID), + DisplayName: aws.String(consts.DisplayName), + }, + Buckets: buckets, + } + + WriteSuccessResponseXML(w, r, resp) +} + +func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := &PutBucketRequest{} + err := req.Bind(r) + if err != nil { + WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) + return + } + + accessKeyRecord, errCode := h.authSvc.VerifySignature(ctx, r) + if errCode != ErrCodeNone { + WriteErrorResponse(w, r, errCode) + return + } + + err = h.bucketSvc.CheckACL(accessKeyRecord, req.Bucket, s3action.GetBucketAclAction) + if err != nil { + WriteErrorResponse(w, r, ToApiError(ctx, ErrNoSuchUserPolicy)) + return + } + + if !h.bucketSvc.HasBucket(ctx, req.Bucket) { + WriteErrorResponseHeadersOnly(w, r, ToApiError(ctx, 
ErrBucketNotFound)) + return + } + //todo check all errors + bucketMeta, err := h.bucketSvc.GetBucketMeta(ctx, req.Bucket) + if err != nil { + WriteErrorResponse(w, r, ToApiError(ctx, err)) + return + } + // 校验桶ACL类型,公共读(PublicRead),公共读写(PublicReadWrite),私有(Private) + acl := bucketMeta.Acl + if acl == "" { + acl = "private" + } + + resp := AccessControlPolicy{} + id := accessKeyRecord.Key + if resp.Owner.DisplayName == "" { + resp.Owner.DisplayName = accessKeyRecord.Key + resp.Owner.ID = id + } + resp.AccessControlList.Grant = append(resp.AccessControlList.Grant, Grant{ + Grantee: Grantee{ + ID: id, + DisplayName: accessKeyRecord.Key, + Type: "CanonicalUser", + XMLXSI: "CanonicalUser", + XMLNS: "http://www.w3.org/2001/XMLSchema-instance"}, + Permission: Permission(acl), //todo change + }) + WriteSuccessResponseXML(w, r, resp) +} + +func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + req := &PutBucketRequest{} + err := req.Bind(r) + if err != nil { + WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) + return + } + + accessKeyRecord, errCode := h.authSvc.VerifySignature(ctx, r) + if errCode != ErrCodeNone { + WriteErrorResponse(w, r, errCode) + return + } + + err = h.bucketSvc.CheckACL(accessKeyRecord, req.Bucket, s3action.PutBucketAclAction) + if err != nil { + WriteErrorResponse(w, r, ToApiError(ctx, ErrNoSuchUserPolicy)) + return + } + + if !checkPermissionType(req.ACL) || req.ACL == "" { + WriteErrorResponse(w, r, ToApiError(ctx, ErrNotImplemented)) + return + } + + //todo check all errors + err = h.bucketSvc.UpdateBucketAcl(ctx, req.Bucket, req.ACL) + if err != nil { + WriteErrorResponse(w, r, ToApiError(ctx, err)) + return + } + + //todo check no return? 
+ WriteSuccessNoContent(w) +} diff --git a/s3/handlers/request.go b/s3/handlers/request.go index 1e43cad2a..40029f03f 100644 --- a/s3/handlers/request.go +++ b/s3/handlers/request.go @@ -2,14 +2,14 @@ package handlers import ( "encoding/xml" - "github.com/bittorrent/go-btfs/s3/apierrors" + "io" + "net/http" + "path" + "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/policy" "github.com/bittorrent/go-btfs/s3/utils" "github.com/gorilla/mux" - "io" - "net/http" - "path" ) type RequestBinder interface { @@ -61,7 +61,7 @@ func (req *PutBucketRequest) Bind(r *http.Request) (err error) { } // Parses location constraint from the incoming reader. -func parseLocationConstraint(r *http.Request) (location string, s3Error apierrors.ErrorCode) { +func parseLocationConstraint(r *http.Request) (location string, s3Error ErrorCode) { // If the request has no body with content-length set to 0, // we do not have to validate location constraint. Bucket will // be created at default region. @@ -69,13 +69,13 @@ func parseLocationConstraint(r *http.Request) (location string, s3Error apierror err := utils.XmlDecoder(r.Body, &locationConstraint, r.ContentLength) if err != nil && r.ContentLength != 0 { // Treat all other failures as XML parsing errors. - return "", apierrors.ErrMalformedXML + return "", ErrCodeMalformedXML } // else for both err as nil or io.EOF location = locationConstraint.Location if location == "" { location = consts.DefaultRegion } - return location, apierrors.ErrNone + return location, ErrCodeNone } // createBucketConfiguration container for bucket configuration request from client. 
@@ -95,17 +95,17 @@ func pathClean(p string) string { return cp } -func unmarshalXML(reader io.Reader, isObject bool) (*store.Tags, error) { - tagging := &store.Tags{ - TagSet: &store.TagSet{ - TagMap: make(map[string]string), - IsObject: isObject, - }, - } - - if err := xml.NewDecoder(reader).Decode(tagging); err != nil { - return nil, err - } - - return tagging, nil -} +//func unmarshalXML(reader io.Reader, isObject bool) (*store.Tags, error) { +// tagging := &store.Tags{ +// TagSet: &store.TagSet{ +// TagMap: make(map[string]string), +// IsObject: isObject, +// }, +// } +// +// if err := xml.NewDecoder(reader).Decode(tagging); err != nil { +// return nil, err +// } +// +// return tagging, nil +//} diff --git a/s3/handlers/response_acl.go b/s3/handlers/response_acl.go new file mode 100644 index 000000000..404a48bd1 --- /dev/null +++ b/s3/handlers/response_acl.go @@ -0,0 +1,49 @@ +package handlers + +type accessControlList struct { + Grant []Grant `xml:"Grant,omitempty"` +} +type canonicalUser struct { + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName,omitempty"` +} + +// AccessControlPolicy +// +// 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a +// CustomersName@amazon.com +// +// +// +// +// 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a +// CustomersName@amazon.com +// +// FULL_CONTROL +// +// +// +type AccessControlPolicy struct { + Owner canonicalUser `xml:"Owner"` + AccessControlList accessControlList `xml:"AccessControlList"` +} + +//Grant grant +type Grant struct { + Grantee Grantee `xml:"Grantee"` + Permission Permission `xml:"Permission"` +} + +//Grantee grant +type Grantee struct { + XMLNS string `xml:"xmlns:xsi,attr"` + XMLXSI string `xml:"xsi:type,attr"` + Type string `xml:"Type"` + ID string `xml:"ID,omitempty"` + DisplayName string `xml:"DisplayName,omitempty"` + URI string `xml:"URI,omitempty"` +} + +// Permission May be one of READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL +type Permission string 
diff --git a/s3/handlers/services.go b/s3/handlers/services.go index 8c4c2c6dc..91d244e4e 100644 --- a/s3/handlers/services.go +++ b/s3/handlers/services.go @@ -35,6 +35,8 @@ type BucketService interface { SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) DeleteBucket(ctx context.Context, bucket string) error GetAllBucketsOfUser(ctx context.Context, accessKey string) ([]BucketMetadata, error) + UpdateBucketAcl(ctx context.Context, bucket, acl string) error + GetBucketAcl(ctx context.Context, bucket string) (string, error) } type ObjectService interface { diff --git a/s3/handlers/services_errors.go b/s3/handlers/services_errors.go index 95fdb2cd4..f0d447c62 100644 --- a/s3/handlers/services_errors.go +++ b/s3/handlers/services_errors.go @@ -9,7 +9,10 @@ var ( ErrInvalidBucketName = errors.New("bucket name is invalid") ErrBucketNotFound = errors.New("bucket is not found") - ErrBucketAccessDenied = errors.New("bucket access denied. ") - ErrSetBucketEmptyFailed = errors.New("set bucket empty failed. 
") + ErrBucketAccessDenied = errors.New("bucket access denied") + ErrSetBucketEmptyFailed = errors.New("set bucket empty failed") ErrCreateBucket = errors.New("create bucket failed") + ErrNoSuchUserPolicy = errors.New("no such user policy") + + ErrNotImplemented = errors.New("not implemented") ) diff --git a/s3/routers/handlerser.go b/s3/routers/handlerser.go index b41e8e7c6..5e3af9c21 100644 --- a/s3/routers/handlerser.go +++ b/s3/routers/handlerser.go @@ -9,6 +9,11 @@ type Handlerser interface { Sign(handler http.Handler) http.Handler PutBucketHandler(w http.ResponseWriter, r *http.Request) + HeadBucketHandler(w http.ResponseWriter, r *http.Request) + DeleteBucketHandler(w http.ResponseWriter, r *http.Request) + ListBucketsHandler(w http.ResponseWriter, r *http.Request) + GetBucketAclHandler(w http.ResponseWriter, r *http.Request) + PutBucketAclHandler(w http.ResponseWriter, r *http.Request) //PutObjectHandler(w http.ResponseWriter, r *http.Request) } diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 0beae03d4..691cc8f4f 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -26,6 +26,11 @@ func (routers *Routers) Register() http.Handler { bucket := root.PathPrefix("/{bucket}").Subrouter() bucket.Methods(http.MethodPut).Path("/{bucket:.+}").HandlerFunc(routers.handlers.PutBucketHandler) + bucket.Methods(http.MethodHead).Path("/{bucket:.+}").HandlerFunc(routers.handlers.HeadBucketHandler) + bucket.Methods(http.MethodDelete).Path("/{bucket:.+}").HandlerFunc(routers.handlers.DeleteBucketHandler) + bucket.Methods(http.MethodGet).Path("/").HandlerFunc(routers.handlers.ListBucketsHandler) + bucket.Methods(http.MethodGet).Path("/{bucket:.+}").HandlerFunc(routers.handlers.GetBucketAclHandler).Queries("acl", "") + bucket.Methods(http.MethodPut).Path("/{bucket:.+}").HandlerFunc(routers.handlers.PutBucketAclHandler).Queries("acl", "") //object //bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(routers.handlers.PutObjectHandler) diff 
--git a/s3/services/bucket/service.go b/s3/services/bucket/service.go index 9214f839c..ec07668e7 100644 --- a/s3/services/bucket/service.go +++ b/s3/services/bucket/service.go @@ -41,7 +41,7 @@ func NewService(providers services.Providerser, options ...Option) (s *Service) } func (s *Service) CheckACL(accessKeyRecord *handlers.AccessKeyRecord, bucketName string, action action.Action) (err error) { - //todo 是否需要判断原始的 + //需要判断原始的 if bucketName == "" { return handlers.ErrBucketNotFound } @@ -51,8 +51,6 @@ func (s *Service) CheckACL(accessKeyRecord *handlers.AccessKeyRecord, bucketName return err } - //todo 注意:如果action是CreateBucketAction,HasBucket(ctx, bucketName)进行判断 - if policy.IsAllowed(bucketMeta.Owner == accessKeyRecord.Key, bucketMeta.Acl, action) == false { return handlers.ErrBucketAccessDenied } @@ -166,7 +164,7 @@ func (s *Service) GetAllBucketsOfUser(ctx context.Context, username string) ([]h } // UpdateBucketAcl . -func (s *Service) UpdateBucketAcl(ctx context.Context, bucket, acl, accessKey string) error { +func (s *Service) UpdateBucketAcl(ctx context.Context, bucket, acl string) error { ctx, cancel := context.WithTimeout(context.Background(), s.updateTimeout) defer cancel() From 3ce88b330114c75a123f16eee5ce862e4d877ea9 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Thu, 17 Aug 2023 18:55:13 +0800 Subject: [PATCH 039/139] chore: --- s3/handlers/handlers.go | 13 +++-- s3/handlers/request.go | 103 ++++++++++++++++++++++++++++++++++------ 2 files changed, 95 insertions(+), 21 deletions(-) diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 0d133beae..c4592b397 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -129,7 +129,7 @@ func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - req := &PutBucketRequest{} + req := &HeadBucketRequest{} err := req.Bind(r) if err != nil { 
WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) @@ -158,7 +158,7 @@ func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - req := &PutBucketRequest{} + req := &DeleteBucketRequest{} err := req.Bind(r) if err != nil { WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) @@ -188,7 +188,7 @@ func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - req := &PutBucketRequest{} + req := &ListBucketsRequest{} err := req.Bind(r) if err != nil { WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) @@ -234,7 +234,7 @@ func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - req := &PutBucketRequest{} + req := &GetBucketAclRequest{} err := req.Bind(r) if err != nil { WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) @@ -258,13 +258,12 @@ func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { return } //todo check all errors - bucketMeta, err := h.bucketSvc.GetBucketMeta(ctx, req.Bucket) + acl, err := h.bucketSvc.GetBucketAcl(ctx, req.Bucket) if err != nil { WriteErrorResponse(w, r, ToApiError(ctx, err)) return } // 校验桶ACL类型,公共读(PublicRead),公共读写(PublicReadWrite),私有(Private) - acl := bucketMeta.Acl if acl == "" { acl = "private" } @@ -289,7 +288,7 @@ func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - req := &PutBucketRequest{} + req := &PutBucketAclRequest{} err := req.Bind(r) if err != nil { WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) diff --git a/s3/handlers/request.go 
b/s3/handlers/request.go index 40029f03f..87675f215 100644 --- a/s3/handlers/request.go +++ b/s3/handlers/request.go @@ -2,7 +2,6 @@ package handlers import ( "encoding/xml" - "io" "net/http" "path" @@ -26,23 +25,11 @@ type RequestBinder interface { // return //} -func checkPermissionType(s string) bool { - switch s { - case policy.PublicRead: - return true - case policy.PublicReadWrite: - return true - case policy.Private: - return true - } - return false -} - +// PutBucketRequest . type PutBucketRequest struct { Bucket string ACL string Region string - Body io.Reader } func (req *PutBucketRequest) Bind(r *http.Request) (err error) { @@ -60,6 +47,82 @@ func (req *PutBucketRequest) Bind(r *http.Request) (err error) { return } +// HeadBucketRequest . +type HeadBucketRequest struct { + Bucket string +} + +func (req *HeadBucketRequest) Bind(r *http.Request) (err error) { + vars := mux.Vars(r) + bucket := vars["bucket"] + + //set request + req.Bucket = bucket + return +} + +// DeleteBucketRequest . +type DeleteBucketRequest struct { + Bucket string +} + +func (req *DeleteBucketRequest) Bind(r *http.Request) (err error) { + vars := mux.Vars(r) + bucket := vars["bucket"] + + //set request + req.Bucket = bucket + return +} + +// ListBucketsRequest . +type ListBucketsRequest struct { + Bucket string +} + +func (req *ListBucketsRequest) Bind(r *http.Request) (err error) { + vars := mux.Vars(r) + bucket := vars["bucket"] + + //set request + req.Bucket = bucket + return +} + +// GetBucketAclRequest . +type GetBucketAclRequest struct { + Bucket string +} + +func (req *GetBucketAclRequest) Bind(r *http.Request) (err error) { + vars := mux.Vars(r) + bucket := vars["bucket"] + + //set request + req.Bucket = bucket + return +} + +// PutBucketAclRequest . 
+type PutBucketAclRequest struct { + Bucket string + ACL string +} + +func (req *PutBucketAclRequest) Bind(r *http.Request) (err error) { + vars := mux.Vars(r) + bucket := vars["bucket"] + + acl := r.Header.Get(consts.AmzACL) + + //set request + req.Bucket = bucket + req.ACL = acl + return +} + +/*********************************/ + // Parses location constraint from the incoming reader. func parseLocationConstraint(r *http.Request) (location string, s3Error ErrorCode) { // If the request has no body with content-length set to 0, @@ -109,3 +172,15 @@ func pathClean(p string) string { // // return tagging, nil //} + +func checkPermissionType(s string) bool { + switch s { + case policy.PublicRead: + return true + case policy.PublicReadWrite: + return true + case policy.Private: + return true + } + return false +} From 0eaf129c412cda306e1b4eb8b37bd2e6c56ce844 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Thu, 17 Aug 2023 18:58:20 +0800 Subject: [PATCH 040/139] chore: --- s3/handlers/handlers.go | 4 ++-- s3/handlers/response_comm.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index c4592b397..15204592b 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -122,7 +122,7 @@ func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set(consts.Location, cp) // Clean any trailing slashes. 
} - WriteSuccessResponseHeadersOnly(w, r) + WriteSuccessResponse(w, r) return } @@ -153,7 +153,7 @@ func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { return } - WriteSuccessResponseHeadersOnly(w, r) + WriteSuccessResponse(w, r) } func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { diff --git a/s3/handlers/response_comm.go b/s3/handlers/response_comm.go index 31d1d1cff..170785b12 100644 --- a/s3/handlers/response_comm.go +++ b/s3/handlers/response_comm.go @@ -141,8 +141,8 @@ type ListAllMyBucketsResult struct { Buckets []*s3.Bucket `xml:"Buckets>Bucket"` } -// WriteSuccessResponseHeadersOnly write SuccessResponseHeadersOnly -func WriteSuccessResponseHeadersOnly(w http.ResponseWriter, r *http.Request) { +// WriteSuccessResponse write SuccessResponseHeadersOnly +func WriteSuccessResponse(w http.ResponseWriter, r *http.Request) { writeResponse(w, r, http.StatusOK, nil, mimeNone) } From 0b87d68df830e36b384bc4247862fbc126595fb9 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Thu, 17 Aug 2023 19:28:10 +0800 Subject: [PATCH 041/139] feat: add request and response --- s3/handlers/handlers.go | 79 +++---------------- s3/handlers/response.go | 70 ++++++++++++++-- .../{response_acl.go => response_comm_acl.go} | 0 3 files changed, 78 insertions(+), 71 deletions(-) rename s3/handlers/{response_acl.go => response_comm_acl.go} (100%) diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 15204592b..b22e6cccc 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -4,8 +4,6 @@ package handlers import ( "net/http" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" s3action "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/routers" @@ -44,37 +42,20 @@ func NewHandlers( return } -func (handlers *Handlers) Cors(handler http.Handler) http.Handler { +func (h *Handlers) Cors(handler http.Handler) 
http.Handler { return cors.New(cors.Options{ - AllowedOrigins: handlers.corsSvc.GetAllowOrigins(), - AllowedMethods: handlers.corsSvc.GetAllowMethods(), - AllowedHeaders: handlers.corsSvc.GetAllowHeaders(), - ExposedHeaders: handlers.corsSvc.GetAllowHeaders(), + AllowedOrigins: h.corsSvc.GetAllowOrigins(), + AllowedMethods: h.corsSvc.GetAllowMethods(), + AllowedHeaders: h.corsSvc.GetAllowHeaders(), + ExposedHeaders: h.corsSvc.GetAllowHeaders(), AllowCredentials: true, }).Handler(handler) } -func (handlers *Handlers) Sign(handler http.Handler) http.Handler { +func (h *Handlers) Sign(handler http.Handler) http.Handler { return nil } -//func (handlers *Handlers) parsePutObjectReq(r *http.Request) (arg *PutObjectReq, err error) { -// return -//} -// -//func (handlers *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { -// req := &PutObjectRequest{} -// err := req.Bind(r) -// if err != nil { -// return -// } -// //.... -// -// WritePutObjectResponse(w, object) -// -// return -//} - func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() req := &PutBucketRequest{} @@ -122,7 +103,7 @@ func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set(consts.Location, cp) // Clean any trailing slashes. 
} - WriteSuccessResponse(w, r) + WritePutBucketResponse(w, r) return } @@ -153,7 +134,7 @@ func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { return } - WriteSuccessResponse(w, r) + WriteHeadBucketResponse(w, r) } func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { @@ -183,7 +164,7 @@ func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { WriteErrorResponse(w, r, ToApiError(ctx, err)) return } - WriteSuccessNoContent(w) + WriteDeleteBucketResponse(w) } func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { @@ -213,23 +194,8 @@ func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { WriteErrorResponse(w, r, ToApiError(ctx, err)) return } - var buckets []*s3.Bucket - for _, b := range bucketMetas { - buckets = append(buckets, &s3.Bucket{ - Name: aws.String(b.Name), - CreationDate: aws.Time(b.Created), - }) - } - resp := ListAllMyBucketsResult{ - Owner: &s3.Owner{ - ID: aws.String(consts.DefaultOwnerID), - DisplayName: aws.String(consts.DisplayName), - }, - Buckets: buckets, - } - - WriteSuccessResponseXML(w, r, resp) + WriteListBucketsResponse(w, r, bucketMetas) } func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { @@ -263,27 +229,8 @@ func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { WriteErrorResponse(w, r, ToApiError(ctx, err)) return } - // 校验桶ACL类型,公共读(PublicRead),公共读写(PublicReadWrite),私有(Private) - if acl == "" { - acl = "private" - } - - resp := AccessControlPolicy{} - id := accessKeyRecord.Key - if resp.Owner.DisplayName == "" { - resp.Owner.DisplayName = accessKeyRecord.Key - resp.Owner.ID = id - } - resp.AccessControlList.Grant = append(resp.AccessControlList.Grant, Grant{ - Grantee: Grantee{ - ID: id, - DisplayName: accessKeyRecord.Key, - Type: "CanonicalUser", - XMLXSI: "CanonicalUser", - XMLNS: "http://www.w3.org/2001/XMLSchema-instance"}, - Permission: 
Permission(acl), //todo change - }) - WriteSuccessResponseXML(w, r, resp) + + WriteGetBucketAclResponse(w, r, accessKeyRecord, acl) } func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { @@ -320,5 +267,5 @@ func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { } //todo check no return? - WriteSuccessNoContent(w) + WritePutBucketAclResponse(w, r) } diff --git a/s3/handlers/response.go b/s3/handlers/response.go index 491e0b4af..9cd2f1456 100644 --- a/s3/handlers/response.go +++ b/s3/handlers/response.go @@ -1,11 +1,71 @@ package handlers -import "net/http" +import ( + "net/http" -//func WritePutObjectResponse(w http.ResponseWriter, objectMeta *ObjectMetadata) { -// return -//} + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/consts" +) -func WritePutObjectResponse(w http.ResponseWriter, objectMeta *ObjectMetadata) { +func WritePutBucketResponse(w http.ResponseWriter, r *http.Request) { + WriteSuccessResponse(w, r) + return +} + +func WriteHeadBucketResponse(w http.ResponseWriter, r *http.Request) { + WriteSuccessResponse(w, r) + return +} + +func WriteDeleteBucketResponse(w http.ResponseWriter) { + WriteSuccessNoContent(w) + return +} + +func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, bucketMetas []BucketMetadata) { + var buckets []*s3.Bucket + for _, b := range bucketMetas { + buckets = append(buckets, &s3.Bucket{ + Name: aws.String(b.Name), + CreationDate: aws.Time(b.Created), + }) + } + + resp := ListAllMyBucketsResult{ + Owner: &s3.Owner{ + ID: aws.String(consts.DefaultOwnerID), + DisplayName: aws.String(consts.DisplayName), + }, + Buckets: buckets, + } + + WriteSuccessResponseXML(w, r, resp) + return +} + +func WriteGetBucketAclResponse(w http.ResponseWriter, r *http.Request, accessKeyRecord *AccessKeyRecord, acl string) { + resp := AccessControlPolicy{} + id := accessKeyRecord.Key + if resp.Owner.DisplayName == "" { + 
resp.Owner.DisplayName = accessKeyRecord.Key + resp.Owner.ID = id + } + resp.AccessControlList.Grant = append(resp.AccessControlList.Grant, Grant{ + Grantee: Grantee{ + ID: id, + DisplayName: accessKeyRecord.Key, + Type: "CanonicalUser", + XMLXSI: "CanonicalUser", + XMLNS: "http://www.w3.org/2001/XMLSchema-instance"}, + Permission: Permission(acl), //todo change + }) + + WriteSuccessResponseXML(w, r, resp) + return +} + +func WritePutBucketAclResponse(w http.ResponseWriter, r *http.Request) { + WriteSuccessResponse(w, r) return } diff --git a/s3/handlers/response_acl.go b/s3/handlers/response_comm_acl.go similarity index 100% rename from s3/handlers/response_acl.go rename to s3/handlers/response_comm_acl.go From 3e845571374c0b62eb15da38ab3cb292802d1df8 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Thu, 17 Aug 2023 19:31:40 +0800 Subject: [PATCH 042/139] chore: --- s3/services/bucket/service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/s3/services/bucket/service.go b/s3/services/bucket/service.go index ec07668e7..0baf134d7 100644 --- a/s3/services/bucket/service.go +++ b/s3/services/bucket/service.go @@ -41,7 +41,7 @@ func NewService(providers services.Providerser, options ...Option) (s *Service) } func (s *Service) CheckACL(accessKeyRecord *handlers.AccessKeyRecord, bucketName string, action action.Action) (err error) { - //需要判断原始的 + //需要判断bucketName是否为空字符串 if bucketName == "" { return handlers.ErrBucketNotFound } From 1cab3984ced2ca8e096ea571bb9d2597b0ad5692 Mon Sep 17 00:00:00 2001 From: steve Date: Fri, 18 Aug 2023 17:56:45 +0800 Subject: [PATCH 043/139] feat: server build --- cmd/btfs/daemon.go | 15 ++++--- go.mod | 10 +++-- go.sum | 5 ++- s3/handlers/handlers.go | 28 +++++++++---- s3/handlers/request.go | 5 +++ s3/handlers/response_error.go | 1 + s3/routers/routers.go | 14 +++---- s3/server.go | 52 +++++++++++++++++++++++++ s3/services/auth/service.go | 3 +- s3/services/auth/signature-v4-parser.go | 2 +- 
s3/services/auth/signature-v4.go | 2 +- s3/utils/hash/reader.go | 2 +- s3/utils/ip.go | 46 ---------------------- statestore/mock/store.go | 5 +++ 14 files changed, 111 insertions(+), 79 deletions(-) create mode 100644 s3/server.go delete mode 100644 s3/utils/ip.go diff --git a/cmd/btfs/daemon.go b/cmd/btfs/daemon.go index c3e71d398..884eba03f 100644 --- a/cmd/btfs/daemon.go +++ b/cmd/btfs/daemon.go @@ -7,10 +7,10 @@ import ( "errors" _ "expvar" "fmt" + "github.com/bittorrent/go-btfs/s3" "github.com/bittorrent/go-btfs/s3/providers" "github.com/bittorrent/go-btfs/s3/providers/filestore" s3statestore "github.com/bittorrent/go-btfs/s3/providers/statestore" - "github.com/bittorrent/go-btfs/s3/server" "github.com/bittorrent/go-btfs/s3/services/accesskey" "io/ioutil" "math/rand" @@ -426,9 +426,6 @@ If the user need to start multiple nodes on the same machine, the configuration statestore.Close() }() - // access-key init - accesskey.InitService(s3statestore.NewStorageStateStoreProxy(statestore)) - if SimpleMode == false { chainid, stored, err := getChainID(req, cfg, statestore) if err != nil { @@ -721,6 +718,12 @@ If the user need to start multiple nodes on the same machine, the configuration functest(cfg.Services.OnlineServerDomain, cfg.Identity.PeerID, hValue) } + // access-key init + accesskey.InitService(s3.GetProviders(statestore)) + s3Server := s3.NewServer(statestore) + _ = s3Server.Start() + defer s3Server.Stop() + if SimpleMode == false { // set Analytics flag if specified if dc, ok := req.Options[enableDataCollection]; ok == true { @@ -1473,7 +1476,3 @@ func buildS3Providers(storageStore storage.StateStorer) *providers.Providers { filestore.NewLocalShell(), ) } - -func buildS3Server(providers providers.Providers, address string, corsAllowHeaders []string) *server.Server { - -} diff --git a/go.mod b/go.mod index 58953f52d..c7aed1319 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.18 require ( bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc 
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 + github.com/aws/aws-sdk-go v1.27.0 github.com/bittorrent/go-btfs-api v0.5.0 github.com/bittorrent/go-btfs-chunker v0.4.0 github.com/bittorrent/go-btfs-cmds v0.3.0 @@ -34,6 +35,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.3 github.com/google/uuid v1.3.0 + github.com/gorilla/mux v1.7.3 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d github.com/ip2location/ip2location-go/v9 v9.0.0 @@ -174,12 +176,12 @@ require ( github.com/golang/mock v1.6.0 // indirect github.com/google/btree v1.1.2 // indirect github.com/google/pprof v0.0.0-20221203041831-ce31453925ec // indirect - github.com/gorilla/mux v1.8.0 // indirect github.com/huandu/xstrings v1.3.2 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect github.com/ipfs/go-ipld-legacy v0.1.1 // indirect github.com/ipfs/go-ipns v0.3.0 // indirect github.com/ipld/edelweiss v0.2.0 // indirect + github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect github.com/libp2p/go-libp2p-core v0.20.1 // indirect github.com/libp2p/go-libp2p-xor v0.1.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.0 // indirect @@ -282,7 +284,7 @@ require ( github.com/ipld/go-ipld-prime v0.19.0 github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jinzhu/inflection v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect + github.com/json-iterator/go v1.1.12 github.com/kisielk/errcheck v1.5.0 // indirect github.com/klauspost/compress v1.15.15 // indirect github.com/klauspost/cpuid/v2 v2.2.4 // indirect @@ -325,7 +327,7 @@ require ( github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect - github.com/rs/cors v1.7.0 // indirect + github.com/rs/cors v1.7.0 github.com/segmentio/encoding v0.3.6 // indirect github.com/shirou/gopsutil 
v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect @@ -335,7 +337,7 @@ require ( github.com/tklauser/numcpus v0.2.2 // indirect github.com/ulikunitz/xz v0.5.6 // indirect github.com/vmihailenco/bufpool v0.1.11 // indirect - github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect + github.com/vmihailenco/msgpack/v4 v4.3.12 github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2 // indirect github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect diff --git a/go.sum b/go.sum index a27c84df3..ab57d764b 100644 --- a/go.sum +++ b/go.sum @@ -180,6 +180,7 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0 h1:0xphMHGMLBrPMfxR2AmVjZKcMEESEgWF8Kru94BNByk= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= @@ -601,9 +602,8 @@ github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKp github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux 
v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -894,6 +894,7 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index b22e6cccc..fd72f1f32 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -2,6 +2,7 @@ package handlers import ( + "fmt" "net/http" s3action "github.com/bittorrent/go-btfs/s3/action" @@ -57,9 +58,15 @@ func (h *Handlers) Sign(handler http.Handler) http.Handler { } func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { + fmt.Println("receive request") ctx := r.Context() req := &PutBucketRequest{} err := req.Bind(r) + + defer func() { + fmt.Println("handle err: ", err) + }() + if err != nil { WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) return @@ -71,26 
+78,30 @@ func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { return } - err = h.bucketSvc.CheckACL(accessKeyRecord, req.Bucket, s3action.CreateBucketAction) - if err != nil { - WriteErrorResponse(w, r, ToApiError(ctx, ErrNoSuchUserPolicy)) - return - } + //err = h.bucketSvc.CheckACL(accessKeyRecord, req.Bucket, s3action.CreateBucketAction) + //if err != nil { + // WriteErrorResponse(w, r, ToApiError(ctx, ErrNoSuchUserPolicy)) + // return + //} - if err := s3utils.CheckValidBucketNameStrict(req.Bucket); err != nil { + if err = s3utils.CheckValidBucketNameStrict(req.Bucket); err != nil { WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidBucketName)) return } + fmt.Println("4") if !checkPermissionType(req.ACL) { WriteErrorResponse(w, r, ToApiError(ctx, ErrNotImplemented)) } - if ok := h.bucketSvc.HasBucket(r.Context(), req.Bucket); !ok { + fmt.Println("3") + if ok := h.bucketSvc.HasBucket(r.Context(), req.Bucket); ok { WriteErrorResponseHeadersOnly(w, r, ToApiError(ctx, ErrBucketNotFound)) return } + fmt.Println("2") + fmt.Println(h.bucketSvc, accessKeyRecord) err = h.bucketSvc.CreateBucket(ctx, req.Bucket, req.Region, accessKeyRecord.Key, req.ACL) if err != nil { log.Errorf("PutBucketHandler create bucket error:%v", err) @@ -98,11 +109,14 @@ func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { return } + fmt.Println("1") // Make sure to add Location information here only for bucket if cp := pathClean(r.URL.Path); cp != "" { w.Header().Set(consts.Location, cp) // Clean any trailing slashes. 
} + fmt.Println("0") + WritePutBucketResponse(w, r) return diff --git a/s3/handlers/request.go b/s3/handlers/request.go index 87675f215..ccfb38075 100644 --- a/s3/handlers/request.go +++ b/s3/handlers/request.go @@ -44,6 +44,11 @@ func (req *PutBucketRequest) Bind(r *http.Request) (err error) { req.Bucket = bucket req.ACL = acl req.Region = region + + if req.ACL == "" { + req.ACL = policy.PublicRead + } + return } diff --git a/s3/handlers/response_error.go b/s3/handlers/response_error.go index ac3cb001c..7283280de 100644 --- a/s3/handlers/response_error.go +++ b/s3/handlers/response_error.go @@ -13,6 +13,7 @@ func WriteErrorResponseHeadersOnly(w http.ResponseWriter, r *http.Request, err E // WriteErrorResponse write ErrorResponse func WriteErrorResponse(w http.ResponseWriter, r *http.Request, errorCode ErrorCode) { + fmt.Println("response errcode: ", errorCode) vars := mux.Vars(r) bucket := vars["bucket"] object := vars["object"] diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 691cc8f4f..df2acda18 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -22,15 +22,15 @@ func NewRouters(handlers Handlerser, options ...Option) (routers *Routers) { func (routers *Routers) Register() http.Handler { root := mux.NewRouter() - root.Use(routers.handlers.Cors, routers.handlers.Sign) + root.Use(routers.handlers.Cors) bucket := root.PathPrefix("/{bucket}").Subrouter() - bucket.Methods(http.MethodPut).Path("/{bucket:.+}").HandlerFunc(routers.handlers.PutBucketHandler) - bucket.Methods(http.MethodHead).Path("/{bucket:.+}").HandlerFunc(routers.handlers.HeadBucketHandler) - bucket.Methods(http.MethodDelete).Path("/{bucket:.+}").HandlerFunc(routers.handlers.DeleteBucketHandler) - bucket.Methods(http.MethodGet).Path("/").HandlerFunc(routers.handlers.ListBucketsHandler) - bucket.Methods(http.MethodGet).Path("/{bucket:.+}").HandlerFunc(routers.handlers.GetBucketAclHandler).Queries("acl", "") - 
bucket.Methods(http.MethodPut).Path("/{bucket:.+}").HandlerFunc(routers.handlers.PutBucketAclHandler).Queries("acl", "") + bucket.Methods(http.MethodPut).HandlerFunc(routers.handlers.PutBucketHandler) + bucket.Methods(http.MethodHead).HandlerFunc(routers.handlers.HeadBucketHandler) + bucket.Methods(http.MethodDelete).HandlerFunc(routers.handlers.DeleteBucketHandler) + bucket.Methods(http.MethodGet).HandlerFunc(routers.handlers.ListBucketsHandler) + bucket.Methods(http.MethodGet).HandlerFunc(routers.handlers.GetBucketAclHandler).Queries("acl", "") + bucket.Methods(http.MethodPut).HandlerFunc(routers.handlers.PutBucketAclHandler).Queries("acl", "") //object //bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(routers.handlers.PutObjectHandler) diff --git a/s3/server.go b/s3/server.go new file mode 100644 index 000000000..8348244a8 --- /dev/null +++ b/s3/server.go @@ -0,0 +1,52 @@ +package s3 + +import ( + "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/providers" + "github.com/bittorrent/go-btfs/s3/providers/filestore" + "github.com/bittorrent/go-btfs/s3/providers/statestore" + "github.com/bittorrent/go-btfs/s3/routers" + "github.com/bittorrent/go-btfs/s3/server" + "github.com/bittorrent/go-btfs/s3/services/accesskey" + "github.com/bittorrent/go-btfs/s3/services/auth" + "github.com/bittorrent/go-btfs/s3/services/bucket" + "github.com/bittorrent/go-btfs/s3/services/cors" + "github.com/bittorrent/go-btfs/transaction/storage" + "sync" +) + +var ( + ps *providers.Providers + once sync.Once +) + +func GetProviders(storageStore storage.StateStorer) *providers.Providers { + once.Do(func() { + sstore := statestore.NewStorageStateStoreProxy(storageStore) + fstore := filestore.NewLocalShell() + ps = providers.NewProviders(sstore, fstore) + + }) + return ps +} + +func NewServer(storageStore storage.StateStorer) *server.Server { + _ = GetProviders(storageStore) + + // services + corsSvc := cors.NewService() + accessKeySvc := 
accesskey.NewService(ps) + authSvc := auth.NewService(ps, accessKeySvc) + bucketSvc := bucket.NewService(ps) + + // handlers + hs := handlers.NewHandlers(corsSvc, authSvc, bucketSvc, nil, nil) + + // routers + rs := routers.NewRouters(hs) + + // server + svr := server.NewServer(rs) + + return svr +} diff --git a/s3/services/auth/service.go b/s3/services/auth/service.go index ed48c84be..5ae457535 100644 --- a/s3/services/auth/service.go +++ b/s3/services/auth/service.go @@ -27,6 +27,5 @@ func NewService(providers services.Providerser, accessKeySvc handlers.AccessKeyS } func (s *Service) VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *handlers.AccessKeyRecord, err handlers.ErrorCode) { - s.CheckRequestAuthTypeCredential(ctx, r) - return + return s.CheckRequestAuthTypeCredential(ctx, r) } diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go index 32ed43211..d6f206706 100644 --- a/s3/services/auth/signature-v4-parser.go +++ b/s3/services/auth/signature-v4-parser.go @@ -60,7 +60,7 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) } credElements := strings.Split(strings.TrimSpace(creds[1]), consts.SlashSeparator) if len(credElements) < 5 { - return ch, handlers.ErrCredMalformed + return ch, handlers.ErrCodeCredMalformed } accessKey := strings.Join(credElements[:len(credElements)-4], consts.SlashSeparator) // The access key may contain one or more `/` //if !IsAccessKeyValid(accessKey) { diff --git a/s3/services/auth/signature-v4.go b/s3/services/auth/signature-v4.go index 56ec46442..989fedb38 100644 --- a/s3/services/auth/signature-v4.go +++ b/s3/services/auth/signature-v4.go @@ -236,7 +236,7 @@ func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, regi stringToSign := utils.GetStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) // Get hmac signing key. 
- signingKey := utils.GetSigningKey(cred.Key, signV4Values.Credential.scope.date, + signingKey := utils.GetSigningKey(cred.Secret, signV4Values.Credential.scope.date, signV4Values.Credential.scope.region, string(stype)) // Calculate signature. diff --git a/s3/utils/hash/reader.go b/s3/utils/hash/reader.go index 26157490d..f3464213d 100644 --- a/s3/utils/hash/reader.go +++ b/s3/utils/hash/reader.go @@ -6,7 +6,7 @@ import ( "encoding/base64" "encoding/hex" "errors" - "github.com/yann-y/fds/pkg/etag" + "github.com/bittorrent/go-btfs/s3/etag" "hash" "io" ) diff --git a/s3/utils/ip.go b/s3/utils/ip.go deleted file mode 100644 index d80841629..000000000 --- a/s3/utils/ip.go +++ /dev/null @@ -1,46 +0,0 @@ -package utils - -import ( - logging "github.com/ipfs/go-log/v2" - "github.com/yann-y/fds/internal/iam/set" - "net" - "runtime" -) - -var log = logging.Logger("utils") - -// MustGetLocalIP4 returns IPv4 addresses of localhost. It panics on error. -func MustGetLocalIP4() (ipList set.StringSet) { - ipList = set.NewStringSet() - ifs, err := net.Interfaces() - if err != nil { - log.Errorf("Unable to get IP addresses of this host %v", err) - - } - - for _, interf := range ifs { - addrs, err := interf.Addrs() - if err != nil { - continue - } - if runtime.GOOS == "windows" && interf.Flags&net.FlagUp == 0 { - continue - } - - for _, addr := range addrs { - var ip net.IP - switch v := addr.(type) { - case *net.IPNet: - ip = v.IP - case *net.IPAddr: - ip = v.IP - } - - if ip.To4() != nil { - ipList.Add(ip.String()) - } - } - } - - return ipList -} diff --git a/statestore/mock/store.go b/statestore/mock/store.go index 5bd3a4a9e..bf53f1c9b 100644 --- a/statestore/mock/store.go +++ b/statestore/mock/store.go @@ -1,6 +1,7 @@ package mock import ( + "context" "encoding" "encoding/json" "fmt" @@ -96,6 +97,10 @@ func (s *store) Iterate(prefix string, iterFunc storage.StateIterFunc) (err erro return nil } +func (s *store) ReadAllChan(ctx context.Context, prefix string, seekKey string) (c 
<-chan *storage.Entry, err error) { + return +} + // DB implements StateStorer.DB method. func (s *store) DB() *leveldb.DB { return nil From bd25a0057919eeaacece646f5cb16fa848aa50d4 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Mon, 21 Aug 2023 19:52:59 +0800 Subject: [PATCH 044/139] chore: check acl --- s3/handlers/errors.go | 3 ++- s3/handlers/handlers.go | 13 +++++++++---- s3/handlers/request.go | 9 +++++++-- s3/handlers/response.go | 9 +++++++++ s3/handlers/services_errors.go | 1 + s3/policy/policy.go | 11 ++++++++--- s3/routers/routers.go | 2 ++ 7 files changed, 38 insertions(+), 10 deletions(-) diff --git a/s3/handlers/errors.go b/s3/handlers/errors.go index f069c0528..c02320c83 100644 --- a/s3/handlers/errors.go +++ b/s3/handlers/errors.go @@ -40,7 +40,8 @@ func ToApiError(ctx context.Context, err error) ErrorCode { errCode = ErrCodeInternalError case ErrNotImplemented: errCode = ErrCodeNotImplemented - + case ErrBucketAlreadyExists: + errCode = ErrCodeBucketAlreadyExists //case lock.OperationTimedOut: // errCode = ErrCodeOperationTimedOut //case hash.SHA256Mismatch: diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index fd72f1f32..a4f925526 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -90,13 +90,14 @@ func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { } fmt.Println("4") - if !checkPermissionType(req.ACL) { + if !checkAclPermissionType(&req.ACL) { WriteErrorResponse(w, r, ToApiError(ctx, ErrNotImplemented)) + return } fmt.Println("3") if ok := h.bucketSvc.HasBucket(r.Context(), req.Bucket); ok { - WriteErrorResponseHeadersOnly(w, r, ToApiError(ctx, ErrBucketNotFound)) + WriteErrorResponseHeadersOnly(w, r, ToApiError(ctx, ErrBucketAlreadyExists)) return } @@ -131,6 +132,8 @@ func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { return } + fmt.Println("... 
head bucket ", req) + accessKeyRecord, errCode := h.authSvc.VerifySignature(ctx, r) if errCode != ErrCodeNone { WriteErrorResponse(w, r, errCode) @@ -221,6 +224,8 @@ func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { return } + fmt.Println("... get acl req: ", req) + accessKeyRecord, errCode := h.authSvc.VerifySignature(ctx, r) if errCode != ErrCodeNone { WriteErrorResponse(w, r, errCode) @@ -251,7 +256,7 @@ func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() req := &PutBucketAclRequest{} err := req.Bind(r) - if err != nil { + if err != nil || len(req.ACL) == 0 || len(req.Bucket) == 0 { WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) return } @@ -268,7 +273,7 @@ func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { return } - if !checkPermissionType(req.ACL) || req.ACL == "" { + if !checkAclPermissionType(&req.ACL) { WriteErrorResponse(w, r, ToApiError(ctx, ErrNotImplemented)) return } diff --git a/s3/handlers/request.go b/s3/handlers/request.go index ccfb38075..d7f0f00d7 100644 --- a/s3/handlers/request.go +++ b/s3/handlers/request.go @@ -178,8 +178,13 @@ func pathClean(p string) string { // return tagging, nil //} -func checkPermissionType(s string) bool { - switch s { +func checkAclPermissionType(s *string) bool { + if len(*s) == 0 { + *s = policy.PublicRead + return true + } + + switch *s { case policy.PublicRead: return true case policy.PublicReadWrite: diff --git a/s3/handlers/response.go b/s3/handlers/response.go index 9cd2f1456..d9e0a14aa 100644 --- a/s3/handlers/response.go +++ b/s3/handlers/response.go @@ -1,6 +1,7 @@ package handlers import ( + "fmt" "net/http" "github.com/aws/aws-sdk-go/aws" @@ -46,11 +47,16 @@ func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, bucketMeta func WriteGetBucketAclResponse(w http.ResponseWriter, r *http.Request, accessKeyRecord *AccessKeyRecord, acl string) { resp := AccessControlPolicy{} 
+ fmt.Printf(" -1- get acl resp: %+v \n", resp) + id := accessKeyRecord.Key if resp.Owner.DisplayName == "" { resp.Owner.DisplayName = accessKeyRecord.Key resp.Owner.ID = id } + fmt.Printf(" -2- get acl resp: %+v \n", resp) + + resp.AccessControlList.Grant = make([]Grant, 0) resp.AccessControlList.Grant = append(resp.AccessControlList.Grant, Grant{ Grantee: Grantee{ ID: id, @@ -60,6 +66,9 @@ func WriteGetBucketAclResponse(w http.ResponseWriter, r *http.Request, accessKey XMLNS: "http://www.w3.org/2001/XMLSchema-instance"}, Permission: Permission(acl), //todo change }) + fmt.Printf(" -3- get acl resp: %+v \n", resp) + + fmt.Printf("get acl resp: %+v \n", resp) WriteSuccessResponseXML(w, r, resp) return diff --git a/s3/handlers/services_errors.go b/s3/handlers/services_errors.go index f0d447c62..13533b885 100644 --- a/s3/handlers/services_errors.go +++ b/s3/handlers/services_errors.go @@ -9,6 +9,7 @@ var ( ErrInvalidBucketName = errors.New("bucket name is invalid") ErrBucketNotFound = errors.New("bucket is not found") + ErrBucketAlreadyExists = errors.New("bucket is already exists") ErrBucketAccessDenied = errors.New("bucket access denied") ErrSetBucketEmptyFailed = errors.New("set bucket empty failed") ErrCreateBucket = errors.New("create bucket failed") diff --git a/s3/policy/policy.go b/s3/policy/policy.go index c761fe8c9..a59b76558 100644 --- a/s3/policy/policy.go +++ b/s3/policy/policy.go @@ -53,12 +53,17 @@ func checkActionInPublicRead(action s3action.Action) bool { } func IsAllowed(own bool, acl string, action s3action.Action) (allow bool) { - // 1.if bucket + // 1.如果是自己,都能操作 + if own { + return true + } + + // 2.如果是别人,不能操作bucket if action.IsBucketAction() { - return own + return false } - // 2.if object + // 2.如果是别人,区分acl操作object if action.IsObjectAction() { switch acl { case Private: diff --git a/s3/routers/routers.go b/s3/routers/routers.go index df2acda18..b32d21e5f 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -29,6 +29,8 @@ func 
(routers *Routers) Register() http.Handler { bucket.Methods(http.MethodHead).HandlerFunc(routers.handlers.HeadBucketHandler) bucket.Methods(http.MethodDelete).HandlerFunc(routers.handlers.DeleteBucketHandler) bucket.Methods(http.MethodGet).HandlerFunc(routers.handlers.ListBucketsHandler) + //bucket.Methods(http.MethodGet).HandlerFunc(routers.handlers.GetBucketAclHandler) + //bucket.Methods(http.MethodPut).HandlerFunc(routers.handlers.PutBucketAclHandler) bucket.Methods(http.MethodGet).HandlerFunc(routers.handlers.GetBucketAclHandler).Queries("acl", "") bucket.Methods(http.MethodPut).HandlerFunc(routers.handlers.PutBucketAclHandler).Queries("acl", "") From 0fab872ea351431e26482c36c3c8a61016d171f1 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Tue, 22 Aug 2023 12:22:29 +0800 Subject: [PATCH 045/139] chore --- s3/handlers/handlers.go | 2 +- s3/handlers/response.go | 2 +- s3/handlers/services.go | 2 +- s3/providers/statestore/storage_proxy.go | 5 --- s3/services/bucket/service.go | 28 ++++++++-------- s3/services/providerser.go | 3 -- statestore/leveldb/leveldb.go | 42 ------------------------ statestore/mock/store.go | 5 --- transaction/storage/store.go | 17 ---------- 9 files changed, 16 insertions(+), 90 deletions(-) diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index a4f925526..c1f7ecf2a 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -206,7 +206,7 @@ func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { } //todo check all errors - bucketMetas, err := h.bucketSvc.GetAllBucketsOfUser(ctx, accessKeyRecord.Key) + bucketMetas, err := h.bucketSvc.GetAllBucketsOfUser(accessKeyRecord.Key) if err != nil { WriteErrorResponse(w, r, ToApiError(ctx, err)) return diff --git a/s3/handlers/response.go b/s3/handlers/response.go index d9e0a14aa..f505de24c 100644 --- a/s3/handlers/response.go +++ b/s3/handlers/response.go @@ -24,7 +24,7 @@ func WriteDeleteBucketResponse(w http.ResponseWriter) { return } 
-func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, bucketMetas []BucketMetadata) { +func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, bucketMetas []*BucketMetadata) { var buckets []*s3.Bucket for _, b := range bucketMetas { buckets = append(buckets, &s3.Bucket{ diff --git a/s3/handlers/services.go b/s3/handlers/services.go index 91d244e4e..0178e083b 100644 --- a/s3/handlers/services.go +++ b/s3/handlers/services.go @@ -34,7 +34,7 @@ type BucketService interface { HasBucket(ctx context.Context, bucket string) bool SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) DeleteBucket(ctx context.Context, bucket string) error - GetAllBucketsOfUser(ctx context.Context, accessKey string) ([]BucketMetadata, error) + GetAllBucketsOfUser(username string) (list []*BucketMetadata, err error) UpdateBucketAcl(ctx context.Context, bucket, acl string) error GetBucketAcl(ctx context.Context, bucket string) (string, error) } diff --git a/s3/providers/statestore/storage_proxy.go b/s3/providers/statestore/storage_proxy.go index ee5d97a6b..4ef6045b0 100644 --- a/s3/providers/statestore/storage_proxy.go +++ b/s3/providers/statestore/storage_proxy.go @@ -1,7 +1,6 @@ package statestore import ( - "context" "errors" "github.com/bittorrent/go-btfs/s3/services" "github.com/bittorrent/go-btfs/transaction/storage" @@ -42,7 +41,3 @@ func (s *StorageProxy) Delete(key string) (err error) { func (s *StorageProxy) Iterate(prefix string, iterFunc services.StateStoreIterFunc) (err error) { return s.proxy.Iterate(prefix, storage.StateIterFunc(iterFunc)) } - -func (s *StorageProxy) ReadAllChan(ctx context.Context, prefix string, seekKey string) (<-chan *storage.Entry, error) { - return s.proxy.ReadAllChan(ctx, prefix, seekKey) -} diff --git a/s3/services/bucket/service.go b/s3/services/bucket/service.go index 0baf134d7..d251d2175 100644 --- a/s3/services/bucket/service.go +++ b/s3/services/bucket/service.go @@ -144,23 +144,21 @@ func 
(s *Service) SetEmptyBucket(emptyBucket func(ctx context.Context, bucket st } // GetAllBucketsOfUser metadata for all bucket. -func (s *Service) GetAllBucketsOfUser(ctx context.Context, username string) ([]handlers.BucketMetadata, error) { - var m []handlers.BucketMetadata - all, err := s.providers.GetStateStore().ReadAllChan(ctx, bucketPrefix, "") - if err != nil { - return nil, err - } - for entry := range all { - data := handlers.BucketMetadata{} - if err = entry.UnmarshalValue(&data); err != nil { - continue +func (s *Service) GetAllBucketsOfUser(username string) (list []*handlers.BucketMetadata, err error) { + err = s.providers.GetStateStore().Iterate(bucketPrefix, func(key, _ []byte) (stop bool, er error) { + record := &handlers.BucketMetadata{} + er = s.providers.GetStateStore().Get(string(key), record) + if er != nil { + return } - if data.Owner != username { - continue + if record.Owner == username { + list = append(list, record) } - m = append(m, data) - } - return m, nil + + return + }) + + return } // UpdateBucketAcl . 
diff --git a/s3/services/providerser.go b/s3/services/providerser.go index cbb508ca9..796291d97 100644 --- a/s3/services/providerser.go +++ b/s3/services/providerser.go @@ -1,8 +1,6 @@ package services import ( - "context" - "github.com/bittorrent/go-btfs/transaction/storage" "io" ) @@ -23,5 +21,4 @@ type StateStorer interface { Put(key string, i interface{}) (err error) Delete(key string) (err error) Iterate(prefix string, iterFunc StateStoreIterFunc) (err error) - ReadAllChan(ctx context.Context, prefix string, seekKey string) (<-chan *storage.Entry, error) } diff --git a/statestore/leveldb/leveldb.go b/statestore/leveldb/leveldb.go index 090f6936c..c74413d97 100644 --- a/statestore/leveldb/leveldb.go +++ b/statestore/leveldb/leveldb.go @@ -1,7 +1,6 @@ package leveldb import ( - "context" "encoding" "encoding/json" "errors" @@ -13,11 +12,8 @@ import ( "github.com/syndtr/goleveldb/leveldb" ldb "github.com/syndtr/goleveldb/leveldb" ldberr "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" ldbs "github.com/syndtr/goleveldb/leveldb/storage" "github.com/syndtr/goleveldb/leveldb/util" - "go.uber.org/zap/buffer" ) var log = logging.Logger("leveldb") @@ -175,41 +171,3 @@ func (s *store) DB() *leveldb.DB { func (s *store) Close() error { return s.db.Close() } - -// NewIterator /** -func (l *store) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { - return l.db.NewIterator(slice, ro) -} - -//ReadAllChan read all key value -func (l *store) ReadAllChan(ctx context.Context, prefix string, seekKey string) (<-chan *storage.Entry, error) { - ch := make(chan *storage.Entry) - var slice *util.Range - if prefix != "" { - slice = util.BytesPrefix([]byte(prefix)) - } - iter := l.NewIterator(slice, nil) - if seekKey != "" { - iter.Seek([]byte(seekKey)) - } - go func() { - defer func() { - iter.Release() - close(ch) - }() - for iter.Next() { - key := string(iter.Key()) - buf := 
buffer.Buffer{} - buf.Write(iter.Value()) - select { - case <-ctx.Done(): - return - case ch <- &storage.Entry{ - Key: key, - Value: buf.Bytes(), - }: - } - } - }() - return ch, nil -} diff --git a/statestore/mock/store.go b/statestore/mock/store.go index bf53f1c9b..5bd3a4a9e 100644 --- a/statestore/mock/store.go +++ b/statestore/mock/store.go @@ -1,7 +1,6 @@ package mock import ( - "context" "encoding" "encoding/json" "fmt" @@ -97,10 +96,6 @@ func (s *store) Iterate(prefix string, iterFunc storage.StateIterFunc) (err erro return nil } -func (s *store) ReadAllChan(ctx context.Context, prefix string, seekKey string) (c <-chan *storage.Entry, err error) { - return -} - // DB implements StateStorer.DB method. func (s *store) DB() *leveldb.DB { return nil diff --git a/transaction/storage/store.go b/transaction/storage/store.go index 440b254a9..d6df4d3e8 100644 --- a/transaction/storage/store.go +++ b/transaction/storage/store.go @@ -1,9 +1,6 @@ package storage import ( - "context" - "encoding" - "encoding/json" "errors" "io" @@ -161,19 +158,6 @@ var ( // SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan Descriptor, closed <-chan struct{}, stop func()) //} -// Entry 特别注意:Entry是否需要专门处理下,在ReadAllChan解析出来数据的时候 -type Entry struct { - Key string - Value []byte -} - -func (e *Entry) UnmarshalValue(value interface{}) error { - if unmarshaler, ok := value.(encoding.BinaryUnmarshaler); ok { - return unmarshaler.UnmarshalBinary(e.Value) - } - return json.Unmarshal(e.Value, value) -} - // StateStorer defines methods required to get, set, delete values for different keys // and close the underlying resources. type StateStorer interface { @@ -181,7 +165,6 @@ type StateStorer interface { Put(key string, i interface{}) (err error) Delete(key string) (err error) Iterate(prefix string, iterFunc StateIterFunc) (err error) - ReadAllChan(ctx context.Context, prefix string, seekKey string) (<-chan *Entry, error) // DB returns the underlying DB storage. 
DB() *leveldb.DB io.Closer From 7d83079bf8b39c97c2acd90bc5bfce7042b7b637 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Tue, 22 Aug 2023 15:48:26 +0800 Subject: [PATCH 046/139] chore: adjust bucket url --- s3/handlers/handlers.go | 9 +++++++++ s3/handlers/response_error.go | 2 +- s3/routers/routers.go | 10 +++++----- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index c1f7ecf2a..6d34f2b7f 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -58,6 +58,7 @@ func (h *Handlers) Sign(handler http.Handler) http.Handler { } func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { + fmt.Println("... PutBucketHandler: begin") fmt.Println("receive request") ctx := r.Context() req := &PutBucketRequest{} @@ -124,6 +125,7 @@ func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { } func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { + fmt.Println("... HeadBucketHandler: begin") ctx := r.Context() req := &HeadBucketRequest{} err := req.Bind(r) @@ -155,6 +157,7 @@ func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { } func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { + fmt.Println("... DeleteBucketHandler: begin") ctx := r.Context() req := &DeleteBucketRequest{} err := req.Bind(r) @@ -185,6 +188,7 @@ func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { } func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { + fmt.Println("... ListBucketsHandler: begin") ctx := r.Context() req := &ListBucketsRequest{} err := req.Bind(r) @@ -216,6 +220,8 @@ func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { } func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { + fmt.Println("... 
get acl req: begin") + ctx := r.Context() req := &GetBucketAclRequest{} err := req.Bind(r) @@ -249,10 +255,13 @@ func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { return } + fmt.Println("... get acl = ", req) + WriteGetBucketAclResponse(w, r, accessKeyRecord, acl) } func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { + fmt.Println("... PutBucketAclHandler: begin") ctx := r.Context() req := &PutBucketAclRequest{} err := req.Bind(r) diff --git a/s3/handlers/response_error.go b/s3/handlers/response_error.go index 7283280de..2d6a81261 100644 --- a/s3/handlers/response_error.go +++ b/s3/handlers/response_error.go @@ -13,7 +13,7 @@ func WriteErrorResponseHeadersOnly(w http.ResponseWriter, r *http.Request, err E // WriteErrorResponse write ErrorResponse func WriteErrorResponse(w http.ResponseWriter, r *http.Request, errorCode ErrorCode) { - fmt.Println("response errcode: ", errorCode) + fmt.Println("response errcode: ", errorCode, r.URL, r.Method, r.Header) vars := mux.Vars(r) bucket := vars["bucket"] object := vars["object"] diff --git a/s3/routers/routers.go b/s3/routers/routers.go index b32d21e5f..b56f8c373 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -25,14 +25,14 @@ func (routers *Routers) Register() http.Handler { root.Use(routers.handlers.Cors) bucket := root.PathPrefix("/{bucket}").Subrouter() + bucket.Methods(http.MethodGet).HandlerFunc(routers.handlers.GetBucketAclHandler).Queries("acl", "") + bucket.Methods(http.MethodPut).HandlerFunc(routers.handlers.PutBucketAclHandler).Queries("acl", "") + bucket.Methods(http.MethodPut).HandlerFunc(routers.handlers.PutBucketHandler) bucket.Methods(http.MethodHead).HandlerFunc(routers.handlers.HeadBucketHandler) bucket.Methods(http.MethodDelete).HandlerFunc(routers.handlers.DeleteBucketHandler) - bucket.Methods(http.MethodGet).HandlerFunc(routers.handlers.ListBucketsHandler) - 
//bucket.Methods(http.MethodGet).HandlerFunc(routers.handlers.GetBucketAclHandler) - //bucket.Methods(http.MethodPut).HandlerFunc(routers.handlers.PutBucketAclHandler) - bucket.Methods(http.MethodGet).HandlerFunc(routers.handlers.GetBucketAclHandler).Queries("acl", "") - bucket.Methods(http.MethodPut).HandlerFunc(routers.handlers.PutBucketAclHandler).Queries("acl", "") + + root.Methods(http.MethodGet).Path("/").HandlerFunc(routers.handlers.ListBucketsHandler) //object //bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(routers.handlers.PutObjectHandler) From a869bfdbad184873ba412ced7810e0f7cf143335 Mon Sep 17 00:00:00 2001 From: Steve Date: Wed, 23 Aug 2023 16:23:20 +0800 Subject: [PATCH 047/139]  --- s3/handlers/accesskey.go | 7 - s3/handlers/cctx/access_key.go | 21 + s3/handlers/cctx/key.go | 7 + s3/handlers/errors.go | 87 -- s3/handlers/handlers.go | 192 ++- .../handlerser.go => handlers/proto.go} | 4 +- .../{request.go => requests/parsers.go} | 26 +- s3/handlers/requests/types.go | 8 + s3/handlers/response_comm.go | 584 -------- s3/handlers/response_comm_acl.go | 49 - s3/handlers/response_error.go | 39 - s3/handlers/responses/error.go | 1035 +++++++++++++ s3/handlers/responses/types.go | 191 +++ s3/handlers/responses/types_common.go | 1 + .../{response.go => responses/wirters.go} | 15 +- s3/handlers/responses/writers_common.go | 171 +++ s3/handlers/s3_error.go | 44 - s3/handlers/s3api_errors.go | 1312 ----------------- s3/handlers/services.go | 46 - s3/handlers/services_errors.go | 19 - s3/handlers/services_types.go | 24 - s3/handlers/to_response_err.go | 18 + s3/lock/lock.go | 217 --- s3/lock/rwmutex.go | 154 -- s3/providers/file_store.go | 17 + s3/providers/filestore/local_shell.go | 18 - .../providerser.go => providers/proto.go} | 9 +- s3/providers/providers.go | 24 +- s3/providers/state_store.go | 42 + s3/providers/statestore/storage_proxy.go | 43 - s3/routers/routers.go | 5 +- s3/{server => routers}/routerser.go | 2 +- s3/server.go | 6 +- 
s3/server/server.go | 8 +- s3/services/accesskey/service.go | 32 +- s3/services/accesskey/service_instance.go | 10 +- s3/services/auth/check_handler_auth.go | 59 +- s3/services/auth/service.go | 16 +- s3/services/auth/signature-v4-parser.go | 136 +- s3/services/auth/signature-v4-utils.go | 10 +- s3/services/auth/signature-v4.go | 81 +- s3/services/bucket/service.go | 36 +- s3/services/cors/service.go | 4 +- s3/services/multipart/service.go | 4 +- s3/services/proto.go | 84 ++ s3/services/providerser_errors.go | 7 - s3/services/providerser_types.go | 3 - 47 files changed, 1931 insertions(+), 2996 deletions(-) delete mode 100644 s3/handlers/accesskey.go create mode 100644 s3/handlers/cctx/access_key.go create mode 100644 s3/handlers/cctx/key.go delete mode 100644 s3/handlers/errors.go rename s3/{routers/handlerser.go => handlers/proto.go} (89%) rename s3/handlers/{request.go => requests/parsers.go} (89%) create mode 100644 s3/handlers/requests/types.go delete mode 100644 s3/handlers/response_comm.go delete mode 100644 s3/handlers/response_comm_acl.go delete mode 100644 s3/handlers/response_error.go create mode 100644 s3/handlers/responses/error.go create mode 100644 s3/handlers/responses/types.go create mode 100644 s3/handlers/responses/types_common.go rename s3/handlers/{response.go => responses/wirters.go} (86%) create mode 100644 s3/handlers/responses/writers_common.go delete mode 100644 s3/handlers/s3_error.go delete mode 100644 s3/handlers/s3api_errors.go delete mode 100644 s3/handlers/services.go delete mode 100644 s3/handlers/services_errors.go delete mode 100644 s3/handlers/services_types.go create mode 100644 s3/handlers/to_response_err.go delete mode 100644 s3/lock/lock.go delete mode 100644 s3/lock/rwmutex.go create mode 100644 s3/providers/file_store.go delete mode 100644 s3/providers/filestore/local_shell.go rename s3/{services/providerser.go => providers/proto.go} (77%) create mode 100644 s3/providers/state_store.go delete mode 100644 
s3/providers/statestore/storage_proxy.go rename s3/{server => routers}/routerser.go (82%) create mode 100644 s3/services/proto.go delete mode 100644 s3/services/providerser_errors.go delete mode 100644 s3/services/providerser_types.go diff --git a/s3/handlers/accesskey.go b/s3/handlers/accesskey.go deleted file mode 100644 index 6e767a668..000000000 --- a/s3/handlers/accesskey.go +++ /dev/null @@ -1,7 +0,0 @@ -package handlers - -import ( - "errors" -) - -var ErrAccessKeyIsNotFound = errors.New("access-key is not found") diff --git a/s3/handlers/cctx/access_key.go b/s3/handlers/cctx/access_key.go new file mode 100644 index 000000000..2eb57c310 --- /dev/null +++ b/s3/handlers/cctx/access_key.go @@ -0,0 +1,21 @@ +package cctx + +import ( + "context" + "github.com/bittorrent/go-btfs/s3/services" + "net/http" +) + +func SetAccessKey(r *http.Request, ack *services.AccessKey) { + ctx := context.WithValue(r.Context(), keyOfAccessKey, ack) + r.WithContext(ctx) +} + +func GetAccessKey(r *http.Request) (ack *services.AccessKey) { + v := r.Context().Value(keyOfAccessKey) + if v == nil { + return + } + ack, _ = v.(*services.AccessKey) + return +} diff --git a/s3/handlers/cctx/key.go b/s3/handlers/cctx/key.go new file mode 100644 index 000000000..8abc7c5a1 --- /dev/null +++ b/s3/handlers/cctx/key.go @@ -0,0 +1,7 @@ +package cctx + +type key struct{} + +var ( + keyOfAccessKey = &key{} +) diff --git a/s3/handlers/errors.go b/s3/handlers/errors.go deleted file mode 100644 index c02320c83..000000000 --- a/s3/handlers/errors.go +++ /dev/null @@ -1,87 +0,0 @@ -package handlers - -import ( - "context" -) - -// NotImplemented If a feature is not implemented -type NotImplemented struct { - Message string -} - -// ContextCanceled returns whether a context is canceled. 
-func ContextCanceled(ctx context.Context) bool { - select { - case <-ctx.Done(): - return true - default: - return false - } -} - -func ToApiError(ctx context.Context, err error) ErrorCode { - if ContextCanceled(ctx) { - if ctx.Err() == context.Canceled { - return ErrCodeClientDisconnected - } - } - errCode := ErrCodeInternalError - switch err { - case ErrInvalidArgument: - errCode = ErrCodeInvalidRequestBody //实际是request请求信息, header or query uri 信息。 - case ErrInvalidBucketName: - errCode = ErrCodeInvalidBucketName - case ErrBucketNotFound: - errCode = ErrCodeNoSuchBucket - case ErrBucketAccessDenied: - errCode = ErrCodeAccessDenied - case ErrSetBucketEmptyFailed: - case ErrCreateBucket: - errCode = ErrCodeInternalError - case ErrNotImplemented: - errCode = ErrCodeNotImplemented - case ErrBucketAlreadyExists: - errCode = ErrCodeBucketAlreadyExists - //case lock.OperationTimedOut: - // errCode = ErrCodeOperationTimedOut - //case hash.SHA256Mismatch: - // errCode = ErrCodeContentSHA256Mismatch - //case hash.BadDigest: - // errCode = ErrCodeBadDigest - //case store.BucketPolicyNotFound: - // errCode = ErrCodeNoSuchBucketPolicy - //case store.BucketTaggingNotFound: - // errCode = ErrBucketTaggingNotFound - //case s3utils.BucketNameInvalid: - // errCode = ErrCodeInvalidBucketName - //case s3utils.ObjectNameInvalid: - // errCode = ErrCodeInvalidObjectName - //case s3utils.ObjectNameTooLong: - // errCode = ErrCodeKeyTooLongError - //case s3utils.ObjectNamePrefixAsSlash: - // errCode = ErrCodeInvalidObjectNamePrefixSlash - //case s3utils.InvalidUploadIDKeyCombination: - // errCode = ErrCodeNotImplemented - //case s3utils.InvalidMarkerPrefixCombination: - // errCode = ErrCodeNotImplemented - //case s3utils.MalformedUploadID: - // errCode = ErrCodeNoSuchUpload - //case s3utils.InvalidUploadID: - // errCode = ErrCodeNoSuchUpload - //case s3utils.InvalidPart: - // errCode = ErrCodeInvalidPart - //case s3utils.PartTooSmall: - // errCode = ErrCodeEntityTooSmall - //case 
s3utils.PartTooBig: - // errCode = ErrCodeEntityTooLarge - //case url.EscapeError: - // errCode = ErrCodeInvalidObjectName - //default: - // if xerrors.Is(err, store.ErrObjectNotFound) { - // errCode = ErrCodeNoSuchKey - // } else if xerrors.Is(err, store.ErrBucketNotEmpty) { - // errCode = ErrCodeBucketNotEmpty - // } - } - return errCode -} diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 6d34f2b7f..49fb36c65 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -3,31 +3,34 @@ package handlers import ( "fmt" + "github.com/bittorrent/go-btfs/s3/handlers/cctx" + "github.com/bittorrent/go-btfs/s3/handlers/requests" + "github.com/bittorrent/go-btfs/s3/handlers/responses" + "github.com/bittorrent/go-btfs/s3/services" "net/http" s3action "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/routers" "github.com/bittorrent/go-btfs/s3/s3utils" "github.com/rs/cors" ) -var _ routers.Handlerser = (*Handlers)(nil) +var _ Handlerser = (*Handlers)(nil) type Handlers struct { - corsSvc CorsService - authSvc AuthService - bucketSvc BucketService - objectSvc ObjectService - multipartSvc MultipartService + corsSvc services.CorsService + authSvc services.AuthService + bucketSvc services.BucketService + objectSvc services.ObjectService + multipartSvc services.MultipartService } func NewHandlers( - corsSvc CorsService, - authSvc AuthService, - bucketSvc BucketService, - objectSvc ObjectService, - multipartSvc MultipartService, + corsSvc services.CorsService, + authSvc services.AuthService, + bucketSvc services.BucketService, + objectSvc services.ObjectService, + multipartSvc services.MultipartService, options ...Option, ) (handlers *Handlers) { handlers = &Handlers{ @@ -53,29 +56,26 @@ func (h *Handlers) Cors(handler http.Handler) http.Handler { }).Handler(handler) } -func (h *Handlers) Sign(handler http.Handler) http.Handler { - return nil +func (h *Handlers) Auth(handler 
http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ack, rErr := h.authSvc.VerifySignature(r.Context(), r) + if rErr != nil { + responses.WriteErrorResponse(w, r, rErr) + return + } + cctx.SetAccessKey(r, ack) + handler.ServeHTTP(w, r) + }) } func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { fmt.Println("... PutBucketHandler: begin") - fmt.Println("receive request") - ctx := r.Context() - req := &PutBucketRequest{} - err := req.Bind(r) - defer func() { - fmt.Println("handle err: ", err) - }() + ctx := r.Context() + req, err := requests.ParsePubBucketRequest(r) if err != nil { - WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) - return - } - - accessKeyRecord, errCode := h.authSvc.VerifySignature(ctx, r) - if errCode != ErrCodeNone { - WriteErrorResponse(w, r, errCode) + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) return } @@ -86,214 +86,196 @@ func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { //} if err = s3utils.CheckValidBucketNameStrict(req.Bucket); err != nil { - WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidBucketName)) + responses.WriteErrorResponse(w, r, responses.ErrInvalidBucketName) return } fmt.Println("4") - if !checkAclPermissionType(&req.ACL) { - WriteErrorResponse(w, r, ToApiError(ctx, ErrNotImplemented)) + if !requests.CheckAclPermissionType(&req.ACL) { + responses.WriteErrorResponse(w, r, responses.ErrNotImplemented) return } fmt.Println("3") - if ok := h.bucketSvc.HasBucket(r.Context(), req.Bucket); ok { - WriteErrorResponseHeadersOnly(w, r, ToApiError(ctx, ErrBucketAlreadyExists)) + if ok := h.bucketSvc.HasBucket(ctx, req.Bucket); ok { + responses.WriteErrorResponseHeadersOnly(w, r, responses.ErrBucketAlreadyExists) return } fmt.Println("2") - fmt.Println(h.bucketSvc, accessKeyRecord) - err = h.bucketSvc.CreateBucket(ctx, req.Bucket, req.Region, accessKeyRecord.Key, req.ACL) + err = 
h.bucketSvc.CreateBucket(ctx, req.Bucket, req.Region, cctx.GetAccessKey(r).Key, req.ACL) if err != nil { - log.Errorf("PutBucketHandler create bucket error:%v", err) - WriteErrorResponse(w, r, ToApiError(ctx, ErrCreateBucket)) + responses.WriteErrorResponse(w, r, responses.ErrInternalError) return } fmt.Println("1") // Make sure to add Location information here only for bucket - if cp := pathClean(r.URL.Path); cp != "" { + if cp := requests.PathClean(r.URL.Path); cp != "" { w.Header().Set(consts.Location, cp) // Clean any trailing slashes. } fmt.Println("0") - WritePutBucketResponse(w, r) + responses.WritePutBucketResponse(w, r) return } func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { fmt.Println("... HeadBucketHandler: begin") + ctx := r.Context() - req := &HeadBucketRequest{} + ack := cctx.GetAccessKey(r) + + req := &requests.HeadBucketRequest{} err := req.Bind(r) if err != nil { - WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) return } fmt.Println("... 
head bucket ", req) - accessKeyRecord, errCode := h.authSvc.VerifySignature(ctx, r) - if errCode != ErrCodeNone { - WriteErrorResponse(w, r, errCode) - return - } - - err = h.bucketSvc.CheckACL(accessKeyRecord, req.Bucket, s3action.HeadBucketAction) + err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) if err != nil { - WriteErrorResponse(w, r, ToApiError(ctx, ErrNoSuchUserPolicy)) + responses.WriteErrorResponse(w, r, ToResponseErr(err)) return } - if ok := h.bucketSvc.HasBucket(r.Context(), req.Bucket); !ok { - WriteErrorResponseHeadersOnly(w, r, ToApiError(ctx, ErrBucketNotFound)) + if ok := h.bucketSvc.HasBucket(ctx, req.Bucket); !ok { + responses.WriteErrorResponseHeadersOnly(w, r, responses.ErrNoSuchBucket) return } - WriteHeadBucketResponse(w, r) + responses.WriteHeadBucketResponse(w, r) } func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { fmt.Println("... DeleteBucketHandler: begin") + ctx := r.Context() - req := &DeleteBucketRequest{} + ack := cctx.GetAccessKey(r) + + req := &requests.DeleteBucketRequest{} err := req.Bind(r) if err != nil { - WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) - return - } - - accessKeyRecord, errCode := h.authSvc.VerifySignature(ctx, r) - if errCode != ErrCodeNone { - WriteErrorResponse(w, r, errCode) + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) return } - err = h.bucketSvc.CheckACL(accessKeyRecord, req.Bucket, s3action.HeadBucketAction) + err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) if err != nil { - WriteErrorResponse(w, r, ToApiError(ctx, ErrNoSuchUserPolicy)) + responses.WriteErrorResponse(w, r, ToResponseErr(err)) return } //todo check all errors. 
err = h.bucketSvc.DeleteBucket(ctx, req.Bucket) if err != nil { - WriteErrorResponse(w, r, ToApiError(ctx, err)) + responses.WriteErrorResponse(w, r, ToResponseErr(err)) return } - WriteDeleteBucketResponse(w) + + responses.WriteDeleteBucketResponse(w) } func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { fmt.Println("... ListBucketsHandler: begin") - ctx := r.Context() - req := &ListBucketsRequest{} + + ack := cctx.GetAccessKey(r) + + req := &requests.ListBucketsRequest{} err := req.Bind(r) if err != nil { - WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) return } - accessKeyRecord, errCode := h.authSvc.VerifySignature(ctx, r) - if errCode != ErrCodeNone { - WriteErrorResponse(w, r, errCode) - return - } - - err = h.bucketSvc.CheckACL(accessKeyRecord, req.Bucket, s3action.ListBucketAction) + err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.ListBucketAction) if err != nil { - WriteErrorResponse(w, r, ToApiError(ctx, ErrNoSuchUserPolicy)) + responses.WriteErrorResponse(w, r, ToResponseErr(err)) return } //todo check all errors - bucketMetas, err := h.bucketSvc.GetAllBucketsOfUser(accessKeyRecord.Key) + bucketMetas, err := h.bucketSvc.GetAllBucketsOfUser(ack.Key) if err != nil { - WriteErrorResponse(w, r, ToApiError(ctx, err)) + responses.WriteErrorResponse(w, r, ToResponseErr(err)) return } - WriteListBucketsResponse(w, r, bucketMetas) + responses.WriteListBucketsResponse(w, r, bucketMetas) } func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { fmt.Println("... get acl req: begin") ctx := r.Context() - req := &GetBucketAclRequest{} + ack := cctx.GetAccessKey(r) + + req := &requests.GetBucketAclRequest{} err := req.Bind(r) if err != nil { - WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) return } fmt.Println("... 
get acl req: ", req) - accessKeyRecord, errCode := h.authSvc.VerifySignature(ctx, r) - if errCode != ErrCodeNone { - WriteErrorResponse(w, r, errCode) - return - } - - err = h.bucketSvc.CheckACL(accessKeyRecord, req.Bucket, s3action.GetBucketAclAction) + err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.GetBucketAclAction) if err != nil { - WriteErrorResponse(w, r, ToApiError(ctx, ErrNoSuchUserPolicy)) + responses.WriteErrorResponse(w, r, ToResponseErr(err)) return } if !h.bucketSvc.HasBucket(ctx, req.Bucket) { - WriteErrorResponseHeadersOnly(w, r, ToApiError(ctx, ErrBucketNotFound)) + responses.WriteErrorResponseHeadersOnly(w, r, responses.ErrNoSuchBucket) return } //todo check all errors acl, err := h.bucketSvc.GetBucketAcl(ctx, req.Bucket) if err != nil { - WriteErrorResponse(w, r, ToApiError(ctx, err)) + responses.WriteErrorResponse(w, r, ToResponseErr(err)) return } fmt.Println("... get acl = ", req) - WriteGetBucketAclResponse(w, r, accessKeyRecord, acl) + responses.WriteGetBucketAclResponse(w, r, ack, acl) } func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { fmt.Println("... 
PutBucketAclHandler: begin") + ctx := r.Context() - req := &PutBucketAclRequest{} + ack := cctx.GetAccessKey(r) + + req := &requests.PutBucketAclRequest{} err := req.Bind(r) if err != nil || len(req.ACL) == 0 || len(req.Bucket) == 0 { - WriteErrorResponse(w, r, ToApiError(ctx, ErrInvalidArgument)) - return - } - - accessKeyRecord, errCode := h.authSvc.VerifySignature(ctx, r) - if errCode != ErrCodeNone { - WriteErrorResponse(w, r, errCode) + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) return } - err = h.bucketSvc.CheckACL(accessKeyRecord, req.Bucket, s3action.PutBucketAclAction) + err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.PutBucketAclAction) if err != nil { - WriteErrorResponse(w, r, ToApiError(ctx, ErrNoSuchUserPolicy)) + responses.WriteErrorResponse(w, r, ToResponseErr(err)) return } - if !checkAclPermissionType(&req.ACL) { - WriteErrorResponse(w, r, ToApiError(ctx, ErrNotImplemented)) + if !requests.CheckAclPermissionType(&req.ACL) { + responses.WriteErrorResponse(w, r, responses.ErrNotImplemented) return } //todo check all errors err = h.bucketSvc.UpdateBucketAcl(ctx, req.Bucket, req.ACL) if err != nil { - WriteErrorResponse(w, r, ToApiError(ctx, err)) + responses.WriteErrorResponse(w, r, ToResponseErr(err)) return } //todo check no return? 
- WritePutBucketAclResponse(w, r) + responses.WritePutBucketAclResponse(w, r) } diff --git a/s3/routers/handlerser.go b/s3/handlers/proto.go similarity index 89% rename from s3/routers/handlerser.go rename to s3/handlers/proto.go index 5e3af9c21..b75df0668 100644 --- a/s3/routers/handlerser.go +++ b/s3/handlers/proto.go @@ -1,4 +1,4 @@ -package routers +package handlers import ( "net/http" @@ -6,7 +6,7 @@ import ( type Handlerser interface { Cors(handler http.Handler) http.Handler - Sign(handler http.Handler) http.Handler + Auth(handler http.Handler) http.Handler PutBucketHandler(w http.ResponseWriter, r *http.Request) HeadBucketHandler(w http.ResponseWriter, r *http.Request) diff --git a/s3/handlers/request.go b/s3/handlers/requests/parsers.go similarity index 89% rename from s3/handlers/request.go rename to s3/handlers/requests/parsers.go index d7f0f00d7..b9a5a7e39 100644 --- a/s3/handlers/request.go +++ b/s3/handlers/requests/parsers.go @@ -1,7 +1,8 @@ -package handlers +package requests import ( "encoding/xml" + "github.com/bittorrent/go-btfs/s3/handlers/responses" "net/http" "path" @@ -11,10 +12,6 @@ import ( "github.com/gorilla/mux" ) -type RequestBinder interface { - Bind(r *http.Request) (err error) -} - //type PutObjectRequest struct { // Bucket string // Object string @@ -25,14 +22,9 @@ type RequestBinder interface { // return //} -// PutBucketRequest . -type PutBucketRequest struct { - Bucket string - ACL string - Region string -} +func ParsePubBucketRequest(r *http.Request) (req *PutBucketRequest, err error) { + req = &PutBucketRequest{} -func (req *PutBucketRequest) Bind(r *http.Request) (err error) { vars := mux.Vars(r) bucket := vars["bucket"] @@ -129,7 +121,7 @@ func (req *PutBucketAclRequest) Bind(r *http.Request) (err error) { /*********************************/ // Parses location constraint from the incoming reader. 
-func parseLocationConstraint(r *http.Request) (location string, s3Error ErrorCode) { +func parseLocationConstraint(r *http.Request) (location string, s3Error *responses.Error) { // If the request has no body with content-length set to 0, // we do not have to validate location constraint. Bucket will // be created at default region. @@ -137,13 +129,13 @@ func parseLocationConstraint(r *http.Request) (location string, s3Error ErrorCod err := utils.XmlDecoder(r.Body, &locationConstraint, r.ContentLength) if err != nil && r.ContentLength != 0 { // Treat all other failures as XML parsing errors. - return "", ErrCodeMalformedXML + return "", responses.ErrMalformedXML } // else for both err as nil or io.EOF location = locationConstraint.Location if location == "" { location = consts.DefaultRegion } - return location, ErrCodeNone + return location, nil } // createBucketConfiguration container for bucket configuration request from client. @@ -155,7 +147,7 @@ type createBucketLocationConfiguration struct { // pathClean is like path.Clean but does not return "." for // empty inputs, instead returns "empty" as is. -func pathClean(p string) string { +func PathClean(p string) string { cp := path.Clean(p) if cp == "." { return "" @@ -178,7 +170,7 @@ func pathClean(p string) string { // return tagging, nil //} -func checkAclPermissionType(s *string) bool { +func CheckAclPermissionType(s *string) bool { if len(*s) == 0 { *s = policy.PublicRead return true diff --git a/s3/handlers/requests/types.go b/s3/handlers/requests/types.go new file mode 100644 index 000000000..0dddc3dc0 --- /dev/null +++ b/s3/handlers/requests/types.go @@ -0,0 +1,8 @@ +package requests + +// PutBucketRequest . 
+type PutBucketRequest struct { + Bucket string + ACL string + Region string +} diff --git a/s3/handlers/response_comm.go b/s3/handlers/response_comm.go deleted file mode 100644 index 170785b12..000000000 --- a/s3/handlers/response_comm.go +++ /dev/null @@ -1,584 +0,0 @@ -package handlers - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/bittorrent/go-btfs/s3/consts" - logging "github.com/ipfs/go-log/v2" - "net/http" - "net/url" - "strconv" - "time" -) - -var log = logging.Logger("resp") - -type mimeType string - -const ( - mimeNone mimeType = "" - mimeJSON mimeType = "application/json" - //mimeXML application/xml UTF-8 - mimeXML mimeType = " application/xml" -) - -// APIErrorResponse - error response format -type APIErrorResponse struct { - XMLName xml.Name `xml:"Error" json:"-"` - Code string - Message string - Resource string - RequestID string `xml:"RequestId" json:"RequestId"` - HostID string `xml:"HostId" json:"HostId"` -} - -// WriteSuccessResponseXML Write Success Response XML -func WriteSuccessResponseXML(w http.ResponseWriter, r *http.Request, response interface{}) { - WriteXMLResponse(w, r, http.StatusOK, response) -} - -// WriteXMLResponse Write XMLResponse -func WriteXMLResponse(w http.ResponseWriter, r *http.Request, statusCode int, response interface{}) { - writeResponse(w, r, statusCode, encodeXMLResponse(response), mimeXML) -} - -func writeResponse(w http.ResponseWriter, r *http.Request, statusCode int, response []byte, mType mimeType) { - setCommonHeaders(w, r) - if response != nil { - w.Header().Set(consts.ContentLength, strconv.Itoa(len(response))) - } - if mType != mimeNone { - w.Header().Set(consts.ContentType, string(mType)) - } - w.WriteHeader(statusCode) - if response != nil { - log.Debugf("status %d %s: %s", statusCode, mType, string(response)) - _, err := w.Write(response) - if err != nil { - log.Errorf("write err: %v", err) - } - w.(http.Flusher).Flush() - } -} - -func 
setCommonHeaders(w http.ResponseWriter, r *http.Request) { - w.Header().Set(consts.ServerInfo, "FDS") - w.Header().Set(consts.AmzRequestID, fmt.Sprintf("%d", time.Now().UnixNano())) - w.Header().Set(consts.AcceptRanges, "bytes") - if r.Header.Get("Origin") != "" { - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Access-Control-Allow-Credentials", "true") - } -} - -// encodeXMLResponse Encodes the response headers into XML format. -func encodeXMLResponse(response interface{}) []byte { - var bytesBuffer bytes.Buffer - bytesBuffer.WriteString(xml.Header) - e := xml.NewEncoder(&bytesBuffer) - e.Encode(response) - return bytesBuffer.Bytes() -} - -// WriteErrorResponseJSON - writes error response in JSON format; -// useful for admin APIs. -func WriteErrorResponseJSON(w http.ResponseWriter, err APIError, reqURL *url.URL, host string) { - // Generate error response. - errorResponse := getAPIErrorResponse(err, reqURL.Path, w.Header().Get(consts.AmzRequestID), host) - encodedErrorResponse := encodeResponseJSON(errorResponse) - writeResponseSimple(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON) -} - -// getErrorResponse gets in standard error and resource value and -// provides a encodable populated response values -func getAPIErrorResponse(err APIError, resource, requestID, hostID string) APIErrorResponse { - return APIErrorResponse{ - Code: err.Code, - Message: err.Description, - Resource: resource, - RequestID: requestID, - HostID: hostID, - } -} - -// Encodes the response headers into JSON format. -func encodeResponseJSON(response interface{}) []byte { - var bytesBuffer bytes.Buffer - e := json.NewEncoder(&bytesBuffer) - e.Encode(response) - return bytesBuffer.Bytes() -} - -// WriteSuccessResponseJSON writes success headers and response if any, -// with content-type set to `application/json`. 
-func WriteSuccessResponseJSON(w http.ResponseWriter, response []byte) { - writeResponseSimple(w, http.StatusOK, response, mimeJSON) -} - -func writeResponseSimple(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) { - if mType != mimeNone { - w.Header().Set(consts.ContentType, string(mType)) - } - w.Header().Set(consts.ContentLength, strconv.Itoa(len(response))) - w.WriteHeader(statusCode) - if response != nil { - w.Write(response) - } -} - -// WriteSuccessNoContent writes success headers with http status 204 -func WriteSuccessNoContent(w http.ResponseWriter) { - writeResponseSimple(w, http.StatusNoContent, nil, mimeNone) -} - -// ListAllMyBucketsResult List All Buckets Result -type ListAllMyBucketsResult struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult"` - Owner *s3.Owner - Buckets []*s3.Bucket `xml:"Buckets>Bucket"` -} - -// WriteSuccessResponse write SuccessResponseHeadersOnly -func WriteSuccessResponse(w http.ResponseWriter, r *http.Request) { - writeResponse(w, r, http.StatusOK, nil, mimeNone) -} - -type CopyObjectResponse struct { - CopyObjectResult CopyObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult"` -} - -type CopyObjectResult struct { - LastModified string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` - ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"` -} - -// LocationResponse - format for location response. -type LocationResponse struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint" json:"-"` - Location string `xml:",chardata"` -} - -// ListObjectsResponse - format for list objects response. 
-type ListObjectsResponse struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"` - - Name string - Prefix string - Marker string - - // When response is truncated (the IsTruncated element value in the response - // is true), you can use the key name in this field as marker in the subsequent - // request to get next set of objects. Server lists objects in alphabetical - // order Note: This element is returned only if you have delimiter request parameter - // specified. If response does not include the NextMaker and it is truncated, - // you can use the value of the last Key in the response as the marker in the - // subsequent request to get the next set of object keys. - NextMarker string `xml:"NextMarker,omitempty"` - - MaxKeys int - Delimiter string - // A flag that indicates whether or not ListObjects returned all of the results - // that satisfied the search criteria. - IsTruncated bool - - Contents []Object - CommonPrefixes []CommonPrefix - - // Encoding type used to encode object keys in the response. - EncodingType string `xml:"EncodingType,omitempty"` -} - -// ListObjectsV2Response - format for list objects response. -type ListObjectsV2Response struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"` - - Name string - Prefix string - StartAfter string `xml:"StartAfter,omitempty"` - // When response is truncated (the IsTruncated element value in the response - // is true), you can use the key name in this field as marker in the subsequent - // request to get next set of objects. Server lists objects in alphabetical - // order Note: This element is returned only if you have delimiter request parameter - // specified. If response does not include the NextMaker and it is truncated, - // you can use the value of the last Key in the response as the marker in the - // subsequent request to get the next set of object keys. 
- ContinuationToken string `xml:"ContinuationToken,omitempty"` - NextContinuationToken string `xml:"NextContinuationToken,omitempty"` - - KeyCount int - MaxKeys int - Delimiter string - // A flag that indicates whether or not ListObjects returned all of the results - // that satisfied the search criteria. - IsTruncated bool - - Contents []Object - CommonPrefixes []CommonPrefix - - // Encoding type used to encode object keys in the response. - EncodingType string `xml:"EncodingType,omitempty"` -} - -// Object container for object metadata -type Object struct { - Key string - LastModified string // time string of format "2006-01-02T15:04:05.000Z" - ETag string - Size int64 - - // Owner of the object. - Owner s3.Owner - - // The class of storage used to store the object. - StorageClass string - - // UserMetadata user-defined metadata - UserMetadata StringMap `xml:"UserMetadata,omitempty"` -} - -// StringMap is a map[string]string -type StringMap map[string]string - -// MarshalXML - StringMap marshals into XML. -func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - tokens := []xml.Token{start} - - for key, value := range s { - t := xml.StartElement{} - t.Name = xml.Name{ - Space: "", - Local: key, - } - tokens = append(tokens, t, xml.CharData(value), xml.EndElement{Name: t.Name}) - } - - tokens = append(tokens, xml.EndElement{ - Name: start.Name, - }) - - for _, t := range tokens { - if err := e.EncodeToken(t); err != nil { - return err - } - } - - // flush to ensure tokens are written - return e.Flush() -} - -// CommonPrefix container for prefix response in ListObjectsResponse -type CommonPrefix struct { - Prefix string -} - -// -//// DeleteError structure. -//type DeleteError struct { -// Code string -// Message string -// Key string -// VersionID string `xml:"VersionId"` -//} -// -//// DeleteObjectsResponse container for multiple object deletes. 
-//type DeleteObjectsResponse struct { -// XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"` -// -// // Collection of all deleted objects -// DeletedObjects []datatypes.DeletedObject `xml:"Deleted,omitempty"` -// -// // Collection of errors deleting certain objects. -// Errors []DeleteError `xml:"Error,omitempty"` -//} -// -//// GenerateListObjectsV2Response Generates an ListObjectsV2 response for the said bucket with other enumerated options. -//func GenerateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, isTruncated bool, maxKeys int, objects []store.ObjectInfo, prefixes []string) ListObjectsV2Response { -// contents := make([]Object, 0, len(objects)) -// id := consts.DefaultOwnerID -// name := consts.DisplayName -// owner := s3.Owner{ -// ID: &id, -// DisplayName: &name, -// } -// data := ListObjectsV2Response{} -// -// for _, object := range objects { -// content := Object{} -// if object.Name == "" { -// continue -// } -// content.Key = utils.S3EncodeName(object.Name, encodingType) -// content.LastModified = object.ModTime.UTC().Format(consts.Iso8601TimeFormat) -// if object.ETag != "" { -// content.ETag = "\"" + object.ETag + "\"" -// } -// content.Size = object.Size -// content.Owner = owner -// contents = append(contents, content) -// } -// data.Name = bucket -// data.Contents = contents -// -// data.EncodingType = encodingType -// data.StartAfter = utils.S3EncodeName(startAfter, encodingType) -// data.Delimiter = utils.S3EncodeName(delimiter, encodingType) -// data.Prefix = utils.S3EncodeName(prefix, encodingType) -// data.MaxKeys = maxKeys -// data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token)) -// data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken)) -// data.IsTruncated = isTruncated -// -// commonPrefixes := make([]CommonPrefix, 0, len(prefixes)) -// for _, prefix := range prefixes { -// prefixItem := CommonPrefix{} 
-// prefixItem.Prefix = utils.S3EncodeName(prefix, encodingType) -// commonPrefixes = append(commonPrefixes, prefixItem) -// } -// data.CommonPrefixes = commonPrefixes -// data.KeyCount = len(data.Contents) + len(data.CommonPrefixes) -// return data -//} -// -//// generates an ListObjectsV1 response for the said bucket with other enumerated options. -//func GenerateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp store.ListObjectsInfo) ListObjectsResponse { -// contents := make([]Object, 0, len(resp.Objects)) -// id := consts.DefaultOwnerID -// name := consts.DisplayName -// owner := s3.Owner{ -// ID: &id, -// DisplayName: &name, -// } -// data := ListObjectsResponse{} -// -// for _, object := range resp.Objects { -// content := Object{} -// if object.Name == "" { -// continue -// } -// content.Key = utils.S3EncodeName(object.Name, encodingType) -// content.LastModified = object.ModTime.UTC().Format(consts.Iso8601TimeFormat) -// if object.ETag != "" { -// content.ETag = "\"" + object.ETag + "\"" -// } -// content.Size = object.Size -// content.StorageClass = "" -// content.Owner = owner -// contents = append(contents, content) -// } -// data.Name = bucket -// data.Contents = contents -// -// data.EncodingType = encodingType -// data.Prefix = utils.S3EncodeName(prefix, encodingType) -// data.Marker = utils.S3EncodeName(marker, encodingType) -// data.Delimiter = utils.S3EncodeName(delimiter, encodingType) -// data.MaxKeys = maxKeys -// data.NextMarker = utils.S3EncodeName(resp.NextMarker, encodingType) -// data.IsTruncated = resp.IsTruncated -// -// prefixes := make([]CommonPrefix, 0, len(resp.Prefixes)) -// for _, prefix := range resp.Prefixes { -// prefixItem := CommonPrefix{} -// prefixItem.Prefix = utils.S3EncodeName(prefix, encodingType) -// prefixes = append(prefixes, prefixItem) -// } -// data.CommonPrefixes = prefixes -// return data -//} -// -//// generate multi objects delete response. 
-//func GenerateMultiDeleteResponse(quiet bool, deletedObjects []datatypes.DeletedObject, errs []DeleteError) DeleteObjectsResponse { -// deleteResp := DeleteObjectsResponse{} -// if !quiet { -// deleteResp.DeletedObjects = deletedObjects -// } -// deleteResp.Errors = errs -// return deleteResp -//} -// -//// InitiateMultipartUploadResponse container for InitiateMultiPartUpload response, provides uploadID to start MultiPart upload -//type InitiateMultipartUploadResponse struct { -// XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult" json:"-"` -// -// Bucket string -// Key string -// UploadID string `xml:"UploadId"` -//} -// -//// CompleteMultipartUploadResponse container for completed multipart upload response -//type CompleteMultipartUploadResponse struct { -// XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult" json:"-"` -// -// Location string -// Bucket string -// Key string -// ETag string -// -// ChecksumCRC32 string -// ChecksumCRC32C string -// ChecksumSHA1 string -// ChecksumSHA256 string -//} -// -//// Part container for part metadata. -//type Part struct { -// PartNumber int -// LastModified string -// ETag string -// Size int64 -// -// // Checksum values -// ChecksumCRC32 string -// ChecksumCRC32C string -// ChecksumSHA1 string -// ChecksumSHA256 string -//} -// -//// Initiator inherit from Owner struct, fields are same -//type Initiator s3.Owner -// -//// ListPartsResponse - format for list parts response. -//type ListPartsResponse struct { -// XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult" json:"-"` -// -// Bucket string -// Key string -// UploadID string `xml:"UploadId"` -// -// Initiator Initiator -// Owner s3.Owner -// -// // The class of storage used to store the object. 
-// StorageClass string -// -// PartNumberMarker int -// NextPartNumberMarker int -// MaxParts int -// IsTruncated bool -// -// ChecksumAlgorithm string -// // List of parts. -// Parts []Part `xml:"Part"` -//} -// -//// ListMultipartUploadsResponse - format for list multipart uploads response. -//type ListMultipartUploadsResponse struct { -// XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult" json:"-"` -// -// Bucket string -// KeyMarker string -// UploadIDMarker string `xml:"UploadIdMarker"` -// NextKeyMarker string -// NextUploadIDMarker string `xml:"NextUploadIdMarker"` -// Delimiter string -// Prefix string -// EncodingType string `xml:"EncodingType,omitempty"` -// MaxUploads int -// IsTruncated bool -// -// // List of pending uploads. -// Uploads []Upload `xml:"Upload"` -// -// // Delimed common prefixes. -// CommonPrefixes []CommonPrefix -//} -// -//// Upload container for in progress multipart upload -//type Upload struct { -// Key string -// UploadID string `xml:"UploadId"` -// Initiator Initiator -// Owner s3.Owner -// StorageClass string -// Initiated string -//} -// -//// generates InitiateMultipartUploadResponse for given bucket, key and uploadID. -//func GenerateInitiateMultipartUploadResponse(bucket, key, uploadID string) InitiateMultipartUploadResponse { -// return InitiateMultipartUploadResponse{ -// Bucket: bucket, -// Key: key, -// UploadID: uploadID, -// } -//} -// -//// generates CompleteMultipartUploadResponse for given bucket, key, location and ETag. -//func GenerateCompleteMultpartUploadResponse(bucket, key, location string, oi store.ObjectInfo) CompleteMultipartUploadResponse { -// c := CompleteMultipartUploadResponse{ -// Location: location, -// Bucket: bucket, -// Key: key, -// // AWS S3 quotes the ETag in XML, make sure we are compatible here. -// ETag: "\"" + oi.ETag + "\"", -// } -// return c -//} -// -//// generates ListPartsResponse from ListPartsInfo. 
-//func GenerateListPartsResponse(partsInfo store.ListPartsInfo, encodingType string) ListPartsResponse { -// resp := ListPartsResponse{} -// resp.Bucket = partsInfo.Bucket -// resp.Key = utils.S3EncodeName(partsInfo.Object, encodingType) -// resp.UploadID = partsInfo.UploadID -// resp.StorageClass = consts.DefaultStorageClass -// -// // Dumb values not meaningful -// resp.Initiator = Initiator{ -// ID: aws.String(consts.DefaultOwnerID), -// DisplayName: aws.String(consts.DisplayName), -// } -// resp.Owner = s3.Owner{ -// ID: aws.String(consts.DefaultOwnerID), -// DisplayName: aws.String(consts.DisplayName), -// } -// -// resp.MaxParts = partsInfo.MaxParts -// resp.PartNumberMarker = partsInfo.PartNumberMarker -// resp.IsTruncated = partsInfo.IsTruncated -// resp.NextPartNumberMarker = partsInfo.NextPartNumberMarker -// resp.ChecksumAlgorithm = partsInfo.ChecksumAlgorithm -// -// resp.Parts = make([]Part, len(partsInfo.Parts)) -// for index, part := range partsInfo.Parts { -// newPart := Part{} -// newPart.PartNumber = part.Number -// newPart.ETag = "\"" + part.ETag + "\"" -// newPart.Size = part.Size -// newPart.LastModified = part.ModTime.UTC().Format(consts.Iso8601TimeFormat) -// resp.Parts[index] = newPart -// } -// return resp -//} -// -//// generates ListMultipartUploadsResponse for given bucket and ListMultipartsInfo. 
-//func GenerateListMultipartUploadsResponse(bucket string, multipartsInfo store.ListMultipartsInfo, encodingType string) ListMultipartUploadsResponse { -// resp := ListMultipartUploadsResponse{} -// resp.Bucket = bucket -// resp.Delimiter = utils.S3EncodeName(multipartsInfo.Delimiter, encodingType) -// resp.IsTruncated = multipartsInfo.IsTruncated -// resp.EncodingType = encodingType -// resp.Prefix = utils.S3EncodeName(multipartsInfo.Prefix, encodingType) -// resp.KeyMarker = utils.S3EncodeName(multipartsInfo.KeyMarker, encodingType) -// resp.NextKeyMarker = utils.S3EncodeName(multipartsInfo.NextKeyMarker, encodingType) -// resp.MaxUploads = multipartsInfo.MaxUploads -// resp.NextUploadIDMarker = multipartsInfo.NextUploadIDMarker -// resp.UploadIDMarker = multipartsInfo.UploadIDMarker -// resp.CommonPrefixes = make([]CommonPrefix, len(multipartsInfo.CommonPrefixes)) -// for index, commonPrefix := range multipartsInfo.CommonPrefixes { -// resp.CommonPrefixes[index] = CommonPrefix{ -// Prefix: utils.S3EncodeName(commonPrefix, encodingType), -// } -// } -// resp.Uploads = make([]Upload, len(multipartsInfo.Uploads)) -// for index, upload := range multipartsInfo.Uploads { -// newUpload := Upload{} -// newUpload.UploadID = upload.UploadID -// newUpload.Key = utils.S3EncodeName(upload.Object, encodingType) -// newUpload.Initiated = upload.Initiated.UTC().Format(consts.Iso8601TimeFormat) -// resp.Uploads[index] = newUpload -// } -// return resp -//} diff --git a/s3/handlers/response_comm_acl.go b/s3/handlers/response_comm_acl.go deleted file mode 100644 index 404a48bd1..000000000 --- a/s3/handlers/response_comm_acl.go +++ /dev/null @@ -1,49 +0,0 @@ -package handlers - -type accessControlList struct { - Grant []Grant `xml:"Grant,omitempty"` -} -type canonicalUser struct { - ID string `xml:"ID"` - DisplayName string `xml:"DisplayName,omitempty"` -} - -// AccessControlPolicy -// -// 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a -// 
CustomersName@amazon.com -// -// -// -// -// 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a -// CustomersName@amazon.com -// -// FULL_CONTROL -// -// -// -type AccessControlPolicy struct { - Owner canonicalUser `xml:"Owner"` - AccessControlList accessControlList `xml:"AccessControlList"` -} - -//Grant grant -type Grant struct { - Grantee Grantee `xml:"Grantee"` - Permission Permission `xml:"Permission"` -} - -//Grantee grant -type Grantee struct { - XMLNS string `xml:"xmlns:xsi,attr"` - XMLXSI string `xml:"xsi:type,attr"` - Type string `xml:"Type"` - ID string `xml:"ID,omitempty"` - DisplayName string `xml:"DisplayName,omitempty"` - URI string `xml:"URI,omitempty"` -} - -// Permission May be one of READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL -type Permission string diff --git a/s3/handlers/response_error.go b/s3/handlers/response_error.go deleted file mode 100644 index 2d6a81261..000000000 --- a/s3/handlers/response_error.go +++ /dev/null @@ -1,39 +0,0 @@ -package handlers - -import ( - "fmt" - "github.com/gorilla/mux" - "net/http" - "time" -) - -func WriteErrorResponseHeadersOnly(w http.ResponseWriter, r *http.Request, err ErrorCode) { - writeResponse(w, r, GetAPIError(err).HTTPStatusCode, nil, mimeNone) -} - -// WriteErrorResponse write ErrorResponse -func WriteErrorResponse(w http.ResponseWriter, r *http.Request, errorCode ErrorCode) { - fmt.Println("response errcode: ", errorCode, r.URL, r.Method, r.Header) - vars := mux.Vars(r) - bucket := vars["bucket"] - object := vars["object"] - - apiError := GetAPIError(errorCode) - errorResponse := getRESTErrorResponse(apiError, r.URL.Path, bucket, object) - WriteXMLResponse(w, r, apiError.HTTPStatusCode, errorResponse) -} - -func getRESTErrorResponse(err APIError, resource string, bucket, object string) RESTErrorResponse { - return RESTErrorResponse{ - Code: err.Code, - BucketName: bucket, - Key: object, - Message: err.Description, - Resource: resource, - RequestID: fmt.Sprintf("%d", 
time.Now().UnixNano()), - } -} - -// NotFoundHandler If none of the http routes match respond with MethodNotAllowed -func NotFoundHandler(w http.ResponseWriter, r *http.Request) { -} diff --git a/s3/handlers/responses/error.go b/s3/handlers/responses/error.go new file mode 100644 index 000000000..e886a514a --- /dev/null +++ b/s3/handlers/responses/error.go @@ -0,0 +1,1035 @@ +package responses + +import ( + "fmt" + "net/http" +) + +type Error struct { + code string + description string + httpStatusCode int +} + +func (err *Error) Code() string { + return err.code +} + +func (err *Error) Description() string { + return err.description +} + +func (err *Error) HTTPStatusCode() int { + return err.httpStatusCode +} + +func (err *Error) Error() string { + return fmt.Sprintf( + "code <%s>, description <%s>, status <%d>", + err.code, + err.description, + err.httpStatusCode, + ) +} + +// Errors http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +var ( + ErrInvalidCopyDest = &Error{ + code: "InvalidRequest", + description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidCopySource = &Error{ + code: "InvalidArgument", + description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidMetadataDirective = &Error{ + code: "InvalidArgument", + description: "Unknown metadata directive.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidRequestBody = &Error{ + code: "InvalidArgument", + description: "Body shouldn't be set for this request.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidMaxUploads = &Error{ + code: "InvalidArgument", + description: "Argument max-uploads must be an integer between 0 and 2147483647", + httpStatusCode: http.StatusBadRequest, + } + 
ErrInvalidMaxKeys = &Error{ + code: "InvalidArgument", + description: "Argument maxKeys must be an integer between 0 and 2147483647", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidEncodingMethod = &Error{ + code: "InvalidArgument", + description: "Invalid Encoding Method specified in Request", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidMaxParts = &Error{ + code: "InvalidArgument", + description: "Part number must be an integer between 1 and 10000, inclusive", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidPartNumberMarker = &Error{ + code: "InvalidArgument", + description: "Argument partNumberMarker must be an integer.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidPolicyDocument = &Error{ + code: "InvalidPolicyDocument", + description: "The content of the form does not meet the conditions specified in the policy document.", + httpStatusCode: http.StatusBadRequest, + } + ErrAccessDenied = &Error{ + code: "AccessDenied", + description: "Access Denied.", + httpStatusCode: http.StatusForbidden, + } + ErrBadDigest = &Error{ + code: "BadDigest", + description: "The Content-Md5 you specified did not match what we received.", + httpStatusCode: http.StatusBadRequest, + } + ErrEntityTooSmall = &Error{ + code: "EntityTooSmall", + description: "Your proposed upload is smaller than the minimum allowed object size.", + httpStatusCode: http.StatusBadRequest, + } + ErrEntityTooLarge = &Error{ + code: "EntityTooLarge", + description: "Your proposed upload exceeds the maximum allowed object size.", + httpStatusCode: http.StatusBadRequest, + } + ErrIncompleteBody = &Error{ + code: "IncompleteBody", + description: "You did not provide the number of bytes specified by the Content-Length HTTP header.", + httpStatusCode: http.StatusBadRequest, + } + ErrInternalError = &Error{ + code: "InternalError", + description: "We encountered an internal error, please try again.", + httpStatusCode: http.StatusInternalServerError, + } + 
ErrInvalidAccessKeyID = &Error{ + code: "InvalidAccessKeyId", + description: "The Access Key Id you provided does not exist in our records.", + httpStatusCode: http.StatusForbidden, + } + ErrAccessKeyDisabled = &Error{ + code: "InvalidAccessKeyId", + description: "Your account is disabled; please contact your administrator.", + httpStatusCode: http.StatusForbidden, + } + ErrInvalidBucketName = &Error{ + code: "InvalidBucketName", + description: "The specified bucket is not valid.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidDigest = &Error{ + code: "InvalidDigest", + description: "The Content-Md5 you specified is not valid.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidRange = &Error{ + code: "InvalidRange", + description: "The requested range is not satisfiable", + httpStatusCode: http.StatusRequestedRangeNotSatisfiable, + } + ErrInvalidRangePartNumber = &Error{ + code: "InvalidRequest", + description: "Cannot specify both Range header and partNumber query parameter", + httpStatusCode: http.StatusBadRequest, + } + ErrMalformedXML = &Error{ + code: "MalformedXML", + description: "The XML you provided was not well-formed or did not validate against our published schema.", + httpStatusCode: http.StatusBadRequest, + } + ErrMissingContentLength = &Error{ + code: "MissingContentLength", + description: "You must provide the Content-Length HTTP header.", + httpStatusCode: http.StatusLengthRequired, + } + ErrMissingContentMD5 = &Error{ + code: "MissingContentMD5", + description: "Missing required header for this request: Content-Md5.", + httpStatusCode: http.StatusBadRequest, + } + ErrMissingSecurityHeader = &Error{ + code: "MissingSecurityHeader", + description: "Your request was missing a required header", + httpStatusCode: http.StatusBadRequest, + } + ErrMissingRequestBodyError = &Error{ + code: "MissingRequestBodyError", + description: "Request body is empty.", + httpStatusCode: http.StatusLengthRequired, + } + ErrNoSuchBucket = &Error{ + 
code: "NoSuchBucket", + description: "The specified bucket does not exist", + httpStatusCode: http.StatusNotFound, + } + ErrNoSuchBucketPolicy = &Error{ + code: "NoSuchBucketPolicy", + description: "The bucket policy does not exist", + httpStatusCode: http.StatusNotFound, + } + ErrNoSuchLifecycleConfiguration = &Error{ + code: "NoSuchLifecycleConfiguration", + description: "The lifecycle configuration does not exist", + httpStatusCode: http.StatusNotFound, + } + ErrNoSuchUser = &Error{ + code: "NoSuchUser", + description: "The specified user does not exist", + httpStatusCode: http.StatusConflict, + } + ErrUserAlreadyExists = &Error{ + code: "UserAlreadyExists", + description: "The request was rejected because it attempted to create a resource that already exists .", + httpStatusCode: http.StatusConflict, + } + ErrNoSuchUserPolicy = &Error{ + code: "NoSuchUserPolicy", + description: "The specified user policy does not exist", + httpStatusCode: http.StatusConflict, + } + ErrUserPolicyAlreadyExists = &Error{ + code: "UserPolicyAlreadyExists", + description: "The same user policy already exists .", + httpStatusCode: http.StatusConflict, + } + ErrNoSuchKey = &Error{ + code: "NoSuchKey", + description: "The specified key does not exist.", + httpStatusCode: http.StatusNotFound, + } + ErrNoSuchUpload = &Error{ + code: "NoSuchUpload", + description: "The specified multipart upload does not exist. 
The upload ID may be invalid, or the upload may have been aborted or completed.", + httpStatusCode: http.StatusNotFound, + } + ErrInvalidVersionID = &Error{ + code: "InvalidArgument", + description: "Invalid version id specified", + httpStatusCode: http.StatusBadRequest, + } + ErrNoSuchVersion = &Error{ + code: "NoSuchVersion", + description: "The specified version does not exist.", + httpStatusCode: http.StatusNotFound, + } + ErrNotImplemented = &Error{ + code: "NotImplemented", + description: "A header you provided implies functionality that is not implemented", + httpStatusCode: http.StatusNotImplemented, + } + ErrPreconditionFailed = &Error{ + code: "PreconditionFailed", + description: "At least one of the pre-conditions you specified did not hold", + httpStatusCode: http.StatusPreconditionFailed, + } + ErrRequestTimeTooSkewed = &Error{ + code: "RequestTimeTooSkewed", + description: "The difference between the request time and the server's time is too large.", + httpStatusCode: http.StatusForbidden, + } + ErrSignatureDoesNotMatch = &Error{ + code: "SignatureDoesNotMatch", + description: "The request signature we calculated does not match the signature you provided. Check your key and signing method.", + httpStatusCode: http.StatusForbidden, + } + ErrMethodNotAllowed = &Error{ + code: "MethodNotAllowed", + description: "The specified method is not allowed against this resource.", + httpStatusCode: http.StatusMethodNotAllowed, + } + ErrInvalidPart = &Error{ + code: "InvalidPart", + description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidPartOrder = &Error{ + code: "InvalidPartOrder", + description: "The list of parts was not in ascending order. 
The parts list must be specified in order by part number.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidObjectState = &Error{ + code: "InvalidObjectState", + description: "The operation is not valid for the current state of the object.", + httpStatusCode: http.StatusForbidden, + } + ErrAuthorizationHeaderMalformed = &Error{ + code: "AuthorizationHeaderMalformed", + description: "The authorization header is malformed; the region is wrong; expecting 'us-east-1'.", + httpStatusCode: http.StatusBadRequest, + } + ErrMalformedPOSTRequest = &Error{ + code: "MalformedPOSTRequest", + description: "The body of your POST request is not well-formed multipart/form-data.", + httpStatusCode: http.StatusBadRequest, + } + ErrPOSTFileRequired = &Error{ + code: "InvalidArgument", + description: "POST requires exactly one file upload per request.", + httpStatusCode: http.StatusBadRequest, + } + ErrSignatureVersionNotSupported = &Error{ + code: "InvalidRequest", + description: "The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.", + httpStatusCode: http.StatusBadRequest, + } + ErrBucketNotEmpty = &Error{ + code: "BucketNotEmpty", + description: "The bucket you tried to delete is not empty", + httpStatusCode: http.StatusConflict, + } + ErrBucketAlreadyExists = &Error{ + code: "BucketAlreadyExists", + description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. 
Please select a different name and try again.", + httpStatusCode: http.StatusConflict, + } + ErrAllAccessDisabled = &Error{ + code: "AllAccessDisabled", + description: "All access to this resource has been disabled.", + httpStatusCode: http.StatusForbidden, + } + ErrMalformedPolicy = &Error{ + code: "MalformedPolicy", + description: "Policy has invalid resource.", + httpStatusCode: http.StatusBadRequest, + } + ErrMissingFields = &Error{ // todo + code: "InvalidRequest", + description: "ErrMissingFields", + httpStatusCode: http.StatusBadRequest, + } + ErrMissingCredTag = &Error{ + code: "InvalidRequest", + description: "Missing Credential field for this request.", + httpStatusCode: http.StatusBadRequest, + } + ErrCredMalformed = &Error{ // todo + code: "InvalidRequest", + description: "ErrCredMalformed", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidRegion = &Error{ + code: "InvalidRegion", + description: "Region does not match.", + httpStatusCode: http.StatusBadRequest, + } + ErrMissingSignTag = &Error{ + code: "AccessDenied", + description: "Signature header missing Signature field.", + httpStatusCode: http.StatusBadRequest, + } + ErrMissingSignHeadersTag = &Error{ + code: "InvalidArgument", + description: "Signature header missing SignedHeaders field.", + httpStatusCode: http.StatusBadRequest, + } + + ErrAuthHeaderEmpty = &Error{ + code: "InvalidArgument", + description: "Authorization header is invalid -- one and only one ' ' (space) required.", + httpStatusCode: http.StatusBadRequest, + } + ErrMissingDateHeader = &Error{ + code: "AccessDenied", + description: "AWS authentication requires a valid Date or x-amz-date header", + httpStatusCode: http.StatusBadRequest, + } + ErrExpiredPresignRequest = &Error{ + code: "AccessDenied", + description: "Request has expired", + httpStatusCode: http.StatusForbidden, + } + ErrRequestNotReadyYet = &Error{ + code: "AccessDenied", + description: "Request is not valid yet", + httpStatusCode: http.StatusForbidden, + } 
+ ErrSlowDown = &Error{ + code: "SlowDown", + description: "Resource requested is unreadable, please reduce your request rate", + httpStatusCode: http.StatusServiceUnavailable, + } + ErrBadRequest = &Error{ + code: "BadRequest", + description: "400 BadRequest", + httpStatusCode: http.StatusBadRequest, + } + ErrKeyTooLongError = &Error{ + code: "KeyTooLongError", + description: "Your key is too long", + httpStatusCode: http.StatusBadRequest, + } + ErrUnsignedHeaders = &Error{ + code: "AccessDenied", + description: "There were headers present in the request which were not signed", + httpStatusCode: http.StatusBadRequest, + } + ErrBucketAlreadyOwnedByYou = &Error{ + code: "BucketAlreadyOwnedByYou", + description: "Your previous request to create the named bucket succeeded and you already own it.", + httpStatusCode: http.StatusConflict, + } + ErrInvalidDuration = &Error{ + code: "InvalidDuration", + description: "Duration provided in the request is invalid.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidBucketObjectLockConfiguration = &Error{ + code: "InvalidRequest", + description: "Bucket is missing ObjectLockConfiguration", + httpStatusCode: http.StatusBadRequest, + } + ErrBucketTaggingNotFound = &Error{ + code: "NoSuchTagSet", + description: "The TagSet does not exist", + httpStatusCode: http.StatusNotFound, + } + ErrObjectLockConfigurationNotAllowed = &Error{ + code: "InvalidBucketState", + description: "Object Lock configuration cannot be enabled on existing buckets", + httpStatusCode: http.StatusConflict, + } + ErrNoSuchCORSConfiguration = &Error{ + code: "NoSuchCORSConfiguration", + description: "The CORS configuration does not exist", + httpStatusCode: http.StatusNotFound, + } + ErrNoSuchWebsiteConfiguration = &Error{ + code: "NoSuchWebsiteConfiguration", + description: "The specified bucket does not have a website configuration", + httpStatusCode: http.StatusNotFound, + } + ErrReplicationConfigurationNotFoundError = &Error{ + code: 
"ReplicationConfigurationNotFoundError", + description: "The replication configuration was not found", + httpStatusCode: http.StatusNotFound, + } + ErrReplicationNeedsVersioningError = &Error{ + code: "InvalidRequest", + description: "Versioning must be 'Enabled' on the bucket to apply a replication configuration", + httpStatusCode: http.StatusBadRequest, + } + ErrReplicationBucketNeedsVersioningError = &Error{ + code: "InvalidRequest", + description: "Versioning must be 'Enabled' on the bucket to add a replication target", + httpStatusCode: http.StatusBadRequest, + } + ErrNoSuchObjectLockConfiguration = &Error{ + code: "NoSuchObjectLockConfiguration", + description: "The specified object does not have a ObjectLock configuration", + httpStatusCode: http.StatusBadRequest, + } + ErrObjectLocked = &Error{ + code: "InvalidRequest", + description: "Object is WORM protected and cannot be overwritten", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidRetentionDate = &Error{ + code: "InvalidRequest", + description: "Date must be provided in ISO 8601 format", + httpStatusCode: http.StatusBadRequest, + } + ErrPastObjectLockRetainDate = &Error{ + code: "InvalidRequest", + description: "the retain until date must be in the future", + httpStatusCode: http.StatusBadRequest, + } + ErrUnknownWORMModeDirective = &Error{ + code: "InvalidRequest", + description: "unknown wormMode directive", + httpStatusCode: http.StatusBadRequest, + } + ErrObjectLockInvalidHeaders = &Error{ + code: "InvalidRequest", + description: "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied", + httpStatusCode: http.StatusBadRequest, + } + ErrObjectRestoreAlreadyInProgress = &Error{ + code: "RestoreAlreadyInProgress", + description: "Object restore is already in progress", + httpStatusCode: http.StatusConflict, + } + // Bucket notification related errors. 
+ ErrEventNotification = &Error{ + code: "InvalidArgument", + description: "A specified event is not supported for notifications.", + httpStatusCode: http.StatusBadRequest, + } + ErrARNNotification = &Error{ + code: "InvalidArgument", + description: "A specified destination ARN does not exist or is not well-formed. Verify the destination ARN.", + httpStatusCode: http.StatusBadRequest, + } + ErrRegionNotification = &Error{ + code: "InvalidArgument", + description: "A specified destination is in a different region than the bucket. You must use a destination that resides in the same region as the bucket.", + httpStatusCode: http.StatusBadRequest, + } + ErrOverlappingFilterNotification = &Error{ + code: "InvalidArgument", + description: "An object key name filtering rule defined with overlapping prefixes, overlapping suffixes, or overlapping combinations of prefixes and suffixes for the same event types.", + httpStatusCode: http.StatusBadRequest, + } + ErrFilterNameInvalid = &Error{ + code: "InvalidArgument", + description: "filter rule name must be either prefix or suffix", + httpStatusCode: http.StatusBadRequest, + } + ErrFilterNamePrefix = &Error{ + code: "InvalidArgument", + description: "Cannot specify more than one prefix rule in a filter.", + httpStatusCode: http.StatusBadRequest, + } + ErrFilterNameSuffix = &Error{ + code: "InvalidArgument", + description: "Cannot specify more than one suffix rule in a filter.", + httpStatusCode: http.StatusBadRequest, + } + ErrFilterValueInvalid = &Error{ + code: "InvalidArgument", + description: "Size of filter rule value cannot exceed 1024 bytes in UTF-8 representation", + httpStatusCode: http.StatusBadRequest, + } + ErrOverlappingConfigs = &Error{ + code: "InvalidArgument", + description: "Configurations overlap. 
Configurations on the same bucket cannot share a common event type.", + httpStatusCode: http.StatusBadRequest, + } + ErrContentSHA256Mismatch = &Error{ //todo + code: "InvalidArgument", + description: "ErrContentSHA256Mismatch", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidCopyPartRange = &Error{ + code: "InvalidArgument", + description: "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidCopyPartRangeSource = &Error{ + code: "InvalidArgument", + description: "Range specified is not valid for source object", + httpStatusCode: http.StatusBadRequest, + } + ErrMetadataTooLarge = &Error{ + code: "MetadataTooLarge", + description: "Your metadata headers exceed the maximum allowed metadata size.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidTagDirective = &Error{ + code: "InvalidArgument", + description: "Unknown tag directive.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidEncryptionMethod = &Error{ + code: "InvalidRequest", + description: "The encryption method specified is not supported", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidQueryParams = &Error{ + code: "AuthorizationQueryParametersError", + description: "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.", + httpStatusCode: http.StatusBadRequest, + } + ErrNoAccessKey = &Error{ + code: "AccessDenied", + description: "No AWSAccessKey was presented", + httpStatusCode: http.StatusForbidden, + } + ErrInvalidToken = &Error{ + code: "InvalidTokenId", + description: "The security token included in the request is invalid", + httpStatusCode: http.StatusForbidden, + } + + // S3 extensions. 
+ ErrInvalidObjectName = &Error{ + code: "InvalidObjectName", + description: "Object name contains unsupported characters.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidObjectNamePrefixSlash = &Error{ + code: "InvalidObjectName", + description: "Object name contains a leading slash.", + httpStatusCode: http.StatusBadRequest, + } + ErrClientDisconnected = &Error{ + code: "ClientDisconnected", + description: "Client disconnected before response was ready", + httpStatusCode: 499, // No official code, use nginx value. + } + ErrOperationTimedOut = &Error{ + code: "RequestTimeout", + description: "A timeout occurred while trying to lock a resource, please reduce your request rate", + httpStatusCode: http.StatusServiceUnavailable, + } + ErrOperationMaxedOut = &Error{ + code: "SlowDown", + description: "A timeout exceeded while waiting to proceed with the request, please reduce your request rate", + httpStatusCode: http.StatusServiceUnavailable, + } + ErrUnsupportedMetadata = &Error{ + code: "InvalidArgument", + description: "Your metadata headers are not supported.", + httpStatusCode: http.StatusBadRequest, + } + // Generic Invalid-Request error. Should be used for response errors only for unlikely + // corner case errors for which introducing new APIErrorcode is not worth it. LogIf() + // should be used to log the error at the source of the error for debugging purposes. 
+ ErrInvalidRequest = &Error{ + code: "InvalidRequest", + description: "Invalid Request", + httpStatusCode: http.StatusBadRequest, + } + ErrIncorrectContinuationToken = &Error{ + code: "InvalidArgument", + description: "The continuation token provided is incorrect", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidFormatAccessKey = &Error{ + code: "InvalidAccessKeyId", + description: "The Access Key Id you provided contains invalid characters.", + httpStatusCode: http.StatusBadRequest, + } + // S3 Select API Errors + ErrEmptyRequestBody = &Error{ + code: "EmptyRequestBody", + description: "Request body cannot be empty.", + httpStatusCode: http.StatusBadRequest, + } + ErrUnsupportedFunction = &Error{ + code: "UnsupportedFunction", + description: "Encountered an unsupported SQL function.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidDataSource = &Error{ + code: "InvalidDataSource", + description: "Invalid data source type. Only CSV and JSON are supported at this time.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidExpressionType = &Error{ + code: "InvalidExpressionType", + description: "The ExpressionType is invalid. Only SQL expressions are supported at this time.", + httpStatusCode: http.StatusBadRequest, + } + ErrBusy = &Error{ + code: "Busy", + description: "The service is unavailable. 
Please retry.", + httpStatusCode: http.StatusServiceUnavailable, + } + ErrUnauthorizedAccess = &Error{ + code: "UnauthorizedAccess", + description: "You are not authorized to perform this operation", + httpStatusCode: http.StatusUnauthorized, + } + ErrExpressionTooLong = &Error{ + code: "ExpressionTooLong", + description: "The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB.", + httpStatusCode: http.StatusBadRequest, + } + ErrIllegalSQLFunctionArgument = &Error{ + code: "IllegalSqlFunctionArgument", + description: "Illegal argument was used in the SQL function.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidKeyPath = &Error{ + code: "InvalidKeyPath", + description: "Key path in the SQL expression is invalid.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidCompressionFormat = &Error{ + code: "InvalidCompressionFormat", + description: "The file is not in a supported compression format. Only GZIP is supported at this time.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidFileHeaderInfo = &Error{ + code: "InvalidFileHeaderInfo", + description: "The FileHeaderInfo is invalid. Only NONE, USE, and IGNORE are supported.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidJSONType = &Error{ + code: "InvalidJsonType", + description: "The JsonType is invalid. Only DOCUMENT and LINES are supported at this time.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidQuoteFields = &Error{ + code: "InvalidQuoteFields", + description: "The QuoteFields is invalid. Only ALWAYS and ASNEEDED are supported.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidRequestParameter = &Error{ + code: "InvalidRequestParameter", + description: "The value of a parameter in SelectRequest element is invalid. 
Check the service API documentation and try again.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidDataType = &Error{ + code: "InvalidDataType", + description: "The SQL expression contains an invalid data type.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidTextEncoding = &Error{ + code: "InvalidTextEncoding", + description: "Invalid encoding type. Only UTF-8 encoding is supported at this time.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidTableAlias = &Error{ + code: "InvalidTableAlias", + description: "The SQL expression contains an invalid table alias.", + httpStatusCode: http.StatusBadRequest, + } + ErrMissingRequiredParameter = &Error{ + code: "MissingRequiredParameter", + description: "The SelectRequest entity is missing a required parameter. Check the service documentation and try again.", + httpStatusCode: http.StatusBadRequest, + } + ErrObjectSerializationConflict = &Error{ + code: "ObjectSerializationConflict", + description: "The SelectRequest entity can only contain one of CSV or JSON. Check the service documentation and try again.", + httpStatusCode: http.StatusBadRequest, + } + ErrUnsupportedSQLOperation = &Error{ + code: "UnsupportedSqlOperation", + description: "Encountered an unsupported SQL operation.", + httpStatusCode: http.StatusBadRequest, + } + ErrUnsupportedSQLStructure = &Error{ + code: "UnsupportedSqlStructure", + description: "Encountered an unsupported SQL structure. 
Check the SQL Reference.", + httpStatusCode: http.StatusBadRequest, + } + ErrUnsupportedSyntax = &Error{ + code: "UnsupportedSyntax", + description: "Encountered invalid syntax.", + httpStatusCode: http.StatusBadRequest, + } + ErrUnsupportedRangeHeader = &Error{ + code: "UnsupportedRangeHeader", + description: "Range header is not supported for this operation.", + httpStatusCode: http.StatusBadRequest, + } + ErrLexerInvalidChar = &Error{ + code: "LexerInvalidChar", + description: "The SQL expression contains an invalid character.", + httpStatusCode: http.StatusBadRequest, + } + ErrLexerInvalidOperator = &Error{ + code: "LexerInvalidOperator", + description: "The SQL expression contains an invalid literal.", + httpStatusCode: http.StatusBadRequest, + } + ErrLexerInvalidLiteral = &Error{ + code: "LexerInvalidLiteral", + description: "The SQL expression contains an invalid operator.", + httpStatusCode: http.StatusBadRequest, + } + ErrLexerInvalidIONLiteral = &Error{ + code: "LexerInvalidIONLiteral", + description: "The SQL expression contains an invalid operator.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseExpectedDatePart = &Error{ + code: "ParseExpectedDatePart", + description: "Did not find the expected date part in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseExpectedKeyword = &Error{ + code: "ParseExpectedKeyword", + description: "Did not find the expected keyword in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseExpectedTokenType = &Error{ + code: "ParseExpectedTokenType", + description: "Did not find the expected token in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseExpected2TokenTypes = &Error{ + code: "ParseExpected2TokenTypes", + description: "Did not find the expected token in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseExpectedNumber = &Error{ + code: "ParseExpectedNumber", + description: "Did not find the expected 
number in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseExpectedRightParenBuiltinFunctionCall = &Error{ + code: "ParseExpectedRightParenBuiltinFunctionCall", + description: "Did not find the expected right parenthesis character in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseExpectedTypeName = &Error{ + code: "ParseExpectedTypeName", + description: "Did not find the expected type name in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseExpectedWhenClause = &Error{ + code: "ParseExpectedWhenClause", + description: "Did not find the expected WHEN clause in the SQL expression. CASE is not supported.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseUnsupportedToken = &Error{ + code: "ParseUnsupportedToken", + description: "The SQL expression contains an unsupported token.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseUnsupportedLiteralsGroupBy = &Error{ + code: "ParseUnsupportedLiteralsGroupBy", + description: "The SQL expression contains an unsupported use of GROUP BY.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseExpectedMember = &Error{ + code: "ParseExpectedMember", + description: "The SQL expression contains an unsupported use of MEMBER.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseUnsupportedSelect = &Error{ + code: "ParseUnsupportedSelect", + description: "The SQL expression contains an unsupported use of SELECT.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseUnsupportedCase = &Error{ + code: "ParseUnsupportedCase", + description: "The SQL expression contains an unsupported use of CASE.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseUnsupportedCaseClause = &Error{ + code: "ParseUnsupportedCaseClause", + description: "The SQL expression contains an unsupported use of CASE.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseUnsupportedAlias = &Error{ + code: "ParseUnsupportedAlias", + description: "The SQL 
expression contains an unsupported use of ALIAS.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseUnsupportedSyntax = &Error{ + code: "ParseUnsupportedSyntax", + description: "The SQL expression contains unsupported syntax.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseUnknownOperator = &Error{ + code: "ParseUnknownOperator", + description: "The SQL expression contains an invalid operator.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseMissingIdentAfterAt = &Error{ + code: "ParseMissingIdentAfterAt", + description: "Did not find the expected identifier after the @ symbol in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseUnexpectedOperator = &Error{ + code: "ParseUnexpectedOperator", + description: "The SQL expression contains an unexpected operator.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseUnexpectedTerm = &Error{ + code: "ParseUnexpectedTerm", + description: "The SQL expression contains an unexpected term.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseUnexpectedToken = &Error{ + code: "ParseUnexpectedToken", + description: "The SQL expression contains an unexpected token.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseUnexpectedKeyword = &Error{ + code: "ParseUnexpectedKeyword", + description: "The SQL expression contains an unexpected keyword.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseExpectedExpression = &Error{ + code: "ParseExpectedExpression", + description: "Did not find the expected SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseExpectedLeftParenAfterCast = &Error{ + code: "ParseExpectedLeftParenAfterCast", + description: "Did not find expected the left parenthesis in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseExpectedLeftParenValueConstructor = &Error{ + code: "ParseExpectedLeftParenValueConstructor", + description: "Did not find expected the left parenthesis in the SQL expression.", + 
httpStatusCode: http.StatusBadRequest, + } + ErrParseExpectedLeftParenBuiltinFunctionCall = &Error{ + code: "ParseExpectedLeftParenBuiltinFunctionCall", + description: "Did not find the expected left parenthesis in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseExpectedArgumentDelimiter = &Error{ + code: "ParseExpectedArgumentDelimiter", + description: "Did not find the expected argument delimiter in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseCastArity = &Error{ + code: "ParseCastArity", + description: "The SQL expression CAST has incorrect arity.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseInvalidTypeParam = &Error{ + code: "ParseInvalidTypeParam", + description: "The SQL expression contains an invalid parameter value.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseEmptySelect = &Error{ + code: "ParseEmptySelect", + description: "The SQL expression contains an empty SELECT.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseSelectMissingFrom = &Error{ + code: "ParseSelectMissingFrom", + description: "GROUP is not supported in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseExpectedIdentForGroupName = &Error{ + code: "ParseExpectedIdentForGroupName", + description: "GROUP is not supported in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseExpectedIdentForAlias = &Error{ + code: "ParseExpectedIdentForAlias", + description: "Did not find the expected identifier for the alias in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseUnsupportedCallWithStar = &Error{ + code: "ParseUnsupportedCallWithStar", + description: "Only COUNT with (*) as a parameter is supported in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseNonUnaryAgregateFunctionCall = &Error{ + code: "ParseNonUnaryAgregateFunctionCall", + description: "Only one argument is supported for aggregate functions 
in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseMalformedJoin = &Error{ + code: "ParseMalformedJoin", + description: "JOIN is not supported in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseExpectedIdentForAt = &Error{ + code: "ParseExpectedIdentForAt", + description: "Did not find the expected identifier for AT name in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseAsteriskIsNotAloneInSelectList = &Error{ + code: "ParseAsteriskIsNotAloneInSelectList", + description: "Other expressions are not allowed in the SELECT list when '*' is used without dot notation in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseCannotMixSqbAndWildcardInSelectList = &Error{ + code: "ParseCannotMixSqbAndWildcardInSelectList", + description: "Cannot mix [] and * in the same expression in a SELECT list in SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrParseInvalidContextForWildcardInSelectList = &Error{ + code: "ParseInvalidContextForWildcardInSelectList", + description: "Invalid use of * in SELECT list in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrIncorrectSQLFunctionArgumentType = &Error{ + code: "IncorrectSqlFunctionArgumentType", + description: "Incorrect type of arguments in function call in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrValueParseFailure = &Error{ + code: "ValueParseFailure", + description: "Time stamp parse failure in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrEvaluatorInvalidArguments = &Error{ + code: "EvaluatorInvalidArguments", + description: "Incorrect number of arguments in the function call in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrIntegerOverflow = &Error{ + code: "IntegerOverflow", + description: "Int overflow or underflow in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + 
ErrLikeInvalidInputs = &Error{ + code: "LikeInvalidInputs", + description: "Invalid argument given to the LIKE clause in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrCastFailed = &Error{ + code: "CastFailed", + description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidCast = &Error{ + code: "InvalidCast", + description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrEvaluatorInvalidTimestampFormatPattern = &Error{ + code: "EvaluatorInvalidTimestampFormatPattern", + description: "Time stamp format pattern requires additional fields in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing = &Error{ + code: "EvaluatorInvalidTimestampFormatPatternSymbolForParsing", + description: "Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrEvaluatorTimestampFormatPatternDuplicateFields = &Error{ + code: "EvaluatorTimestampFormatPatternDuplicateFields", + description: "Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch = &Error{ + code: "EvaluatorUnterminatedTimestampFormatPatternToken", + description: "Time stamp format pattern contains unterminated token in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrEvaluatorUnterminatedTimestampFormatPatternToken = &Error{ + code: "EvaluatorInvalidTimestampFormatPatternToken", + description: "Time stamp format pattern contains an invalid token in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + 
ErrEvaluatorInvalidTimestampFormatPatternToken = &Error{ + code: "EvaluatorInvalidTimestampFormatPatternToken", + description: "Time stamp format pattern contains an invalid token in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrEvaluatorInvalidTimestampFormatPatternSymbol = &Error{ + code: "EvaluatorInvalidTimestampFormatPatternSymbol", + description: "Time stamp format pattern contains an invalid symbol in the SQL expression.", + httpStatusCode: http.StatusBadRequest, + } + ErrEvaluatorBindingDoesNotExist = &Error{ + code: "ErrEvaluatorBindingDoesNotExist", + description: "A column name or a path provided does not exist in the SQL expression", + httpStatusCode: http.StatusBadRequest, + } + ErrMissingHeaders = &Error{ + code: "MissingHeaders", + description: "Some headers in the query are missing from the file. Check the file and try again.", + httpStatusCode: http.StatusBadRequest, + } + ErrInvalidColumnIndex = &Error{ + code: "InvalidColumnIndex", + description: "The column index is invalid. 
Please check the service documentation and try again.", + httpStatusCode: http.StatusBadRequest, + } + ErrPostPolicyConditionInvalidFormat = &Error{ + code: "PostPolicyInvalidKeyName", + description: "Invalid according to Policy: Policy Conditions failed", + httpStatusCode: http.StatusForbidden, + } + ErrMalformedJSON = &Error{ + code: "MalformedJSON", + description: "The JSON was not well-formed or did not validate against our published format.", + httpStatusCode: http.StatusBadRequest, + } +) diff --git a/s3/handlers/responses/types.go b/s3/handlers/responses/types.go new file mode 100644 index 000000000..1ca496d00 --- /dev/null +++ b/s3/handlers/responses/types.go @@ -0,0 +1,191 @@ +package responses + +import ( + "encoding/xml" + "github.com/aws/aws-sdk-go/service/s3" +) + +type GetBucketAclResponse AccessControlPolicy + +// AccessControlPolicy +// +// 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a +// CustomersName@amazon.com +// +// +// +// +// 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a +// CustomersName@amazon.com +// +// FULL_CONTROL +// +// +// +type AccessControlPolicy struct { + Owner canonicalUser `xml:"Owner"` + AccessControlList accessControlList `xml:"AccessControlList"` +} + +type accessControlList struct { + Grant []Grant `xml:"Grant,omitempty"` +} +type canonicalUser struct { + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName,omitempty"` +} + +//Grant grant +type Grant struct { + Grantee Grantee `xml:"Grantee"` + Permission Permission `xml:"Permission"` +} + +//Grantee grant +type Grantee struct { + XMLNS string `xml:"xmlns:xsi,attr"` + XMLXSI string `xml:"xsi:type,attr"` + Type string `xml:"Type"` + ID string `xml:"ID,omitempty"` + DisplayName string `xml:"DisplayName,omitempty"` + URI string `xml:"URI,omitempty"` +} + +// Permission May be one of READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL +type Permission string + +// ListAllMyBucketsResult List All Buckets Result +type 
ListAllMyBucketsResult struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult"` + Owner *s3.Owner + Buckets []*s3.Bucket `xml:"Buckets>Bucket"` +} + +type CopyObjectResponse struct { + CopyObjectResult CopyObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult"` +} + +type CopyObjectResult struct { + LastModified string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` + ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"` +} + +// LocationResponse - format for location response. +type LocationResponse struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint" json:"-"` + Location string `xml:",chardata"` +} + +// ListObjectsResponse - format for list objects response. +type ListObjectsResponse struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"` + + Name string + Prefix string + Marker string + + // When response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as marker in the subsequent + // request to get next set of objects. Server lists objects in alphabetical + // order Note: This element is returned only if you have delimiter request parameter + // specified. If response does not include the NextMaker and it is truncated, + // you can use the value of the last Key in the response as the marker in the + // subsequent request to get the next set of object keys. + NextMarker string `xml:"NextMarker,omitempty"` + + MaxKeys int + Delimiter string + // A flag that indicates whether or not ListObjects returned all of the results + // that satisfied the search criteria. + IsTruncated bool + + Contents []Object + CommonPrefixes []CommonPrefix + + // Encoding type used to encode object keys in the response. + EncodingType string `xml:"EncodingType,omitempty"` +} + +// ListObjectsV2Response - format for list objects response. 
+type ListObjectsV2Response struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"` + + Name string + Prefix string + StartAfter string `xml:"StartAfter,omitempty"` + // When response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as marker in the subsequent + // request to get next set of objects. Server lists objects in alphabetical + // order Note: This element is returned only if you have delimiter request parameter + // specified. If response does not include the NextMaker and it is truncated, + // you can use the value of the last Key in the response as the marker in the + // subsequent request to get the next set of object keys. + ContinuationToken string `xml:"ContinuationToken,omitempty"` + NextContinuationToken string `xml:"NextContinuationToken,omitempty"` + + KeyCount int + MaxKeys int + Delimiter string + // A flag that indicates whether or not ListObjects returned all of the results + // that satisfied the search criteria. + IsTruncated bool + + Contents []Object + CommonPrefixes []CommonPrefix + + // Encoding type used to encode object keys in the response. + EncodingType string `xml:"EncodingType,omitempty"` +} + +// Object container for object metadata +type Object struct { + Key string + LastModified string // time string of format "2006-01-02T15:04:05.000Z" + ETag string + Size int64 + + // Owner of the object. + Owner s3.Owner + + // The class of storage used to store the object. + StorageClass string + + // UserMetadata user-defined metadata + UserMetadata StringMap `xml:"UserMetadata,omitempty"` +} + +// StringMap is a map[string]string +type StringMap map[string]string + +// MarshalXML - StringMap marshals into XML. 
+func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + tokens := []xml.Token{start} + + for key, value := range s { + t := xml.StartElement{} + t.Name = xml.Name{ + Space: "", + Local: key, + } + tokens = append(tokens, t, xml.CharData(value), xml.EndElement{Name: t.Name}) + } + + tokens = append(tokens, xml.EndElement{ + Name: start.Name, + }) + + for _, t := range tokens { + if err := e.EncodeToken(t); err != nil { + return err + } + } + + // flush to ensure tokens are written + return e.Flush() +} + +// CommonPrefix container for prefix response in ListObjectsResponse +type CommonPrefix struct { + Prefix string +} diff --git a/s3/handlers/responses/types_common.go b/s3/handlers/responses/types_common.go new file mode 100644 index 000000000..66522789b --- /dev/null +++ b/s3/handlers/responses/types_common.go @@ -0,0 +1 @@ +package responses diff --git a/s3/handlers/response.go b/s3/handlers/responses/wirters.go similarity index 86% rename from s3/handlers/response.go rename to s3/handlers/responses/wirters.go index f505de24c..4fc2cb56c 100644 --- a/s3/handlers/response.go +++ b/s3/handlers/responses/wirters.go @@ -1,7 +1,8 @@ -package handlers +package responses import ( "fmt" + "github.com/bittorrent/go-btfs/s3/services" "net/http" "github.com/aws/aws-sdk-go/aws" @@ -24,7 +25,7 @@ func WriteDeleteBucketResponse(w http.ResponseWriter) { return } -func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, bucketMetas []*BucketMetadata) { +func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, bucketMetas []*services.BucketMetadata) { var buckets []*s3.Bucket for _, b := range bucketMetas { buckets = append(buckets, &s3.Bucket{ @@ -45,13 +46,13 @@ func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, bucketMeta return } -func WriteGetBucketAclResponse(w http.ResponseWriter, r *http.Request, accessKeyRecord *AccessKeyRecord, acl string) { - resp := AccessControlPolicy{} +func 
WriteGetBucketAclResponse(w http.ResponseWriter, r *http.Request, ack *services.AccessKey, acl string) { + resp := GetBucketAclResponse{} fmt.Printf(" -1- get acl resp: %+v \n", resp) - id := accessKeyRecord.Key + id := ack.Key if resp.Owner.DisplayName == "" { - resp.Owner.DisplayName = accessKeyRecord.Key + resp.Owner.DisplayName = ack.Key resp.Owner.ID = id } fmt.Printf(" -2- get acl resp: %+v \n", resp) @@ -60,7 +61,7 @@ func WriteGetBucketAclResponse(w http.ResponseWriter, r *http.Request, accessKey resp.AccessControlList.Grant = append(resp.AccessControlList.Grant, Grant{ Grantee: Grantee{ ID: id, - DisplayName: accessKeyRecord.Key, + DisplayName: ack.Key, Type: "CanonicalUser", XMLXSI: "CanonicalUser", XMLNS: "http://www.w3.org/2001/XMLSchema-instance"}, diff --git a/s3/handlers/responses/writers_common.go b/s3/handlers/responses/writers_common.go new file mode 100644 index 000000000..c867897e0 --- /dev/null +++ b/s3/handlers/responses/writers_common.go @@ -0,0 +1,171 @@ +package responses + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/gorilla/mux" + logging "github.com/ipfs/go-log/v2" + "net/http" + "net/url" + "strconv" + "time" +) + +var log = logging.Logger("resp") + +type mimeType string + +const ( + mimeNone mimeType = "" + mimeJSON mimeType = "application/json" + //mimeXML application/xml UTF-8 + mimeXML mimeType = " application/xml" +) + +// APIErrorResponse - error response format +type APIErrorResponse struct { + XMLName xml.Name `xml:"Error" json:"-"` + Code string + Message string + Resource string + RequestID string `xml:"RequestId" json:"RequestId"` + HostID string `xml:"HostId" json:"HostId"` +} + +type RESTErrorResponse struct { + XMLName xml.Name `xml:"Error" json:"-"` + Code string `xml:"Code" json:"Code"` + Message string `xml:"Message" json:"Message"` + Resource string `xml:"Resource" json:"Resource"` + RequestID string `xml:"RequestId" json:"RequestId"` + Key 
string `xml:"Key,omitempty" json:"Key,omitempty"` + BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"` +} + +func WriteErrorResponseHeadersOnly(w http.ResponseWriter, r *http.Request, rerr *Error) { + writeResponse(w, r, rerr.HTTPStatusCode(), nil, mimeNone) +} + +// WriteErrorResponse write ErrorResponse +func WriteErrorResponse(w http.ResponseWriter, r *http.Request, rerr *Error) { + fmt.Println("response err: ", rerr.Error(), r.URL, r.Method, r.Header) + vars := mux.Vars(r) + bucket := vars["bucket"] + object := vars["object"] + errorResponse := RESTErrorResponse{ + Code: rerr.Code(), + BucketName: bucket, + Key: object, + Message: rerr.Description(), + Resource: r.URL.Path, + RequestID: fmt.Sprintf("%d", time.Now().UnixNano()), + } + WriteXMLResponse(w, r, rerr.HTTPStatusCode(), errorResponse) +} + +// WriteSuccessResponse write SuccessResponseHeadersOnly +func WriteSuccessResponse(w http.ResponseWriter, r *http.Request) { + writeResponse(w, r, http.StatusOK, nil, mimeNone) +} + +// WriteSuccessResponseXML Write Success Response XML +func WriteSuccessResponseXML(w http.ResponseWriter, r *http.Request, response interface{}) { + WriteXMLResponse(w, r, http.StatusOK, response) +} + +// WriteXMLResponse Write XMLResponse +func WriteXMLResponse(w http.ResponseWriter, r *http.Request, statusCode int, response interface{}) { + writeResponse(w, r, statusCode, encodeXMLResponse(response), mimeXML) +} + +func writeResponse(w http.ResponseWriter, r *http.Request, statusCode int, response []byte, mType mimeType) { + setCommonHeaders(w, r) + if response != nil { + w.Header().Set(consts.ContentLength, strconv.Itoa(len(response))) + } + if mType != mimeNone { + w.Header().Set(consts.ContentType, string(mType)) + } + w.WriteHeader(statusCode) + if response != nil { + log.Debugf("status %d %s: %s", statusCode, mType, string(response)) + _, err := w.Write(response) + if err != nil { + log.Errorf("write err: %v", err) + } + w.(http.Flusher).Flush() + } +} 
+ +func setCommonHeaders(w http.ResponseWriter, r *http.Request) { + w.Header().Set(consts.ServerInfo, "FDS") + w.Header().Set(consts.AmzRequestID, fmt.Sprintf("%d", time.Now().UnixNano())) + w.Header().Set(consts.AcceptRanges, "bytes") + if r.Header.Get("Origin") != "" { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Credentials", "true") + } +} + +// encodeXMLResponse Encodes the response headers into XML format. +func encodeXMLResponse(response interface{}) []byte { + var bytesBuffer bytes.Buffer + bytesBuffer.WriteString(xml.Header) + e := xml.NewEncoder(&bytesBuffer) + e.Encode(response) + return bytesBuffer.Bytes() +} + +// WriteErrorResponseJSON - writes error response in JSON format; +// useful for admin APIs. +func WriteErrorResponseJSON(w http.ResponseWriter, err *Error, reqURL *url.URL, host string) { + // Generate error response. + errorResponse := getAPIErrorResponse(err, reqURL.Path, w.Header().Get(consts.AmzRequestID), host) + encodedErrorResponse := encodeResponseJSON(errorResponse) + writeResponseSimple(w, err.HTTPStatusCode(), encodedErrorResponse, mimeJSON) +} + +// getErrorResponse gets in standard error and resource value and +// provides a encodable populated response values +func getAPIErrorResponse(err *Error, resource, requestID, hostID string) APIErrorResponse { + return APIErrorResponse{ + Code: err.Code(), + Message: err.Description(), + Resource: resource, + RequestID: requestID, + HostID: hostID, + } +} + +// Encodes the response headers into JSON format. +func encodeResponseJSON(response interface{}) []byte { + var bytesBuffer bytes.Buffer + e := json.NewEncoder(&bytesBuffer) + e.Encode(response) + return bytesBuffer.Bytes() +} + +// WriteSuccessResponseJSON writes success headers and response if any, +// with content-type set to `application/json`. 
+func WriteSuccessResponseJSON(w http.ResponseWriter, response []byte) { + writeResponseSimple(w, http.StatusOK, response, mimeJSON) +} + +func writeResponseSimple(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) { + if mType != mimeNone { + w.Header().Set(consts.ContentType, string(mType)) + } + w.Header().Set(consts.ContentLength, strconv.Itoa(len(response))) + w.WriteHeader(statusCode) + if response != nil { + w.Write(response) + } +} + +// WriteSuccessNoContent writes success headers with http status 204 +func WriteSuccessNoContent(w http.ResponseWriter) { + writeResponseSimple(w, http.StatusNoContent, nil, mimeNone) +} diff --git a/s3/handlers/s3_error.go b/s3/handlers/s3_error.go deleted file mode 100644 index b577e5127..000000000 --- a/s3/handlers/s3_error.go +++ /dev/null @@ -1,44 +0,0 @@ -package handlers - -// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html -var s3ErrorResponseMap = map[string]string{ - "AccessDenied": "Access Denied.", - "BadDigest": "The Content-Md5 you specified did not match what we received.", - "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", - "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", - "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", - "InternalError": "We encountered an internal error, please try again.", - "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", - "InvalidBucketName": "The specified bucket is not valid.", - "InvalidDigest": "The Content-Md5 you specified is not valid.", - "InvalidRange": "The requested range is not satisfiable", - "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", - "MissingContentLength": "You must provide the Content-Length HTTP header.", - "MissingContentMD5": "Missing required header for this request: Content-Md5.", - 
"MissingRequestBodyError": "Request body is empty.", - "NoSuchBucket": "The specified bucket does not exist.", - "NoSuchBucketPolicy": "The bucket policy does not exist", - "NoSuchKey": "The specified key does not exist.", - "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", - "NotImplemented": "A header you provided implies functionality that is not implemented", - "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", - "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", - "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.", - "MethodNotAllowed": "The specified method is not allowed against this resource.", - "InvalidPart": "One or more of the specified parts could not be found.", - "InvalidPartOrder": "The list of parts was not in ascending order. 
The parts list must be specified in order by part number.", - "InvalidObjectState": "The operation is not valid for the current state of the object.", - "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", - "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", - "BucketNotEmpty": "The bucket you tried to delete is not empty", - "AllAccessDisabled": "All access to this bucket has been disabled.", - "MalformedPolicy": "Policy has invalid resource.", - "MissingFields": "Missing fields in request.", - "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", - "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", - "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", - "InvalidDuration": "Duration provided in the request is invalid.", - "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", - // Add new API errors here. 
- "NoSuchCORSConfiguration": "The CORS configuration does not exist", -} diff --git a/s3/handlers/s3api_errors.go b/s3/handlers/s3api_errors.go deleted file mode 100644 index 8ca8ba72b..000000000 --- a/s3/handlers/s3api_errors.go +++ /dev/null @@ -1,1312 +0,0 @@ -package handlers - -import ( - "encoding/xml" - "fmt" - "net/http" -) - -// APIError structure -type APIError struct { - Code string - Description string - HTTPStatusCode int -} - -// RESTErrorResponse - error response format -type RESTErrorResponse struct { - XMLName xml.Name `xml:"Error" json:"-"` - Code string `xml:"Code" json:"Code"` - Message string `xml:"Message" json:"Message"` - Resource string `xml:"Resource" json:"Resource"` - RequestID string `xml:"RequestId" json:"RequestId"` - Key string `xml:"Key,omitempty" json:"Key,omitempty"` - BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"` -} - -// Error - Returns S3 error string. -func (e RESTErrorResponse) Error() string { - if e.Message == "" { - msg, ok := s3ErrorResponseMap[e.Code] - if !ok { - msg = fmt.Sprintf("Error response code %s.", e.Code) - } - return msg - } - return e.Message -} - -// ErrorCode type of error status. 
-type ErrorCode int - -// Error codes, non exhaustive list - http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html -const ( - ErrCodeNone ErrorCode = iota - ErrCodeAccessDenied - ErrCodeBadDigest - ErrCodeEntityTooSmall - ErrCodeEntityTooLarge - ErrCodeIncompleteBody - ErrCodeInternalError - ErrCodeInvalidAccessKeyID - ErrCodeAccessKeyDisabled - ErrCodeInvalidBucketName - ErrCodeInvalidDigest - ErrCodeInvalidRange - ErrCodeInvalidRangePartNumber - ErrCodeInvalidCopyPartRange - ErrCodeInvalidCopyPartRangeSource - ErrCodeInvalidMaxKeys - ErrCodeInvalidEncodingMethod - ErrCodeInvalidMaxUploads - ErrCodeInvalidMaxParts - ErrCodeInvalidPartNumberMarker - ErrCodeInvalidRequestBody - ErrCodeInvalidCopySource - ErrCodeInvalidMetadataDirective - ErrCodeCodeInvalidCopyDest - ErrCodeInvalidPolicyDocument - ErrCodeInvalidObjectState - ErrCodeMalformedXML - ErrCodeMissingContentLength - ErrCodeMissingContentMD5 - ErrCodeMissingRequestBodyError - ErrCodeMissingSecurityHeader - ErrCodeNoSuchUser - ErrCodeUserAlreadyExists - ErrCodeNoSuchUserPolicy - ErrCodeUserPolicyAlreadyExists - ErrCodeNoSuchBucket - ErrCodeNoSuchBucketPolicy - ErrCodeNoSuchLifecycleConfiguration - ErrCodeNoSuchCORSConfiguration - ErrCodeNoSuchWebsiteConfiguration - ErrCodeReplicationConfigurationNotFoundError - ErrCodeReplicationNeedsVersioningError - ErrCodeReplicationBucketNeedsVersioningError - ErrCodeObjectRestoreAlreadyInProgress - ErrCodeNoSuchKey - ErrCodeNoSuchUpload - ErrCodeInvalidVersionID - ErrCodeNoSuchVersion - ErrCodeNotImplemented - ErrCodePreconditionFailed - ErrCodeRequestTimeTooSkewed - ErrCodeSignatureDoesNotMatch - ErrCodeMethodNotAllowed - ErrCodeInvalidPart - ErrCodeInvalidPartOrder - ErrCodeAuthorizationHeaderMalformed - ErrCodeMalformedDate - ErrCodeMalformedPOSTRequest - ErrCodePOSTFileRequired - ErrCodeSignatureVersionNotSupported - ErrCodeBucketNotEmpty - ErrCodeAllAccessDisabled - ErrCodeMalformedPolicy - ErrCodeMissingFields - ErrCodeMissingCredTag - 
ErrCodeCredMalformed - ErrCodeInvalidRegion - - ErrCodeMissingSignTag - ErrCodeMissingSignHeadersTag - - ErrCodeAuthHeaderEmpty - ErrCodeExpiredPresignRequest - ErrCodeRequestNotReadyYet - ErrCodeUnsignedHeaders - ErrCodeMissingDateHeader - - ErrCodeBucketAlreadyOwnedByYou - ErrCodeInvalidDuration - ErrCodeBucketAlreadyExists - ErrCodeMetadataTooLarge - ErrCodeUnsupportedMetadata - - ErrCodeSlowDown - ErrCodeBadRequest - ErrCodeKeyTooLongError - ErrCodeInvalidBucketObjectLockConfiguration - ErrCodeObjectLockConfigurationNotAllowed - ErrCodeNoSuchObjectLockConfiguration - ErrCodeObjectLocked - ErrCodeInvalidRetentionDate - ErrCodePastObjectLockRetainDate - ErrCodeUnknownWORMModeDirective - ErrCodeBucketTaggingNotFound - ErrCodeObjectLockInvalidHeaders - ErrCodeInvalidTagDirective - // Add new error codes here. - - // SSE-S3 related API errors - ErrCodeInvalidEncryptionMethod - ErrCodeInvalidQueryParams - ErrCodeNoAccessKey - ErrCodeInvalidToken - - // Bucket notification related errors. - ErrCodeEventNotification - ErrCodeARNNotification - ErrCodeRegionNotification - ErrCodeOverlappingFilterNotification - ErrCodeFilterNameInvalid - ErrCodeFilterNamePrefix - ErrCodeFilterNameSuffix - ErrCodeFilterValueInvalid - ErrCodeOverlappingConfigs - - // S3 extended errors. - ErrCodeContentSHA256Mismatch - - // Add new extended error codes here. 
- ErrCodeInvalidObjectName - ErrCodeInvalidObjectNamePrefixSlash - ErrCodeClientDisconnected - ErrCodeOperationTimedOut - ErrCodeOperationMaxedOut - ErrCodeInvalidRequest - ErrCodeIncorrectContinuationToken - ErrCodeInvalidFormatAccessKey - - // S3 Select Errors - ErrCodeEmptyRequestBody - ErrCodeUnsupportedFunction - ErrCodeInvalidExpressionType - ErrCodeBusy - ErrCodeUnauthorizedAccess - ErrCodeExpressionTooLong - ErrCodeIllegalSQLFunctionArgument - ErrCodeInvalidKeyPath - ErrCodeInvalidCompressionFormat - ErrCodeInvalidFileHeaderInfo - ErrCodeInvalidJSONType - ErrCodeInvalidQuoteFields - ErrCodeInvalidRequestParameter - ErrCodeInvalidDataType - ErrCodeInvalidTextEncoding - ErrCodeInvalidDataSource - ErrCodeInvalidTableAlias - ErrCodeMissingRequiredParameter - ErrCodeObjectSerializationConflict - ErrCodeUnsupportedSQLOperation - ErrCodeUnsupportedSQLStructure - ErrCodeUnsupportedSyntax - ErrCodeUnsupportedRangeHeader - ErrCodeLexerInvalidChar - ErrCodeLexerInvalidOperator - ErrCodeLexerInvalidLiteral - ErrCodeLexerInvalidIONLiteral - ErrCodeParseExpectedDatePart - ErrCodeParseExpectedKeyword - ErrCodeParseExpectedTokenType - ErrCodeParseExpected2TokenTypes - ErrCodeParseExpectedNumber - ErrCodeParseExpectedRightParenBuiltinFunctionCall - ErrCodeParseExpectedTypeName - ErrCodeParseExpectedWhenClause - ErrCodeParseUnsupportedToken - ErrCodeParseUnsupportedLiteralsGroupBy - ErrCodeParseExpectedMember - ErrCodeParseUnsupportedSelect - ErrCodeParseUnsupportedCase - ErrCodeParseUnsupportedCaseClause - ErrCodeParseUnsupportedAlias - ErrCodeParseUnsupportedSyntax - ErrCodeParseUnknownOperator - ErrCodeParseMissingIdentAfterAt - ErrCodeParseUnexpectedOperator - ErrCodeParseUnexpectedTerm - ErrCodeParseUnexpectedToken - ErrCodeParseUnexpectedKeyword - ErrCodeParseExpectedExpression - ErrCodeParseExpectedLeftParenAfterCast - ErrCodeParseExpectedLeftParenValueConstructor - ErrCodeParseExpectedLeftParenBuiltinFunctionCall - ErrCodeParseExpectedArgumentDelimiter - 
ErrCodeParseCastArity - ErrCodeParseInvalidTypeParam - ErrCodeParseEmptySelect - ErrCodeParseSelectMissingFrom - ErrCodeParseExpectedIdentForGroupName - ErrCodeParseExpectedIdentForAlias - ErrCodeParseUnsupportedCallWithStar - ErrCodeParseNonUnaryAgregateFunctionCall - ErrCodeParseMalformedJoin - ErrCodeParseExpectedIdentForAt - ErrCodeParseAsteriskIsNotAloneInSelectList - ErrCodeParseCannotMixSqbAndWildcardInSelectList - ErrCodeParseInvalidContextForWildcardInSelectList - ErrCodeIncorrectSQLFunctionArgumentType - ErrCodeValueParseFailure - ErrCodeEvaluatorInvalidArguments - ErrCodeIntegerOverflow - ErrCodeLikeInvalidInputs - ErrCodeCastFailed - ErrCodeInvalidCast - ErrCodeEvaluatorInvalidTimestampFormatPattern - ErrCodeEvaluatorInvalidTimestampFormatPatternSymbolForParsing - ErrCodeEvaluatorTimestampFormatPatternDuplicateFields - ErrCodeEvaluatorTimestampFormatPatternHourClockAmPmMismatch - ErrCodeEvaluatorUnterminatedTimestampFormatPatternToken - ErrCodeEvaluatorInvalidTimestampFormatPatternToken - ErrCodeEvaluatorInvalidTimestampFormatPatternSymbol - ErrCodeEvaluatorBindingDoesNotExist - ErrCodeMissingHeaders - ErrCodeInvalidColumnIndex - - ErrCodePostPolicyConditionInvalidFormat - - ErrCodeMalformedJSON -) - -// error code to APIError structure, these fields carry respective -// descriptions for all the error responses. 
-var errorCodeResponse = map[ErrorCode]APIError{ - ErrCodeCodeInvalidCopyDest: { - Code: "InvalidRequest", - Description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidCopySource: { - Code: "InvalidArgument", - Description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidMetadataDirective: { - Code: "InvalidArgument", - Description: "Unknown metadata directive.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidRequestBody: { - Code: "InvalidArgument", - Description: "Body shouldn't be set for this request.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidMaxUploads: { - Code: "InvalidArgument", - Description: "Argument max-uploads must be an integer between 0 and 2147483647", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidMaxKeys: { - Code: "InvalidArgument", - Description: "Argument maxKeys must be an integer between 0 and 2147483647", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidEncodingMethod: { - Code: "InvalidArgument", - Description: "Invalid Encoding Method specified in Request", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidMaxParts: { - Code: "InvalidArgument", - Description: "Part number must be an integer between 1 and 10000, inclusive", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidPartNumberMarker: { - Code: "InvalidArgument", - Description: "Argument partNumberMarker must be an integer.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidPolicyDocument: { - Code: "InvalidPolicyDocument", - Description: "The content of the form does not meet the conditions specified in the policy document.", - HTTPStatusCode: http.StatusBadRequest, - }, - 
ErrCodeAccessDenied: { - Code: "AccessDenied", - Description: "Access Denied.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrCodeBadDigest: { - Code: "BadDigest", - Description: "The Content-Md5 you specified did not match what we received.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeEntityTooSmall: { - Code: "EntityTooSmall", - Description: "Your proposed upload is smaller than the minimum allowed object size.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeEntityTooLarge: { - Code: "EntityTooLarge", - Description: "Your proposed upload exceeds the maximum allowed object size.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeIncompleteBody: { - Code: "IncompleteBody", - Description: "You did not provide the number of bytes specified by the Content-Length HTTP header.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInternalError: { - Code: "InternalError", - Description: "We encountered an internal error, please try again.", - HTTPStatusCode: http.StatusInternalServerError, - }, - ErrCodeInvalidAccessKeyID: { - Code: "InvalidAccessKeyId", - Description: "The Access Key Id you provided does not exist in our records.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrCodeAccessKeyDisabled: { - Code: "InvalidAccessKeyId", - Description: "Your account is disabled; please contact your administrator.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrCodeInvalidBucketName: { - Code: "InvalidBucketName", - Description: "The specified bucket is not valid.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidDigest: { - Code: "InvalidDigest", - Description: "The Content-Md5 you specified is not valid.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidRange: { - Code: "InvalidRange", - Description: "The requested range is not satisfiable", - HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - ErrCodeInvalidRangePartNumber: { - Code: "InvalidRequest", - Description: "Cannot specify both Range 
header and partNumber query parameter", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeMalformedXML: { - Code: "MalformedXML", - Description: "The XML you provided was not well-formed or did not validate against our published schema.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeMissingContentLength: { - Code: "MissingContentLength", - Description: "You must provide the Content-Length HTTP header.", - HTTPStatusCode: http.StatusLengthRequired, - }, - ErrCodeMissingContentMD5: { - Code: "MissingContentMD5", - Description: "Missing required header for this request: Content-Md5.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeMissingSecurityHeader: { - Code: "MissingSecurityHeader", - Description: "Your request was missing a required header", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeMissingRequestBodyError: { - Code: "MissingRequestBodyError", - Description: "Request body is empty.", - HTTPStatusCode: http.StatusLengthRequired, - }, - ErrCodeNoSuchBucket: { - Code: "NoSuchBucket", - Description: "The specified bucket does not exist", - HTTPStatusCode: http.StatusNotFound, - }, - ErrCodeNoSuchBucketPolicy: { - Code: "NoSuchBucketPolicy", - Description: "The bucket policy does not exist", - HTTPStatusCode: http.StatusNotFound, - }, - ErrCodeNoSuchLifecycleConfiguration: { - Code: "NoSuchLifecycleConfiguration", - Description: "The lifecycle configuration does not exist", - HTTPStatusCode: http.StatusNotFound, - }, - ErrCodeNoSuchUser: { - Code: "NoSuchUser", - Description: "The specified user does not exist", - HTTPStatusCode: http.StatusConflict, - }, - ErrCodeUserAlreadyExists: { - Code: "UserAlreadyExists", - Description: "The request was rejected because it attempted to create a resource that already exists .", - HTTPStatusCode: http.StatusConflict, - }, - ErrCodeNoSuchUserPolicy: { - Code: "NoSuchUserPolicy", - Description: "The specified user policy does not exist", - HTTPStatusCode: http.StatusConflict, - }, - 
ErrCodeUserPolicyAlreadyExists: { - Code: "UserPolicyAlreadyExists", - Description: "The same user policy already exists .", - HTTPStatusCode: http.StatusConflict, - }, - ErrCodeNoSuchKey: { - Code: "NoSuchKey", - Description: "The specified key does not exist.", - HTTPStatusCode: http.StatusNotFound, - }, - ErrCodeNoSuchUpload: { - Code: "NoSuchUpload", - Description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", - HTTPStatusCode: http.StatusNotFound, - }, - ErrCodeInvalidVersionID: { - Code: "InvalidArgument", - Description: "Invalid version id specified", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeNoSuchVersion: { - Code: "NoSuchVersion", - Description: "The specified version does not exist.", - HTTPStatusCode: http.StatusNotFound, - }, - ErrCodeNotImplemented: { - Code: "NotImplemented", - Description: "A header you provided implies functionality that is not implemented", - HTTPStatusCode: http.StatusNotImplemented, - }, - ErrCodePreconditionFailed: { - Code: "PreconditionFailed", - Description: "At least one of the pre-conditions you specified did not hold", - HTTPStatusCode: http.StatusPreconditionFailed, - }, - ErrCodeRequestTimeTooSkewed: { - Code: "RequestTimeTooSkewed", - Description: "The difference between the request time and the server's time is too large.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrCodeSignatureDoesNotMatch: { - Code: "SignatureDoesNotMatch", - Description: "The request signature we calculated does not match the signature you provided. Check your key and signing method.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrCodeMethodNotAllowed: { - Code: "MethodNotAllowed", - Description: "The specified method is not allowed against this resource.", - HTTPStatusCode: http.StatusMethodNotAllowed, - }, - ErrCodeInvalidPart: { - Code: "InvalidPart", - Description: "One or more of the specified parts could not be found. 
The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidPartOrder: { - Code: "InvalidPartOrder", - Description: "The list of parts was not in ascending order. The parts list must be specified in order by part number.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidObjectState: { - Code: "InvalidObjectState", - Description: "The operation is not valid for the current state of the object.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrCodeAuthorizationHeaderMalformed: { - Code: "AuthorizationHeaderMalformed", - Description: "The authorization header is malformed; the region is wrong; expecting 'us-east-1'.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeMalformedPOSTRequest: { - Code: "MalformedPOSTRequest", - Description: "The body of your POST request is not well-formed multipart/form-data.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodePOSTFileRequired: { - Code: "InvalidArgument", - Description: "POST requires exactly one file upload per request.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeSignatureVersionNotSupported: { - Code: "InvalidRequest", - Description: "The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeBucketNotEmpty: { - Code: "BucketNotEmpty", - Description: "The bucket you tried to delete is not empty", - HTTPStatusCode: http.StatusConflict, - }, - ErrCodeBucketAlreadyExists: { - Code: "BucketAlreadyExists", - Description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. 
Please select a different name and try again.", - HTTPStatusCode: http.StatusConflict, - }, - ErrCodeAllAccessDisabled: { - Code: "AllAccessDisabled", - Description: "All access to this resource has been disabled.", - HTTPStatusCode: http.StatusForbidden, - }, - ErrCodeMalformedPolicy: { - Code: "MalformedPolicy", - Description: "Policy has invalid resource.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeMissingCredTag: { - Code: "InvalidRequest", - Description: "Missing Credential field for this request.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidRegion: { - Code: "InvalidRegion", - Description: "Region does not match.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeMissingSignTag: { - Code: "AccessDenied", - Description: "Signature header missing Signature field.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeMissingSignHeadersTag: { - Code: "InvalidArgument", - Description: "Signature header missing SignedHeaders field.", - HTTPStatusCode: http.StatusBadRequest, - }, - - ErrCodeAuthHeaderEmpty: { - Code: "InvalidArgument", - Description: "Authorization header is invalid -- one and only one ' ' (space) required.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeMissingDateHeader: { - Code: "AccessDenied", - Description: "AWS authentication requires a valid Date or x-amz-date header", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeExpiredPresignRequest: { - Code: "AccessDenied", - Description: "Request has expired", - HTTPStatusCode: http.StatusForbidden, - }, - ErrCodeRequestNotReadyYet: { - Code: "AccessDenied", - Description: "Request is not valid yet", - HTTPStatusCode: http.StatusForbidden, - }, - ErrCodeSlowDown: { - Code: "SlowDown", - Description: "Resource requested is unreadable, please reduce your request rate", - HTTPStatusCode: http.StatusServiceUnavailable, - }, - ErrCodeBadRequest: { - Code: "BadRequest", - Description: "400 BadRequest", - HTTPStatusCode: http.StatusBadRequest, - }, - 
ErrCodeKeyTooLongError: { - Code: "KeyTooLongError", - Description: "Your key is too long", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeUnsignedHeaders: { - Code: "AccessDenied", - Description: "There were headers present in the request which were not signed", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeBucketAlreadyOwnedByYou: { - Code: "BucketAlreadyOwnedByYou", - Description: "Your previous request to create the named bucket succeeded and you already own it.", - HTTPStatusCode: http.StatusConflict, - }, - ErrCodeInvalidDuration: { - Code: "InvalidDuration", - Description: "Duration provided in the request is invalid.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidBucketObjectLockConfiguration: { - Code: "InvalidRequest", - Description: "Bucket is missing ObjectLockConfiguration", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeBucketTaggingNotFound: { - Code: "NoSuchTagSet", - Description: "The TagSet does not exist", - HTTPStatusCode: http.StatusNotFound, - }, - ErrCodeObjectLockConfigurationNotAllowed: { - Code: "InvalidBucketState", - Description: "Object Lock configuration cannot be enabled on existing buckets", - HTTPStatusCode: http.StatusConflict, - }, - ErrCodeNoSuchCORSConfiguration: { - Code: "NoSuchCORSConfiguration", - Description: "The CORS configuration does not exist", - HTTPStatusCode: http.StatusNotFound, - }, - ErrCodeNoSuchWebsiteConfiguration: { - Code: "NoSuchWebsiteConfiguration", - Description: "The specified bucket does not have a website configuration", - HTTPStatusCode: http.StatusNotFound, - }, - ErrCodeReplicationConfigurationNotFoundError: { - Code: "ReplicationConfigurationNotFoundError", - Description: "The replication configuration was not found", - HTTPStatusCode: http.StatusNotFound, - }, - ErrCodeReplicationNeedsVersioningError: { - Code: "InvalidRequest", - Description: "Versioning must be 'Enabled' on the bucket to apply a replication configuration", - HTTPStatusCode: 
http.StatusBadRequest, - }, - ErrCodeReplicationBucketNeedsVersioningError: { - Code: "InvalidRequest", - Description: "Versioning must be 'Enabled' on the bucket to add a replication target", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeNoSuchObjectLockConfiguration: { - Code: "NoSuchObjectLockConfiguration", - Description: "The specified object does not have a ObjectLock configuration", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeObjectLocked: { - Code: "InvalidRequest", - Description: "Object is WORM protected and cannot be overwritten", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidRetentionDate: { - Code: "InvalidRequest", - Description: "Date must be provided in ISO 8601 format", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodePastObjectLockRetainDate: { - Code: "InvalidRequest", - Description: "the retain until date must be in the future", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeUnknownWORMModeDirective: { - Code: "InvalidRequest", - Description: "unknown wormMode directive", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeObjectLockInvalidHeaders: { - Code: "InvalidRequest", - Description: "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeObjectRestoreAlreadyInProgress: { - Code: "RestoreAlreadyInProgress", - Description: "Object restore is already in progress", - HTTPStatusCode: http.StatusConflict, - }, - // Bucket notification related errors. - ErrCodeEventNotification: { - Code: "InvalidArgument", - Description: "A specified event is not supported for notifications.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeARNNotification: { - Code: "InvalidArgument", - Description: "A specified destination ARN does not exist or is not well-formed. 
Verify the destination ARN.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeRegionNotification: { - Code: "InvalidArgument", - Description: "A specified destination is in a different region than the bucket. You must use a destination that resides in the same region as the bucket.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeOverlappingFilterNotification: { - Code: "InvalidArgument", - Description: "An object key name filtering rule defined with overlapping prefixes, overlapping suffixes, or overlapping combinations of prefixes and suffixes for the same event types.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeFilterNameInvalid: { - Code: "InvalidArgument", - Description: "filter rule name must be either prefix or suffix", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeFilterNamePrefix: { - Code: "InvalidArgument", - Description: "Cannot specify more than one prefix rule in a filter.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeFilterNameSuffix: { - Code: "InvalidArgument", - Description: "Cannot specify more than one suffix rule in a filter.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeFilterValueInvalid: { - Code: "InvalidArgument", - Description: "Size of filter rule value cannot exceed 1024 bytes in UTF-8 representation", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeOverlappingConfigs: { - Code: "InvalidArgument", - Description: "Configurations overlap. 
Configurations on the same bucket cannot share a common event type.", - HTTPStatusCode: http.StatusBadRequest, - }, - - ErrCodeInvalidCopyPartRange: { - Code: "InvalidArgument", - Description: "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidCopyPartRangeSource: { - Code: "InvalidArgument", - Description: "Range specified is not valid for source object", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeMetadataTooLarge: { - Code: "MetadataTooLarge", - Description: "Your metadata headers exceed the maximum allowed metadata size.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidTagDirective: { - Code: "InvalidArgument", - Description: "Unknown tag directive.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidEncryptionMethod: { - Code: "InvalidRequest", - Description: "The encryption method specified is not supported", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidQueryParams: { - Code: "AuthorizationQueryParametersError", - Description: "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeNoAccessKey: { - Code: "AccessDenied", - Description: "No AWSAccessKey was presented", - HTTPStatusCode: http.StatusForbidden, - }, - ErrCodeInvalidToken: { - Code: "InvalidTokenId", - Description: "The security token included in the request is invalid", - HTTPStatusCode: http.StatusForbidden, - }, - - // S3 extensions. 
- ErrCodeInvalidObjectName: { - Code: "InvalidObjectName", - Description: "Object name contains unsupported characters.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidObjectNamePrefixSlash: { - Code: "InvalidObjectName", - Description: "Object name contains a leading slash.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeClientDisconnected: { - Code: "ClientDisconnected", - Description: "Client disconnected before response was ready", - HTTPStatusCode: 499, // No official code, use nginx value. - }, - ErrCodeOperationTimedOut: { - Code: "RequestTimeout", - Description: "A timeout occurred while trying to lock a resource, please reduce your request rate", - HTTPStatusCode: http.StatusServiceUnavailable, - }, - ErrCodeOperationMaxedOut: { - Code: "SlowDown", - Description: "A timeout exceeded while waiting to proceed with the request, please reduce your request rate", - HTTPStatusCode: http.StatusServiceUnavailable, - }, - ErrCodeUnsupportedMetadata: { - Code: "InvalidArgument", - Description: "Your metadata headers are not supported.", - HTTPStatusCode: http.StatusBadRequest, - }, - // Generic Invalid-Request error. Should be used for response errors only for unlikely - // corner case errors for which introducing new APIErrCodeorCode is not worth it. LogIf() - // should be used to log the error at the source of the error for debugging purposes. 
- ErrCodeInvalidRequest: { - Code: "InvalidRequest", - Description: "Invalid Request", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeIncorrectContinuationToken: { - Code: "InvalidArgument", - Description: "The continuation token provided is incorrect", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidFormatAccessKey: { - Code: "InvalidAccessKeyId", - Description: "The Access Key Id you provided contains invalid characters.", - HTTPStatusCode: http.StatusBadRequest, - }, - // S3 Select API Errors - ErrCodeEmptyRequestBody: { - Code: "EmptyRequestBody", - Description: "Request body cannot be empty.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeUnsupportedFunction: { - Code: "UnsupportedFunction", - Description: "Encountered an unsupported SQL function.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidDataSource: { - Code: "InvalidDataSource", - Description: "Invalid data source type. Only CSV and JSON are supported at this time.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidExpressionType: { - Code: "InvalidExpressionType", - Description: "The ExpressionType is invalid. Only SQL expressions are supported at this time.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeBusy: { - Code: "Busy", - Description: "The service is unavailable. 
Please retry.", - HTTPStatusCode: http.StatusServiceUnavailable, - }, - ErrCodeUnauthorizedAccess: { - Code: "UnauthorizedAccess", - Description: "You are not authorized to perform this operation", - HTTPStatusCode: http.StatusUnauthorized, - }, - ErrCodeExpressionTooLong: { - Code: "ExpressionTooLong", - Description: "The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeIllegalSQLFunctionArgument: { - Code: "IllegalSqlFunctionArgument", - Description: "Illegal argument was used in the SQL function.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidKeyPath: { - Code: "InvalidKeyPath", - Description: "Key path in the SQL expression is invalid.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidCompressionFormat: { - Code: "InvalidCompressionFormat", - Description: "The file is not in a supported compression format. Only GZIP is supported at this time.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidFileHeaderInfo: { - Code: "InvalidFileHeaderInfo", - Description: "The FileHeaderInfo is invalid. Only NONE, USE, and IGNORE are supported.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidJSONType: { - Code: "InvalidJsonType", - Description: "The JsonType is invalid. Only DOCUMENT and LINES are supported at this time.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidQuoteFields: { - Code: "InvalidQuoteFields", - Description: "The QuoteFields is invalid. Only ALWAYS and ASNEEDED are supported.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidRequestParameter: { - Code: "InvalidRequestParameter", - Description: "The value of a parameter in SelectRequest element is invalid. 
Check the service API documentation and try again.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidDataType: { - Code: "InvalidDataType", - Description: "The SQL expression contains an invalid data type.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidTextEncoding: { - Code: "InvalidTextEncoding", - Description: "Invalid encoding type. Only UTF-8 encoding is supported at this time.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidTableAlias: { - Code: "InvalidTableAlias", - Description: "The SQL expression contains an invalid table alias.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeMissingRequiredParameter: { - Code: "MissingRequiredParameter", - Description: "The SelectRequest entity is missing a required parameter. Check the service documentation and try again.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeObjectSerializationConflict: { - Code: "ObjectSerializationConflict", - Description: "The SelectRequest entity can only contain one of CSV or JSON. Check the service documentation and try again.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeUnsupportedSQLOperation: { - Code: "UnsupportedSqlOperation", - Description: "Encountered an unsupported SQL operation.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeUnsupportedSQLStructure: { - Code: "UnsupportedSqlStructure", - Description: "Encountered an unsupported SQL structure. 
Check the SQL Reference.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeUnsupportedSyntax: { - Code: "UnsupportedSyntax", - Description: "Encountered invalid syntax.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeUnsupportedRangeHeader: { - Code: "UnsupportedRangeHeader", - Description: "Range header is not supported for this operation.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeLexerInvalidChar: { - Code: "LexerInvalidChar", - Description: "The SQL expression contains an invalid character.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeLexerInvalidOperator: { - Code: "LexerInvalidOperator", - Description: "The SQL expression contains an invalid literal.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeLexerInvalidLiteral: { - Code: "LexerInvalidLiteral", - Description: "The SQL expression contains an invalid operator.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeLexerInvalidIONLiteral: { - Code: "LexerInvalidIONLiteral", - Description: "The SQL expression contains an invalid operator.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseExpectedDatePart: { - Code: "ParseExpectedDatePart", - Description: "Did not find the expected date part in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseExpectedKeyword: { - Code: "ParseExpectedKeyword", - Description: "Did not find the expected keyword in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseExpectedTokenType: { - Code: "ParseExpectedTokenType", - Description: "Did not find the expected token in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseExpected2TokenTypes: { - Code: "ParseExpected2TokenTypes", - Description: "Did not find the expected token in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseExpectedNumber: { - Code: "ParseExpectedNumber", - Description: "Did not find the expected number in the SQL 
expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseExpectedRightParenBuiltinFunctionCall: { - Code: "ParseExpectedRightParenBuiltinFunctionCall", - Description: "Did not find the expected right parenthesis character in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseExpectedTypeName: { - Code: "ParseExpectedTypeName", - Description: "Did not find the expected type name in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseExpectedWhenClause: { - Code: "ParseExpectedWhenClause", - Description: "Did not find the expected WHEN clause in the SQL expression. CASE is not supported.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseUnsupportedToken: { - Code: "ParseUnsupportedToken", - Description: "The SQL expression contains an unsupported token.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseUnsupportedLiteralsGroupBy: { - Code: "ParseUnsupportedLiteralsGroupBy", - Description: "The SQL expression contains an unsupported use of GROUP BY.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseExpectedMember: { - Code: "ParseExpectedMember", - Description: "The SQL expression contains an unsupported use of MEMBER.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseUnsupportedSelect: { - Code: "ParseUnsupportedSelect", - Description: "The SQL expression contains an unsupported use of SELECT.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseUnsupportedCase: { - Code: "ParseUnsupportedCase", - Description: "The SQL expression contains an unsupported use of CASE.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseUnsupportedCaseClause: { - Code: "ParseUnsupportedCaseClause", - Description: "The SQL expression contains an unsupported use of CASE.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseUnsupportedAlias: { - Code: "ParseUnsupportedAlias", - Description: "The SQL expression contains an unsupported use 
of ALIAS.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseUnsupportedSyntax: { - Code: "ParseUnsupportedSyntax", - Description: "The SQL expression contains unsupported syntax.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseUnknownOperator: { - Code: "ParseUnknownOperator", - Description: "The SQL expression contains an invalid operator.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseMissingIdentAfterAt: { - Code: "ParseMissingIdentAfterAt", - Description: "Did not find the expected identifier after the @ symbol in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseUnexpectedOperator: { - Code: "ParseUnexpectedOperator", - Description: "The SQL expression contains an unexpected operator.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseUnexpectedTerm: { - Code: "ParseUnexpectedTerm", - Description: "The SQL expression contains an unexpected term.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseUnexpectedToken: { - Code: "ParseUnexpectedToken", - Description: "The SQL expression contains an unexpected token.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseUnexpectedKeyword: { - Code: "ParseUnexpectedKeyword", - Description: "The SQL expression contains an unexpected keyword.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseExpectedExpression: { - Code: "ParseExpectedExpression", - Description: "Did not find the expected SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseExpectedLeftParenAfterCast: { - Code: "ParseExpectedLeftParenAfterCast", - Description: "Did not find expected the left parenthesis in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseExpectedLeftParenValueConstructor: { - Code: "ParseExpectedLeftParenValueConstructor", - Description: "Did not find expected the left parenthesis in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - 
ErrCodeParseExpectedLeftParenBuiltinFunctionCall: { - Code: "ParseExpectedLeftParenBuiltinFunctionCall", - Description: "Did not find the expected left parenthesis in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseExpectedArgumentDelimiter: { - Code: "ParseExpectedArgumentDelimiter", - Description: "Did not find the expected argument delimiter in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseCastArity: { - Code: "ParseCastArity", - Description: "The SQL expression CAST has incorrect arity.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseInvalidTypeParam: { - Code: "ParseInvalidTypeParam", - Description: "The SQL expression contains an invalid parameter value.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseEmptySelect: { - Code: "ParseEmptySelect", - Description: "The SQL expression contains an empty SELECT.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseSelectMissingFrom: { - Code: "ParseSelectMissingFrom", - Description: "GROUP is not supported in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseExpectedIdentForGroupName: { - Code: "ParseExpectedIdentForGroupName", - Description: "GROUP is not supported in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseExpectedIdentForAlias: { - Code: "ParseExpectedIdentForAlias", - Description: "Did not find the expected identifier for the alias in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseUnsupportedCallWithStar: { - Code: "ParseUnsupportedCallWithStar", - Description: "Only COUNT with (*) as a parameter is supported in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseNonUnaryAgregateFunctionCall: { - Code: "ParseNonUnaryAgregateFunctionCall", - Description: "Only one argument is supported for aggregate functions in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, 
- }, - ErrCodeParseMalformedJoin: { - Code: "ParseMalformedJoin", - Description: "JOIN is not supported in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseExpectedIdentForAt: { - Code: "ParseExpectedIdentForAt", - Description: "Did not find the expected identifier for AT name in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseAsteriskIsNotAloneInSelectList: { - Code: "ParseAsteriskIsNotAloneInSelectList", - Description: "Other expressions are not allowed in the SELECT list when '*' is used without dot notation in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseCannotMixSqbAndWildcardInSelectList: { - Code: "ParseCannotMixSqbAndWildcardInSelectList", - Description: "Cannot mix [] and * in the same expression in a SELECT list in SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeParseInvalidContextForWildcardInSelectList: { - Code: "ParseInvalidContextForWildcardInSelectList", - Description: "Invalid use of * in SELECT list in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeIncorrectSQLFunctionArgumentType: { - Code: "IncorrectSqlFunctionArgumentType", - Description: "Incorrect type of arguments in function call in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeValueParseFailure: { - Code: "ValueParseFailure", - Description: "Time stamp parse failure in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeEvaluatorInvalidArguments: { - Code: "EvaluatorInvalidArguments", - Description: "Incorrect number of arguments in the function call in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeIntegerOverflow: { - Code: "IntegerOverflow", - Description: "Int overflow or underflow in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeLikeInvalidInputs: { - Code: "LikeInvalidInputs", - Description: "Invalid argument 
given to the LIKE clause in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeCastFailed: { - Code: "CastFailed", - Description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidCast: { - Code: "InvalidCast", - Description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeEvaluatorInvalidTimestampFormatPattern: { - Code: "EvaluatorInvalidTimestampFormatPattern", - Description: "Time stamp format pattern requires additional fields in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeEvaluatorInvalidTimestampFormatPatternSymbolForParsing: { - Code: "EvaluatorInvalidTimestampFormatPatternSymbolForParsing", - Description: "Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeEvaluatorTimestampFormatPatternDuplicateFields: { - Code: "EvaluatorTimestampFormatPatternDuplicateFields", - Description: "Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeEvaluatorTimestampFormatPatternHourClockAmPmMismatch: { - Code: "EvaluatorUnterminatedTimestampFormatPatternToken", - Description: "Time stamp format pattern contains unterminated token in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeEvaluatorUnterminatedTimestampFormatPatternToken: { - Code: "EvaluatorInvalidTimestampFormatPatternToken", - Description: "Time stamp format pattern contains an invalid token in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeEvaluatorInvalidTimestampFormatPatternToken: { - Code: "EvaluatorInvalidTimestampFormatPatternToken", - 
Description: "Time stamp format pattern contains an invalid token in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeEvaluatorInvalidTimestampFormatPatternSymbol: { - Code: "EvaluatorInvalidTimestampFormatPatternSymbol", - Description: "Time stamp format pattern contains an invalid symbol in the SQL expression.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeEvaluatorBindingDoesNotExist: { - Code: "ErrCodeEvaluatorBindingDoesNotExist", - Description: "A column name or a path provided does not exist in the SQL expression", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeMissingHeaders: { - Code: "MissingHeaders", - Description: "Some headers in the query are missing from the file. Check the file and try again.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodeInvalidColumnIndex: { - Code: "InvalidColumnIndex", - Description: "The column index is invalid. Please check the service documentation and try again.", - HTTPStatusCode: http.StatusBadRequest, - }, - ErrCodePostPolicyConditionInvalidFormat: { - Code: "PostPolicyInvalidKeyName", - Description: "Invalid according to Policy: Policy Conditions failed", - HTTPStatusCode: http.StatusForbidden, - }, - // Add your error structure here. - ErrCodeMalformedJSON: { - Code: "MalformedJSON", - Description: "The JSON was not well-formed or did not validate against our published format.", - HTTPStatusCode: http.StatusBadRequest, - }, -} - -// GetAPIError provides API Error for input API error code. -func GetAPIError(code ErrorCode) APIError { - return errorCodeResponse[code] -} - -// -//// STSErrorCode type of error status. 
-//type STSErrorCode int -// -//// STSError structure -//type STSError struct { -// Code string -// Description string -// HTTPStatusCode int -//} -// -//// Error codes,list - http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithSAML.html -//const ( -// ErrSTSNone STSErrorCode = iota -// ErrSTSAccessDenied -// ErrSTSMissingParameter -// ErrSTSInvalidParameterValue -// ErrSTSInternalError -//) -// -//type stsErrorCodeMap map[STSErrorCode]STSError -// -////ToSTSErr code to err -//func (e stsErrorCodeMap) ToSTSErr(errCode STSErrorCode) STSError { -// apiErr, ok := e[errCode] -// if !ok { -// return e[ErrSTSInternalError] -// } -// return apiErr -//} -// -//// StsErrCodes error code to STSError structure, these fields carry respective -//// descriptions for all the error responses. -//var StsErrCodes = stsErrorCodeMap{ -// ErrSTSAccessDenied: { -// Code: "AccessDenied", -// Description: "Generating temporary credentials not allowed for this request.", -// HTTPStatusCode: http.StatusForbidden, -// }, -// ErrSTSMissingParameter: { -// Code: "MissingParameter", -// Description: "A required parameter for the specified action is not supplied.", -// HTTPStatusCode: http.StatusBadRequest, -// }, -// ErrSTSInvalidParameterValue: { -// Code: "InvalidParameterValue", -// Description: "An invalid or out-of-range value was supplied for the input parameter.", -// HTTPStatusCode: http.StatusBadRequest, -// }, -// ErrSTSInternalError: { -// Code: "InternalError", -// Description: "We encountered an internal error generating credentials, please try again.", -// HTTPStatusCode: http.StatusInternalServerError, -// }, -//} diff --git a/s3/handlers/services.go b/s3/handlers/services.go deleted file mode 100644 index 0178e083b..000000000 --- a/s3/handlers/services.go +++ /dev/null @@ -1,46 +0,0 @@ -package handlers - -import ( - "context" - "net/http" - - "github.com/bittorrent/go-btfs/s3/action" -) - -type CorsService interface { - GetAllowOrigins() []string - 
GetAllowMethods() []string - GetAllowHeaders() []string -} - -type AccessKeyService interface { - Generate() (record *AccessKeyRecord, err error) - Enable(key string) (err error) - Disable(key string) (err error) - Reset(key string) (err error) - Delete(key string) (err error) - Get(key string) (record *AccessKeyRecord, err error) - List() (list []*AccessKeyRecord, err error) -} - -type AuthService interface { - VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *AccessKeyRecord, err ErrorCode) -} - -type BucketService interface { - CheckACL(accessKeyRecord *AccessKeyRecord, bucketName string, action action.Action) (err error) - CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error - GetBucketMeta(ctx context.Context, bucket string) (meta BucketMetadata, err error) - HasBucket(ctx context.Context, bucket string) bool - SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) - DeleteBucket(ctx context.Context, bucket string) error - GetAllBucketsOfUser(username string) (list []*BucketMetadata, err error) - UpdateBucketAcl(ctx context.Context, bucket, acl string) error - GetBucketAcl(ctx context.Context, bucket string) (string, error) -} - -type ObjectService interface { -} - -type MultipartService interface { -} diff --git a/s3/handlers/services_errors.go b/s3/handlers/services_errors.go deleted file mode 100644 index 13533b885..000000000 --- a/s3/handlers/services_errors.go +++ /dev/null @@ -1,19 +0,0 @@ -package handlers - -import "errors" - -var ( - ErrSginVersionNotSupport = errors.New("sign version is not support") - - ErrInvalidArgument = errors.New("invalid argument") - - ErrInvalidBucketName = errors.New("bucket name is invalid") - ErrBucketNotFound = errors.New("bucket is not found") - ErrBucketAlreadyExists = errors.New("bucket is already exists") - ErrBucketAccessDenied = errors.New("bucket access denied") - ErrSetBucketEmptyFailed = errors.New("set bucket empty failed") - 
ErrCreateBucket = errors.New("create bucket failed") - ErrNoSuchUserPolicy = errors.New("no such user policy") - - ErrNotImplemented = errors.New("not implemented") -) diff --git a/s3/handlers/services_types.go b/s3/handlers/services_types.go deleted file mode 100644 index 30e1d9ba1..000000000 --- a/s3/handlers/services_types.go +++ /dev/null @@ -1,24 +0,0 @@ -package handlers - -import "time" - -type AccessKeyRecord struct { - Key string `json:"key"` - Secret string `json:"secret"` - Enable bool `json:"enable"` - IsDeleted bool `json:"is_deleted"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` -} - -// BucketMetadata contains bucket metadata. -type BucketMetadata struct { - Name string - Region string - Owner string - Acl string - Created time.Time -} - -type ObjectMetadata struct { -} diff --git a/s3/handlers/to_response_err.go b/s3/handlers/to_response_err.go new file mode 100644 index 000000000..3c456acfc --- /dev/null +++ b/s3/handlers/to_response_err.go @@ -0,0 +1,18 @@ +package handlers + +import ( + "github.com/bittorrent/go-btfs/s3/handlers/responses" + "github.com/bittorrent/go-btfs/s3/services" +) + +var toResponseErr = map[error]*responses.Error{ + services.ErrBucketNotFound: responses.ErrNoSuchBucket, +} + +func ToResponseErr(err error) (rerr *responses.Error) { + rerr, ok := toResponseErr[err] + if !ok { + rerr = responses.ErrInternalError + } + return +} diff --git a/s3/lock/lock.go b/s3/lock/lock.go deleted file mode 100644 index 1d457ac8d..000000000 --- a/s3/lock/lock.go +++ /dev/null @@ -1,217 +0,0 @@ -package lock - -import ( - "context" - "errors" - logging "github.com/ipfs/go-log/v2" - "path" - "sort" - "strings" - "sync" - "time" -) - -var log = logging.Logger("nslocker") - -// OperationTimedOut - a timeout occurred. -type OperationTimedOut struct{} - -func (e OperationTimedOut) Error() string { - return "Operation timed out" -} - -// RWLocker - locker interface to introduce GetRLock, RUnlock. 
-type RWLocker interface { - GetLock(ctx context.Context, timeout time.Duration) (lkCtx LockContext, timedOutErr error) - Unlock(cancel context.CancelFunc) - GetRLock(ctx context.Context, timeout time.Duration) (lkCtx LockContext, timedOutErr error) - RUnlock(cancel context.CancelFunc) -} - -// LockContext lock context holds the lock backed context and canceler for the context. -type LockContext struct { - ctx context.Context - cancel context.CancelFunc -} - -// Context returns lock context -func (l LockContext) Context() context.Context { - return l.ctx -} - -// Cancel function calls cancel() function -func (l LockContext) Cancel() { - if l.cancel != nil { - l.cancel() - } -} - -// NewNSLock - return a new name space lock map. -func NewNSLock() *NsLockMap { - return &NsLockMap{ - lockMap: make(map[string]*nsLock), - } -} - -// nsLock - provides primitives for locking critical namespace regions. -type nsLock struct { - ref int32 - *TRWMutex -} - -// NsLockMap - namespace lock map, provides primitives to Lock, -// Unlock, RLock and RUnlock. -type NsLockMap struct { - lockMap map[string]*nsLock - lockMapMutex sync.Mutex -} - -// Lock the namespace resource. -func (n *NsLockMap) lock(ctx context.Context, volume string, path string, readLock bool, timeout time.Duration) (locked bool) { - resource := PathJoin(volume, path) - - n.lockMapMutex.Lock() - nsLk, found := n.lockMap[resource] - if !found { - nsLk = &nsLock{ - TRWMutex: NewTRWMutex(), - } - } - nsLk.ref++ - n.lockMap[resource] = nsLk - n.lockMapMutex.Unlock() - - // Locking here will block (until timeout). 
- if readLock { - locked = nsLk.GetRLock(ctx, timeout) - } else { - locked = nsLk.GetLock(ctx, timeout) - } - - if !locked { // We failed to get the lock - // Decrement ref count since we failed to get the lock - n.lockMapMutex.Lock() - n.lockMap[resource].ref-- - if n.lockMap[resource].ref < 0 { - log.Error(errors.New("resource reference count was lower than 0")) - } - if n.lockMap[resource].ref == 0 { - // Remove from the map if there are no more references. - delete(n.lockMap, resource) - } - n.lockMapMutex.Unlock() - } - - return -} - -// Unlock the namespace resource. -func (n *NsLockMap) unlock(volume string, path string, readLock bool) { - resource := PathJoin(volume, path) - - n.lockMapMutex.Lock() - defer n.lockMapMutex.Unlock() - if _, found := n.lockMap[resource]; !found { - return - } - if readLock { - n.lockMap[resource].RUnlock() - } else { - n.lockMap[resource].Unlock() - } - n.lockMap[resource].ref-- - if n.lockMap[resource].ref < 0 { - log.Error(errors.New("resource reference count was lower than 0")) - } - if n.lockMap[resource].ref == 0 { - // Remove from the map if there are no more references. - delete(n.lockMap, resource) - } -} - -// localLockInstance - frontend/top-level interface for namespace locks. -type localLockInstance struct { - ns *NsLockMap - volume string - paths []string -} - -// NewNSLock - returns a lock instance for a given volume and -// path. The returned lockInstance object encapsulates the nsLockMap, -// volume, path and operation ID. -func (n *NsLockMap) NewNSLock(volume string, paths ...string) RWLocker { - sort.Strings(paths) - return &localLockInstance{n, volume, paths} -} - -// GetLock - block until write lock is taken or timeout has occurred. 
-func (li *localLockInstance) GetLock(ctx context.Context, timeout time.Duration) (_ LockContext, timedOutErr error) { - const readLock = false - success := make([]int, len(li.paths)) - for i, path := range li.paths { - if !li.ns.lock(ctx, li.volume, path, readLock, timeout) { - for si, sint := range success { - if sint == 1 { - li.ns.unlock(li.volume, li.paths[si], readLock) - } - } - return LockContext{}, OperationTimedOut{} - } - success[i] = 1 - } - return LockContext{ctx: ctx, cancel: func() {}}, nil -} - -// Unlock - block until write lock is released. -func (li *localLockInstance) Unlock(cancel context.CancelFunc) { - if cancel != nil { - cancel() - } - const readLock = false - for _, path := range li.paths { - li.ns.unlock(li.volume, path, readLock) - } -} - -// GetRLock - block until read lock is taken or timeout has occurred. -func (li *localLockInstance) GetRLock(ctx context.Context, timeout time.Duration) (_ LockContext, timedOutErr error) { - const readLock = true - success := make([]int, len(li.paths)) - for i, path := range li.paths { - if !li.ns.lock(ctx, li.volume, path, readLock, timeout) { - for si, sint := range success { - if sint == 1 { - li.ns.unlock(li.volume, li.paths[si], readLock) - } - } - return LockContext{}, OperationTimedOut{} - } - success[i] = 1 - } - return LockContext{ctx: ctx, cancel: func() {}}, nil -} - -// RUnlock - block until read lock is released. -func (li *localLockInstance) RUnlock(cancel context.CancelFunc) { - if cancel != nil { - cancel() - } - const readLock = true - for _, path := range li.paths { - li.ns.unlock(li.volume, path, readLock) - } -} - -// SlashSeparator - slash separator. -const SlashSeparator = "/" - -// PathJoin - like path.Join() but retains trailing SlashSeparator of the last element -func PathJoin(elem ...string) string { - trailingSlash := "" - if len(elem) > 0 { - if strings.HasSuffix(elem[len(elem)-1], SlashSeparator) { - trailingSlash = SlashSeparator - } - } - return path.Join(elem...) 
+ trailingSlash -} diff --git a/s3/lock/rwmutex.go b/s3/lock/rwmutex.go deleted file mode 100644 index 11416272c..000000000 --- a/s3/lock/rwmutex.go +++ /dev/null @@ -1,154 +0,0 @@ -package lock - -import ( - "context" - "math" - "math/rand" - "sync" - "time" -) - -// A TRWMutex is a mutual exclusion lock with timeouts. -type TRWMutex struct { - isWriteLock bool - ref int - mu sync.Mutex // Mutex to prevent multiple simultaneous locks -} - -// NewTRWMutex - initializes a new lsync RW mutex. -func NewTRWMutex() *TRWMutex { - return &TRWMutex{} -} - -// Lock holds a write lock on lm. -// -// If the lock is already in use, the calling go routine -// blocks until the mutex is available. -func (m *TRWMutex) Lock() { - const isWriteLock = true - m.lockLoop(context.Background(), math.MaxInt64, isWriteLock) -} - -// GetLock tries to get a write lock on lm before the timeout occurs. -func (m *TRWMutex) GetLock(ctx context.Context, timeout time.Duration) (locked bool) { - const isWriteLock = true - return m.lockLoop(ctx, timeout, isWriteLock) -} - -// RLock holds a read lock on lm. -// -// If one or more read lock are already in use, it will grant another lock. -// Otherwise the calling go routine blocks until the mutex is available. -func (m *TRWMutex) RLock() { - const isWriteLock = false - m.lockLoop(context.Background(), 1<<63-1, isWriteLock) -} - -// GetRLock tries to get a read lock on lm before the timeout occurs. 
-func (m *TRWMutex) GetRLock(ctx context.Context, timeout time.Duration) (locked bool) { - const isWriteLock = false - return m.lockLoop(ctx, timeout, isWriteLock) -} - -func (m *TRWMutex) lock(isWriteLock bool) (locked bool) { - m.mu.Lock() - defer m.mu.Unlock() - - if isWriteLock { - if m.ref == 0 && !m.isWriteLock { - m.ref = 1 - m.isWriteLock = true - locked = true - } - } else { - if !m.isWriteLock { - m.ref++ - locked = true - } - } - - return locked -} - -const ( - lockRetryInterval = 50 * time.Millisecond -) - -// lockLoop will acquire either a read or a write lock -// -// The call will block until the lock is granted using a built-in -// timing randomized back-off algorithm to try again until successful -func (m *TRWMutex) lockLoop(ctx context.Context, timeout time.Duration, isWriteLock bool) (locked bool) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - retryCtx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - for { - select { - case <-retryCtx.Done(): - // Caller context canceled or we timedout, - // return false anyways for both situations. - return false - default: - if m.lock(isWriteLock) { - return true - } - time.Sleep(time.Duration(r.Float64() * float64(lockRetryInterval))) - } - } -} - -// Unlock unlocks the write lock. -// -// It is a run-time error if lm is not locked on entry to Unlock. -func (m *TRWMutex) Unlock() { - isWriteLock := true - success := m.unlock(isWriteLock) - if !success { - panic("Trying to Unlock() while no Lock() is active") - } -} - -// RUnlock releases a read lock held on lm. -// -// It is a run-time error if lm is not locked on entry to RUnlock. -func (m *TRWMutex) RUnlock() { - isWriteLock := false - success := m.unlock(isWriteLock) - if !success { - panic("Trying to RUnlock() while no RLock() is active") - } -} - -func (m *TRWMutex) unlock(isWriteLock bool) (unlocked bool) { - m.mu.Lock() - defer m.mu.Unlock() - - // Try to release lock. 
- if isWriteLock { - if m.isWriteLock && m.ref == 1 { - m.ref = 0 - m.isWriteLock = false - unlocked = true - } - } else { - if !m.isWriteLock { - if m.ref > 0 { - m.ref-- - unlocked = true - } - } - } - - return unlocked -} - -// ForceUnlock will forcefully clear a write or read lock. -func (m *TRWMutex) ForceUnlock() { - m.mu.Lock() - defer m.mu.Unlock() - - m.ref = 0 - m.isWriteLock = false -} diff --git a/s3/providers/file_store.go b/s3/providers/file_store.go new file mode 100644 index 000000000..e5b72933e --- /dev/null +++ b/s3/providers/file_store.go @@ -0,0 +1,17 @@ +package providers + +import ( + shell "github.com/bittorrent/go-btfs-api" +) + +var _ FileStorer = (*FileStore)(nil) + +type FileStore struct { + *shell.Shell +} + +func NewFileStore() *FileStore { + return &FileStore{ + Shell: shell.NewLocalShell(), + } +} diff --git a/s3/providers/filestore/local_shell.go b/s3/providers/filestore/local_shell.go deleted file mode 100644 index 6653af683..000000000 --- a/s3/providers/filestore/local_shell.go +++ /dev/null @@ -1,18 +0,0 @@ -package filestore - -import ( - shell "github.com/bittorrent/go-btfs-api" - "github.com/bittorrent/go-btfs/s3/services" -) - -var _ services.FileStorer = (*LocalShell)(nil) - -type LocalShell struct { - *shell.Shell -} - -func NewLocalShell() *LocalShell { - return &LocalShell{ - Shell: shell.NewLocalShell(), - } -} diff --git a/s3/services/providerser.go b/s3/providers/proto.go similarity index 77% rename from s3/services/providerser.go rename to s3/providers/proto.go index 796291d97..23a741746 100644 --- a/s3/services/providerser.go +++ b/s3/providers/proto.go @@ -1,6 +1,7 @@ -package services +package providers import ( + "errors" "io" ) @@ -22,3 +23,9 @@ type StateStorer interface { Delete(key string) (err error) Iterate(prefix string, iterFunc StateStoreIterFunc) (err error) } + +type StateStoreIterFunc func(key, value []byte) (stop bool, err error) + +var ( + ErrStateStoreNotFound = errors.New("not found") +) diff --git 
a/s3/providers/providers.go b/s3/providers/providers.go index 21bdf1ca0..9e67f9de3 100644 --- a/s3/providers/providers.go +++ b/s3/providers/providers.go @@ -1,20 +1,16 @@ package providers -import ( - "github.com/bittorrent/go-btfs/s3/services" -) - -var _ services.Providerser = (*Providers)(nil) +var _ Providerser = (*Providers)(nil) type Providers struct { - statestore services.StateStorer - filestore services.FileStorer + stateStore StateStorer + fileStore FileStorer } -func NewProviders(statestore services.StateStorer, filestore services.FileStorer, options ...Option) (providers *Providers) { +func NewProviders(stateStore StateStorer, fileStore FileStorer, options ...Option) (providers *Providers) { providers = &Providers{ - statestore: statestore, - filestore: filestore, + stateStore: stateStore, + fileStore: fileStore, } for _, option := range options { option(providers) @@ -22,10 +18,10 @@ func NewProviders(statestore services.StateStorer, filestore services.FileStorer return } -func (p *Providers) GetStateStore() services.StateStorer { - return p.statestore +func (p *Providers) GetStateStore() StateStorer { + return p.stateStore } -func (p *Providers) GetFileStore() services.FileStorer { - return p.filestore +func (p *Providers) GetFileStore() FileStorer { + return p.fileStore } diff --git a/s3/providers/state_store.go b/s3/providers/state_store.go new file mode 100644 index 000000000..836d358a8 --- /dev/null +++ b/s3/providers/state_store.go @@ -0,0 +1,42 @@ +package providers + +import ( + "errors" + "github.com/bittorrent/go-btfs/transaction/storage" +) + +var _ StateStorer = (*StateStore)(nil) + +type StateStore struct { + proxy storage.StateStorer +} + +func NewStorageStateStoreProxy(proxy storage.StateStorer) *StateStore { + return &StateStore{ + proxy: proxy, + } +} + +func (s *StateStore) Put(key string, val interface{}) (err error) { + return s.proxy.Put(key, val) +} + +func (s *StateStore) Get(key string, i interface{}) (err error) { + err = 
s.proxy.Get(key, i) + if errors.Is(err, storage.ErrNotFound) { + err = ErrStateStoreNotFound + } + return +} + +func (s *StateStore) Delete(key string) (err error) { + err = s.proxy.Delete(key) + if errors.Is(err, storage.ErrNotFound) { + err = ErrStateStoreNotFound + } + return +} + +func (s *StateStore) Iterate(prefix string, iterFunc StateStoreIterFunc) (err error) { + return s.proxy.Iterate(prefix, storage.StateIterFunc(iterFunc)) +} diff --git a/s3/providers/statestore/storage_proxy.go b/s3/providers/statestore/storage_proxy.go deleted file mode 100644 index 4ef6045b0..000000000 --- a/s3/providers/statestore/storage_proxy.go +++ /dev/null @@ -1,43 +0,0 @@ -package statestore - -import ( - "errors" - "github.com/bittorrent/go-btfs/s3/services" - "github.com/bittorrent/go-btfs/transaction/storage" -) - -var _ services.StateStorer = (*StorageProxy)(nil) - -type StorageProxy struct { - proxy storage.StateStorer -} - -func NewStorageStateStoreProxy(proxy storage.StateStorer) *StorageProxy { - return &StorageProxy{ - proxy: proxy, - } -} - -func (s *StorageProxy) Put(key string, val interface{}) (err error) { - return s.proxy.Put(key, val) -} - -func (s *StorageProxy) Get(key string, i interface{}) (err error) { - err = s.proxy.Get(key, i) - if errors.Is(err, storage.ErrNotFound) { - err = services.ErrStateStoreNotFound - } - return -} - -func (s *StorageProxy) Delete(key string) (err error) { - err = s.proxy.Delete(key) - if errors.Is(err, storage.ErrNotFound) { - err = services.ErrStateStoreNotFound - } - return -} - -func (s *StorageProxy) Iterate(prefix string, iterFunc services.StateStoreIterFunc) (err error) { - return s.proxy.Iterate(prefix, storage.StateIterFunc(iterFunc)) -} diff --git a/s3/routers/routers.go b/s3/routers/routers.go index b56f8c373..b3ce65dfe 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -1,15 +1,16 @@ package routers import ( + "github.com/bittorrent/go-btfs/s3/handlers" "github.com/gorilla/mux" "net/http" ) type 
Routers struct { - handlers Handlerser + handlers handlers.Handlerser } -func NewRouters(handlers Handlerser, options ...Option) (routers *Routers) { +func NewRouters(handlers handlers.Handlerser, options ...Option) (routers *Routers) { routers = &Routers{ handlers: handlers, } diff --git a/s3/server/routerser.go b/s3/routers/routerser.go similarity index 82% rename from s3/server/routerser.go rename to s3/routers/routerser.go index ca4c12b2d..665e1271b 100644 --- a/s3/server/routerser.go +++ b/s3/routers/routerser.go @@ -1,4 +1,4 @@ -package server +package routers import "net/http" diff --git a/s3/server.go b/s3/server.go index 8348244a8..5271b609a 100644 --- a/s3/server.go +++ b/s3/server.go @@ -3,8 +3,6 @@ package s3 import ( "github.com/bittorrent/go-btfs/s3/handlers" "github.com/bittorrent/go-btfs/s3/providers" - "github.com/bittorrent/go-btfs/s3/providers/filestore" - "github.com/bittorrent/go-btfs/s3/providers/statestore" "github.com/bittorrent/go-btfs/s3/routers" "github.com/bittorrent/go-btfs/s3/server" "github.com/bittorrent/go-btfs/s3/services/accesskey" @@ -22,8 +20,8 @@ var ( func GetProviders(storageStore storage.StateStorer) *providers.Providers { once.Do(func() { - sstore := statestore.NewStorageStateStoreProxy(storageStore) - fstore := filestore.NewLocalShell() + sstore := providers.NewStorageStateStoreProxy(storageStore) + fstore := providers.NewFileStore() ps = providers.NewProviders(sstore, fstore) }) diff --git a/s3/server/server.go b/s3/server/server.go index d18303f20..5973e2c4f 100644 --- a/s3/server/server.go +++ b/s3/server/server.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "github.com/bittorrent/go-btfs/s3/routers" "net/http" "sync" ) @@ -16,13 +17,14 @@ var ( ) type Server struct { - routers Routerser - address string + routers routers.Routerser + address string + shutdown func() error mutex sync.Mutex } -func NewServer(routers Routerser, options ...Option) (s *Server) { +func NewServer(routers routers.Routerser, options 
...Option) (s *Server) { s = &Server{ routers: routers, address: defaultServerAddress, diff --git a/s3/services/accesskey/service.go b/s3/services/accesskey/service.go index 30d01bd57..8aceaba0f 100644 --- a/s3/services/accesskey/service.go +++ b/s3/services/accesskey/service.go @@ -4,7 +4,7 @@ import ( "context" "errors" "github.com/bittorrent/go-btfs/s3/ctxmu" - "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/providers" "github.com/bittorrent/go-btfs/s3/services" "github.com/bittorrent/go-btfs/transaction/storage" "github.com/bittorrent/go-btfs/utils" @@ -18,17 +18,17 @@ const ( defaultUpdateTimeoutMS = 200 ) -var _ handlers.AccessKeyService = (*Service)(nil) +var _ services.AccessKeyService = (*Service)(nil) type Service struct { - providers services.Providerser + providers providers.Providerser secretLength int storeKeyPrefix string locks *ctxmu.MultiCtxRWMutex updateTimeout time.Duration } -func NewService(providers services.Providerser, options ...Option) (svc *Service) { +func NewService(providers providers.Providerser, options ...Option) (svc *Service) { svc = &Service{ providers: providers, secretLength: defaultSecretLength, @@ -42,9 +42,9 @@ func NewService(providers services.Providerser, options ...Option) (svc *Service return svc } -func (svc *Service) Generate() (record *handlers.AccessKeyRecord, err error) { +func (svc *Service) Generate() (record *services.AccessKey, err error) { now := time.Now() - record = &handlers.AccessKeyRecord{ + record = &services.AccessKey{ Key: svc.newKey(), Secret: svc.newSecret(), Enable: true, @@ -88,21 +88,21 @@ func (svc *Service) Delete(key string) (err error) { return } -func (svc *Service) Get(key string) (record *handlers.AccessKeyRecord, err error) { - record = &handlers.AccessKeyRecord{} - err = svc.providers.GetStateStore().Get(svc.getStoreKey(key), record) - if err != nil && !errors.Is(err, services.ErrStateStoreNotFound) { +func (svc *Service) Get(key string) (ack 
*services.AccessKey, err error) { + ack = &services.AccessKey{} + err = svc.providers.GetStateStore().Get(svc.getStoreKey(key), ack) + if err != nil && !errors.Is(err, providers.ErrStateStoreNotFound) { return } - if errors.Is(err, services.ErrStateStoreNotFound) || record.IsDeleted { - err = handlers.ErrAccessKeyIsNotFound + if errors.Is(err, providers.ErrStateStoreNotFound) || ack.IsDeleted { + err = services.ErrAccessKeyIsNotFound } return } -func (svc *Service) List() (list []*handlers.AccessKeyRecord, err error) { +func (svc *Service) List() (list []*services.AccessKey, err error) { err = svc.providers.GetStateStore().Iterate(svc.storeKeyPrefix, func(key, _ []byte) (stop bool, er error) { - record := &handlers.AccessKeyRecord{} + record := &services.AccessKey{} er = svc.providers.GetStateStore().Get(string(key), record) if er != nil { return @@ -147,7 +147,7 @@ func (svc *Service) update(key string, args *updateArgs) (err error) { } defer svc.locks.Unlock(key) - record := &handlers.AccessKeyRecord{} + record := &services.AccessKey{} stk := svc.getStoreKey(key) err = svc.providers.GetStateStore().Get(stk, record) @@ -155,7 +155,7 @@ func (svc *Service) update(key string, args *updateArgs) (err error) { return } if errors.Is(err, storage.ErrNotFound) || record.IsDeleted { - err = handlers.ErrAccessKeyIsNotFound + err = services.ErrAccessKeyIsNotFound return } diff --git a/s3/services/accesskey/service_instance.go b/s3/services/accesskey/service_instance.go index 6d7686f55..ab9d20fc5 100644 --- a/s3/services/accesskey/service_instance.go +++ b/s3/services/accesskey/service_instance.go @@ -1,7 +1,7 @@ package accesskey import ( - "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/providers" "github.com/bittorrent/go-btfs/s3/services" "sync" ) @@ -10,7 +10,7 @@ var service *Service var once sync.Once -func InitService(providers services.Providerser, options ...Option) { +func InitService(providers providers.Providerser, options 
...Option) { once.Do(func() { service = NewService(providers, options...) }) @@ -20,7 +20,7 @@ func GetService() *Service { return service } -func Generate() (record *handlers.AccessKeyRecord, err error) { +func Generate() (ack *services.AccessKey, err error) { return service.Generate() } @@ -40,10 +40,10 @@ func Delete(key string) (err error) { return service.Delete(key) } -func Get(key string) (record *handlers.AccessKeyRecord, err error) { +func Get(key string) (record *services.AccessKey, err error) { return service.Get(key) } -func List() (list []*handlers.AccessKeyRecord, err error) { +func List() (list []*services.AccessKey, err error) { return service.List() } diff --git a/s3/services/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go index 948c99149..fd73eaba9 100644 --- a/s3/services/auth/check_handler_auth.go +++ b/s3/services/auth/check_handler_auth.go @@ -3,11 +3,12 @@ package auth import ( "context" "encoding/hex" + "github.com/bittorrent/go-btfs/s3/handlers/responses" + "github.com/bittorrent/go-btfs/s3/services" "net/http" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/etag" - "github.com/bittorrent/go-btfs/s3/handlers" "github.com/bittorrent/go-btfs/s3/utils/hash" ) @@ -16,28 +17,26 @@ import ( // - validates the policy action if anonymous tests bucket policies if any, // for authenticated requests validates IAM policies. // -// returns APIErrorCode if any to be replied to the client. +// returns APIErrorcode if any to be replied to the client. // Additionally, returns the accessKey used in the request, and if this request is by an admin. 
-func (s *Service) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request) (cred *handlers.AccessKeyRecord, s3Err handlers.ErrorCode) { +func (s *Service) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request) (cred *services.AccessKey, rErr *responses.Error) { // check signature switch GetRequestAuthType(r) { case AuthTypeSigned, AuthTypePresigned: region := "" - if s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3); s3Err != handlers.ErrCodeNone { - return cred, s3Err + if rErr = s.IsReqAuthenticated(ctx, r, region, ServiceS3); rErr != nil { + return } - cred, s3Err = s.getReqAccessKeyV4(r, region, ServiceS3) + cred, rErr = s.getReqAccessKeyV4(r, region, ServiceS3) default: - return cred, handlers.ErrCodeSignatureVersionNotSupported - } - if s3Err != handlers.ErrCodeNone { - return cred, s3Err + rErr = responses.ErrSignatureVersionNotSupported + return } - return cred, handlers.ErrCodeNone + return } -func (s *Service) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error handlers.ErrorCode) { +func (s *Service) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) *responses.Error { sha256sum := getContentSha256Cksum(r, stype) switch { case IsRequestSignatureV4(r): @@ -45,18 +44,19 @@ func (s *Service) ReqSignatureV4Verify(r *http.Request, region string, stype ser case isRequestPresignedSignatureV4(r): return s.doesPresignedSignatureMatch(sha256sum, r, region, stype) default: - return handlers.ErrCodeAccessDenied + return responses.ErrAccessDenied } } // IsReqAuthenticated Verify if request has valid AWS Signature Version '4'. 
-func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error handlers.ErrorCode) { - if errCode := s.ReqSignatureV4Verify(r, region, stype); errCode != handlers.ErrCodeNone { - return errCode +func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (rErr *responses.Error) { + if rErr = s.ReqSignatureV4Verify(r, region, stype); rErr != nil { + return } clientETag, err := etag.FromContentMD5(r.Header) if err != nil { - return handlers.ErrCodeInvalidDigest + rErr = responses.ErrInvalidDigest + return } // Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned) @@ -66,13 +66,15 @@ func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio if sha256Sum, ok := r.Form[consts.AmzContentSha256]; ok && len(sha256Sum) > 0 { contentSHA256, err = hex.DecodeString(sha256Sum[0]) if err != nil { - return handlers.ErrCodeContentSHA256Mismatch + rErr = responses.ErrContentSHA256Mismatch + return } } } else if _, ok := r.Header[consts.AmzContentSha256]; !skipSHA256 && ok { contentSHA256, err = hex.DecodeString(r.Header.Get(consts.AmzContentSha256)) if err != nil || len(contentSHA256) == 0 { - return handlers.ErrCodeContentSHA256Mismatch + rErr = responses.ErrContentSHA256Mismatch + return } } @@ -80,39 +82,40 @@ func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio // The verification happens implicit during reading. 
reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1) if err != nil { - return handlers.ErrCodeInternalError + rErr = responses.ErrInternalError + return } r.Body = reader - return handlers.ErrCodeNone + return } //// ValidateAdminSignature validate admin Signature -//func (s *Service) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (Credentials, map[string]interface{}, bool, handlers.ErrorCode) { +//func (s *Service) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (Credentials, map[string]interface{}, bool, handlers.Errorcode) { // var cred Credentials // var owner bool -// s3Err := handlers.ErrCodeAccessDenied +// s3Err := handlers.ErrcodeAccessDenied // if _, ok := r.Header[consts.AmzContentSha256]; ok && // GetRequestAuthType(r) == AuthTypeSigned { // // We only support admin credentials to access admin APIs. // cred, s3Err = GetReqAccessKeyV4(r, region, ServiceS3) -// if s3Err != handlers.ErrCodeNone { +// if s3Err != handlers.ErrcodeNone { // return cred, nil, owner, s3Err // } // // // we only support V4 (no presign) with auth body // s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3) // } -// if s3Err != handlers.ErrCodeNone { +// if s3Err != handlers.ErrcodeNone { // return cred, nil, owner, s3Err // } // -// return cred, nil, owner, handlers.ErrCodeNone +// return cred, nil, owner, handlers.ErrcodeNone //} //// -//func (s *Service) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err handlers.ErrorCode) { +//func (s *Service) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err handlers.Errorcode) { // switch GetRequestAuthType(r) { // case AuthTypeUnknown: -// s3Err = handlers.ErrCodeSignatureVersionNotSupported +// s3Err = handlers.ErrcodeSignatureVersionNotSupported // case AuthTypeSignedV2, AuthTypePresignedV2: // cred, owner, s3Err = s.getReqAccessKeyV2(r) // case AuthTypeStreamingSigned, 
AuthTypePresigned, AuthTypeSigned: diff --git a/s3/services/auth/service.go b/s3/services/auth/service.go index 5ae457535..5e8678a28 100644 --- a/s3/services/auth/service.go +++ b/s3/services/auth/service.go @@ -2,20 +2,20 @@ package auth import ( "context" - "net/http" - - "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/handlers/responses" + "github.com/bittorrent/go-btfs/s3/providers" "github.com/bittorrent/go-btfs/s3/services" + "net/http" ) -var _ handlers.AuthService = (*Service)(nil) +var _ services.AuthService = (*Service)(nil) type Service struct { - providers services.Providerser - accessKeySvc handlers.AccessKeyService + providers providers.Providerser + accessKeySvc services.AccessKeyService } -func NewService(providers services.Providerser, accessKeySvc handlers.AccessKeyService, options ...Option) (svc *Service) { +func NewService(providers providers.Providerser, accessKeySvc services.AccessKeyService, options ...Option) (svc *Service) { svc = &Service{ providers: providers, accessKeySvc: accessKeySvc, @@ -26,6 +26,6 @@ func NewService(providers services.Providerser, accessKeySvc handlers.AccessKeyS return } -func (s *Service) VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *handlers.AccessKeyRecord, err handlers.ErrorCode) { +func (s *Service) VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *services.AccessKey, err *responses.Error) { return s.CheckRequestAuthTypeCredential(ctx, r) } diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go index d6f206706..ac443efaf 100644 --- a/s3/services/auth/signature-v4-parser.go +++ b/s3/services/auth/signature-v4-parser.go @@ -18,13 +18,14 @@ package auth import ( + "github.com/bittorrent/go-btfs/s3/handlers/responses" + "github.com/bittorrent/go-btfs/s3/services" "net/http" "net/url" "strings" "time" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/handlers" ) // 
credentialHeader data type represents structured form of Credential @@ -50,21 +51,21 @@ func (c credentialHeader) getScope() string { } // parse credentialHeader string into its structured form. -func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, aec handlers.ErrorCode) { +func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, rErr *responses.Error) { creds := strings.SplitN(strings.TrimSpace(credElement), "=", 2) if len(creds) != 2 { - return ch, handlers.ErrCodeMissingFields + return ch, responses.ErrMissingFields } if creds[0] != "Credential" { - return ch, handlers.ErrCodeMissingCredTag + return ch, responses.ErrMissingCredTag } credElements := strings.Split(strings.TrimSpace(creds[1]), consts.SlashSeparator) if len(credElements) < 5 { - return ch, handlers.ErrCodeCredMalformed + return ch, responses.ErrCredMalformed } accessKey := strings.Join(credElements[:len(credElements)-4], consts.SlashSeparator) // The access key may contain one or more `/` //if !IsAccessKeyValid(accessKey) { - // return ch, handlers.ErrCodeInvalidAccessKeyID + // return ch, handlers.ErrcodeInvalidAccessKeyID //} // Save access key id. cred := credentialHeader{ @@ -74,7 +75,7 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) var e error cred.scope.date, e = time.Parse(yyyymmdd, credElements[0]) if e != nil { - return ch, handlers.ErrCodeAuthorizationHeaderMalformed + return ch, responses.ErrAuthorizationHeaderMalformed } cred.scope.region = credElements[1] @@ -89,53 +90,53 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) } // Should validate region, only if region is set. 
if !isValidRegion(sRegion, region) { - return ch, handlers.ErrCodeAuthorizationHeaderMalformed + return ch, responses.ErrAuthorizationHeaderMalformed } if credElements[2] != string(stype) { //switch stype { //case ServiceSTS: - // return ch, handlers.ErrCodeAuthorizationHeaderMalformed + // return ch, handlers.ErrcodeAuthorizationHeaderMalformed //} - return ch, handlers.ErrCodeAuthorizationHeaderMalformed + return ch, responses.ErrAuthorizationHeaderMalformed } cred.scope.service = credElements[2] if credElements[3] != "aws4_request" { - return ch, handlers.ErrCodeAuthorizationHeaderMalformed + return ch, responses.ErrAuthorizationHeaderMalformed } cred.scope.request = credElements[3] - return cred, handlers.ErrCodeNone + return cred, nil } // Parse signature from signature tag. -func parseSignature(signElement string) (string, handlers.ErrorCode) { +func parseSignature(signElement string) (string, *responses.Error) { signFields := strings.Split(strings.TrimSpace(signElement), "=") if len(signFields) != 2 { - return "", handlers.ErrCodeMissingFields + return "", responses.ErrMissingFields } if signFields[0] != "Signature" { - return "", handlers.ErrCodeMissingSignTag + return "", responses.ErrMissingSignTag } if signFields[1] == "" { - return "", handlers.ErrCodeMissingFields + return "", responses.ErrMissingFields } signature := signFields[1] - return signature, handlers.ErrCodeNone + return signature, nil } // Parse slice of signed headers from signed headers tag. 
-func parseSignedHeader(signedHdrElement string) ([]string, handlers.ErrorCode) { +func parseSignedHeader(signedHdrElement string) ([]string, *responses.Error) { signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") if len(signedHdrFields) != 2 { - return nil, handlers.ErrCodeMissingFields + return nil, responses.ErrMissingFields } if signedHdrFields[0] != "SignedHeaders" { - return nil, handlers.ErrCodeMissingSignHeadersTag + return nil, responses.ErrMissingSignHeadersTag } if signedHdrFields[1] == "" { - return nil, handlers.ErrCodeMissingFields + return nil, responses.ErrMissingFields } signedHeaders := strings.Split(signedHdrFields[1], ";") - return signedHeaders, handlers.ErrCodeNone + return signedHeaders, nil } // signValues data type represents structured form of AWS Signature V4 header. @@ -162,81 +163,81 @@ type preSignValues struct { // querystring += &X-Amz-Signature=signature // // verifies if any of the necessary query params are missing in the presigned request. -func doesV4PresignParamsExist(query url.Values) handlers.ErrorCode { +func doesV4PresignParamsExist(query url.Values) *responses.Error { v4PresignQueryParams := []string{consts.AmzAlgorithm, consts.AmzCredential, consts.AmzSignature, consts.AmzDate, consts.AmzSignedHeaders, consts.AmzExpires} for _, v4PresignQueryParam := range v4PresignQueryParams { if _, ok := query[v4PresignQueryParam]; !ok { - return handlers.ErrCodeInvalidQueryParams + return responses.ErrInvalidQueryParams } } - return handlers.ErrCodeNone + return nil } // Parses all the presigned signature values into separate elements. -func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, aec handlers.ErrorCode) { +func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, rErr *responses.Error) { // verify whether the required query params exist. 
- aec = doesV4PresignParamsExist(query) - if aec != handlers.ErrCodeNone { - return psv, aec + rErr = doesV4PresignParamsExist(query) + if rErr != nil { + return psv, rErr } // Verify if the query algorithm is supported or not. if query.Get(consts.AmzAlgorithm) != signV4Algorithm { - return psv, handlers.ErrCodeAuthorizationHeaderMalformed + return psv, responses.ErrAuthorizationHeaderMalformed } // Initialize signature version '4' structured header. preSignV4Values := preSignValues{} // Save credential. - preSignV4Values.Credential, aec = parseCredentialHeader("Credential="+query.Get(consts.AmzCredential), region, stype) - if aec != handlers.ErrCodeNone { - return psv, aec + preSignV4Values.Credential, rErr = parseCredentialHeader("Credential="+query.Get(consts.AmzCredential), region, stype) + if rErr != nil { + return psv, rErr } var e error // Save date in native time.Time. preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get(consts.AmzDate)) if e != nil { - return psv, handlers.ErrCodeAuthorizationHeaderMalformed + return psv, responses.ErrAuthorizationHeaderMalformed } // Save expires in native time.Duration. preSignV4Values.Expires, e = time.ParseDuration(query.Get(consts.AmzExpires) + "s") if e != nil { - return psv, handlers.ErrCodeAuthorizationHeaderMalformed + return psv, responses.ErrAuthorizationHeaderMalformed } if preSignV4Values.Expires < 0 { - return psv, handlers.ErrCodeAuthorizationHeaderMalformed + return psv, responses.ErrAuthorizationHeaderMalformed } // Check if Expiry time is less than 7 days (value in seconds). if preSignV4Values.Expires.Seconds() > 604800 { - return psv, handlers.ErrCodeAuthorizationHeaderMalformed + return psv, responses.ErrAuthorizationHeaderMalformed } // Save signed headers. 
- preSignV4Values.SignedHeaders, aec = parseSignedHeader("SignedHeaders=" + query.Get(consts.AmzSignedHeaders)) - if aec != handlers.ErrCodeNone { - return psv, aec + preSignV4Values.SignedHeaders, rErr = parseSignedHeader("SignedHeaders=" + query.Get(consts.AmzSignedHeaders)) + if rErr != nil { + return psv, rErr } // Save signature. - preSignV4Values.Signature, aec = parseSignature("Signature=" + query.Get(consts.AmzSignature)) - if aec != handlers.ErrCodeNone { - return psv, aec + preSignV4Values.Signature, rErr = parseSignature("Signature=" + query.Get(consts.AmzSignature)) + if rErr != nil { + return psv, rErr } // Return structed form of signature query string. - return preSignV4Values, handlers.ErrCodeNone + return preSignV4Values, nil } // Parses signature version '4' header of the following form. // // Authorization: algorithm Credential=accessKeyID/credScope, \ // SignedHeaders=signedHeaders, Signature=signature -func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, aec handlers.ErrorCode) { +func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, rErr *responses.Error) { // credElement is fetched first to skip replacing the space in access key. credElement := strings.TrimPrefix(strings.Split(strings.TrimSpace(v4Auth), ",")[0], signV4Algorithm) // Replace all spaced strings, some clients can send spaced @@ -244,66 +245,65 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues // to make parsing easier. v4Auth = strings.ReplaceAll(v4Auth, " ", "") if v4Auth == "" { - return sv, handlers.ErrCodeAuthHeaderEmpty + return sv, responses.ErrAuthHeaderEmpty } // Verify if the header algorithm is supported or not. if !strings.HasPrefix(v4Auth, signV4Algorithm) { - return sv, handlers.ErrCodeSignatureVersionNotSupported + return sv, responses.ErrSignatureVersionNotSupported } // Strip off the Algorithm prefix. 
v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - return sv, handlers.ErrCodeMissingFields + return sv, responses.ErrMissingFields } // Initialize signature version '4' structured header. signV4Values := signValues{} - var s3Err handlers.ErrorCode // Save credentail values. - signV4Values.Credential, s3Err = parseCredentialHeader(strings.TrimSpace(credElement), region, stype) - if s3Err != handlers.ErrCodeNone { - return sv, s3Err + signV4Values.Credential, rErr = parseCredentialHeader(strings.TrimSpace(credElement), region, stype) + if rErr != nil { + return sv, rErr } // Save signed headers. - signV4Values.SignedHeaders, s3Err = parseSignedHeader(authFields[1]) - if s3Err != handlers.ErrCodeNone { - return sv, s3Err + signV4Values.SignedHeaders, rErr = parseSignedHeader(authFields[1]) + if rErr != nil { + return sv, rErr } // Save signature. - signV4Values.Signature, s3Err = parseSignature(authFields[2]) - if s3Err != handlers.ErrCodeNone { - return sv, s3Err + signV4Values.Signature, rErr = parseSignature(authFields[2]) + if rErr != nil { + return sv, rErr } // Return the structure here. - return signV4Values, handlers.ErrCodeNone + return signV4Values, nil } -func (s *Service) getReqAccessKeyV4(r *http.Request, region string, stype serviceType) (*handlers.AccessKeyRecord, handlers.ErrorCode) { - ch, s3Err := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) - if s3Err != handlers.ErrCodeNone { +func (s *Service) getReqAccessKeyV4(r *http.Request, region string, stype serviceType) (*services.AccessKey, *responses.Error) { + ch, rErr := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) + if rErr != nil { // Strip off the Algorithm prefix. 
v4Auth := strings.TrimPrefix(r.Header.Get("Authorization"), signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - return &handlers.AccessKeyRecord{}, handlers.ErrCodeMissingFields + return &services.AccessKey{}, responses.ErrMissingFields } - ch, s3Err = parseCredentialHeader(authFields[0], region, stype) - if s3Err != handlers.ErrCodeNone { - return &handlers.AccessKeyRecord{}, s3Err + ch, rErr = parseCredentialHeader(authFields[0], region, stype) + if rErr != nil { + return &services.AccessKey{}, rErr } } // check accessKey. record, err := s.accessKeySvc.Get(ch.accessKey) if err != nil { - return &handlers.AccessKeyRecord{}, handlers.ErrCodeNoSuchUserPolicy + return &services.AccessKey{}, responses.ErrNoSuchUserPolicy } - return record, handlers.ErrCodeNone + return record, nil } diff --git a/s3/services/auth/signature-v4-utils.go b/s3/services/auth/signature-v4-utils.go index 6128cc233..2279820d7 100644 --- a/s3/services/auth/signature-v4-utils.go +++ b/s3/services/auth/signature-v4-utils.go @@ -60,13 +60,13 @@ func contains(slice interface{}, elem interface{}) bool { } // extractSignedHeaders extract signed headers from Authorization header -func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, handlers.ErrorCode) { +func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, handlers.Errorcode) { reqHeaders := r.Header reqQueries := r.Form // find whether "host" is part of list of signed headers. - // if not return ErrCodeUnsignedHeaders. "host" is mandatory. + // if not return ErrcodeUnsignedHeaders. "host" is mandatory. 
if !contains(signedHeaders, "host") { - return nil, handlers.ErrCodeUnsignedHeaders + return nil, handlers.ErrcodeUnsignedHeaders } extractedSignedHeaders := make(http.Header) for _, header := range signedHeaders { @@ -116,10 +116,10 @@ func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, // calculation to be compatible with such clients. extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) default: - return nil, handlers.ErrCodeUnsignedHeaders + return nil, handlers.ErrcodeUnsignedHeaders } } - return extractedSignedHeaders, handlers.ErrCodeNone + return extractedSignedHeaders, handlers.ErrcodeNone } // Returns SHA256 for calculating canonical-request. diff --git a/s3/services/auth/signature-v4.go b/s3/services/auth/signature-v4.go index 989fedb38..e094db035 100644 --- a/s3/services/auth/signature-v4.go +++ b/s3/services/auth/signature-v4.go @@ -19,13 +19,13 @@ package auth import ( "crypto/subtle" + "github.com/bittorrent/go-btfs/s3/handlers/responses" "net/http" "net/url" "strconv" "time" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/handlers" "github.com/bittorrent/go-btfs/s3/set" "github.com/bittorrent/go-btfs/s3/utils" ) @@ -57,37 +57,40 @@ func compareSignatureV4(sig1, sig2 string) bool { // DoesPresignedSignatureMatch - Verify queryString headers with presigned signature // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html // -// returns handlers.ErrCodeNone if the signature matches. -func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) handlers.ErrorCode { +// returns handlers.ErrcodeNone if the signature matches. +func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) (rErr *responses.Error) { // Copy request req := *r // Parse request query string. 
- pSignValues, errCode := parsePreSignV4(req.Form, region, stype) - if errCode != handlers.ErrCodeNone { - return errCode + pSignValues, rErr := parsePreSignV4(req.Form, region, stype) + if rErr != nil { + return } // get access_info by accessKey cred, err := s.accessKeySvc.Get(pSignValues.Credential.accessKey) if err != nil { - return handlers.ErrCodeNoSuchUserPolicy + rErr = responses.ErrNoSuchUserPolicy + return } // Extract all the signed headers along with its values. - extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r) - if errCode != handlers.ErrCodeNone { - return errCode + extractedSignedHeaders, rErr := extractSignedHeaders(pSignValues.SignedHeaders, r) + if rErr != nil { + return } // If the host which signed the request is slightly ahead in time (by less than MaxSkewTime) the // request should still be allowed. if pSignValues.Date.After(time.Now().UTC().Add(consts.MaxSkewTime)) { - return handlers.ErrCodeRequestNotReadyYet + rErr = responses.ErrRequestNotReadyYet + return } if time.Now().UTC().Sub(pSignValues.Date) > pSignValues.Expires { - return handlers.ErrCodeExpiredPresignRequest + rErr = responses.ErrExpiredPresignRequest + return } // Save the date and expires. @@ -138,23 +141,27 @@ func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Verify if date query is same. if req.Form.Get(consts.AmzDate) != query.Get(consts.AmzDate) { - return handlers.ErrCodeSignatureDoesNotMatch + rErr = responses.ErrSignatureDoesNotMatch } // Verify if expires query is same. if req.Form.Get(consts.AmzExpires) != query.Get(consts.AmzExpires) { - return handlers.ErrCodeSignatureDoesNotMatch + rErr = responses.ErrSignatureDoesNotMatch + return } // Verify if signed headers query is same. if req.Form.Get(consts.AmzSignedHeaders) != query.Get(consts.AmzSignedHeaders) { - return handlers.ErrCodeSignatureDoesNotMatch + rErr = responses.ErrSignatureDoesNotMatch + return } // Verify if credential query is same. 
if req.Form.Get(consts.AmzCredential) != query.Get(consts.AmzCredential) { - return handlers.ErrCodeSignatureDoesNotMatch + rErr = responses.ErrSignatureDoesNotMatch + return } // Verify if sha256 payload query is same. if clntHashedPayload != "" && clntHashedPayload != query.Get(consts.AmzContentSha256) { - return handlers.ErrCodeContentSHA256Mismatch + rErr = responses.ErrContentSHA256Mismatch + return } // not check SessionToken. //// Verify if security token is correct. @@ -179,16 +186,17 @@ func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Verify signature. if !compareSignatureV4(req.Form.Get(consts.AmzSignature), newSignature) { - return handlers.ErrCodeSignatureDoesNotMatch + rErr = responses.ErrSignatureDoesNotMatch + return } - return handlers.ErrCodeNone + + return } // DoesSignatureMatch - Verify authorization header with calculated header in accordance with // - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html // -// returns handlers.ErrCodeNone if signature matches. -func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) handlers.ErrorCode { +func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) (rErr *responses.Error) { // Copy request. req := *r @@ -196,34 +204,37 @@ func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, regi v4Auth := req.Header.Get(consts.Authorization) // Parse signature version '4' header. - signV4Values, errCode := parseSignV4(v4Auth, region, stype) - if errCode != handlers.ErrCodeNone { - return errCode + signV4Values, rErr := parseSignV4(v4Auth, region, stype) + if rErr != nil { + return } // Extract all the signed headers along with its values. 
- extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) - if errCode != handlers.ErrCodeNone { - return errCode + extractedSignedHeaders, rErr := extractSignedHeaders(signV4Values.SignedHeaders, r) + if rErr != nil { + return } cred, err := s.accessKeySvc.Get(signV4Values.Credential.accessKey) if err != nil { - return handlers.ErrCodeNoSuchUserPolicy + rErr = responses.ErrNoSuchUserPolicy + return } // Extract date, if not present throw error. var date string if date = req.Header.Get(consts.AmzDate); date == "" { if date = r.Header.Get(consts.Date); date == "" { - return handlers.ErrCodeMissingDateHeader + rErr = responses.ErrMissingDateHeader + return } } // Parse date header. - t, e := time.Parse(iso8601Format, date) - if e != nil { - return handlers.ErrCodeAuthorizationHeaderMalformed + t, err := time.Parse(iso8601Format, date) + if err != nil { + rErr = responses.ErrAuthorizationHeaderMalformed + return } // Query string. @@ -244,11 +255,11 @@ func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, regi // Verify if signature match. if !compareSignatureV4(newSignature, signV4Values.Signature) { - return handlers.ErrCodeSignatureDoesNotMatch + rErr = responses.ErrSignatureDoesNotMatch + return } - // Return error none. - return handlers.ErrCodeNone + return } //// getScope generate a string of a specific date, an AWS region, and a service. 
diff --git a/s3/services/bucket/service.go b/s3/services/bucket/service.go index d251d2175..9328140de 100644 --- a/s3/services/bucket/service.go +++ b/s3/services/bucket/service.go @@ -2,13 +2,13 @@ package bucket import ( "context" + "github.com/bittorrent/go-btfs/s3/providers" + "github.com/bittorrent/go-btfs/s3/services" "time" "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/ctxmu" - "github.com/bittorrent/go-btfs/s3/handlers" "github.com/bittorrent/go-btfs/s3/policy" - "github.com/bittorrent/go-btfs/s3/services" "github.com/syndtr/goleveldb/leveldb" ) @@ -17,18 +17,18 @@ const ( defaultUpdateTimeoutMS = 200 ) -var _ handlers.BucketService = (*Service)(nil) +var _ services.BucketService = (*Service)(nil) // Service captures all bucket metadata for a given cluster. type Service struct { - providers services.Providerser + providers providers.Providerser emptyBucket func(ctx context.Context, bucket string) (bool, error) locks *ctxmu.MultiCtxRWMutex updateTimeout time.Duration } // NewService - creates new policy system. 
-func NewService(providers services.Providerser, options ...Option) (s *Service) { +func NewService(providers providers.Providerser, options ...Option) (s *Service) { s = &Service{ providers: providers, locks: ctxmu.NewDefaultMultiCtxRWMutex(), @@ -40,10 +40,10 @@ func NewService(providers services.Providerser, options ...Option) (s *Service) return s } -func (s *Service) CheckACL(accessKeyRecord *handlers.AccessKeyRecord, bucketName string, action action.Action) (err error) { +func (s *Service) CheckACL(accessKeyRecord *services.AccessKey, bucketName string, action action.Action) (err error) { //需要判断bucketName是否为空字符串 if bucketName == "" { - return handlers.ErrBucketNotFound + return services.ErrBucketNotFound } bucketMeta, err := s.GetBucketMeta(context.Background(), bucketName) @@ -52,14 +52,14 @@ func (s *Service) CheckACL(accessKeyRecord *handlers.AccessKeyRecord, bucketName } if policy.IsAllowed(bucketMeta.Owner == accessKeyRecord.Key, bucketMeta.Acl, action) == false { - return handlers.ErrBucketAccessDenied + return services.ErrBucketAccessDenied } return } // NewBucketMetadata creates handlers.BucketMetadata with the supplied name and Created to Now. 
-func (s *Service) NewBucketMetadata(name, region, accessKey, acl string) *handlers.BucketMetadata { - return &handlers.BucketMetadata{ +func (s *Service) NewBucketMetadata(name, region, accessKey, acl string) *services.BucketMetadata { + return &services.BucketMetadata{ Name: name, Region: region, Owner: accessKey, @@ -69,7 +69,7 @@ func (s *Service) NewBucketMetadata(name, region, accessKey, acl string) *handle } // lockSetBucketMeta - sets a new metadata in-db -func (s *Service) lockSetBucketMeta(bucket string, meta *handlers.BucketMetadata) error { +func (s *Service) lockSetBucketMeta(bucket string, meta *services.BucketMetadata) error { return s.providers.GetStateStore().Put(bucketPrefix+bucket, meta) } @@ -87,22 +87,22 @@ func (s *Service) CreateBucket(ctx context.Context, bucket, region, accessKey, a return s.lockSetBucketMeta(bucket, s.NewBucketMetadata(bucket, region, accessKey, acl)) } -func (s *Service) lockGetBucketMeta(bucket string) (meta handlers.BucketMetadata, err error) { +func (s *Service) lockGetBucketMeta(bucket string) (meta services.BucketMetadata, err error) { err = s.providers.GetStateStore().Get(bucketPrefix+bucket, &meta) if err == leveldb.ErrNotFound { - err = handlers.ErrBucketNotFound + err = services.ErrBucketNotFound } return meta, err } // GetBucketMeta metadata for a bucket. 
-func (s *Service) GetBucketMeta(ctx context.Context, bucket string) (meta handlers.BucketMetadata, err error) { +func (s *Service) GetBucketMeta(ctx context.Context, bucket string) (meta services.BucketMetadata, err error) { ctx, cancel := context.WithTimeout(context.Background(), s.updateTimeout) defer cancel() err = s.locks.RLock(ctx, bucket) if err != nil { - return handlers.BucketMetadata{Name: bucket}, err + return services.BucketMetadata{Name: bucket}, err } defer s.locks.RUnlock(bucket) @@ -133,7 +133,7 @@ func (s *Service) DeleteBucket(ctx context.Context, bucket string) error { if empty, err := s.emptyBucket(ctx, bucket); err != nil { return err } else if !empty { - return handlers.ErrSetBucketEmptyFailed + return services.ErrSetBucketEmptyFailed } return s.providers.GetStateStore().Delete(bucketPrefix + bucket) @@ -144,9 +144,9 @@ func (s *Service) SetEmptyBucket(emptyBucket func(ctx context.Context, bucket st } // GetAllBucketsOfUser metadata for all bucket. -func (s *Service) GetAllBucketsOfUser(username string) (list []*handlers.BucketMetadata, err error) { +func (s *Service) GetAllBucketsOfUser(username string) (list []*services.BucketMetadata, err error) { err = s.providers.GetStateStore().Iterate(bucketPrefix, func(key, _ []byte) (stop bool, er error) { - record := &handlers.BucketMetadata{} + record := &services.BucketMetadata{} er = s.providers.GetStateStore().Get(string(key), record) if er != nil { return diff --git a/s3/services/cors/service.go b/s3/services/cors/service.go index c61ba0074..b76e87846 100644 --- a/s3/services/cors/service.go +++ b/s3/services/cors/service.go @@ -2,7 +2,7 @@ package cors import ( "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/services" "net/http" ) @@ -43,7 +43,7 @@ var ( } ) -var _ handlers.CorsService = (*Service)(nil) +var _ services.CorsService = (*Service)(nil) type Service struct { allowOrigins []string diff --git 
a/s3/services/multipart/service.go b/s3/services/multipart/service.go index de8c518b3..6ac36a72b 100644 --- a/s3/services/multipart/service.go +++ b/s3/services/multipart/service.go @@ -1,11 +1,11 @@ package multipart import ( - "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/services" "io" ) -var _ handlers.MultipartService = (*Service)(nil) +var _ services.MultipartService = (*Service)(nil) type Service struct { } diff --git a/s3/services/proto.go b/s3/services/proto.go new file mode 100644 index 000000000..41b7dd04c --- /dev/null +++ b/s3/services/proto.go @@ -0,0 +1,84 @@ +package services + +import ( + "context" + "github.com/bittorrent/go-btfs/s3/handlers/responses" + "net/http" + "time" + + "errors" + "github.com/bittorrent/go-btfs/s3/action" +) + +type CorsService interface { + GetAllowOrigins() []string + GetAllowMethods() []string + GetAllowHeaders() []string +} + +type AccessKeyService interface { + Generate() (record *AccessKey, err error) + Enable(key string) (err error) + Disable(key string) (err error) + Reset(key string) (err error) + Delete(key string) (err error) + Get(key string) (ack *AccessKey, err error) + List() (list []*AccessKey, err error) +} + +type AuthService interface { + VerifySignature(ctx context.Context, r *http.Request) (ack *AccessKey, rErr *responses.Error) +} + +type BucketService interface { + CheckACL(accessKeyRecord *AccessKey, bucketName string, action action.Action) (err error) + CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error + GetBucketMeta(ctx context.Context, bucket string) (meta BucketMetadata, err error) + HasBucket(ctx context.Context, bucket string) bool + SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) + DeleteBucket(ctx context.Context, bucket string) error + GetAllBucketsOfUser(username string) (list []*BucketMetadata, err error) + UpdateBucketAcl(ctx context.Context, bucket, acl string) error + GetBucketAcl(ctx 
context.Context, bucket string) (string, error) +} + +type ObjectService interface { +} + +type MultipartService interface { +} + +var ( + ErrSignVersionNotSupport = errors.New("sign version is not support") + ErrInvalidArgument = errors.New("invalid argument") + ErrInvalidBucketName = errors.New("bucket name is invalid") + ErrBucketNotFound = errors.New("bucket is not found") + ErrBucketAlreadyExists = errors.New("bucket is already exists") + ErrBucketAccessDenied = errors.New("bucket access denied") + ErrSetBucketEmptyFailed = errors.New("set bucket empty failed") + ErrCreateBucket = errors.New("create bucket failed") + ErrNoSuchUserPolicy = errors.New("no such user policy") + ErrNotImplemented = errors.New("not implemented") + ErrAccessKeyIsNotFound = errors.New("access-key is not found") +) + +type AccessKey struct { + Key string `json:"key"` + Secret string `json:"secret"` + Enable bool `json:"enable"` + IsDeleted bool `json:"is_deleted"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// BucketMetadata contains bucket metadata. 
+type BucketMetadata struct { + Name string + Region string + Owner string + Acl string + Created time.Time +} + +type ObjectMetadata struct { +} diff --git a/s3/services/providerser_errors.go b/s3/services/providerser_errors.go deleted file mode 100644 index 0ddf07ef3..000000000 --- a/s3/services/providerser_errors.go +++ /dev/null @@ -1,7 +0,0 @@ -package services - -import "errors" - -var ( - ErrStateStoreNotFound = errors.New("not found") -) diff --git a/s3/services/providerser_types.go b/s3/services/providerser_types.go deleted file mode 100644 index 6808d76d1..000000000 --- a/s3/services/providerser_types.go +++ /dev/null @@ -1,3 +0,0 @@ -package services - -type StateStoreIterFunc func(key, value []byte) (stop bool, err error) From 7996a1eb3d7e4fe0fd7d122c889b71a6280304db Mon Sep 17 00:00:00 2001 From: Steve Date: Wed, 23 Aug 2023 16:26:01 +0800 Subject: [PATCH 048/139] feat: add auth middleware --- s3/handlers/proto.go | 2 ++ s3/routers/routers.go | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/s3/handlers/proto.go b/s3/handlers/proto.go index b75df0668..c7ed48cfa 100644 --- a/s3/handlers/proto.go +++ b/s3/handlers/proto.go @@ -5,9 +5,11 @@ import ( ) type Handlerser interface { + // middlewares Cors(handler http.Handler) http.Handler Auth(handler http.Handler) http.Handler + // handlers PutBucketHandler(w http.ResponseWriter, r *http.Request) HeadBucketHandler(w http.ResponseWriter, r *http.Request) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) diff --git a/s3/routers/routers.go b/s3/routers/routers.go index b3ce65dfe..02d0a4b50 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -23,7 +23,10 @@ func NewRouters(handlers handlers.Handlerser, options ...Option) (routers *Route func (routers *Routers) Register() http.Handler { root := mux.NewRouter() - root.Use(routers.handlers.Cors) + root.Use( + routers.handlers.Cors, + routers.handlers.Auth, + ) bucket := root.PathPrefix("/{bucket}").Subrouter() 
bucket.Methods(http.MethodGet).HandlerFunc(routers.handlers.GetBucketAclHandler).Queries("acl", "") From 3b817d4c4c584d39dc0fe82c25d79ca86001e396 Mon Sep 17 00:00:00 2001 From: steve Date: Wed, 23 Aug 2023 19:27:31 +0800 Subject: [PATCH 049/139] feat: adjust code structure --- cmd/btfs/daemon.go | 10 -- s3/handlers/cctx/access_key.go | 10 +- s3/handlers/cctx/cctx.go | 25 +++ s3/handlers/cctx/handle_err.go | 23 +++ s3/handlers/cctx/key.go | 7 - s3/handlers/handlers.go | 155 ++++++++++--------- s3/handlers/proto.go | 1 + s3/handlers/requests/parsers.go | 14 +- s3/handlers/requests/types.go | 5 + s3/handlers/responses/wirters.go | 8 +- s3/handlers/responses/writers_common.go | 27 +++- s3/handlers/to_response_err.go | 18 --- s3/routers/routers.go | 1 + s3/services/accesskey/service.go | 4 +- s3/services/auth/check_handler_auth.go | 25 ++- s3/services/auth/service.go | 3 +- s3/services/auth/signature-v4-parser.go | 124 ++++++++------- s3/services/auth/signature-v4-utils.go | 10 +- s3/services/auth/signature-v4.go | 53 ++++--- s3/services/bucket/service.go | 21 +-- s3/{handlers/responses => services}/error.go | 7 +- s3/services/proto.go | 18 +-- 22 files changed, 301 insertions(+), 268 deletions(-) create mode 100644 s3/handlers/cctx/cctx.go create mode 100644 s3/handlers/cctx/handle_err.go delete mode 100644 s3/handlers/cctx/key.go delete mode 100644 s3/handlers/to_response_err.go rename s3/{handlers/responses => services}/error.go (99%) diff --git a/cmd/btfs/daemon.go b/cmd/btfs/daemon.go index 884eba03f..e921485f4 100644 --- a/cmd/btfs/daemon.go +++ b/cmd/btfs/daemon.go @@ -8,9 +8,6 @@ import ( _ "expvar" "fmt" "github.com/bittorrent/go-btfs/s3" - "github.com/bittorrent/go-btfs/s3/providers" - "github.com/bittorrent/go-btfs/s3/providers/filestore" - s3statestore "github.com/bittorrent/go-btfs/s3/providers/statestore" "github.com/bittorrent/go-btfs/s3/services/accesskey" "io/ioutil" "math/rand" @@ -1469,10 +1466,3 @@ func CheckExistLastOnlineReportV2(cfg 
*config.Config, configRoot string, chainId } return nil } - -func buildS3Providers(storageStore storage.StateStorer) *providers.Providers { - return providers.NewProviders( - s3statestore.NewStorageStateStoreProxy(storageStore), - filestore.NewLocalShell(), - ) -} diff --git a/s3/handlers/cctx/access_key.go b/s3/handlers/cctx/access_key.go index 2eb57c310..279c92e5a 100644 --- a/s3/handlers/cctx/access_key.go +++ b/s3/handlers/cctx/access_key.go @@ -1,21 +1,17 @@ package cctx import ( - "context" "github.com/bittorrent/go-btfs/s3/services" "net/http" ) func SetAccessKey(r *http.Request, ack *services.AccessKey) { - ctx := context.WithValue(r.Context(), keyOfAccessKey, ack) - r.WithContext(ctx) + set(r, keyOfAccessKey, ack) + return } func GetAccessKey(r *http.Request) (ack *services.AccessKey) { - v := r.Context().Value(keyOfAccessKey) - if v == nil { - return - } + v := get(r, keyOfAccessKey) ack, _ = v.(*services.AccessKey) return } diff --git a/s3/handlers/cctx/cctx.go b/s3/handlers/cctx/cctx.go new file mode 100644 index 000000000..797043a1e --- /dev/null +++ b/s3/handlers/cctx/cctx.go @@ -0,0 +1,25 @@ +package cctx + +import ( + "context" + "net/http" +) + +type key *struct{} + +var ( + keyOfAccessKey = new(struct{}) + keyOfHandleInf = new(struct{}) +) + +func set(r *http.Request, k key, v any) { + ctx := context.WithValue(r.Context(), k, v) + nr := r.WithContext(ctx) + *r = *nr + return +} + +func get(r *http.Request, k key) (v any) { + v = r.Context().Value(k) + return +} diff --git a/s3/handlers/cctx/handle_err.go b/s3/handlers/cctx/handle_err.go new file mode 100644 index 000000000..f07b91f4c --- /dev/null +++ b/s3/handlers/cctx/handle_err.go @@ -0,0 +1,23 @@ +package cctx + +import ( + "net/http" +) + +type handleInfo struct { + name string + err error +} + +func SetHandleInf(r *http.Request, name string, err error) { + set(r, keyOfHandleInf, handleInfo{name, err}) + return +} + +func GetHandleInf(r *http.Request) (name string, err error) { + v := get(r, 
keyOfHandleInf) + inf, _ := v.(handleInfo) + name = inf.name + err = inf.err + return +} diff --git a/s3/handlers/cctx/key.go b/s3/handlers/cctx/key.go deleted file mode 100644 index 8abc7c5a1..000000000 --- a/s3/handlers/cctx/key.go +++ /dev/null @@ -1,7 +0,0 @@ -package cctx - -type key struct{} - -var ( - keyOfAccessKey = &key{} -) diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 49fb36c65..4866bccd1 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -8,6 +8,7 @@ import ( "github.com/bittorrent/go-btfs/s3/handlers/responses" "github.com/bittorrent/go-btfs/s3/services" "net/http" + "runtime" s3action "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/consts" @@ -58,9 +59,9 @@ func (h *Handlers) Cors(handler http.Handler) http.Handler { func (h *Handlers) Auth(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ack, rErr := h.authSvc.VerifySignature(r.Context(), r) - if rErr != nil { - responses.WriteErrorResponse(w, r, rErr) + ack, err := h.authSvc.VerifySignature(r.Context(), r) + if err != nil { + responses.WriteErrorResponse(w, r, err) return } cctx.SetAccessKey(r, ack) @@ -68,83 +69,84 @@ func (h *Handlers) Auth(handler http.Handler) http.Handler { }) } -func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { - fmt.Println("... 
PutBucketHandler: begin") +func (h *Handlers) Log(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler.ServeHTTP(w, r) + hname, herr := cctx.GetHandleInf(r) + fmt.Printf("[%-4s] %s | %s | %v\n", r.Method, r.URL, hname, herr) + }) +} - ctx := r.Context() +func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { + var err error + defer func() { + cctx.SetHandleInf(r, fnName(), err) + }() req, err := requests.ParsePubBucketRequest(r) if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, services.ErrInvalidRequestBody) return } - //err = h.bucketSvc.CheckACL(accessKeyRecord, req.Bucket, s3action.CreateBucketAction) - //if err != nil { - // WriteErrorResponse(w, r, ToApiError(ctx, ErrNoSuchUserPolicy)) - // return - //} + ctx := r.Context() if err = s3utils.CheckValidBucketNameStrict(req.Bucket); err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidBucketName) + responses.WriteErrorResponse(w, r, services.ErrInvalidBucketName) return } - fmt.Println("4") if !requests.CheckAclPermissionType(&req.ACL) { - responses.WriteErrorResponse(w, r, responses.ErrNotImplemented) + err = services.ErrNotImplemented + responses.WriteErrorResponse(w, r, services.ErrNotImplemented) return } - fmt.Println("3") if ok := h.bucketSvc.HasBucket(ctx, req.Bucket); ok { - responses.WriteErrorResponseHeadersOnly(w, r, responses.ErrBucketAlreadyExists) + err = services.ErrBucketAlreadyExists + responses.WriteErrorResponseHeadersOnly(w, r, services.ErrBucketAlreadyExists) return } - fmt.Println("2") err = h.bucketSvc.CreateBucket(ctx, req.Bucket, req.Region, cctx.GetAccessKey(r).Key, req.ACL) if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInternalError) + responses.WriteErrorResponse(w, r, services.ErrInternalError) return } - fmt.Println("1") // Make sure to add Location information here only for bucket if 
cp := requests.PathClean(r.URL.Path); cp != "" { w.Header().Set(consts.Location, cp) // Clean any trailing slashes. } - fmt.Println("0") - responses.WritePutBucketResponse(w, r) return } func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { - fmt.Println("... HeadBucketHandler: begin") + var err error + defer func() { + cctx.SetHandleInf(r, fnName(), err) + }() - ctx := r.Context() - ack := cctx.GetAccessKey(r) - - req := &requests.HeadBucketRequest{} - err := req.Bind(r) + req, err := requests.ParseHeadBucketRequest(r) if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, services.ErrInvalidRequestBody) return } - fmt.Println("... head bucket ", req) + ctx := r.Context() + ack := cctx.GetAccessKey(r) err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) if err != nil { - responses.WriteErrorResponse(w, r, ToResponseErr(err)) + responses.WriteErrorResponse(w, r, err) return } if ok := h.bucketSvc.HasBucket(ctx, req.Bucket); !ok { - responses.WriteErrorResponseHeadersOnly(w, r, responses.ErrNoSuchBucket) + responses.WriteErrorResponseHeadersOnly(w, r, services.ErrNoSuchBucket) return } @@ -152,28 +154,31 @@ func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { } func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { - fmt.Println("... 
DeleteBucketHandler: begin") - - ctx := r.Context() - ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, fnName(), err) + }() req := &requests.DeleteBucketRequest{} - err := req.Bind(r) + err = req.Bind(r) if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, services.ErrInvalidRequestBody) return } + ctx := r.Context() + ack := cctx.GetAccessKey(r) + err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) if err != nil { - responses.WriteErrorResponse(w, r, ToResponseErr(err)) + responses.WriteErrorResponse(w, r, err) return } //todo check all errors. err = h.bucketSvc.DeleteBucket(ctx, req.Bucket) if err != nil { - responses.WriteErrorResponse(w, r, ToResponseErr(err)) + responses.WriteErrorResponse(w, r, err) return } @@ -181,27 +186,30 @@ func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { } func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { - fmt.Println("... 
ListBucketsHandler: begin") - - ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, fnName(), err) + }() req := &requests.ListBucketsRequest{} - err := req.Bind(r) + err = req.Bind(r) if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, services.ErrInvalidRequestBody) return } + ack := cctx.GetAccessKey(r) + err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.ListBucketAction) if err != nil { - responses.WriteErrorResponse(w, r, ToResponseErr(err)) + responses.WriteErrorResponse(w, r, err) return } //todo check all errors bucketMetas, err := h.bucketSvc.GetAllBucketsOfUser(ack.Key) if err != nil { - responses.WriteErrorResponse(w, r, ToResponseErr(err)) + responses.WriteErrorResponse(w, r, err) return } @@ -209,73 +217,82 @@ func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { } func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { - fmt.Println("... get acl req: begin") - - ctx := r.Context() - ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, fnName(), err) + }() req := &requests.GetBucketAclRequest{} - err := req.Bind(r) + err = req.Bind(r) if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, services.ErrInvalidRequestBody) return } - fmt.Println("... 
get acl req: ", req) + ctx := r.Context() + ack := cctx.GetAccessKey(r) err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.GetBucketAclAction) if err != nil { - responses.WriteErrorResponse(w, r, ToResponseErr(err)) + responses.WriteErrorResponse(w, r, err) return } if !h.bucketSvc.HasBucket(ctx, req.Bucket) { - responses.WriteErrorResponseHeadersOnly(w, r, responses.ErrNoSuchBucket) + responses.WriteErrorResponseHeadersOnly(w, r, services.ErrNoSuchBucket) return } //todo check all errors acl, err := h.bucketSvc.GetBucketAcl(ctx, req.Bucket) if err != nil { - responses.WriteErrorResponse(w, r, ToResponseErr(err)) + responses.WriteErrorResponse(w, r, err) return } - fmt.Println("... get acl = ", req) - - responses.WriteGetBucketAclResponse(w, r, ack, acl) + responses.WriteGetBucketAclResponse(w, r, ack.Key, acl) } func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { - fmt.Println("... PutBucketAclHandler: begin") - - ctx := r.Context() - ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, fnName(), err) + }() req := &requests.PutBucketAclRequest{} - err := req.Bind(r) + err = req.Bind(r) if err != nil || len(req.ACL) == 0 || len(req.Bucket) == 0 { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, services.ErrInvalidRequestBody) return } + ctx := r.Context() + ack := cctx.GetAccessKey(r) + err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.PutBucketAclAction) if err != nil { - responses.WriteErrorResponse(w, r, ToResponseErr(err)) + responses.WriteErrorResponse(w, r, err) return } if !requests.CheckAclPermissionType(&req.ACL) { - responses.WriteErrorResponse(w, r, responses.ErrNotImplemented) + responses.WriteErrorResponse(w, r, services.ErrNotImplemented) return } //todo check all errors err = h.bucketSvc.UpdateBucketAcl(ctx, req.Bucket, req.ACL) if err != nil { - responses.WriteErrorResponse(w, r, ToResponseErr(err)) + 
responses.WriteErrorResponse(w, r, err) return } //todo check no return? responses.WritePutBucketAclResponse(w, r) } + +func fnName() string { + pc := make([]uintptr, 1) + runtime.Callers(3, pc) + f := runtime.FuncForPC(pc[0]) + return f.Name() +} diff --git a/s3/handlers/proto.go b/s3/handlers/proto.go index c7ed48cfa..4b2c90e24 100644 --- a/s3/handlers/proto.go +++ b/s3/handlers/proto.go @@ -8,6 +8,7 @@ type Handlerser interface { // middlewares Cors(handler http.Handler) http.Handler Auth(handler http.Handler) http.Handler + Log(handler http.Handler) http.Handler // handlers PutBucketHandler(w http.ResponseWriter, r *http.Request) diff --git a/s3/handlers/requests/parsers.go b/s3/handlers/requests/parsers.go index b9a5a7e39..a086ef023 100644 --- a/s3/handlers/requests/parsers.go +++ b/s3/handlers/requests/parsers.go @@ -2,7 +2,7 @@ package requests import ( "encoding/xml" - "github.com/bittorrent/go-btfs/s3/handlers/responses" + "github.com/bittorrent/go-btfs/s3/services" "net/http" "path" @@ -44,12 +44,8 @@ func ParsePubBucketRequest(r *http.Request) (req *PutBucketRequest, err error) { return } -// HeadBucketRequest . -type HeadBucketRequest struct { - Bucket string -} - -func (req *HeadBucketRequest) Bind(r *http.Request) (err error) { +func ParseHeadBucketRequest(r *http.Request) (req *HeadBucketRequest, err error) { + req = &HeadBucketRequest{} vars := mux.Vars(r) bucket := vars["bucket"] @@ -121,7 +117,7 @@ func (req *PutBucketAclRequest) Bind(r *http.Request) (err error) { /*********************************/ // Parses location constraint from the incoming reader. -func parseLocationConstraint(r *http.Request) (location string, s3Error *responses.Error) { +func parseLocationConstraint(r *http.Request) (location string, s3Error *services.Error) { // If the request has no body with content-length set to 0, // we do not have to validate location constraint. Bucket will // be created at default region. 
@@ -129,7 +125,7 @@ func parseLocationConstraint(r *http.Request) (location string, s3Error *respons err := utils.XmlDecoder(r.Body, &locationConstraint, r.ContentLength) if err != nil && r.ContentLength != 0 { // Treat all other failures as XML parsing errors. - return "", responses.ErrMalformedXML + return "", services.ErrMalformedXML } // else for both err as nil or io.EOF location = locationConstraint.Location if location == "" { diff --git a/s3/handlers/requests/types.go b/s3/handlers/requests/types.go index 0dddc3dc0..2d257b844 100644 --- a/s3/handlers/requests/types.go +++ b/s3/handlers/requests/types.go @@ -6,3 +6,8 @@ type PutBucketRequest struct { ACL string Region string } + +// HeadBucketRequest . +type HeadBucketRequest struct { + Bucket string +} diff --git a/s3/handlers/responses/wirters.go b/s3/handlers/responses/wirters.go index 4fc2cb56c..c59f4599d 100644 --- a/s3/handlers/responses/wirters.go +++ b/s3/handlers/responses/wirters.go @@ -46,13 +46,13 @@ func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, bucketMeta return } -func WriteGetBucketAclResponse(w http.ResponseWriter, r *http.Request, ack *services.AccessKey, acl string) { +func WriteGetBucketAclResponse(w http.ResponseWriter, r *http.Request, key string, acl string) { resp := GetBucketAclResponse{} fmt.Printf(" -1- get acl resp: %+v \n", resp) - id := ack.Key + id := key if resp.Owner.DisplayName == "" { - resp.Owner.DisplayName = ack.Key + resp.Owner.DisplayName = key resp.Owner.ID = id } fmt.Printf(" -2- get acl resp: %+v \n", resp) @@ -61,7 +61,7 @@ func WriteGetBucketAclResponse(w http.ResponseWriter, r *http.Request, ack *serv resp.AccessControlList.Grant = append(resp.AccessControlList.Grant, Grant{ Grantee: Grantee{ ID: id, - DisplayName: ack.Key, + DisplayName: key, Type: "CanonicalUser", XMLXSI: "CanonicalUser", XMLNS: "http://www.w3.org/2001/XMLSchema-instance"}, diff --git a/s3/handlers/responses/writers_common.go b/s3/handlers/responses/writers_common.go 
index c867897e0..a74481f9a 100644 --- a/s3/handlers/responses/writers_common.go +++ b/s3/handlers/responses/writers_common.go @@ -4,8 +4,10 @@ import ( "bytes" "encoding/json" "encoding/xml" + "errors" "fmt" "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/services" "github.com/gorilla/mux" logging "github.com/ipfs/go-log/v2" "net/http" @@ -45,13 +47,20 @@ type RESTErrorResponse struct { BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"` } -func WriteErrorResponseHeadersOnly(w http.ResponseWriter, r *http.Request, rerr *Error) { +func WriteErrorResponseHeadersOnly(w http.ResponseWriter, r *http.Request, err error) { + var rerr *services.Error + if !errors.As(err, &rerr) { + rerr = services.ErrInternalError + } writeResponse(w, r, rerr.HTTPStatusCode(), nil, mimeNone) } // WriteErrorResponse write ErrorResponse -func WriteErrorResponse(w http.ResponseWriter, r *http.Request, rerr *Error) { - fmt.Println("response err: ", rerr.Error(), r.URL, r.Method, r.Header) +func WriteErrorResponse(w http.ResponseWriter, r *http.Request, err error) { + var rerr *services.Error + if !errors.As(err, &rerr) { + rerr = services.ErrInternalError + } vars := mux.Vars(r) bucket := vars["bucket"] object := vars["object"] @@ -121,16 +130,20 @@ func encodeXMLResponse(response interface{}) []byte { // WriteErrorResponseJSON - writes error response in JSON format; // useful for admin APIs. -func WriteErrorResponseJSON(w http.ResponseWriter, err *Error, reqURL *url.URL, host string) { +func WriteErrorResponseJSON(w http.ResponseWriter, err error, reqURL *url.URL, host string) { + var rerr *services.Error + if !errors.As(err, &rerr) { + rerr = services.ErrInternalError + } // Generate error response. 
- errorResponse := getAPIErrorResponse(err, reqURL.Path, w.Header().Get(consts.AmzRequestID), host) + errorResponse := getAPIErrorResponse(rerr, reqURL.Path, w.Header().Get(consts.AmzRequestID), host) encodedErrorResponse := encodeResponseJSON(errorResponse) - writeResponseSimple(w, err.HTTPStatusCode(), encodedErrorResponse, mimeJSON) + writeResponseSimple(w, rerr.HTTPStatusCode(), encodedErrorResponse, mimeJSON) } // getErrorResponse gets in standard error and resource value and // provides a encodable populated response values -func getAPIErrorResponse(err *Error, resource, requestID, hostID string) APIErrorResponse { +func getAPIErrorResponse(err *services.Error, resource, requestID, hostID string) APIErrorResponse { return APIErrorResponse{ Code: err.Code(), Message: err.Description(), diff --git a/s3/handlers/to_response_err.go b/s3/handlers/to_response_err.go deleted file mode 100644 index 3c456acfc..000000000 --- a/s3/handlers/to_response_err.go +++ /dev/null @@ -1,18 +0,0 @@ -package handlers - -import ( - "github.com/bittorrent/go-btfs/s3/handlers/responses" - "github.com/bittorrent/go-btfs/s3/services" -) - -var toResponseErr = map[error]*responses.Error{ - services.ErrBucketNotFound: responses.ErrNoSuchBucket, -} - -func ToResponseErr(err error) (rerr *responses.Error) { - rerr, ok := toResponseErr[err] - if !ok { - rerr = responses.ErrInternalError - } - return -} diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 02d0a4b50..2beb81551 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -26,6 +26,7 @@ func (routers *Routers) Register() http.Handler { root.Use( routers.handlers.Cors, routers.handlers.Auth, + routers.handlers.Log, ) bucket := root.PathPrefix("/{bucket}").Subrouter() diff --git a/s3/services/accesskey/service.go b/s3/services/accesskey/service.go index 8aceaba0f..e8b8c90d4 100644 --- a/s3/services/accesskey/service.go +++ b/s3/services/accesskey/service.go @@ -95,7 +95,7 @@ func (svc *Service) Get(key string) 
(ack *services.AccessKey, err error) { return } if errors.Is(err, providers.ErrStateStoreNotFound) || ack.IsDeleted { - err = services.ErrAccessKeyIsNotFound + err = services.ErrAccessKeyNotFound } return } @@ -155,7 +155,7 @@ func (svc *Service) update(key string, args *updateArgs) (err error) { return } if errors.Is(err, storage.ErrNotFound) || record.IsDeleted { - err = services.ErrAccessKeyIsNotFound + err = services.ErrAccessKeyNotFound return } diff --git a/s3/services/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go index fd73eaba9..f724e220e 100644 --- a/s3/services/auth/check_handler_auth.go +++ b/s3/services/auth/check_handler_auth.go @@ -3,7 +3,6 @@ package auth import ( "context" "encoding/hex" - "github.com/bittorrent/go-btfs/s3/handlers/responses" "github.com/bittorrent/go-btfs/s3/services" "net/http" @@ -19,24 +18,24 @@ import ( // // returns APIErrorcode if any to be replied to the client. // Additionally, returns the accessKey used in the request, and if this request is by an admin. 
-func (s *Service) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request) (cred *services.AccessKey, rErr *responses.Error) { +func (s *Service) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request) (cred *services.AccessKey, err error) { // check signature switch GetRequestAuthType(r) { case AuthTypeSigned, AuthTypePresigned: region := "" - if rErr = s.IsReqAuthenticated(ctx, r, region, ServiceS3); rErr != nil { + if err = s.IsReqAuthenticated(ctx, r, region, ServiceS3); err != nil { return } - cred, rErr = s.getReqAccessKeyV4(r, region, ServiceS3) + cred, err = s.getReqAccessKeyV4(r, region, ServiceS3) default: - rErr = responses.ErrSignatureVersionNotSupported + err = services.ErrSignatureVersionNotSupported return } return } -func (s *Service) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) *responses.Error { +func (s *Service) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) error { sha256sum := getContentSha256Cksum(r, stype) switch { case IsRequestSignatureV4(r): @@ -44,18 +43,18 @@ func (s *Service) ReqSignatureV4Verify(r *http.Request, region string, stype ser case isRequestPresignedSignatureV4(r): return s.doesPresignedSignatureMatch(sha256sum, r, region, stype) default: - return responses.ErrAccessDenied + return services.ErrAccessDenied } } // IsReqAuthenticated Verify if request has valid AWS Signature Version '4'. 
-func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (rErr *responses.Error) { - if rErr = s.ReqSignatureV4Verify(r, region, stype); rErr != nil { +func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (err error) { + if err = s.ReqSignatureV4Verify(r, region, stype); err != nil { return } clientETag, err := etag.FromContentMD5(r.Header) if err != nil { - rErr = responses.ErrInvalidDigest + err = services.ErrInvalidDigest return } @@ -66,14 +65,14 @@ func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio if sha256Sum, ok := r.Form[consts.AmzContentSha256]; ok && len(sha256Sum) > 0 { contentSHA256, err = hex.DecodeString(sha256Sum[0]) if err != nil { - rErr = responses.ErrContentSHA256Mismatch + err = services.ErrContentSHA256Mismatch return } } } else if _, ok := r.Header[consts.AmzContentSha256]; !skipSHA256 && ok { contentSHA256, err = hex.DecodeString(r.Header.Get(consts.AmzContentSha256)) if err != nil || len(contentSHA256) == 0 { - rErr = responses.ErrContentSHA256Mismatch + err = services.ErrContentSHA256Mismatch return } } @@ -82,7 +81,7 @@ func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio // The verification happens implicit during reading. 
reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1) if err != nil { - rErr = responses.ErrInternalError + err = services.ErrInternalError return } r.Body = reader diff --git a/s3/services/auth/service.go b/s3/services/auth/service.go index 5e8678a28..f883dad2a 100644 --- a/s3/services/auth/service.go +++ b/s3/services/auth/service.go @@ -2,7 +2,6 @@ package auth import ( "context" - "github.com/bittorrent/go-btfs/s3/handlers/responses" "github.com/bittorrent/go-btfs/s3/providers" "github.com/bittorrent/go-btfs/s3/services" "net/http" @@ -26,6 +25,6 @@ func NewService(providers providers.Providerser, accessKeySvc services.AccessKey return } -func (s *Service) VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *services.AccessKey, err *responses.Error) { +func (s *Service) VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *services.AccessKey, err error) { return s.CheckRequestAuthTypeCredential(ctx, r) } diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go index ac443efaf..28f524650 100644 --- a/s3/services/auth/signature-v4-parser.go +++ b/s3/services/auth/signature-v4-parser.go @@ -18,7 +18,6 @@ package auth import ( - "github.com/bittorrent/go-btfs/s3/handlers/responses" "github.com/bittorrent/go-btfs/s3/services" "net/http" "net/url" @@ -51,17 +50,17 @@ func (c credentialHeader) getScope() string { } // parse credentialHeader string into its structured form. 
-func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, rErr *responses.Error) { +func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, err error) { creds := strings.SplitN(strings.TrimSpace(credElement), "=", 2) if len(creds) != 2 { - return ch, responses.ErrMissingFields + return ch, services.ErrMissingFields } if creds[0] != "Credential" { - return ch, responses.ErrMissingCredTag + return ch, services.ErrMissingCredTag } credElements := strings.Split(strings.TrimSpace(creds[1]), consts.SlashSeparator) if len(credElements) < 5 { - return ch, responses.ErrCredMalformed + return ch, services.ErrCredMalformed } accessKey := strings.Join(credElements[:len(credElements)-4], consts.SlashSeparator) // The access key may contain one or more `/` //if !IsAccessKeyValid(accessKey) { @@ -75,7 +74,7 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) var e error cred.scope.date, e = time.Parse(yyyymmdd, credElements[0]) if e != nil { - return ch, responses.ErrAuthorizationHeaderMalformed + return ch, services.ErrAuthorizationHeaderMalformed } cred.scope.region = credElements[1] @@ -90,50 +89,50 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) } // Should validate region, only if region is set. 
if !isValidRegion(sRegion, region) { - return ch, responses.ErrAuthorizationHeaderMalformed + return ch, services.ErrAuthorizationHeaderMalformed } if credElements[2] != string(stype) { //switch stype { //case ServiceSTS: // return ch, handlers.ErrcodeAuthorizationHeaderMalformed //} - return ch, responses.ErrAuthorizationHeaderMalformed + return ch, services.ErrAuthorizationHeaderMalformed } cred.scope.service = credElements[2] if credElements[3] != "aws4_request" { - return ch, responses.ErrAuthorizationHeaderMalformed + return ch, services.ErrAuthorizationHeaderMalformed } cred.scope.request = credElements[3] return cred, nil } // Parse signature from signature tag. -func parseSignature(signElement string) (string, *responses.Error) { +func parseSignature(signElement string) (string, error) { signFields := strings.Split(strings.TrimSpace(signElement), "=") if len(signFields) != 2 { - return "", responses.ErrMissingFields + return "", services.ErrMissingFields } if signFields[0] != "Signature" { - return "", responses.ErrMissingSignTag + return "", services.ErrMissingSignTag } if signFields[1] == "" { - return "", responses.ErrMissingFields + return "", services.ErrMissingFields } signature := signFields[1] return signature, nil } // Parse slice of signed headers from signed headers tag. 
-func parseSignedHeader(signedHdrElement string) ([]string, *responses.Error) { +func parseSignedHeader(signedHdrElement string) ([]string, error) { signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") if len(signedHdrFields) != 2 { - return nil, responses.ErrMissingFields + return nil, services.ErrMissingFields } if signedHdrFields[0] != "SignedHeaders" { - return nil, responses.ErrMissingSignHeadersTag + return nil, services.ErrMissingSignHeadersTag } if signedHdrFields[1] == "" { - return nil, responses.ErrMissingFields + return nil, services.ErrMissingFields } signedHeaders := strings.Split(signedHdrFields[1], ";") return signedHeaders, nil @@ -163,70 +162,69 @@ type preSignValues struct { // querystring += &X-Amz-Signature=signature // // verifies if any of the necessary query params are missing in the presigned request. -func doesV4PresignParamsExist(query url.Values) *responses.Error { +func doesV4PresignParamsExist(query url.Values) error { v4PresignQueryParams := []string{consts.AmzAlgorithm, consts.AmzCredential, consts.AmzSignature, consts.AmzDate, consts.AmzSignedHeaders, consts.AmzExpires} for _, v4PresignQueryParam := range v4PresignQueryParams { if _, ok := query[v4PresignQueryParam]; !ok { - return responses.ErrInvalidQueryParams + return services.ErrInvalidQueryParams } } return nil } // Parses all the presigned signature values into separate elements. -func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, rErr *responses.Error) { +func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, err error) { // verify whether the required query params exist. - rErr = doesV4PresignParamsExist(query) - if rErr != nil { - return psv, rErr + err = doesV4PresignParamsExist(query) + if err != nil { + return psv, err } // Verify if the query algorithm is supported or not. 
if query.Get(consts.AmzAlgorithm) != signV4Algorithm { - return psv, responses.ErrAuthorizationHeaderMalformed + return psv, services.ErrAuthorizationHeaderMalformed } // Initialize signature version '4' structured header. preSignV4Values := preSignValues{} // Save credential. - preSignV4Values.Credential, rErr = parseCredentialHeader("Credential="+query.Get(consts.AmzCredential), region, stype) - if rErr != nil { - return psv, rErr + preSignV4Values.Credential, err = parseCredentialHeader("Credential="+query.Get(consts.AmzCredential), region, stype) + if err != nil { + return psv, err } - var e error // Save date in native time.Time. - preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get(consts.AmzDate)) - if e != nil { - return psv, responses.ErrAuthorizationHeaderMalformed + preSignV4Values.Date, err = time.Parse(iso8601Format, query.Get(consts.AmzDate)) + if err != nil { + return psv, services.ErrAuthorizationHeaderMalformed } // Save expires in native time.Duration. - preSignV4Values.Expires, e = time.ParseDuration(query.Get(consts.AmzExpires) + "s") - if e != nil { - return psv, responses.ErrAuthorizationHeaderMalformed + preSignV4Values.Expires, err = time.ParseDuration(query.Get(consts.AmzExpires) + "s") + if err != nil { + return psv, services.ErrAuthorizationHeaderMalformed } if preSignV4Values.Expires < 0 { - return psv, responses.ErrAuthorizationHeaderMalformed + return psv, services.ErrAuthorizationHeaderMalformed } // Check if Expiry time is less than 7 days (value in seconds). if preSignV4Values.Expires.Seconds() > 604800 { - return psv, responses.ErrAuthorizationHeaderMalformed + return psv, services.ErrAuthorizationHeaderMalformed } // Save signed headers. 
- preSignV4Values.SignedHeaders, rErr = parseSignedHeader("SignedHeaders=" + query.Get(consts.AmzSignedHeaders)) - if rErr != nil { - return psv, rErr + preSignV4Values.SignedHeaders, err = parseSignedHeader("SignedHeaders=" + query.Get(consts.AmzSignedHeaders)) + if err != nil { + return psv, err } // Save signature. - preSignV4Values.Signature, rErr = parseSignature("Signature=" + query.Get(consts.AmzSignature)) - if rErr != nil { - return psv, rErr + preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get(consts.AmzSignature)) + if err != nil { + return psv, err } // Return structed form of signature query string. @@ -237,7 +235,7 @@ func parsePreSignV4(query url.Values, region string, stype serviceType) (psv pre // // Authorization: algorithm Credential=accessKeyID/credScope, \ // SignedHeaders=signedHeaders, Signature=signature -func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, rErr *responses.Error) { +func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, err error) { // credElement is fetched first to skip replacing the space in access key. credElement := strings.TrimPrefix(strings.Split(strings.TrimSpace(v4Auth), ",")[0], signV4Algorithm) // Replace all spaced strings, some clients can send spaced @@ -245,65 +243,65 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues // to make parsing easier. v4Auth = strings.ReplaceAll(v4Auth, " ", "") if v4Auth == "" { - return sv, responses.ErrAuthHeaderEmpty + return sv, services.ErrAuthHeaderEmpty } // Verify if the header algorithm is supported or not. if !strings.HasPrefix(v4Auth, signV4Algorithm) { - return sv, responses.ErrSignatureVersionNotSupported + return sv, services.ErrSignatureVersionNotSupported } // Strip off the Algorithm prefix. 
v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - return sv, responses.ErrMissingFields + return sv, services.ErrMissingFields } // Initialize signature version '4' structured header. signV4Values := signValues{} // Save credentail values. - signV4Values.Credential, rErr = parseCredentialHeader(strings.TrimSpace(credElement), region, stype) - if rErr != nil { - return sv, rErr + signV4Values.Credential, err = parseCredentialHeader(strings.TrimSpace(credElement), region, stype) + if err != nil { + return sv, err } // Save signed headers. - signV4Values.SignedHeaders, rErr = parseSignedHeader(authFields[1]) - if rErr != nil { - return sv, rErr + signV4Values.SignedHeaders, err = parseSignedHeader(authFields[1]) + if err != nil { + return sv, err } // Save signature. - signV4Values.Signature, rErr = parseSignature(authFields[2]) - if rErr != nil { - return sv, rErr + signV4Values.Signature, err = parseSignature(authFields[2]) + if err != nil { + return sv, err } // Return the structure here. return signV4Values, nil } -func (s *Service) getReqAccessKeyV4(r *http.Request, region string, stype serviceType) (*services.AccessKey, *responses.Error) { - ch, rErr := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) - if rErr != nil { +func (s *Service) getReqAccessKeyV4(r *http.Request, region string, stype serviceType) (*services.AccessKey, error) { + ch, err := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) + if err != nil { // Strip off the Algorithm prefix. 
v4Auth := strings.TrimPrefix(r.Header.Get("Authorization"), signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - return &services.AccessKey{}, responses.ErrMissingFields + return &services.AccessKey{}, services.ErrMissingFields } - ch, rErr = parseCredentialHeader(authFields[0], region, stype) - if rErr != nil { - return &services.AccessKey{}, rErr + ch, err = parseCredentialHeader(authFields[0], region, stype) + if err != nil { + return &services.AccessKey{}, err } } // check accessKey. record, err := s.accessKeySvc.Get(ch.accessKey) if err != nil { - return &services.AccessKey{}, responses.ErrNoSuchUserPolicy + return &services.AccessKey{}, services.ErrNoSuchUserPolicy } return record, nil } diff --git a/s3/services/auth/signature-v4-utils.go b/s3/services/auth/signature-v4-utils.go index 2279820d7..0d65f28d2 100644 --- a/s3/services/auth/signature-v4-utils.go +++ b/s3/services/auth/signature-v4-utils.go @@ -18,13 +18,13 @@ package auth import ( + "github.com/bittorrent/go-btfs/s3/services" "net/http" "reflect" "strconv" "strings" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/handlers" ) // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the @@ -60,13 +60,13 @@ func contains(slice interface{}, elem interface{}) bool { } // extractSignedHeaders extract signed headers from Authorization header -func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, handlers.Errorcode) { +func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, error) { reqHeaders := r.Header reqQueries := r.Form // find whether "host" is part of list of signed headers. // if not return ErrcodeUnsignedHeaders. "host" is mandatory. 
if !contains(signedHeaders, "host") { - return nil, handlers.ErrcodeUnsignedHeaders + return nil, services.ErrUnsignedHeaders } extractedSignedHeaders := make(http.Header) for _, header := range signedHeaders { @@ -116,10 +116,10 @@ func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, // calculation to be compatible with such clients. extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) default: - return nil, handlers.ErrcodeUnsignedHeaders + return nil, services.ErrUnsignedHeaders } } - return extractedSignedHeaders, handlers.ErrcodeNone + return extractedSignedHeaders, nil } // Returns SHA256 for calculating canonical-request. diff --git a/s3/services/auth/signature-v4.go b/s3/services/auth/signature-v4.go index e094db035..86d7fc0f3 100644 --- a/s3/services/auth/signature-v4.go +++ b/s3/services/auth/signature-v4.go @@ -19,7 +19,7 @@ package auth import ( "crypto/subtle" - "github.com/bittorrent/go-btfs/s3/handlers/responses" + "github.com/bittorrent/go-btfs/s3/services" "net/http" "net/url" "strconv" @@ -58,38 +58,38 @@ func compareSignatureV4(sig1, sig2 string) bool { // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html // // returns handlers.ErrcodeNone if the signature matches. -func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) (rErr *responses.Error) { +func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) (err error) { // Copy request req := *r // Parse request query string. 
- pSignValues, rErr := parsePreSignV4(req.Form, region, stype) - if rErr != nil { + pSignValues, err := parsePreSignV4(req.Form, region, stype) + if err != nil { return } // get access_info by accessKey cred, err := s.accessKeySvc.Get(pSignValues.Credential.accessKey) if err != nil { - rErr = responses.ErrNoSuchUserPolicy + err = services.ErrNoSuchUserPolicy return } // Extract all the signed headers along with its values. - extractedSignedHeaders, rErr := extractSignedHeaders(pSignValues.SignedHeaders, r) - if rErr != nil { + extractedSignedHeaders, err := extractSignedHeaders(pSignValues.SignedHeaders, r) + if err != nil { return } // If the host which signed the request is slightly ahead in time (by less than MaxSkewTime) the // request should still be allowed. if pSignValues.Date.After(time.Now().UTC().Add(consts.MaxSkewTime)) { - rErr = responses.ErrRequestNotReadyYet + err = services.ErrRequestNotReadyYet return } if time.Now().UTC().Sub(pSignValues.Date) > pSignValues.Expires { - rErr = responses.ErrExpiredPresignRequest + err = services.ErrExpiredPresignRequest return } @@ -141,26 +141,26 @@ func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Verify if date query is same. if req.Form.Get(consts.AmzDate) != query.Get(consts.AmzDate) { - rErr = responses.ErrSignatureDoesNotMatch + err = services.ErrSignatureDoesNotMatch } // Verify if expires query is same. if req.Form.Get(consts.AmzExpires) != query.Get(consts.AmzExpires) { - rErr = responses.ErrSignatureDoesNotMatch + err = services.ErrSignatureDoesNotMatch return } // Verify if signed headers query is same. if req.Form.Get(consts.AmzSignedHeaders) != query.Get(consts.AmzSignedHeaders) { - rErr = responses.ErrSignatureDoesNotMatch + err = services.ErrSignatureDoesNotMatch return } // Verify if credential query is same. 
if req.Form.Get(consts.AmzCredential) != query.Get(consts.AmzCredential) { - rErr = responses.ErrSignatureDoesNotMatch + err = services.ErrSignatureDoesNotMatch return } // Verify if sha256 payload query is same. if clntHashedPayload != "" && clntHashedPayload != query.Get(consts.AmzContentSha256) { - rErr = responses.ErrContentSHA256Mismatch + err = services.ErrContentSHA256Mismatch return } // not check SessionToken. @@ -186,7 +186,7 @@ func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Verify signature. if !compareSignatureV4(req.Form.Get(consts.AmzSignature), newSignature) { - rErr = responses.ErrSignatureDoesNotMatch + err = services.ErrSignatureDoesNotMatch return } @@ -195,8 +195,7 @@ func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // DoesSignatureMatch - Verify authorization header with calculated header in accordance with // - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html -// -func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) (rErr *responses.Error) { +func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) (err error) { // Copy request. req := *r @@ -204,20 +203,24 @@ func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, regi v4Auth := req.Header.Get(consts.Authorization) // Parse signature version '4' header. - signV4Values, rErr := parseSignV4(v4Auth, region, stype) - if rErr != nil { + signV4Values, err := parseSignV4(v4Auth, region, stype) + if err != nil { return } // Extract all the signed headers along with its values. 
- extractedSignedHeaders, rErr := extractSignedHeaders(signV4Values.SignedHeaders, r) - if rErr != nil { + extractedSignedHeaders, err := extractSignedHeaders(signV4Values.SignedHeaders, r) + if err != nil { return } cred, err := s.accessKeySvc.Get(signV4Values.Credential.accessKey) + if err == services.ErrAccessKeyNotFound { + err = services.ErrNoSuchUserPolicy + return + } + if err != nil { - rErr = responses.ErrNoSuchUserPolicy return } @@ -225,7 +228,7 @@ func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, regi var date string if date = req.Header.Get(consts.AmzDate); date == "" { if date = r.Header.Get(consts.Date); date == "" { - rErr = responses.ErrMissingDateHeader + err = services.ErrMissingDateHeader return } } @@ -233,7 +236,7 @@ func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, regi // Parse date header. t, err := time.Parse(iso8601Format, date) if err != nil { - rErr = responses.ErrAuthorizationHeaderMalformed + err = services.ErrAuthorizationHeaderMalformed return } @@ -255,7 +258,7 @@ func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, regi // Verify if signature match. 
if !compareSignatureV4(newSignature, signV4Values.Signature) { - rErr = responses.ErrSignatureDoesNotMatch + err = services.ErrSignatureDoesNotMatch return } diff --git a/s3/services/bucket/service.go b/s3/services/bucket/service.go index 9328140de..c09501b6c 100644 --- a/s3/services/bucket/service.go +++ b/s3/services/bucket/service.go @@ -2,6 +2,7 @@ package bucket import ( "context" + "errors" "github.com/bittorrent/go-btfs/s3/providers" "github.com/bittorrent/go-btfs/s3/services" "time" @@ -9,7 +10,6 @@ import ( "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/ctxmu" "github.com/bittorrent/go-btfs/s3/policy" - "github.com/syndtr/goleveldb/leveldb" ) const ( @@ -43,7 +43,7 @@ func NewService(providers providers.Providerser, options ...Option) (s *Service) func (s *Service) CheckACL(accessKeyRecord *services.AccessKey, bucketName string, action action.Action) (err error) { //需要判断bucketName是否为空字符串 if bucketName == "" { - return services.ErrBucketNotFound + return services.ErrNoSuchBucket } bucketMeta, err := s.GetBucketMeta(context.Background(), bucketName) @@ -52,7 +52,7 @@ func (s *Service) CheckACL(accessKeyRecord *services.AccessKey, bucketName strin } if policy.IsAllowed(bucketMeta.Owner == accessKeyRecord.Key, bucketMeta.Acl, action) == false { - return services.ErrBucketAccessDenied + return services.ErrAccessDenied } return } @@ -89,10 +89,10 @@ func (s *Service) CreateBucket(ctx context.Context, bucket, region, accessKey, a func (s *Service) lockGetBucketMeta(bucket string) (meta services.BucketMetadata, err error) { err = s.providers.GetStateStore().Get(bucketPrefix+bucket, &meta) - if err == leveldb.ErrNotFound { - err = services.ErrBucketNotFound + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = services.ErrNoSuchBucket } - return meta, err + return } // GetBucketMeta metadata for a bucket. 
@@ -130,10 +130,13 @@ func (s *Service) DeleteBucket(ctx context.Context, bucket string) error { return err } - if empty, err := s.emptyBucket(ctx, bucket); err != nil { + empty, err := s.emptyBucket(ctx, bucket) + if err != nil { return err - } else if !empty { - return services.ErrSetBucketEmptyFailed + } + + if !empty { + return errors.New("bucket not empty") } return s.providers.GetStateStore().Delete(bucketPrefix + bucket) diff --git a/s3/handlers/responses/error.go b/s3/services/error.go similarity index 99% rename from s3/handlers/responses/error.go rename to s3/services/error.go index e886a514a..c7fff68b6 100644 --- a/s3/handlers/responses/error.go +++ b/s3/services/error.go @@ -1,4 +1,4 @@ -package responses +package services import ( "fmt" @@ -1032,4 +1032,9 @@ var ( description: "The JSON was not well-formed or did not validate against our published format.", httpStatusCode: http.StatusBadRequest, } + ErrAccessKeyNotFound = &Error{ + code: "AccessKeyNotFound", + description: "", + httpStatusCode: http.StatusBadRequest, + } ) diff --git a/s3/services/proto.go b/s3/services/proto.go index 41b7dd04c..2972a1753 100644 --- a/s3/services/proto.go +++ b/s3/services/proto.go @@ -2,11 +2,9 @@ package services import ( "context" - "github.com/bittorrent/go-btfs/s3/handlers/responses" "net/http" "time" - "errors" "github.com/bittorrent/go-btfs/s3/action" ) @@ -27,7 +25,7 @@ type AccessKeyService interface { } type AuthService interface { - VerifySignature(ctx context.Context, r *http.Request) (ack *AccessKey, rErr *responses.Error) + VerifySignature(ctx context.Context, r *http.Request) (ack *AccessKey, err error) } type BucketService interface { @@ -48,20 +46,6 @@ type ObjectService interface { type MultipartService interface { } -var ( - ErrSignVersionNotSupport = errors.New("sign version is not support") - ErrInvalidArgument = errors.New("invalid argument") - ErrInvalidBucketName = errors.New("bucket name is invalid") - ErrBucketNotFound = errors.New("bucket is 
not found") - ErrBucketAlreadyExists = errors.New("bucket is already exists") - ErrBucketAccessDenied = errors.New("bucket access denied") - ErrSetBucketEmptyFailed = errors.New("set bucket empty failed") - ErrCreateBucket = errors.New("create bucket failed") - ErrNoSuchUserPolicy = errors.New("no such user policy") - ErrNotImplemented = errors.New("not implemented") - ErrAccessKeyIsNotFound = errors.New("access-key is not found") -) - type AccessKey struct { Key string `json:"key"` Secret string `json:"secret"` From 366a184806befba0d07f2c94347d6ac405c6ae2a Mon Sep 17 00:00:00 2001 From: steve Date: Thu, 24 Aug 2023 04:28:01 +0800 Subject: [PATCH 050/139] optmize: code structure & auth bug --- s3/cctx/access_key.go | 17 + s3/{handlers => }/cctx/cctx.go | 0 s3/{handlers => }/cctx/handle_err.go | 0 s3/handlers/cctx/access_key.go | 17 - s3/handlers/handlers.go | 91 ++-- s3/{handlers => }/requests/parsers.go | 4 +- s3/{handlers => }/requests/types.go | 0 s3/{handlers => }/responses/types.go | 0 s3/{handlers => }/responses/types_common.go | 0 s3/{handlers => }/responses/wirters.go | 4 +- s3/{handlers => }/responses/writers_common.go | 27 +- s3/routers/{routerser.go => proto.go} | 0 s3/routers/routers.go | 2 +- s3/server.go | 2 +- s3/services/accesskey/proto.go | 27 ++ s3/services/accesskey/service.go | 43 +- s3/services/accesskey/service_instance.go | 29 +- s3/services/accesskey/service_options.go | 6 +- s3/services/auth/check_handler_auth.go | 40 +- s3/services/auth/proto.go | 11 + s3/services/auth/service.go | 16 +- s3/services/auth/service_options.go | 2 +- s3/services/auth/signature-v4-parser.go | 69 +-- s3/services/auth/signature-v4-utils.go | 4 +- s3/services/auth/signature-v4.go | 59 ++- s3/services/bucket/proto.go | 29 ++ s3/services/bucket/service.go | 49 +- s3/services/bucket/service_option.go | 2 +- s3/services/cors/proto.go | 7 + s3/services/cors/service.go | 17 +- s3/services/cors/service_options.go | 8 +- s3/services/multipart/proto.go | 4 + 
s3/services/multipart/service.go | 12 +- s3/services/multipart/service_options.go | 2 +- s3/services/proto.go | 68 --- s3/services/{error.go => response_error.go} | 422 +++++++++--------- 36 files changed, 564 insertions(+), 526 deletions(-) create mode 100644 s3/cctx/access_key.go rename s3/{handlers => }/cctx/cctx.go (100%) rename s3/{handlers => }/cctx/handle_err.go (100%) delete mode 100644 s3/handlers/cctx/access_key.go rename s3/{handlers => }/requests/parsers.go (98%) rename s3/{handlers => }/requests/types.go (100%) rename s3/{handlers => }/responses/types.go (100%) rename s3/{handlers => }/responses/types_common.go (100%) rename s3/{handlers => }/responses/wirters.go (95%) rename s3/{handlers => }/responses/writers_common.go (88%) rename s3/routers/{routerser.go => proto.go} (100%) create mode 100644 s3/services/accesskey/proto.go create mode 100644 s3/services/auth/proto.go create mode 100644 s3/services/bucket/proto.go create mode 100644 s3/services/cors/proto.go create mode 100644 s3/services/multipart/proto.go delete mode 100644 s3/services/proto.go rename s3/services/{error.go => response_error.go} (77%) diff --git a/s3/cctx/access_key.go b/s3/cctx/access_key.go new file mode 100644 index 000000000..9e4b600dd --- /dev/null +++ b/s3/cctx/access_key.go @@ -0,0 +1,17 @@ +package cctx + +import ( + "github.com/bittorrent/go-btfs/s3/services/accesskey" + "net/http" +) + +func SetAccessKey(r *http.Request, ack *accesskey.AccessKey) { + set(r, keyOfAccessKey, ack) + return +} + +func GetAccessKey(r *http.Request) (ack *accesskey.AccessKey) { + v := get(r, keyOfAccessKey) + ack, _ = v.(*accesskey.AccessKey) + return +} diff --git a/s3/handlers/cctx/cctx.go b/s3/cctx/cctx.go similarity index 100% rename from s3/handlers/cctx/cctx.go rename to s3/cctx/cctx.go diff --git a/s3/handlers/cctx/handle_err.go b/s3/cctx/handle_err.go similarity index 100% rename from s3/handlers/cctx/handle_err.go rename to s3/cctx/handle_err.go diff --git 
a/s3/handlers/cctx/access_key.go b/s3/handlers/cctx/access_key.go deleted file mode 100644 index 279c92e5a..000000000 --- a/s3/handlers/cctx/access_key.go +++ /dev/null @@ -1,17 +0,0 @@ -package cctx - -import ( - "github.com/bittorrent/go-btfs/s3/services" - "net/http" -) - -func SetAccessKey(r *http.Request, ack *services.AccessKey) { - set(r, keyOfAccessKey, ack) - return -} - -func GetAccessKey(r *http.Request) (ack *services.AccessKey) { - v := get(r, keyOfAccessKey) - ack, _ = v.(*services.AccessKey) - return -} diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 4866bccd1..404a58619 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -3,43 +3,40 @@ package handlers import ( "fmt" - "github.com/bittorrent/go-btfs/s3/handlers/cctx" - "github.com/bittorrent/go-btfs/s3/handlers/requests" - "github.com/bittorrent/go-btfs/s3/handlers/responses" + "github.com/bittorrent/go-btfs/s3/cctx" + "github.com/bittorrent/go-btfs/s3/requests" + "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/services" + "github.com/bittorrent/go-btfs/s3/services/auth" + "github.com/bittorrent/go-btfs/s3/services/bucket" + "github.com/bittorrent/go-btfs/s3/services/cors" "net/http" "runtime" s3action "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/s3utils" - "github.com/rs/cors" + rscors "github.com/rs/cors" ) var _ Handlerser = (*Handlers)(nil) type Handlers struct { - corsSvc services.CorsService - authSvc services.AuthService - bucketSvc services.BucketService - objectSvc services.ObjectService - multipartSvc services.MultipartService + corsSvc cors.Service + authSvc auth.Service + bucketSvc bucket.Service } func NewHandlers( - corsSvc services.CorsService, - authSvc services.AuthService, - bucketSvc services.BucketService, - objectSvc services.ObjectService, - multipartSvc services.MultipartService, + corsSvc cors.Service, + authSvc auth.Service, + 
bucketSvc bucket.Service, options ...Option, ) (handlers *Handlers) { handlers = &Handlers{ - corsSvc: corsSvc, - authSvc: authSvc, - bucketSvc: bucketSvc, - objectSvc: objectSvc, - multipartSvc: multipartSvc, + corsSvc: corsSvc, + authSvc: authSvc, + bucketSvc: bucketSvc, } for _, option := range options { option(handlers) @@ -48,7 +45,7 @@ func NewHandlers( } func (h *Handlers) Cors(handler http.Handler) http.Handler { - return cors.New(cors.Options{ + return rscors.New(rscors.Options{ AllowedOrigins: h.corsSvc.GetAllowOrigins(), AllowedMethods: h.corsSvc.GetAllowMethods(), AllowedHeaders: h.corsSvc.GetAllowHeaders(), @@ -57,23 +54,33 @@ func (h *Handlers) Cors(handler http.Handler) http.Handler { }).Handler(handler) } +func (h *Handlers) Log(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Printf("[REQ] %4s | %s\n", r.Method, r.URL) + handler.ServeHTTP(w, r) + hname, herr := cctx.GetHandleInf(r) + fmt.Printf("[RSP] %4s | %s | %s | %v\n", r.Method, r.URL, hname, herr) + }) +} + func (h *Handlers) Auth(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var err error + defer func() { + if err != nil { + cctx.SetHandleInf(r, fnName(), err) + } + }() + ack, err := h.authSvc.VerifySignature(r.Context(), r) if err != nil { responses.WriteErrorResponse(w, r, err) return } + cctx.SetAccessKey(r, ack) - handler.ServeHTTP(w, r) - }) -} -func (h *Handlers) Log(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { handler.ServeHTTP(w, r) - hname, herr := cctx.GetHandleInf(r) - fmt.Printf("[%-4s] %s | %s | %v\n", r.Method, r.URL, hname, herr) }) } @@ -85,32 +92,32 @@ func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { req, err := requests.ParsePubBucketRequest(r) if err != nil { - responses.WriteErrorResponse(w, r, services.ErrInvalidRequestBody) + 
responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestBody) return } ctx := r.Context() if err = s3utils.CheckValidBucketNameStrict(req.Bucket); err != nil { - responses.WriteErrorResponse(w, r, services.ErrInvalidBucketName) + responses.WriteErrorResponse(w, r, services.RespErrInvalidBucketName) return } if !requests.CheckAclPermissionType(&req.ACL) { - err = services.ErrNotImplemented - responses.WriteErrorResponse(w, r, services.ErrNotImplemented) + err = services.RespErrNotImplemented + responses.WriteErrorResponse(w, r, services.RespErrNotImplemented) return } if ok := h.bucketSvc.HasBucket(ctx, req.Bucket); ok { - err = services.ErrBucketAlreadyExists - responses.WriteErrorResponseHeadersOnly(w, r, services.ErrBucketAlreadyExists) + err = services.RespErrBucketAlreadyExists + responses.WriteErrorResponseHeadersOnly(w, r, services.RespErrBucketAlreadyExists) return } err = h.bucketSvc.CreateBucket(ctx, req.Bucket, req.Region, cctx.GetAccessKey(r).Key, req.ACL) if err != nil { - responses.WriteErrorResponse(w, r, services.ErrInternalError) + responses.WriteErrorResponse(w, r, services.RespErrInternalError) return } @@ -132,7 +139,7 @@ func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { req, err := requests.ParseHeadBucketRequest(r) if err != nil { - responses.WriteErrorResponse(w, r, services.ErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestBody) return } @@ -146,7 +153,7 @@ func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { } if ok := h.bucketSvc.HasBucket(ctx, req.Bucket); !ok { - responses.WriteErrorResponseHeadersOnly(w, r, services.ErrNoSuchBucket) + responses.WriteErrorResponseHeadersOnly(w, r, services.RespErrNoSuchBucket) return } @@ -162,7 +169,7 @@ func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { req := &requests.DeleteBucketRequest{} err = req.Bind(r) if err != nil { - responses.WriteErrorResponse(w, r, 
services.ErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestBody) return } @@ -194,7 +201,7 @@ func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { req := &requests.ListBucketsRequest{} err = req.Bind(r) if err != nil { - responses.WriteErrorResponse(w, r, services.ErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestBody) return } @@ -225,7 +232,7 @@ func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { req := &requests.GetBucketAclRequest{} err = req.Bind(r) if err != nil { - responses.WriteErrorResponse(w, r, services.ErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestBody) return } @@ -239,7 +246,7 @@ func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { } if !h.bucketSvc.HasBucket(ctx, req.Bucket) { - responses.WriteErrorResponseHeadersOnly(w, r, services.ErrNoSuchBucket) + responses.WriteErrorResponseHeadersOnly(w, r, services.RespErrNoSuchBucket) return } //todo check all errors @@ -261,7 +268,7 @@ func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { req := &requests.PutBucketAclRequest{} err = req.Bind(r) if err != nil || len(req.ACL) == 0 || len(req.Bucket) == 0 { - responses.WriteErrorResponse(w, r, services.ErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestBody) return } @@ -275,7 +282,7 @@ func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { } if !requests.CheckAclPermissionType(&req.ACL) { - responses.WriteErrorResponse(w, r, services.ErrNotImplemented) + responses.WriteErrorResponse(w, r, services.RespErrNotImplemented) return } diff --git a/s3/handlers/requests/parsers.go b/s3/requests/parsers.go similarity index 98% rename from s3/handlers/requests/parsers.go rename to s3/requests/parsers.go index a086ef023..ca2beccb6 100644 --- a/s3/handlers/requests/parsers.go 
+++ b/s3/requests/parsers.go @@ -117,7 +117,7 @@ func (req *PutBucketAclRequest) Bind(r *http.Request) (err error) { /*********************************/ // Parses location constraint from the incoming reader. -func parseLocationConstraint(r *http.Request) (location string, s3Error *services.Error) { +func parseLocationConstraint(r *http.Request) (location string, s3Error *services.ResponseError) { // If the request has no body with content-length set to 0, // we do not have to validate location constraint. Bucket will // be created at default region. @@ -125,7 +125,7 @@ func parseLocationConstraint(r *http.Request) (location string, s3Error *service err := utils.XmlDecoder(r.Body, &locationConstraint, r.ContentLength) if err != nil && r.ContentLength != 0 { // Treat all other failures as XML parsing errors. - return "", services.ErrMalformedXML + return "", services.RespErrMalformedXML } // else for both err as nil or io.EOF location = locationConstraint.Location if location == "" { diff --git a/s3/handlers/requests/types.go b/s3/requests/types.go similarity index 100% rename from s3/handlers/requests/types.go rename to s3/requests/types.go diff --git a/s3/handlers/responses/types.go b/s3/responses/types.go similarity index 100% rename from s3/handlers/responses/types.go rename to s3/responses/types.go diff --git a/s3/handlers/responses/types_common.go b/s3/responses/types_common.go similarity index 100% rename from s3/handlers/responses/types_common.go rename to s3/responses/types_common.go diff --git a/s3/handlers/responses/wirters.go b/s3/responses/wirters.go similarity index 95% rename from s3/handlers/responses/wirters.go rename to s3/responses/wirters.go index c59f4599d..43dce5c38 100644 --- a/s3/handlers/responses/wirters.go +++ b/s3/responses/wirters.go @@ -2,7 +2,7 @@ package responses import ( "fmt" - "github.com/bittorrent/go-btfs/s3/services" + "github.com/bittorrent/go-btfs/s3/services/bucket" "net/http" "github.com/aws/aws-sdk-go/aws" @@ -25,7 +25,7 
@@ func WriteDeleteBucketResponse(w http.ResponseWriter) { return } -func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, bucketMetas []*services.BucketMetadata) { +func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, bucketMetas []*bucket.Bucket) { var buckets []*s3.Bucket for _, b := range bucketMetas { buckets = append(buckets, &s3.Bucket{ diff --git a/s3/handlers/responses/writers_common.go b/s3/responses/writers_common.go similarity index 88% rename from s3/handlers/responses/writers_common.go rename to s3/responses/writers_common.go index a74481f9a..1563eedf3 100644 --- a/s3/handlers/responses/writers_common.go +++ b/s3/responses/writers_common.go @@ -29,7 +29,7 @@ const ( // APIErrorResponse - error response format type APIErrorResponse struct { - XMLName xml.Name `xml:"Error" json:"-"` + XMLName xml.Name `xml:"ResponseError" json:"-"` Code string Message string Resource string @@ -47,19 +47,30 @@ type RESTErrorResponse struct { BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"` } +func getRESTErrorResponse(err *services.ResponseError, resource string, bucket, object string) RESTErrorResponse { + return RESTErrorResponse{ + Code: err.Code(), + BucketName: bucket, + Key: object, + Message: err.Description(), + Resource: resource, + RequestID: fmt.Sprintf("%d", time.Now().UnixNano()), + } +} + func WriteErrorResponseHeadersOnly(w http.ResponseWriter, r *http.Request, err error) { - var rerr *services.Error + var rerr *services.ResponseError if !errors.As(err, &rerr) { - rerr = services.ErrInternalError + rerr = services.RespErrInternalError } writeResponse(w, r, rerr.HTTPStatusCode(), nil, mimeNone) } // WriteErrorResponse write ErrorResponse func WriteErrorResponse(w http.ResponseWriter, r *http.Request, err error) { - var rerr *services.Error + var rerr *services.ResponseError if !errors.As(err, &rerr) { - rerr = services.ErrInternalError + rerr = services.RespErrInternalError } vars := mux.Vars(r) 
bucket := vars["bucket"] @@ -131,9 +142,9 @@ func encodeXMLResponse(response interface{}) []byte { // WriteErrorResponseJSON - writes error response in JSON format; // useful for admin APIs. func WriteErrorResponseJSON(w http.ResponseWriter, err error, reqURL *url.URL, host string) { - var rerr *services.Error + var rerr *services.ResponseError if !errors.As(err, &rerr) { - rerr = services.ErrInternalError + rerr = services.RespErrInternalError } // Generate error response. errorResponse := getAPIErrorResponse(rerr, reqURL.Path, w.Header().Get(consts.AmzRequestID), host) @@ -143,7 +154,7 @@ func WriteErrorResponseJSON(w http.ResponseWriter, err error, reqURL *url.URL, h // getErrorResponse gets in standard error and resource value and // provides a encodable populated response values -func getAPIErrorResponse(err *services.Error, resource, requestID, hostID string) APIErrorResponse { +func getAPIErrorResponse(err *services.ResponseError, resource, requestID, hostID string) APIErrorResponse { return APIErrorResponse{ Code: err.Code(), Message: err.Description(), diff --git a/s3/routers/routerser.go b/s3/routers/proto.go similarity index 100% rename from s3/routers/routerser.go rename to s3/routers/proto.go diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 2beb81551..a07f7b0af 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -25,8 +25,8 @@ func (routers *Routers) Register() http.Handler { root.Use( routers.handlers.Cors, - routers.handlers.Auth, routers.handlers.Log, + routers.handlers.Auth, ) bucket := root.PathPrefix("/{bucket}").Subrouter() diff --git a/s3/server.go b/s3/server.go index 5271b609a..c8b569256 100644 --- a/s3/server.go +++ b/s3/server.go @@ -38,7 +38,7 @@ func NewServer(storageStore storage.StateStorer) *server.Server { bucketSvc := bucket.NewService(ps) // handlers - hs := handlers.NewHandlers(corsSvc, authSvc, bucketSvc, nil, nil) + hs := handlers.NewHandlers(corsSvc, authSvc, bucketSvc) // routers rs := 
routers.NewRouters(hs) diff --git a/s3/services/accesskey/proto.go b/s3/services/accesskey/proto.go new file mode 100644 index 000000000..40535f0b2 --- /dev/null +++ b/s3/services/accesskey/proto.go @@ -0,0 +1,27 @@ +package accesskey + +import ( + "errors" + "time" +) + +var ErrNotFound = errors.New("not found") + +type Service interface { + Generate() (record *AccessKey, err error) + Enable(key string) (err error) + Disable(key string) (err error) + Reset(key string) (err error) + Delete(key string) (err error) + Get(key string) (ack *AccessKey, err error) + List() (list []*AccessKey, err error) +} + +type AccessKey struct { + Key string `json:"key"` + Secret string `json:"secret"` + Enable bool `json:"enable"` + IsDeleted bool `json:"is_deleted"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} diff --git a/s3/services/accesskey/service.go b/s3/services/accesskey/service.go index e8b8c90d4..219a6d962 100644 --- a/s3/services/accesskey/service.go +++ b/s3/services/accesskey/service.go @@ -5,7 +5,6 @@ import ( "errors" "github.com/bittorrent/go-btfs/s3/ctxmu" "github.com/bittorrent/go-btfs/s3/providers" - "github.com/bittorrent/go-btfs/s3/services" "github.com/bittorrent/go-btfs/transaction/storage" "github.com/bittorrent/go-btfs/utils" "github.com/google/uuid" @@ -18,9 +17,9 @@ const ( defaultUpdateTimeoutMS = 200 ) -var _ services.AccessKeyService = (*Service)(nil) +var _ Service = (*service)(nil) -type Service struct { +type service struct { providers providers.Providerser secretLength int storeKeyPrefix string @@ -28,8 +27,8 @@ type Service struct { updateTimeout time.Duration } -func NewService(providers providers.Providerser, options ...Option) (svc *Service) { - svc = &Service{ +func NewService(providers providers.Providerser, options ...Option) Service { + svc := &service{ providers: providers, secretLength: defaultSecretLength, storeKeyPrefix: defaultStoreKeyPrefix, @@ -42,9 +41,9 @@ func NewService(providers 
providers.Providerser, options ...Option) (svc *Servic return svc } -func (svc *Service) Generate() (record *services.AccessKey, err error) { +func (svc *service) Generate() (record *AccessKey, err error) { now := time.Now() - record = &services.AccessKey{ + record = &AccessKey{ Key: svc.newKey(), Secret: svc.newSecret(), Enable: true, @@ -56,7 +55,7 @@ func (svc *Service) Generate() (record *services.AccessKey, err error) { return } -func (svc *Service) Enable(key string) (err error) { +func (svc *service) Enable(key string) (err error) { enable := true err = svc.update(key, &updateArgs{ Enable: &enable, @@ -64,7 +63,7 @@ func (svc *Service) Enable(key string) (err error) { return } -func (svc *Service) Disable(key string) (err error) { +func (svc *service) Disable(key string) (err error) { enable := false err = svc.update(key, &updateArgs{ Enable: &enable, @@ -72,7 +71,7 @@ func (svc *Service) Disable(key string) (err error) { return } -func (svc *Service) Reset(key string) (err error) { +func (svc *service) Reset(key string) (err error) { secret := svc.newSecret() err = svc.update(key, &updateArgs{ Secret: &secret, @@ -80,7 +79,7 @@ func (svc *Service) Reset(key string) (err error) { return } -func (svc *Service) Delete(key string) (err error) { +func (svc *service) Delete(key string) (err error) { isDelete := true err = svc.update(key, &updateArgs{ IsDelete: &isDelete, @@ -88,21 +87,21 @@ func (svc *Service) Delete(key string) (err error) { return } -func (svc *Service) Get(key string) (ack *services.AccessKey, err error) { - ack = &services.AccessKey{} +func (svc *service) Get(key string) (ack *AccessKey, err error) { + ack = &AccessKey{} err = svc.providers.GetStateStore().Get(svc.getStoreKey(key), ack) if err != nil && !errors.Is(err, providers.ErrStateStoreNotFound) { return } if errors.Is(err, providers.ErrStateStoreNotFound) || ack.IsDeleted { - err = services.ErrAccessKeyNotFound + err = ErrNotFound } return } -func (svc *Service) List() (list 
[]*services.AccessKey, err error) { +func (svc *service) List() (list []*AccessKey, err error) { err = svc.providers.GetStateStore().Iterate(svc.storeKeyPrefix, func(key, _ []byte) (stop bool, er error) { - record := &services.AccessKey{} + record := &AccessKey{} er = svc.providers.GetStateStore().Get(string(key), record) if er != nil { return @@ -116,17 +115,17 @@ func (svc *Service) List() (list []*services.AccessKey, err error) { return } -func (svc *Service) newKey() (key string) { +func (svc *service) newKey() (key string) { key = uuid.NewString() return } -func (svc *Service) newSecret() (secret string) { +func (svc *service) newSecret() (secret string) { secret = utils.RandomString(svc.secretLength) return } -func (svc *Service) getStoreKey(key string) (storeKey string) { +func (svc *service) getStoreKey(key string) (storeKey string) { storeKey = svc.storeKeyPrefix + key return } @@ -137,7 +136,7 @@ type updateArgs struct { IsDelete *bool } -func (svc *Service) update(key string, args *updateArgs) (err error) { +func (svc *service) update(key string, args *updateArgs) (err error) { ctx, cancel := context.WithTimeout(context.Background(), svc.updateTimeout) defer cancel() @@ -147,7 +146,7 @@ func (svc *Service) update(key string, args *updateArgs) (err error) { } defer svc.locks.Unlock(key) - record := &services.AccessKey{} + record := &AccessKey{} stk := svc.getStoreKey(key) err = svc.providers.GetStateStore().Get(stk, record) @@ -155,7 +154,7 @@ func (svc *Service) update(key string, args *updateArgs) (err error) { return } if errors.Is(err, storage.ErrNotFound) || record.IsDeleted { - err = services.ErrAccessKeyNotFound + err = ErrNotFound return } diff --git a/s3/services/accesskey/service_instance.go b/s3/services/accesskey/service_instance.go index ab9d20fc5..5499e9405 100644 --- a/s3/services/accesskey/service_instance.go +++ b/s3/services/accesskey/service_instance.go @@ -2,48 +2,47 @@ package accesskey import ( 
"github.com/bittorrent/go-btfs/s3/providers" - "github.com/bittorrent/go-btfs/s3/services" "sync" ) -var service *Service +var svcInstance Service var once sync.Once func InitService(providers providers.Providerser, options ...Option) { once.Do(func() { - service = NewService(providers, options...) + svcInstance = NewService(providers, options...) }) } -func GetService() *Service { - return service +func GetServiceInstance() Service { + return svcInstance } -func Generate() (ack *services.AccessKey, err error) { - return service.Generate() +func Generate() (ack *AccessKey, err error) { + return svcInstance.Generate() } func Enable(key string) (err error) { - return service.Enable(key) + return svcInstance.Enable(key) } func Disable(key string) (err error) { - return service.Disable(key) + return svcInstance.Disable(key) } func Reset(key string) (err error) { - return service.Reset(key) + return svcInstance.Reset(key) } func Delete(key string) (err error) { - return service.Delete(key) + return svcInstance.Delete(key) } -func Get(key string) (record *services.AccessKey, err error) { - return service.Get(key) +func Get(key string) (record *AccessKey, err error) { + return svcInstance.Get(key) } -func List() (list []*services.AccessKey, err error) { - return service.List() +func List() (list []*AccessKey, err error) { + return svcInstance.List() } diff --git a/s3/services/accesskey/service_options.go b/s3/services/accesskey/service_options.go index d13493b8a..25f1617a6 100644 --- a/s3/services/accesskey/service_options.go +++ b/s3/services/accesskey/service_options.go @@ -1,15 +1,15 @@ package accesskey -type Option func(svc *Service) +type Option func(svc *service) func WithSecretLength(length int) Option { - return func(svc *Service) { + return func(svc *service) { svc.secretLength = length } } func WithStoreKeyPrefix(prefix string) Option { - return func(svc *Service) { + return func(svc *service) { svc.storeKeyPrefix = prefix } } diff --git 
a/s3/services/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go index f724e220e..58ad4b835 100644 --- a/s3/services/auth/check_handler_auth.go +++ b/s3/services/auth/check_handler_auth.go @@ -4,6 +4,7 @@ import ( "context" "encoding/hex" "github.com/bittorrent/go-btfs/s3/services" + "github.com/bittorrent/go-btfs/s3/services/accesskey" "net/http" "github.com/bittorrent/go-btfs/s3/consts" @@ -18,24 +19,21 @@ import ( // // returns APIErrorcode if any to be replied to the client. // Additionally, returns the accessKey used in the request, and if this request is by an admin. -func (s *Service) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request) (cred *services.AccessKey, err error) { - // check signature +func (s *service) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request) (ack *accesskey.AccessKey, err error) { switch GetRequestAuthType(r) { + case AuthTypeAnonymous: + ack = new(accesskey.AccessKey) + return case AuthTypeSigned, AuthTypePresigned: - region := "" - if err = s.IsReqAuthenticated(ctx, r, region, ServiceS3); err != nil { - return - } - cred, err = s.getReqAccessKeyV4(r, region, ServiceS3) + ack, err = s.IsReqAuthenticated(ctx, r, "", ServiceS3) + return default: - err = services.ErrSignatureVersionNotSupported + err = services.RespErrSignatureVersionNotSupported return } - - return } -func (s *Service) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) error { +func (s *service) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (ack *accesskey.AccessKey, err error) { sha256sum := getContentSha256Cksum(r, stype) switch { case IsRequestSignatureV4(r): @@ -43,18 +41,20 @@ func (s *Service) ReqSignatureV4Verify(r *http.Request, region string, stype ser case isRequestPresignedSignatureV4(r): return s.doesPresignedSignatureMatch(sha256sum, r, region, stype) default: - return services.ErrAccessDenied + return nil, services.RespErrAccessDenied } } // 
IsReqAuthenticated Verify if request has valid AWS Signature Version '4'. -func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (err error) { - if err = s.ReqSignatureV4Verify(r, region, stype); err != nil { +func (s *service) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (ack *accesskey.AccessKey, err error) { + ack, err = s.ReqSignatureV4Verify(r, region, stype) + if err != nil { return } + clientETag, err := etag.FromContentMD5(r.Header) if err != nil { - err = services.ErrInvalidDigest + err = services.RespErrInvalidDigest return } @@ -65,14 +65,14 @@ func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio if sha256Sum, ok := r.Form[consts.AmzContentSha256]; ok && len(sha256Sum) > 0 { contentSHA256, err = hex.DecodeString(sha256Sum[0]) if err != nil { - err = services.ErrContentSHA256Mismatch + err = services.RespErrContentSHA256Mismatch return } } } else if _, ok := r.Header[consts.AmzContentSha256]; !skipSHA256 && ok { contentSHA256, err = hex.DecodeString(r.Header.Get(consts.AmzContentSha256)) if err != nil || len(contentSHA256) == 0 { - err = services.ErrContentSHA256Mismatch + err = services.RespErrContentSHA256Mismatch return } } @@ -81,7 +81,7 @@ func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio // The verification happens implicit during reading. 
reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1) if err != nil { - err = services.ErrInternalError + err = services.RespErrInternalError return } r.Body = reader @@ -89,7 +89,7 @@ func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio } //// ValidateAdminSignature validate admin Signature -//func (s *Service) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (Credentials, map[string]interface{}, bool, handlers.Errorcode) { +//func (s *service) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (Credentials, map[string]interface{}, bool, handlers.Errorcode) { // var cred Credentials // var owner bool // s3Err := handlers.ErrcodeAccessDenied @@ -111,7 +111,7 @@ func (s *Service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio // return cred, nil, owner, handlers.ErrcodeNone //} //// -//func (s *Service) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err handlers.Errorcode) { +//func (s *service) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err handlers.Errorcode) { // switch GetRequestAuthType(r) { // case AuthTypeUnknown: // s3Err = handlers.ErrcodeSignatureVersionNotSupported diff --git a/s3/services/auth/proto.go b/s3/services/auth/proto.go new file mode 100644 index 000000000..36d7fbb03 --- /dev/null +++ b/s3/services/auth/proto.go @@ -0,0 +1,11 @@ +package auth + +import ( + "context" + "github.com/bittorrent/go-btfs/s3/services/accesskey" + "net/http" +) + +type Service interface { + VerifySignature(ctx context.Context, r *http.Request) (ack *accesskey.AccessKey, err error) +} diff --git a/s3/services/auth/service.go b/s3/services/auth/service.go index f883dad2a..ed7102ff3 100644 --- a/s3/services/auth/service.go +++ b/s3/services/auth/service.go @@ -3,28 +3,28 @@ package auth import ( "context" "github.com/bittorrent/go-btfs/s3/providers" - 
"github.com/bittorrent/go-btfs/s3/services" + "github.com/bittorrent/go-btfs/s3/services/accesskey" "net/http" ) -var _ services.AuthService = (*Service)(nil) +var _ Service = (*service)(nil) -type Service struct { +type service struct { providers providers.Providerser - accessKeySvc services.AccessKeyService + accessKeySvc accesskey.Service } -func NewService(providers providers.Providerser, accessKeySvc services.AccessKeyService, options ...Option) (svc *Service) { - svc = &Service{ +func NewService(providers providers.Providerser, accessKeySvc accesskey.Service, options ...Option) Service { + svc := &service{ providers: providers, accessKeySvc: accessKeySvc, } for _, option := range options { option(svc) } - return + return svc } -func (s *Service) VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *services.AccessKey, err error) { +func (s *service) VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *accesskey.AccessKey, err error) { return s.CheckRequestAuthTypeCredential(ctx, r) } diff --git a/s3/services/auth/service_options.go b/s3/services/auth/service_options.go index 7b0e351fd..fb9830f04 100644 --- a/s3/services/auth/service_options.go +++ b/s3/services/auth/service_options.go @@ -1,3 +1,3 @@ package auth -type Option func(svc *Service) +type Option func(svc *service) diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go index 28f524650..acc36b557 100644 --- a/s3/services/auth/signature-v4-parser.go +++ b/s3/services/auth/signature-v4-parser.go @@ -18,7 +18,9 @@ package auth import ( + "errors" "github.com/bittorrent/go-btfs/s3/services" + "github.com/bittorrent/go-btfs/s3/services/accesskey" "net/http" "net/url" "strings" @@ -53,14 +55,14 @@ func (c credentialHeader) getScope() string { func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, err error) { creds := strings.SplitN(strings.TrimSpace(credElement), "=", 2) if 
len(creds) != 2 { - return ch, services.ErrMissingFields + return ch, services.RespErrMissingFields } if creds[0] != "Credential" { - return ch, services.ErrMissingCredTag + return ch, services.RespErrMissingCredTag } credElements := strings.Split(strings.TrimSpace(creds[1]), consts.SlashSeparator) if len(credElements) < 5 { - return ch, services.ErrCredMalformed + return ch, services.RespErrCredMalformed } accessKey := strings.Join(credElements[:len(credElements)-4], consts.SlashSeparator) // The access key may contain one or more `/` //if !IsAccessKeyValid(accessKey) { @@ -74,7 +76,7 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) var e error cred.scope.date, e = time.Parse(yyyymmdd, credElements[0]) if e != nil { - return ch, services.ErrAuthorizationHeaderMalformed + return ch, services.RespErrAuthorizationHeaderMalformed } cred.scope.region = credElements[1] @@ -89,18 +91,18 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) } // Should validate region, only if region is set. 
if !isValidRegion(sRegion, region) { - return ch, services.ErrAuthorizationHeaderMalformed + return ch, services.RespErrAuthorizationHeaderMalformed } if credElements[2] != string(stype) { //switch stype { //case ServiceSTS: // return ch, handlers.ErrcodeAuthorizationHeaderMalformed //} - return ch, services.ErrAuthorizationHeaderMalformed + return ch, services.RespErrAuthorizationHeaderMalformed } cred.scope.service = credElements[2] if credElements[3] != "aws4_request" { - return ch, services.ErrAuthorizationHeaderMalformed + return ch, services.RespErrAuthorizationHeaderMalformed } cred.scope.request = credElements[3] return cred, nil @@ -110,13 +112,13 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) func parseSignature(signElement string) (string, error) { signFields := strings.Split(strings.TrimSpace(signElement), "=") if len(signFields) != 2 { - return "", services.ErrMissingFields + return "", services.RespErrMissingFields } if signFields[0] != "Signature" { - return "", services.ErrMissingSignTag + return "", services.RespErrMissingSignTag } if signFields[1] == "" { - return "", services.ErrMissingFields + return "", services.RespErrMissingFields } signature := signFields[1] return signature, nil @@ -126,13 +128,13 @@ func parseSignature(signElement string) (string, error) { func parseSignedHeader(signedHdrElement string) ([]string, error) { signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") if len(signedHdrFields) != 2 { - return nil, services.ErrMissingFields + return nil, services.RespErrMissingFields } if signedHdrFields[0] != "SignedHeaders" { - return nil, services.ErrMissingSignHeadersTag + return nil, services.RespErrMissingSignHeadersTag } if signedHdrFields[1] == "" { - return nil, services.ErrMissingFields + return nil, services.RespErrMissingFields } signedHeaders := strings.Split(signedHdrFields[1], ";") return signedHeaders, nil @@ -166,7 +168,7 @@ func doesV4PresignParamsExist(query 
url.Values) error { v4PresignQueryParams := []string{consts.AmzAlgorithm, consts.AmzCredential, consts.AmzSignature, consts.AmzDate, consts.AmzSignedHeaders, consts.AmzExpires} for _, v4PresignQueryParam := range v4PresignQueryParams { if _, ok := query[v4PresignQueryParam]; !ok { - return services.ErrInvalidQueryParams + return services.RespErrInvalidQueryParams } } return nil @@ -182,7 +184,7 @@ func parsePreSignV4(query url.Values, region string, stype serviceType) (psv pre // Verify if the query algorithm is supported or not. if query.Get(consts.AmzAlgorithm) != signV4Algorithm { - return psv, services.ErrAuthorizationHeaderMalformed + return psv, services.RespErrAuthorizationHeaderMalformed } // Initialize signature version '4' structured header. @@ -197,22 +199,22 @@ func parsePreSignV4(query url.Values, region string, stype serviceType) (psv pre // Save date in native time.Time. preSignV4Values.Date, err = time.Parse(iso8601Format, query.Get(consts.AmzDate)) if err != nil { - return psv, services.ErrAuthorizationHeaderMalformed + return psv, services.RespErrAuthorizationHeaderMalformed } // Save expires in native time.Duration. preSignV4Values.Expires, err = time.ParseDuration(query.Get(consts.AmzExpires) + "s") if err != nil { - return psv, services.ErrAuthorizationHeaderMalformed + return psv, services.RespErrAuthorizationHeaderMalformed } if preSignV4Values.Expires < 0 { - return psv, services.ErrAuthorizationHeaderMalformed + return psv, services.RespErrAuthorizationHeaderMalformed } // Check if Expiry time is less than 7 days (value in seconds). if preSignV4Values.Expires.Seconds() > 604800 { - return psv, services.ErrAuthorizationHeaderMalformed + return psv, services.RespErrAuthorizationHeaderMalformed } // Save signed headers. @@ -243,19 +245,19 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues // to make parsing easier. 
v4Auth = strings.ReplaceAll(v4Auth, " ", "") if v4Auth == "" { - return sv, services.ErrAuthHeaderEmpty + return sv, services.RespErrAuthHeaderEmpty } // Verify if the header algorithm is supported or not. if !strings.HasPrefix(v4Auth, signV4Algorithm) { - return sv, services.ErrSignatureVersionNotSupported + return sv, services.RespErrSignatureVersionNotSupported } // Strip off the Algorithm prefix. v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - return sv, services.ErrMissingFields + return sv, services.RespErrMissingFields } // Initialize signature version '4' structured header. @@ -283,25 +285,34 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues return signV4Values, nil } -func (s *Service) getReqAccessKeyV4(r *http.Request, region string, stype serviceType) (*services.AccessKey, error) { +func (s *service) getReqAccessKeyV4(r *http.Request, region string, stype serviceType) (ack *accesskey.AccessKey, err error) { ch, err := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) if err != nil { // Strip off the Algorithm prefix. v4Auth := strings.TrimPrefix(r.Header.Get("Authorization"), signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - return &services.AccessKey{}, services.ErrMissingFields + err = services.RespErrMissingFields + return } ch, err = parseCredentialHeader(authFields[0], region, stype) if err != nil { - return &services.AccessKey{}, err + return } } - // check accessKey. 
- record, err := s.accessKeySvc.Get(ch.accessKey) + ack, err = s.accessKeySvc.Get(ch.accessKey) + if errors.Is(err, accesskey.ErrNotFound) { + err = services.RespErrInvalidAccessKeyID + return + } if err != nil { - return &services.AccessKey{}, services.ErrNoSuchUserPolicy + return + } + if !ack.Enable { + err = services.RespErrAccessKeyDisabled + return } - return record, nil + + return } diff --git a/s3/services/auth/signature-v4-utils.go b/s3/services/auth/signature-v4-utils.go index 0d65f28d2..c59f54b96 100644 --- a/s3/services/auth/signature-v4-utils.go +++ b/s3/services/auth/signature-v4-utils.go @@ -66,7 +66,7 @@ func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, // find whether "host" is part of list of signed headers. // if not return ErrcodeUnsignedHeaders. "host" is mandatory. if !contains(signedHeaders, "host") { - return nil, services.ErrUnsignedHeaders + return nil, services.RespErrUnsignedHeaders } extractedSignedHeaders := make(http.Header) for _, header := range signedHeaders { @@ -116,7 +116,7 @@ func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, // calculation to be compatible with such clients. 
extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) default: - return nil, services.ErrUnsignedHeaders + return nil, services.RespErrUnsignedHeaders } } return extractedSignedHeaders, nil diff --git a/s3/services/auth/signature-v4.go b/s3/services/auth/signature-v4.go index 86d7fc0f3..deebf1ba0 100644 --- a/s3/services/auth/signature-v4.go +++ b/s3/services/auth/signature-v4.go @@ -19,7 +19,9 @@ package auth import ( "crypto/subtle" + "errors" "github.com/bittorrent/go-btfs/s3/services" + "github.com/bittorrent/go-btfs/s3/services/accesskey" "net/http" "net/url" "strconv" @@ -58,7 +60,7 @@ func compareSignatureV4(sig1, sig2 string) bool { // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html // // returns handlers.ErrcodeNone if the signature matches. -func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) (err error) { +func (s *service) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) (ack *accesskey.AccessKey, err error) { // Copy request req := *r @@ -68,10 +70,16 @@ func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ return } - // get access_info by accessKey - cred, err := s.accessKeySvc.Get(pSignValues.Credential.accessKey) + // Check accesskey + ack, err = s.accessKeySvc.Get(pSignValues.Credential.accessKey) + if errors.Is(err, accesskey.ErrNotFound) { + err = services.RespErrInvalidAccessKeyID + } if err != nil { - err = services.ErrNoSuchUserPolicy + return + } + if !ack.Enable { + err = services.RespErrAccessKeyDisabled return } @@ -84,12 +92,12 @@ func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // If the host which signed the request is slightly ahead in time (by less than MaxSkewTime) the // request should still be allowed. 
if pSignValues.Date.After(time.Now().UTC().Add(consts.MaxSkewTime)) { - err = services.ErrRequestNotReadyYet + err = services.RespErrRequestNotReadyYet return } if time.Now().UTC().Sub(pSignValues.Date) > pSignValues.Expires { - err = services.ErrExpiredPresignRequest + err = services.RespErrExpiredPresignRequest return } @@ -116,7 +124,7 @@ func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ query.Set(consts.AmzDate, t.Format(iso8601Format)) query.Set(consts.AmzExpires, strconv.Itoa(expireSeconds)) query.Set(consts.AmzSignedHeaders, utils.GetSignedHeaders(extractedSignedHeaders)) - query.Set(consts.AmzCredential, cred.Key+consts.SlashSeparator+pSignValues.Credential.getScope()) + query.Set(consts.AmzCredential, ack.Key+consts.SlashSeparator+pSignValues.Credential.getScope()) defaultSigParams := set.CreateStringSet( consts.AmzContentSha256, @@ -141,26 +149,26 @@ func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Verify if date query is same. if req.Form.Get(consts.AmzDate) != query.Get(consts.AmzDate) { - err = services.ErrSignatureDoesNotMatch + err = services.RespErrSignatureDoesNotMatch } // Verify if expires query is same. if req.Form.Get(consts.AmzExpires) != query.Get(consts.AmzExpires) { - err = services.ErrSignatureDoesNotMatch + err = services.RespErrSignatureDoesNotMatch return } // Verify if signed headers query is same. if req.Form.Get(consts.AmzSignedHeaders) != query.Get(consts.AmzSignedHeaders) { - err = services.ErrSignatureDoesNotMatch + err = services.RespErrSignatureDoesNotMatch return } // Verify if credential query is same. if req.Form.Get(consts.AmzCredential) != query.Get(consts.AmzCredential) { - err = services.ErrSignatureDoesNotMatch + err = services.RespErrSignatureDoesNotMatch return } // Verify if sha256 payload query is same. 
if clntHashedPayload != "" && clntHashedPayload != query.Get(consts.AmzContentSha256) { - err = services.ErrContentSHA256Mismatch + err = services.RespErrContentSHA256Mismatch return } // not check SessionToken. @@ -178,7 +186,7 @@ func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ presignedStringToSign := utils.GetStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope()) // Get hmac presigned signing key. - presignedSigningKey := utils.GetSigningKey(cred.Secret, pSignValues.Credential.scope.date, + presignedSigningKey := utils.GetSigningKey(ack.Secret, pSignValues.Credential.scope.date, pSignValues.Credential.scope.region, string(stype)) // Get new signature. @@ -186,7 +194,7 @@ func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Verify signature. if !compareSignatureV4(req.Form.Get(consts.AmzSignature), newSignature) { - err = services.ErrSignatureDoesNotMatch + err = services.RespErrSignatureDoesNotMatch return } @@ -195,7 +203,7 @@ func (s *Service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // DoesSignatureMatch - Verify authorization header with calculated header in accordance with // - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html -func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) (err error) { +func (s *service) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) (ack *accesskey.AccessKey, err error) { // Copy request. 
req := *r @@ -214,21 +222,24 @@ func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, regi return } - cred, err := s.accessKeySvc.Get(signV4Values.Credential.accessKey) - if err == services.ErrAccessKeyNotFound { - err = services.ErrNoSuchUserPolicy - return + // Check accesskey + ack, err = s.accessKeySvc.Get(signV4Values.Credential.accessKey) + if errors.Is(err, accesskey.ErrNotFound) { + err = services.RespErrInvalidAccessKeyID } - if err != nil { return } + if !ack.Enable { + err = services.RespErrAccessKeyDisabled + return + } // Extract date, if not present throw error. var date string if date = req.Header.Get(consts.AmzDate); date == "" { if date = r.Header.Get(consts.Date); date == "" { - err = services.ErrMissingDateHeader + err = services.RespErrMissingDateHeader return } } @@ -236,7 +247,7 @@ func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, regi // Parse date header. t, err := time.Parse(iso8601Format, date) if err != nil { - err = services.ErrAuthorizationHeaderMalformed + err = services.RespErrAuthorizationHeaderMalformed return } @@ -250,7 +261,7 @@ func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, regi stringToSign := utils.GetStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) // Get hmac signing key. - signingKey := utils.GetSigningKey(cred.Secret, signV4Values.Credential.scope.date, + signingKey := utils.GetSigningKey(ack.Secret, signV4Values.Credential.scope.date, signV4Values.Credential.scope.region, string(stype)) // Calculate signature. @@ -258,7 +269,7 @@ func (s *Service) doesSignatureMatch(hashedPayload string, r *http.Request, regi // Verify if signature match. 
if !compareSignatureV4(newSignature, signV4Values.Signature) { - err = services.ErrSignatureDoesNotMatch + err = services.RespErrSignatureDoesNotMatch return } diff --git a/s3/services/bucket/proto.go b/s3/services/bucket/proto.go new file mode 100644 index 000000000..d1ebf2b82 --- /dev/null +++ b/s3/services/bucket/proto.go @@ -0,0 +1,29 @@ +package bucket + +import ( + "context" + "github.com/bittorrent/go-btfs/s3/action" + "github.com/bittorrent/go-btfs/s3/services/accesskey" + "time" +) + +type Service interface { + CheckACL(accessKeyRecord *accesskey.AccessKey, bucketName string, action action.Action) (err error) + CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error + GetBucketMeta(ctx context.Context, bucket string) (meta Bucket, err error) + HasBucket(ctx context.Context, bucket string) bool + SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) + DeleteBucket(ctx context.Context, bucket string) error + GetAllBucketsOfUser(username string) (list []*Bucket, err error) + UpdateBucketAcl(ctx context.Context, bucket, acl string) error + GetBucketAcl(ctx context.Context, bucket string) (string, error) +} + +// Bucket contains bucket metadata. +type Bucket struct { + Name string + Region string + Owner string + Acl string + Created time.Time +} diff --git a/s3/services/bucket/service.go b/s3/services/bucket/service.go index c09501b6c..ecc603e1d 100644 --- a/s3/services/bucket/service.go +++ b/s3/services/bucket/service.go @@ -5,6 +5,7 @@ import ( "errors" "github.com/bittorrent/go-btfs/s3/providers" "github.com/bittorrent/go-btfs/s3/services" + "github.com/bittorrent/go-btfs/s3/services/accesskey" "time" "github.com/bittorrent/go-btfs/s3/action" @@ -17,10 +18,10 @@ const ( defaultUpdateTimeoutMS = 200 ) -var _ services.BucketService = (*Service)(nil) +var _ Service = (*service)(nil) -// Service captures all bucket metadata for a given cluster. 
-type Service struct { +// service captures all bucket metadata for a given cluster. +type service struct { providers providers.Providerser emptyBucket func(ctx context.Context, bucket string) (bool, error) locks *ctxmu.MultiCtxRWMutex @@ -28,8 +29,8 @@ type Service struct { } // NewService - creates new policy system. -func NewService(providers providers.Providerser, options ...Option) (s *Service) { - s = &Service{ +func NewService(providers providers.Providerser, options ...Option) Service { + s := &service{ providers: providers, locks: ctxmu.NewDefaultMultiCtxRWMutex(), updateTimeout: time.Duration(defaultUpdateTimeoutMS) * time.Millisecond, @@ -40,10 +41,10 @@ func NewService(providers providers.Providerser, options ...Option) (s *Service) return s } -func (s *Service) CheckACL(accessKeyRecord *services.AccessKey, bucketName string, action action.Action) (err error) { +func (s *service) CheckACL(accessKeyRecord *accesskey.AccessKey, bucketName string, action action.Action) (err error) { //需要判断bucketName是否为空字符串 if bucketName == "" { - return services.ErrNoSuchBucket + return services.RespErrNoSuchBucket } bucketMeta, err := s.GetBucketMeta(context.Background(), bucketName) @@ -52,14 +53,14 @@ func (s *Service) CheckACL(accessKeyRecord *services.AccessKey, bucketName strin } if policy.IsAllowed(bucketMeta.Owner == accessKeyRecord.Key, bucketMeta.Acl, action) == false { - return services.ErrAccessDenied + return services.RespErrAccessDenied } return } -// NewBucketMetadata creates handlers.BucketMetadata with the supplied name and Created to Now. -func (s *Service) NewBucketMetadata(name, region, accessKey, acl string) *services.BucketMetadata { - return &services.BucketMetadata{ +// NewBucketMetadata creates handlers.Bucket with the supplied name and Created to Now. 
+func (s *service) NewBucketMetadata(name, region, accessKey, acl string) *Bucket { + return &Bucket{ Name: name, Region: region, Owner: accessKey, @@ -69,12 +70,12 @@ func (s *Service) NewBucketMetadata(name, region, accessKey, acl string) *servic } // lockSetBucketMeta - sets a new metadata in-db -func (s *Service) lockSetBucketMeta(bucket string, meta *services.BucketMetadata) error { +func (s *service) lockSetBucketMeta(bucket string, meta *Bucket) error { return s.providers.GetStateStore().Put(bucketPrefix+bucket, meta) } // CreateBucket - create a new Bucket -func (s *Service) CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error { +func (s *service) CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error { ctx, cancel := context.WithTimeout(context.Background(), s.updateTimeout) defer cancel() @@ -87,22 +88,22 @@ func (s *Service) CreateBucket(ctx context.Context, bucket, region, accessKey, a return s.lockSetBucketMeta(bucket, s.NewBucketMetadata(bucket, region, accessKey, acl)) } -func (s *Service) lockGetBucketMeta(bucket string) (meta services.BucketMetadata, err error) { +func (s *service) lockGetBucketMeta(bucket string) (meta Bucket, err error) { err = s.providers.GetStateStore().Get(bucketPrefix+bucket, &meta) if errors.Is(err, providers.ErrStateStoreNotFound) { - err = services.ErrNoSuchBucket + err = services.RespErrNoSuchBucket } return } // GetBucketMeta metadata for a bucket. 
-func (s *Service) GetBucketMeta(ctx context.Context, bucket string) (meta services.BucketMetadata, err error) { +func (s *service) GetBucketMeta(ctx context.Context, bucket string) (meta Bucket, err error) { ctx, cancel := context.WithTimeout(context.Background(), s.updateTimeout) defer cancel() err = s.locks.RLock(ctx, bucket) if err != nil { - return services.BucketMetadata{Name: bucket}, err + return Bucket{Name: bucket}, err } defer s.locks.RUnlock(bucket) @@ -110,13 +111,13 @@ func (s *Service) GetBucketMeta(ctx context.Context, bucket string) (meta servic } // HasBucket metadata for a bucket. -func (s *Service) HasBucket(ctx context.Context, bucket string) bool { +func (s *service) HasBucket(ctx context.Context, bucket string) bool { _, err := s.GetBucketMeta(ctx, bucket) return err == nil } // DeleteBucket bucket. -func (s *Service) DeleteBucket(ctx context.Context, bucket string) error { +func (s *service) DeleteBucket(ctx context.Context, bucket string) error { ctx, cancel := context.WithTimeout(context.Background(), s.updateTimeout) defer cancel() @@ -142,14 +143,14 @@ func (s *Service) DeleteBucket(ctx context.Context, bucket string) error { return s.providers.GetStateStore().Delete(bucketPrefix + bucket) } -func (s *Service) SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) { +func (s *service) SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) { s.emptyBucket = emptyBucket } // GetAllBucketsOfUser metadata for all bucket. 
-func (s *Service) GetAllBucketsOfUser(username string) (list []*services.BucketMetadata, err error) { +func (s *service) GetAllBucketsOfUser(username string) (list []*Bucket, err error) { err = s.providers.GetStateStore().Iterate(bucketPrefix, func(key, _ []byte) (stop bool, er error) { - record := &services.BucketMetadata{} + record := &Bucket{} er = s.providers.GetStateStore().Get(string(key), record) if er != nil { return @@ -165,7 +166,7 @@ func (s *Service) GetAllBucketsOfUser(username string) (list []*services.BucketM } // UpdateBucketAcl . -func (s *Service) UpdateBucketAcl(ctx context.Context, bucket, acl string) error { +func (s *service) UpdateBucketAcl(ctx context.Context, bucket, acl string) error { ctx, cancel := context.WithTimeout(context.Background(), s.updateTimeout) defer cancel() @@ -185,7 +186,7 @@ func (s *Service) UpdateBucketAcl(ctx context.Context, bucket, acl string) error } // GetBucketAcl . -func (s *Service) GetBucketAcl(ctx context.Context, bucket string) (string, error) { +func (s *service) GetBucketAcl(ctx context.Context, bucket string) (string, error) { meta, err := s.GetBucketMeta(ctx, bucket) if err != nil { return "", err diff --git a/s3/services/bucket/service_option.go b/s3/services/bucket/service_option.go index 0b648a3a5..e01c02fde 100644 --- a/s3/services/bucket/service_option.go +++ b/s3/services/bucket/service_option.go @@ -1,3 +1,3 @@ package bucket -type Option func(svc *Service) +type Option func(svc *service) diff --git a/s3/services/cors/proto.go b/s3/services/cors/proto.go new file mode 100644 index 000000000..55115b9f6 --- /dev/null +++ b/s3/services/cors/proto.go @@ -0,0 +1,7 @@ +package cors + +type Service interface { + GetAllowOrigins() []string + GetAllowMethods() []string + GetAllowHeaders() []string +} diff --git a/s3/services/cors/service.go b/s3/services/cors/service.go index b76e87846..97ade9c68 100644 --- a/s3/services/cors/service.go +++ b/s3/services/cors/service.go @@ -2,7 +2,6 @@ package cors import 
( "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/services" "net/http" ) @@ -43,16 +42,16 @@ var ( } ) -var _ services.CorsService = (*Service)(nil) +var _ Service = (*service)(nil) -type Service struct { +type service struct { allowOrigins []string allowMethods []string allowHeaders []string } -func NewService(options ...Option) (svc *Service) { - svc = &Service{ +func NewService(options ...Option) Service { + svc := &service{ allowOrigins: defaultAllowOrigins, allowMethods: defaultAllowMethods, allowHeaders: defaultAllowHeaders, @@ -60,17 +59,17 @@ func NewService(options ...Option) (svc *Service) { for _, option := range options { option(svc) } - return + return svc } -func (svc *Service) GetAllowOrigins() []string { +func (svc *service) GetAllowOrigins() []string { return svc.allowOrigins } -func (svc *Service) GetAllowMethods() []string { +func (svc *service) GetAllowMethods() []string { return svc.allowMethods } -func (svc *Service) GetAllowHeaders() []string { +func (svc *service) GetAllowHeaders() []string { return svc.allowHeaders } diff --git a/s3/services/cors/service_options.go b/s3/services/cors/service_options.go index 1b3ee5721..c25cbfc89 100644 --- a/s3/services/cors/service_options.go +++ b/s3/services/cors/service_options.go @@ -1,21 +1,21 @@ package cors -type Option func(svc *Service) +type Option func(svc *service) func WithAllowOrigins(origins []string) Option { - return func(svc *Service) { + return func(svc *service) { svc.allowOrigins = origins } } func WithAllowMethods(methods []string) Option { - return func(svc *Service) { + return func(svc *service) { svc.allowMethods = methods } } func WithAllowHeaders(headers []string) Option { - return func(svc *Service) { + return func(svc *service) { svc.allowHeaders = headers } } diff --git a/s3/services/multipart/proto.go b/s3/services/multipart/proto.go new file mode 100644 index 000000000..2673587e2 --- /dev/null +++ b/s3/services/multipart/proto.go @@ -0,0 +1,4 @@ 
+package multipart + +type Service interface { +} diff --git a/s3/services/multipart/service.go b/s3/services/multipart/service.go index 6ac36a72b..62fb2ccb3 100644 --- a/s3/services/multipart/service.go +++ b/s3/services/multipart/service.go @@ -5,20 +5,20 @@ import ( "io" ) -var _ services.MultipartService = (*Service)(nil) +var _ services.MultipartService = (*service)(nil) -type Service struct { +type service struct { } -func NewService(options ...Option) (svc *Service) { - svc = &Service{} +func NewService(options ...Option) Service { + svc := &service{} for _, option := range options { option(svc) } - return + return svc } -func (svc *Service) multiReader() io.Reader { +func (svc *service) multiReader() io.Reader { var ( r1 io.Reader r2 io.Reader diff --git a/s3/services/multipart/service_options.go b/s3/services/multipart/service_options.go index 38cfa3705..e3dcdf9c2 100644 --- a/s3/services/multipart/service_options.go +++ b/s3/services/multipart/service_options.go @@ -1,3 +1,3 @@ package multipart -type Option func(svc *Service) +type Option func(svc *service) diff --git a/s3/services/proto.go b/s3/services/proto.go deleted file mode 100644 index 2972a1753..000000000 --- a/s3/services/proto.go +++ /dev/null @@ -1,68 +0,0 @@ -package services - -import ( - "context" - "net/http" - "time" - - "github.com/bittorrent/go-btfs/s3/action" -) - -type CorsService interface { - GetAllowOrigins() []string - GetAllowMethods() []string - GetAllowHeaders() []string -} - -type AccessKeyService interface { - Generate() (record *AccessKey, err error) - Enable(key string) (err error) - Disable(key string) (err error) - Reset(key string) (err error) - Delete(key string) (err error) - Get(key string) (ack *AccessKey, err error) - List() (list []*AccessKey, err error) -} - -type AuthService interface { - VerifySignature(ctx context.Context, r *http.Request) (ack *AccessKey, err error) -} - -type BucketService interface { - CheckACL(accessKeyRecord *AccessKey, bucketName string, 
action action.Action) (err error) - CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error - GetBucketMeta(ctx context.Context, bucket string) (meta BucketMetadata, err error) - HasBucket(ctx context.Context, bucket string) bool - SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) - DeleteBucket(ctx context.Context, bucket string) error - GetAllBucketsOfUser(username string) (list []*BucketMetadata, err error) - UpdateBucketAcl(ctx context.Context, bucket, acl string) error - GetBucketAcl(ctx context.Context, bucket string) (string, error) -} - -type ObjectService interface { -} - -type MultipartService interface { -} - -type AccessKey struct { - Key string `json:"key"` - Secret string `json:"secret"` - Enable bool `json:"enable"` - IsDeleted bool `json:"is_deleted"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` -} - -// BucketMetadata contains bucket metadata. -type BucketMetadata struct { - Name string - Region string - Owner string - Acl string - Created time.Time -} - -type ObjectMetadata struct { -} diff --git a/s3/services/error.go b/s3/services/response_error.go similarity index 77% rename from s3/services/error.go rename to s3/services/response_error.go index c7fff68b6..c86f5e0cb 100644 --- a/s3/services/error.go +++ b/s3/services/response_error.go @@ -5,1036 +5,1026 @@ import ( "net/http" ) -type Error struct { +type ResponseError struct { code string description string httpStatusCode int } -func (err *Error) Code() string { +func (err *ResponseError) Code() string { return err.code } -func (err *Error) Description() string { +func (err *ResponseError) Description() string { return err.description } -func (err *Error) HTTPStatusCode() int { +func (err *ResponseError) HTTPStatusCode() int { return err.httpStatusCode } -func (err *Error) Error() string { - return fmt.Sprintf( - "code <%s>, description <%s>, status <%d>", - err.code, - err.description, - 
err.httpStatusCode, - ) +func (err *ResponseError) Error() string { + return fmt.Sprintf("[%s]%s", err.code, err.description) } // Errors http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html var ( - ErrInvalidCopyDest = &Error{ + RespErrInvalidCopyDest = &ResponseError{ code: "InvalidRequest", description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidCopySource = &Error{ + RespErrInvalidCopySource = &ResponseError{ code: "InvalidArgument", description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidMetadataDirective = &Error{ + RespErrInvalidMetadataDirective = &ResponseError{ code: "InvalidArgument", description: "Unknown metadata directive.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidRequestBody = &Error{ + RespErrInvalidRequestBody = &ResponseError{ code: "InvalidArgument", description: "Body shouldn't be set for this request.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidMaxUploads = &Error{ + RespErrInvalidMaxUploads = &ResponseError{ code: "InvalidArgument", description: "Argument max-uploads must be an integer between 0 and 2147483647", httpStatusCode: http.StatusBadRequest, } - ErrInvalidMaxKeys = &Error{ + RespErrInvalidMaxKeys = &ResponseError{ code: "InvalidArgument", description: "Argument maxKeys must be an integer between 0 and 2147483647", httpStatusCode: http.StatusBadRequest, } - ErrInvalidEncodingMethod = &Error{ + RespErrInvalidEncodingMethod = &ResponseError{ code: "InvalidArgument", description: "Invalid Encoding Method specified in Request", httpStatusCode: http.StatusBadRequest, } - ErrInvalidMaxParts = &Error{ + RespErrInvalidMaxParts = &ResponseError{ code: "InvalidArgument", description: "Part number must be an integer between 1 
and 10000, inclusive", httpStatusCode: http.StatusBadRequest, } - ErrInvalidPartNumberMarker = &Error{ + RespErrInvalidPartNumberMarker = &ResponseError{ code: "InvalidArgument", description: "Argument partNumberMarker must be an integer.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidPolicyDocument = &Error{ + RespErrInvalidPolicyDocument = &ResponseError{ code: "InvalidPolicyDocument", description: "The content of the form does not meet the conditions specified in the policy document.", httpStatusCode: http.StatusBadRequest, } - ErrAccessDenied = &Error{ + RespErrAccessDenied = &ResponseError{ code: "AccessDenied", description: "Access Denied.", httpStatusCode: http.StatusForbidden, } - ErrBadDigest = &Error{ + RespErrBadDigest = &ResponseError{ code: "BadDigest", description: "The Content-Md5 you specified did not match what we received.", httpStatusCode: http.StatusBadRequest, } - ErrEntityTooSmall = &Error{ + RespErrEntityTooSmall = &ResponseError{ code: "EntityTooSmall", description: "Your proposed upload is smaller than the minimum allowed object size.", httpStatusCode: http.StatusBadRequest, } - ErrEntityTooLarge = &Error{ + RespErrEntityTooLarge = &ResponseError{ code: "EntityTooLarge", description: "Your proposed upload exceeds the maximum allowed object size.", httpStatusCode: http.StatusBadRequest, } - ErrIncompleteBody = &Error{ + RespErrIncompleteBody = &ResponseError{ code: "IncompleteBody", description: "You did not provide the number of bytes specified by the Content-Length HTTP header.", httpStatusCode: http.StatusBadRequest, } - ErrInternalError = &Error{ + RespErrInternalError = &ResponseError{ code: "InternalError", description: "We encountered an internal error, please try again.", httpStatusCode: http.StatusInternalServerError, } - ErrInvalidAccessKeyID = &Error{ + RespErrInvalidAccessKeyID = &ResponseError{ code: "InvalidAccessKeyId", description: "The Access Key Id you provided does not exist in our records.", httpStatusCode: 
http.StatusForbidden, } - ErrAccessKeyDisabled = &Error{ + RespErrAccessKeyDisabled = &ResponseError{ code: "InvalidAccessKeyId", description: "Your account is disabled; please contact your administrator.", httpStatusCode: http.StatusForbidden, } - ErrInvalidBucketName = &Error{ + RespErrInvalidBucketName = &ResponseError{ code: "InvalidBucketName", description: "The specified bucket is not valid.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidDigest = &Error{ + RespErrInvalidDigest = &ResponseError{ code: "InvalidDigest", description: "The Content-Md5 you specified is not valid.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidRange = &Error{ + RespErrInvalidRange = &ResponseError{ code: "InvalidRange", description: "The requested range is not satisfiable", httpStatusCode: http.StatusRequestedRangeNotSatisfiable, } - ErrInvalidRangePartNumber = &Error{ + RespErrInvalidRangePartNumber = &ResponseError{ code: "InvalidRequest", description: "Cannot specify both Range header and partNumber query parameter", httpStatusCode: http.StatusBadRequest, } - ErrMalformedXML = &Error{ + RespErrMalformedXML = &ResponseError{ code: "MalformedXML", description: "The XML you provided was not well-formed or did not validate against our published schema.", httpStatusCode: http.StatusBadRequest, } - ErrMissingContentLength = &Error{ + RespErrMissingContentLength = &ResponseError{ code: "MissingContentLength", description: "You must provide the Content-Length HTTP header.", httpStatusCode: http.StatusLengthRequired, } - ErrMissingContentMD5 = &Error{ + RespErrMissingContentMD5 = &ResponseError{ code: "MissingContentMD5", description: "Missing required header for this request: Content-Md5.", httpStatusCode: http.StatusBadRequest, } - ErrMissingSecurityHeader = &Error{ + RespErrMissingSecurityHeader = &ResponseError{ code: "MissingSecurityHeader", description: "Your request was missing a required header", httpStatusCode: http.StatusBadRequest, } - ErrMissingRequestBodyError 
= &Error{ + RespErrMissingRequestBodyError = &ResponseError{ code: "MissingRequestBodyError", description: "Request body is empty.", httpStatusCode: http.StatusLengthRequired, } - ErrNoSuchBucket = &Error{ + RespErrNoSuchBucket = &ResponseError{ code: "NoSuchBucket", description: "The specified bucket does not exist", httpStatusCode: http.StatusNotFound, } - ErrNoSuchBucketPolicy = &Error{ + RespErrNoSuchBucketPolicy = &ResponseError{ code: "NoSuchBucketPolicy", description: "The bucket policy does not exist", httpStatusCode: http.StatusNotFound, } - ErrNoSuchLifecycleConfiguration = &Error{ + RespErrNoSuchLifecycleConfiguration = &ResponseError{ code: "NoSuchLifecycleConfiguration", description: "The lifecycle configuration does not exist", httpStatusCode: http.StatusNotFound, } - ErrNoSuchUser = &Error{ + RespErrNoSuchUser = &ResponseError{ code: "NoSuchUser", description: "The specified user does not exist", httpStatusCode: http.StatusConflict, } - ErrUserAlreadyExists = &Error{ + RespErrUserAlreadyExists = &ResponseError{ code: "UserAlreadyExists", description: "The request was rejected because it attempted to create a resource that already exists .", httpStatusCode: http.StatusConflict, } - ErrNoSuchUserPolicy = &Error{ + RespErrNoSuchUserPolicy = &ResponseError{ code: "NoSuchUserPolicy", description: "The specified user policy does not exist", httpStatusCode: http.StatusConflict, } - ErrUserPolicyAlreadyExists = &Error{ + RespErrUserPolicyAlreadyExists = &ResponseError{ code: "UserPolicyAlreadyExists", description: "The same user policy already exists .", httpStatusCode: http.StatusConflict, } - ErrNoSuchKey = &Error{ + RespErrNoSuchKey = &ResponseError{ code: "NoSuchKey", description: "The specified key does not exist.", httpStatusCode: http.StatusNotFound, } - ErrNoSuchUpload = &Error{ + RespErrNoSuchUpload = &ResponseError{ code: "NoSuchUpload", description: "The specified multipart upload does not exist. 
The upload ID may be invalid, or the upload may have been aborted or completed.", httpStatusCode: http.StatusNotFound, } - ErrInvalidVersionID = &Error{ + RespErrInvalidVersionID = &ResponseError{ code: "InvalidArgument", description: "Invalid version id specified", httpStatusCode: http.StatusBadRequest, } - ErrNoSuchVersion = &Error{ + RespErrNoSuchVersion = &ResponseError{ code: "NoSuchVersion", description: "The specified version does not exist.", httpStatusCode: http.StatusNotFound, } - ErrNotImplemented = &Error{ + RespErrNotImplemented = &ResponseError{ code: "NotImplemented", description: "A header you provided implies functionality that is not implemented", httpStatusCode: http.StatusNotImplemented, } - ErrPreconditionFailed = &Error{ + RespErrPreconditionFailed = &ResponseError{ code: "PreconditionFailed", description: "At least one of the pre-conditions you specified did not hold", httpStatusCode: http.StatusPreconditionFailed, } - ErrRequestTimeTooSkewed = &Error{ + RespErrRequestTimeTooSkewed = &ResponseError{ code: "RequestTimeTooSkewed", description: "The difference between the request time and the server's time is too large.", httpStatusCode: http.StatusForbidden, } - ErrSignatureDoesNotMatch = &Error{ + RespErrSignatureDoesNotMatch = &ResponseError{ code: "SignatureDoesNotMatch", description: "The request signature we calculated does not match the signature you provided. Check your key and signing method.", httpStatusCode: http.StatusForbidden, } - ErrMethodNotAllowed = &Error{ + RespErrMethodNotAllowed = &ResponseError{ code: "MethodNotAllowed", description: "The specified method is not allowed against this resource.", httpStatusCode: http.StatusMethodNotAllowed, } - ErrInvalidPart = &Error{ + RespErrInvalidPart = &ResponseError{ code: "InvalidPart", description: "One or more of the specified parts could not be found. 
The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidPartOrder = &Error{ + RespErrInvalidPartOrder = &ResponseError{ code: "InvalidPartOrder", description: "The list of parts was not in ascending order. The parts list must be specified in order by part number.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidObjectState = &Error{ + RespErrInvalidObjectState = &ResponseError{ code: "InvalidObjectState", description: "The operation is not valid for the current state of the object.", httpStatusCode: http.StatusForbidden, } - ErrAuthorizationHeaderMalformed = &Error{ + RespErrAuthorizationHeaderMalformed = &ResponseError{ code: "AuthorizationHeaderMalformed", description: "The authorization header is malformed; the region is wrong; expecting 'us-east-1'.", httpStatusCode: http.StatusBadRequest, } - ErrMalformedPOSTRequest = &Error{ + RespErrMalformedPOSTRequest = &ResponseError{ code: "MalformedPOSTRequest", description: "The body of your POST request is not well-formed multipart/form-data.", httpStatusCode: http.StatusBadRequest, } - ErrPOSTFileRequired = &Error{ + RespErrPOSTFileRequired = &ResponseError{ code: "InvalidArgument", description: "POST requires exactly one file upload per request.", httpStatusCode: http.StatusBadRequest, } - ErrSignatureVersionNotSupported = &Error{ + RespErrSignatureVersionNotSupported = &ResponseError{ code: "InvalidRequest", description: "The authorization mechanism you have provided is not supported. 
Please use AWS4-HMAC-SHA256.", httpStatusCode: http.StatusBadRequest, } - ErrBucketNotEmpty = &Error{ + RespErrBucketNotEmpty = &ResponseError{ code: "BucketNotEmpty", description: "The bucket you tried to delete is not empty", httpStatusCode: http.StatusConflict, } - ErrBucketAlreadyExists = &Error{ + RespErrBucketAlreadyExists = &ResponseError{ code: "BucketAlreadyExists", description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again.", httpStatusCode: http.StatusConflict, } - ErrAllAccessDisabled = &Error{ + RespErrAllAccessDisabled = &ResponseError{ code: "AllAccessDisabled", description: "All access to this resource has been disabled.", httpStatusCode: http.StatusForbidden, } - ErrMalformedPolicy = &Error{ + RespErrMalformedPolicy = &ResponseError{ code: "MalformedPolicy", description: "Policy has invalid resource.", httpStatusCode: http.StatusBadRequest, } - ErrMissingFields = &Error{ // todo + RespErrMissingFields = &ResponseError{ // todo code: "InvalidRequest", description: "ErrMissingFields", httpStatusCode: http.StatusBadRequest, } - ErrMissingCredTag = &Error{ + RespErrMissingCredTag = &ResponseError{ code: "InvalidRequest", description: "Missing Credential field for this request.", httpStatusCode: http.StatusBadRequest, } - ErrCredMalformed = &Error{ // todo + RespErrCredMalformed = &ResponseError{ // todo code: "InvalidRequest", description: "ErrCredMalformed", httpStatusCode: http.StatusBadRequest, } - ErrInvalidRegion = &Error{ + RespErrInvalidRegion = &ResponseError{ code: "InvalidRegion", description: "Region does not match.", httpStatusCode: http.StatusBadRequest, } - ErrMissingSignTag = &Error{ + RespErrMissingSignTag = &ResponseError{ code: "AccessDenied", description: "Signature header missing Signature field.", httpStatusCode: http.StatusBadRequest, } - ErrMissingSignHeadersTag = &Error{ + RespErrMissingSignHeadersTag = &ResponseError{ code: 
"InvalidArgument", description: "Signature header missing SignedHeaders field.", httpStatusCode: http.StatusBadRequest, } - ErrAuthHeaderEmpty = &Error{ + RespErrAuthHeaderEmpty = &ResponseError{ code: "InvalidArgument", description: "Authorization header is invalid -- one and only one ' ' (space) required.", httpStatusCode: http.StatusBadRequest, } - ErrMissingDateHeader = &Error{ + RespErrMissingDateHeader = &ResponseError{ code: "AccessDenied", description: "AWS authentication requires a valid Date or x-amz-date header", httpStatusCode: http.StatusBadRequest, } - ErrExpiredPresignRequest = &Error{ + RespErrExpiredPresignRequest = &ResponseError{ code: "AccessDenied", description: "Request has expired", httpStatusCode: http.StatusForbidden, } - ErrRequestNotReadyYet = &Error{ + RespErrRequestNotReadyYet = &ResponseError{ code: "AccessDenied", description: "Request is not valid yet", httpStatusCode: http.StatusForbidden, } - ErrSlowDown = &Error{ + RespErrSlowDown = &ResponseError{ code: "SlowDown", description: "Resource requested is unreadable, please reduce your request rate", httpStatusCode: http.StatusServiceUnavailable, } - ErrBadRequest = &Error{ + RespErrBadRequest = &ResponseError{ code: "BadRequest", description: "400 BadRequest", httpStatusCode: http.StatusBadRequest, } - ErrKeyTooLongError = &Error{ + RespErrKeyTooLongError = &ResponseError{ code: "KeyTooLongError", description: "Your key is too long", httpStatusCode: http.StatusBadRequest, } - ErrUnsignedHeaders = &Error{ + RespErrUnsignedHeaders = &ResponseError{ code: "AccessDenied", description: "There were headers present in the request which were not signed", httpStatusCode: http.StatusBadRequest, } - ErrBucketAlreadyOwnedByYou = &Error{ + RespErrBucketAlreadyOwnedByYou = &ResponseError{ code: "BucketAlreadyOwnedByYou", description: "Your previous request to create the named bucket succeeded and you already own it.", httpStatusCode: http.StatusConflict, } - ErrInvalidDuration = &Error{ + 
RespErrInvalidDuration = &ResponseError{ code: "InvalidDuration", description: "Duration provided in the request is invalid.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidBucketObjectLockConfiguration = &Error{ + RespErrInvalidBucketObjectLockConfiguration = &ResponseError{ code: "InvalidRequest", description: "Bucket is missing ObjectLockConfiguration", httpStatusCode: http.StatusBadRequest, } - ErrBucketTaggingNotFound = &Error{ + RespErrBucketTaggingNotFound = &ResponseError{ code: "NoSuchTagSet", description: "The TagSet does not exist", httpStatusCode: http.StatusNotFound, } - ErrObjectLockConfigurationNotAllowed = &Error{ + RespErrObjectLockConfigurationNotAllowed = &ResponseError{ code: "InvalidBucketState", description: "Object Lock configuration cannot be enabled on existing buckets", httpStatusCode: http.StatusConflict, } - ErrNoSuchCORSConfiguration = &Error{ + RespErrNoSuchCORSConfiguration = &ResponseError{ code: "NoSuchCORSConfiguration", description: "The CORS configuration does not exist", httpStatusCode: http.StatusNotFound, } - ErrNoSuchWebsiteConfiguration = &Error{ + RespErrNoSuchWebsiteConfiguration = &ResponseError{ code: "NoSuchWebsiteConfiguration", description: "The specified bucket does not have a website configuration", httpStatusCode: http.StatusNotFound, } - ErrReplicationConfigurationNotFoundError = &Error{ + RespErrReplicationConfigurationNotFoundError = &ResponseError{ code: "ReplicationConfigurationNotFoundError", description: "The replication configuration was not found", httpStatusCode: http.StatusNotFound, } - ErrReplicationNeedsVersioningError = &Error{ + RespErrReplicationNeedsVersioningError = &ResponseError{ code: "InvalidRequest", description: "Versioning must be 'Enabled' on the bucket to apply a replication configuration", httpStatusCode: http.StatusBadRequest, } - ErrReplicationBucketNeedsVersioningError = &Error{ + RespErrReplicationBucketNeedsVersioningError = &ResponseError{ code: "InvalidRequest", 
description: "Versioning must be 'Enabled' on the bucket to add a replication target", httpStatusCode: http.StatusBadRequest, } - ErrNoSuchObjectLockConfiguration = &Error{ + RespErrNoSuchObjectLockConfiguration = &ResponseError{ code: "NoSuchObjectLockConfiguration", description: "The specified object does not have a ObjectLock configuration", httpStatusCode: http.StatusBadRequest, } - ErrObjectLocked = &Error{ + RespErrObjectLocked = &ResponseError{ code: "InvalidRequest", description: "Object is WORM protected and cannot be overwritten", httpStatusCode: http.StatusBadRequest, } - ErrInvalidRetentionDate = &Error{ + RespErrInvalidRetentionDate = &ResponseError{ code: "InvalidRequest", description: "Date must be provided in ISO 8601 format", httpStatusCode: http.StatusBadRequest, } - ErrPastObjectLockRetainDate = &Error{ + RespErrPastObjectLockRetainDate = &ResponseError{ code: "InvalidRequest", description: "the retain until date must be in the future", httpStatusCode: http.StatusBadRequest, } - ErrUnknownWORMModeDirective = &Error{ + RespErrUnknownWORMModeDirective = &ResponseError{ code: "InvalidRequest", description: "unknown wormMode directive", httpStatusCode: http.StatusBadRequest, } - ErrObjectLockInvalidHeaders = &Error{ + RespErrObjectLockInvalidHeaders = &ResponseError{ code: "InvalidRequest", description: "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied", httpStatusCode: http.StatusBadRequest, } - ErrObjectRestoreAlreadyInProgress = &Error{ + RespErrObjectRestoreAlreadyInProgress = &ResponseError{ code: "RestoreAlreadyInProgress", description: "Object restore is already in progress", httpStatusCode: http.StatusConflict, } // Bucket notification related errors. 
- ErrEventNotification = &Error{ + RespErrEventNotification = &ResponseError{ code: "InvalidArgument", description: "A specified event is not supported for notifications.", httpStatusCode: http.StatusBadRequest, } - ErrARNNotification = &Error{ + RespErrARNNotification = &ResponseError{ code: "InvalidArgument", description: "A specified destination ARN does not exist or is not well-formed. Verify the destination ARN.", httpStatusCode: http.StatusBadRequest, } - ErrRegionNotification = &Error{ + RespErrRegionNotification = &ResponseError{ code: "InvalidArgument", description: "A specified destination is in a different region than the bucket. You must use a destination that resides in the same region as the bucket.", httpStatusCode: http.StatusBadRequest, } - ErrOverlappingFilterNotification = &Error{ + RespErrOverlappingFilterNotification = &ResponseError{ code: "InvalidArgument", description: "An object key name filtering rule defined with overlapping prefixes, overlapping suffixes, or overlapping combinations of prefixes and suffixes for the same event types.", httpStatusCode: http.StatusBadRequest, } - ErrFilterNameInvalid = &Error{ + RespErrFilterNameInvalid = &ResponseError{ code: "InvalidArgument", description: "filter rule name must be either prefix or suffix", httpStatusCode: http.StatusBadRequest, } - ErrFilterNamePrefix = &Error{ + RespErrFilterNamePrefix = &ResponseError{ code: "InvalidArgument", description: "Cannot specify more than one prefix rule in a filter.", httpStatusCode: http.StatusBadRequest, } - ErrFilterNameSuffix = &Error{ + RespErrFilterNameSuffix = &ResponseError{ code: "InvalidArgument", description: "Cannot specify more than one suffix rule in a filter.", httpStatusCode: http.StatusBadRequest, } - ErrFilterValueInvalid = &Error{ + RespErrFilterValueInvalid = &ResponseError{ code: "InvalidArgument", description: "Size of filter rule value cannot exceed 1024 bytes in UTF-8 representation", httpStatusCode: http.StatusBadRequest, } - 
ErrOverlappingConfigs = &Error{ + RespErrOverlappingConfigs = &ResponseError{ code: "InvalidArgument", description: "Configurations overlap. Configurations on the same bucket cannot share a common event type.", httpStatusCode: http.StatusBadRequest, } - ErrContentSHA256Mismatch = &Error{ //todo + RespErrContentSHA256Mismatch = &ResponseError{ //todo code: "InvalidArgument", description: "ErrContentSHA256Mismatch", httpStatusCode: http.StatusBadRequest, } - ErrInvalidCopyPartRange = &Error{ + RespErrInvalidCopyPartRange = &ResponseError{ code: "InvalidArgument", description: "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy", httpStatusCode: http.StatusBadRequest, } - ErrInvalidCopyPartRangeSource = &Error{ + RespErrInvalidCopyPartRangeSource = &ResponseError{ code: "InvalidArgument", description: "Range specified is not valid for source object", httpStatusCode: http.StatusBadRequest, } - ErrMetadataTooLarge = &Error{ + RespErrMetadataTooLarge = &ResponseError{ code: "MetadataTooLarge", description: "Your metadata headers exceed the maximum allowed metadata size.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidTagDirective = &Error{ + RespErrInvalidTagDirective = &ResponseError{ code: "InvalidArgument", description: "Unknown tag directive.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidEncryptionMethod = &Error{ + RespErrInvalidEncryptionMethod = &ResponseError{ code: "InvalidRequest", description: "The encryption method specified is not supported", httpStatusCode: http.StatusBadRequest, } - ErrInvalidQueryParams = &Error{ + RespErrInvalidQueryParams = &ResponseError{ code: "AuthorizationQueryParametersError", description: "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.", httpStatusCode: http.StatusBadRequest, } - ErrNoAccessKey = 
&Error{ + RespErrNoAccessKey = &ResponseError{ code: "AccessDenied", description: "No AWSAccessKey was presented", httpStatusCode: http.StatusForbidden, } - ErrInvalidToken = &Error{ + RespErrInvalidToken = &ResponseError{ code: "InvalidTokenId", description: "The security token included in the request is invalid", httpStatusCode: http.StatusForbidden, } // S3 extensions. - ErrInvalidObjectName = &Error{ + RespErrInvalidObjectName = &ResponseError{ code: "InvalidObjectName", description: "Object name contains unsupported characters.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidObjectNamePrefixSlash = &Error{ + RespErrInvalidObjectNamePrefixSlash = &ResponseError{ code: "InvalidObjectName", description: "Object name contains a leading slash.", httpStatusCode: http.StatusBadRequest, } - ErrClientDisconnected = &Error{ + RespErrClientDisconnected = &ResponseError{ code: "ClientDisconnected", description: "Client disconnected before response was ready", httpStatusCode: 499, // No official code, use nginx value. } - ErrOperationTimedOut = &Error{ + RespErrOperationTimedOut = &ResponseError{ code: "RequestTimeout", description: "A timeout occurred while trying to lock a resource, please reduce your request rate", httpStatusCode: http.StatusServiceUnavailable, } - ErrOperationMaxedOut = &Error{ + RespErrOperationMaxedOut = &ResponseError{ code: "SlowDown", description: "A timeout exceeded while waiting to proceed with the request, please reduce your request rate", httpStatusCode: http.StatusServiceUnavailable, } - ErrUnsupportedMetadata = &Error{ + RespErrUnsupportedMetadata = &ResponseError{ code: "InvalidArgument", description: "Your metadata headers are not supported.", httpStatusCode: http.StatusBadRequest, } // Generic Invalid-Request error. Should be used for response errors only for unlikely - // corner case errors for which introducing new APIErrorcode is not worth it. 
LogIf() + // corner case errors for which introducing new APIRespErrorcode is not worth it. LogIf() // should be used to log the error at the source of the error for debugging purposes. - ErrInvalidRequest = &Error{ + ErrInvalidRequest = &ResponseError{ code: "InvalidRequest", description: "Invalid Request", httpStatusCode: http.StatusBadRequest, } - ErrIncorrectContinuationToken = &Error{ + RespErrIncorrectContinuationToken = &ResponseError{ code: "InvalidArgument", description: "The continuation token provided is incorrect", httpStatusCode: http.StatusBadRequest, } - ErrInvalidFormatAccessKey = &Error{ + RespErrInvalidFormatAccessKey = &ResponseError{ code: "InvalidAccessKeyId", description: "The Access Key Id you provided contains invalid characters.", httpStatusCode: http.StatusBadRequest, } - // S3 Select API Errors - ErrEmptyRequestBody = &Error{ + // S3 Select API RespErrors + ErrEmptyRequestBody = &ResponseError{ code: "EmptyRequestBody", description: "Request body cannot be empty.", httpStatusCode: http.StatusBadRequest, } - ErrUnsupportedFunction = &Error{ + RespErrUnsupportedFunction = &ResponseError{ code: "UnsupportedFunction", description: "Encountered an unsupported SQL function.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidDataSource = &Error{ + RespErrInvalidDataSource = &ResponseError{ code: "InvalidDataSource", description: "Invalid data source type. Only CSV and JSON are supported at this time.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidExpressionType = &Error{ + RespErrInvalidExpressionType = &ResponseError{ code: "InvalidExpressionType", description: "The ExpressionType is invalid. Only SQL expressions are supported at this time.", httpStatusCode: http.StatusBadRequest, } - ErrBusy = &Error{ + RespErrBusy = &ResponseError{ code: "Busy", description: "The service is unavailable. 
Please retry.", httpStatusCode: http.StatusServiceUnavailable, } - ErrUnauthorizedAccess = &Error{ + RespErrUnauthorizedAccess = &ResponseError{ code: "UnauthorizedAccess", description: "You are not authorized to perform this operation", httpStatusCode: http.StatusUnauthorized, } - ErrExpressionTooLong = &Error{ + RespErrExpressionTooLong = &ResponseError{ code: "ExpressionTooLong", description: "The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB.", httpStatusCode: http.StatusBadRequest, } - ErrIllegalSQLFunctionArgument = &Error{ + RespErrIllegalSQLFunctionArgument = &ResponseError{ code: "IllegalSqlFunctionArgument", description: "Illegal argument was used in the SQL function.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidKeyPath = &Error{ + RespErrInvalidKeyPath = &ResponseError{ code: "InvalidKeyPath", description: "Key path in the SQL expression is invalid.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidCompressionFormat = &Error{ + RespErrInvalidCompressionFormat = &ResponseError{ code: "InvalidCompressionFormat", description: "The file is not in a supported compression format. Only GZIP is supported at this time.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidFileHeaderInfo = &Error{ + RespErrInvalidFileHeaderInfo = &ResponseError{ code: "InvalidFileHeaderInfo", description: "The FileHeaderInfo is invalid. Only NONE, USE, and IGNORE are supported.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidJSONType = &Error{ + RespErrInvalidJSONType = &ResponseError{ code: "InvalidJsonType", description: "The JsonType is invalid. Only DOCUMENT and LINES are supported at this time.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidQuoteFields = &Error{ + RespErrInvalidQuoteFields = &ResponseError{ code: "InvalidQuoteFields", description: "The QuoteFields is invalid. 
Only ALWAYS and ASNEEDED are supported.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidRequestParameter = &Error{ + RespErrInvalidRequestParameter = &ResponseError{ code: "InvalidRequestParameter", description: "The value of a parameter in SelectRequest element is invalid. Check the service API documentation and try again.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidDataType = &Error{ + RespErrInvalidDataType = &ResponseError{ code: "InvalidDataType", description: "The SQL expression contains an invalid data type.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidTextEncoding = &Error{ + RespErrInvalidTextEncoding = &ResponseError{ code: "InvalidTextEncoding", description: "Invalid encoding type. Only UTF-8 encoding is supported at this time.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidTableAlias = &Error{ + RespErrInvalidTableAlias = &ResponseError{ code: "InvalidTableAlias", description: "The SQL expression contains an invalid table alias.", httpStatusCode: http.StatusBadRequest, } - ErrMissingRequiredParameter = &Error{ + RespErrMissingRequiredParameter = &ResponseError{ code: "MissingRequiredParameter", description: "The SelectRequest entity is missing a required parameter. Check the service documentation and try again.", httpStatusCode: http.StatusBadRequest, } - ErrObjectSerializationConflict = &Error{ + RespErrObjectSerializationConflict = &ResponseError{ code: "ObjectSerializationConflict", description: "The SelectRequest entity can only contain one of CSV or JSON. 
Check the service documentation and try again.", httpStatusCode: http.StatusBadRequest, } - ErrUnsupportedSQLOperation = &Error{ + RespErrUnsupportedSQLOperation = &ResponseError{ code: "UnsupportedSqlOperation", description: "Encountered an unsupported SQL operation.", httpStatusCode: http.StatusBadRequest, } - ErrUnsupportedSQLStructure = &Error{ + RespErrUnsupportedSQLStructure = &ResponseError{ code: "UnsupportedSqlStructure", description: "Encountered an unsupported SQL structure. Check the SQL Reference.", httpStatusCode: http.StatusBadRequest, } - ErrUnsupportedSyntax = &Error{ + RespErrUnsupportedSyntax = &ResponseError{ code: "UnsupportedSyntax", description: "Encountered invalid syntax.", httpStatusCode: http.StatusBadRequest, } - ErrUnsupportedRangeHeader = &Error{ + RespErrUnsupportedRangeHeader = &ResponseError{ code: "UnsupportedRangeHeader", description: "Range header is not supported for this operation.", httpStatusCode: http.StatusBadRequest, } - ErrLexerInvalidChar = &Error{ + RespErrLexerInvalidChar = &ResponseError{ code: "LexerInvalidChar", description: "The SQL expression contains an invalid character.", httpStatusCode: http.StatusBadRequest, } - ErrLexerInvalidOperator = &Error{ + RespErrLexerInvalidOperator = &ResponseError{ code: "LexerInvalidOperator", description: "The SQL expression contains an invalid literal.", httpStatusCode: http.StatusBadRequest, } - ErrLexerInvalidLiteral = &Error{ + RespErrLexerInvalidLiteral = &ResponseError{ code: "LexerInvalidLiteral", description: "The SQL expression contains an invalid operator.", httpStatusCode: http.StatusBadRequest, } - ErrLexerInvalidIONLiteral = &Error{ + RespErrLexerInvalidIONLiteral = &ResponseError{ code: "LexerInvalidIONLiteral", description: "The SQL expression contains an invalid operator.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpectedDatePart = &Error{ + RespErrParseExpectedDatePart = &ResponseError{ code: "ParseExpectedDatePart", description: "Did not find the 
expected date part in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpectedKeyword = &Error{ + RespErrParseExpectedKeyword = &ResponseError{ code: "ParseExpectedKeyword", description: "Did not find the expected keyword in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpectedTokenType = &Error{ + RespErrParseExpectedTokenType = &ResponseError{ code: "ParseExpectedTokenType", description: "Did not find the expected token in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpected2TokenTypes = &Error{ + RespErrParseExpected2TokenTypes = &ResponseError{ code: "ParseExpected2TokenTypes", description: "Did not find the expected token in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpectedNumber = &Error{ + RespErrParseExpectedNumber = &ResponseError{ code: "ParseExpectedNumber", description: "Did not find the expected number in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpectedRightParenBuiltinFunctionCall = &Error{ + RespErrParseExpectedRightParenBuiltinFunctionCall = &ResponseError{ code: "ParseExpectedRightParenBuiltinFunctionCall", description: "Did not find the expected right parenthesis character in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpectedTypeName = &Error{ + RespErrParseExpectedTypeName = &ResponseError{ code: "ParseExpectedTypeName", description: "Did not find the expected type name in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpectedWhenClause = &Error{ + RespErrParseExpectedWhenClause = &ResponseError{ code: "ParseExpectedWhenClause", description: "Did not find the expected WHEN clause in the SQL expression. 
CASE is not supported.", httpStatusCode: http.StatusBadRequest, } - ErrParseUnsupportedToken = &Error{ + RespErrParseUnsupportedToken = &ResponseError{ code: "ParseUnsupportedToken", description: "The SQL expression contains an unsupported token.", httpStatusCode: http.StatusBadRequest, } - ErrParseUnsupportedLiteralsGroupBy = &Error{ + RespErrParseUnsupportedLiteralsGroupBy = &ResponseError{ code: "ParseUnsupportedLiteralsGroupBy", description: "The SQL expression contains an unsupported use of GROUP BY.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpectedMember = &Error{ + RespErrParseExpectedMember = &ResponseError{ code: "ParseExpectedMember", description: "The SQL expression contains an unsupported use of MEMBER.", httpStatusCode: http.StatusBadRequest, } - ErrParseUnsupportedSelect = &Error{ + RespErrParseUnsupportedSelect = &ResponseError{ code: "ParseUnsupportedSelect", description: "The SQL expression contains an unsupported use of SELECT.", httpStatusCode: http.StatusBadRequest, } - ErrParseUnsupportedCase = &Error{ + RespErrParseUnsupportedCase = &ResponseError{ code: "ParseUnsupportedCase", description: "The SQL expression contains an unsupported use of CASE.", httpStatusCode: http.StatusBadRequest, } - ErrParseUnsupportedCaseClause = &Error{ + RespErrParseUnsupportedCaseClause = &ResponseError{ code: "ParseUnsupportedCaseClause", description: "The SQL expression contains an unsupported use of CASE.", httpStatusCode: http.StatusBadRequest, } - ErrParseUnsupportedAlias = &Error{ + RespErrParseUnsupportedAlias = &ResponseError{ code: "ParseUnsupportedAlias", description: "The SQL expression contains an unsupported use of ALIAS.", httpStatusCode: http.StatusBadRequest, } - ErrParseUnsupportedSyntax = &Error{ + RespErrParseUnsupportedSyntax = &ResponseError{ code: "ParseUnsupportedSyntax", description: "The SQL expression contains unsupported syntax.", httpStatusCode: http.StatusBadRequest, } - ErrParseUnknownOperator = &Error{ + 
RespErrParseUnknownOperator = &ResponseError{ code: "ParseUnknownOperator", description: "The SQL expression contains an invalid operator.", httpStatusCode: http.StatusBadRequest, } - ErrParseMissingIdentAfterAt = &Error{ + RespErrParseMissingIdentAfterAt = &ResponseError{ code: "ParseMissingIdentAfterAt", description: "Did not find the expected identifier after the @ symbol in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseUnexpectedOperator = &Error{ + RespErrParseUnexpectedOperator = &ResponseError{ code: "ParseUnexpectedOperator", description: "The SQL expression contains an unexpected operator.", httpStatusCode: http.StatusBadRequest, } - ErrParseUnexpectedTerm = &Error{ + RespErrParseUnexpectedTerm = &ResponseError{ code: "ParseUnexpectedTerm", description: "The SQL expression contains an unexpected term.", httpStatusCode: http.StatusBadRequest, } - ErrParseUnexpectedToken = &Error{ + RespErrParseUnexpectedToken = &ResponseError{ code: "ParseUnexpectedToken", description: "The SQL expression contains an unexpected token.", httpStatusCode: http.StatusBadRequest, } - ErrParseUnexpectedKeyword = &Error{ + RespErrParseUnexpectedKeyword = &ResponseError{ code: "ParseUnexpectedKeyword", description: "The SQL expression contains an unexpected keyword.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpectedExpression = &Error{ + RespErrParseExpectedExpression = &ResponseError{ code: "ParseExpectedExpression", description: "Did not find the expected SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpectedLeftParenAfterCast = &Error{ + RespErrParseExpectedLeftParenAfterCast = &ResponseError{ code: "ParseExpectedLeftParenAfterCast", description: "Did not find expected the left parenthesis in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpectedLeftParenValueConstructor = &Error{ + RespErrParseExpectedLeftParenValueConstructor = &ResponseError{ code: "ParseExpectedLeftParenValueConstructor", 
description: "Did not find expected the left parenthesis in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpectedLeftParenBuiltinFunctionCall = &Error{ + RespErrParseExpectedLeftParenBuiltinFunctionCall = &ResponseError{ code: "ParseExpectedLeftParenBuiltinFunctionCall", description: "Did not find the expected left parenthesis in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpectedArgumentDelimiter = &Error{ + RespErrParseExpectedArgumentDelimiter = &ResponseError{ code: "ParseExpectedArgumentDelimiter", description: "Did not find the expected argument delimiter in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseCastArity = &Error{ + RespErrParseCastArity = &ResponseError{ code: "ParseCastArity", description: "The SQL expression CAST has incorrect arity.", httpStatusCode: http.StatusBadRequest, } - ErrParseInvalidTypeParam = &Error{ + RespErrParseInvalidTypeParam = &ResponseError{ code: "ParseInvalidTypeParam", description: "The SQL expression contains an invalid parameter value.", httpStatusCode: http.StatusBadRequest, } - ErrParseEmptySelect = &Error{ + RespErrParseEmptySelect = &ResponseError{ code: "ParseEmptySelect", description: "The SQL expression contains an empty SELECT.", httpStatusCode: http.StatusBadRequest, } - ErrParseSelectMissingFrom = &Error{ + RespErrParseSelectMissingFrom = &ResponseError{ code: "ParseSelectMissingFrom", description: "GROUP is not supported in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpectedIdentForGroupName = &Error{ + RespErrParseExpectedIdentForGroupName = &ResponseError{ code: "ParseExpectedIdentForGroupName", description: "GROUP is not supported in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpectedIdentForAlias = &Error{ + RespErrParseExpectedIdentForAlias = &ResponseError{ code: "ParseExpectedIdentForAlias", description: "Did not find the expected identifier for the alias in the 
SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseUnsupportedCallWithStar = &Error{ + RespErrParseUnsupportedCallWithStar = &ResponseError{ code: "ParseUnsupportedCallWithStar", description: "Only COUNT with (*) as a parameter is supported in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseNonUnaryAgregateFunctionCall = &Error{ + RespErrParseNonUnaryAgregateFunctionCall = &ResponseError{ code: "ParseNonUnaryAgregateFunctionCall", description: "Only one argument is supported for aggregate functions in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseMalformedJoin = &Error{ + RespErrParseMalformedJoin = &ResponseError{ code: "ParseMalformedJoin", description: "JOIN is not supported in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseExpectedIdentForAt = &Error{ + RespErrParseExpectedIdentForAt = &ResponseError{ code: "ParseExpectedIdentForAt", description: "Did not find the expected identifier for AT name in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseAsteriskIsNotAloneInSelectList = &Error{ + RespErrParseAsteriskIsNotAloneInSelectList = &ResponseError{ code: "ParseAsteriskIsNotAloneInSelectList", description: "Other expressions are not allowed in the SELECT list when '*' is used without dot notation in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseCannotMixSqbAndWildcardInSelectList = &Error{ + RespErrParseCannotMixSqbAndWildcardInSelectList = &ResponseError{ code: "ParseCannotMixSqbAndWildcardInSelectList", description: "Cannot mix [] and * in the same expression in a SELECT list in SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrParseInvalidContextForWildcardInSelectList = &Error{ + RespErrParseInvalidContextForWildcardInSelectList = &ResponseError{ code: "ParseInvalidContextForWildcardInSelectList", description: "Invalid use of * in SELECT list in the SQL expression.", httpStatusCode: 
http.StatusBadRequest, } - ErrIncorrectSQLFunctionArgumentType = &Error{ + RespErrIncorrectSQLFunctionArgumentType = &ResponseError{ code: "IncorrectSqlFunctionArgumentType", description: "Incorrect type of arguments in function call in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrValueParseFailure = &Error{ + RespErrValueParseFailure = &ResponseError{ code: "ValueParseFailure", description: "Time stamp parse failure in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrEvaluatorInvalidArguments = &Error{ + RespErrEvaluatorInvalidArguments = &ResponseError{ code: "EvaluatorInvalidArguments", description: "Incorrect number of arguments in the function call in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrIntegerOverflow = &Error{ + RespErrIntegerOverflow = &ResponseError{ code: "IntegerOverflow", description: "Int overflow or underflow in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrLikeInvalidInputs = &Error{ + RespErrLikeInvalidInputs = &ResponseError{ code: "LikeInvalidInputs", description: "Invalid argument given to the LIKE clause in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrCastFailed = &Error{ + RespErrCastFailed = &ResponseError{ code: "CastFailed", description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidCast = &Error{ + RespErrInvalidCast = &ResponseError{ code: "InvalidCast", description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrEvaluatorInvalidTimestampFormatPattern = &Error{ + RespErrEvaluatorInvalidTimestampFormatPattern = &ResponseError{ code: "EvaluatorInvalidTimestampFormatPattern", description: "Time stamp format pattern requires additional fields in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - 
ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing = &Error{ + RespErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing = &ResponseError{ code: "EvaluatorInvalidTimestampFormatPatternSymbolForParsing", description: "Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrEvaluatorTimestampFormatPatternDuplicateFields = &Error{ + RespErrEvaluatorTimestampFormatPatternDuplicateFields = &ResponseError{ code: "EvaluatorTimestampFormatPatternDuplicateFields", description: "Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch = &Error{ + RespErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch = &ResponseError{ code: "EvaluatorUnterminatedTimestampFormatPatternToken", description: "Time stamp format pattern contains unterminated token in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrEvaluatorUnterminatedTimestampFormatPatternToken = &Error{ + RespErrEvaluatorUnterminatedTimestampFormatPatternToken = &ResponseError{ code: "EvaluatorInvalidTimestampFormatPatternToken", description: "Time stamp format pattern contains an invalid token in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrEvaluatorInvalidTimestampFormatPatternToken = &Error{ + RespErrEvaluatorInvalidTimestampFormatPatternToken = &ResponseError{ code: "EvaluatorInvalidTimestampFormatPatternToken", description: "Time stamp format pattern contains an invalid token in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrEvaluatorInvalidTimestampFormatPatternSymbol = &Error{ + RespErrEvaluatorInvalidTimestampFormatPatternSymbol = &ResponseError{ code: "EvaluatorInvalidTimestampFormatPatternSymbol", description: "Time stamp format pattern contains an invalid symbol in 
the SQL expression.", httpStatusCode: http.StatusBadRequest, } - ErrEvaluatorBindingDoesNotExist = &Error{ + RespErrEvaluatorBindingDoesNotExist = &ResponseError{ code: "ErrEvaluatorBindingDoesNotExist", description: "A column name or a path provided does not exist in the SQL expression", httpStatusCode: http.StatusBadRequest, } - ErrMissingHeaders = &Error{ + RespErrMissingHeaders = &ResponseError{ code: "MissingHeaders", description: "Some headers in the query are missing from the file. Check the file and try again.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidColumnIndex = &Error{ + RespErrInvalidColumnIndex = &ResponseError{ code: "InvalidColumnIndex", description: "The column index is invalid. Please check the service documentation and try again.", httpStatusCode: http.StatusBadRequest, } - ErrPostPolicyConditionInvalidFormat = &Error{ + RespErrPostPolicyConditionInvalidFormat = &ResponseError{ code: "PostPolicyInvalidKeyName", description: "Invalid according to Policy: Policy Conditions failed", httpStatusCode: http.StatusForbidden, } - ErrMalformedJSON = &Error{ + RespErrMalformedJSON = &ResponseError{ code: "MalformedJSON", description: "The JSON was not well-formed or did not validate against our published format.", httpStatusCode: http.StatusBadRequest, } - ErrAccessKeyNotFound = &Error{ - code: "AccessKeyNotFound", - description: "", - httpStatusCode: http.StatusBadRequest, - } ) From ea2bb725190953b7c166ce009a330b5242a88677 Mon Sep 17 00:00:00 2001 From: steve Date: Thu, 24 Aug 2023 06:21:03 +0800 Subject: [PATCH 051/139] feat: put object --- s3/handlers/handlers.go | 64 ++++++++++++++++++++++++++++++++++ s3/handlers/proto.go | 5 +-- s3/requests/parsers_common.go | 41 ++++++++++++++++++++++ s3/responses/writers_common.go | 1 + s3/routers/routers.go | 7 ++-- s3/services/bucket/service.go | 11 ++++-- 6 files changed, 122 insertions(+), 7 deletions(-) create mode 100644 s3/requests/parsers_common.go diff --git a/s3/handlers/handlers.go 
b/s3/handlers/handlers.go index 404a58619..b729f2cc7 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -4,6 +4,7 @@ package handlers import ( "fmt" "github.com/bittorrent/go-btfs/s3/cctx" + "github.com/bittorrent/go-btfs/s3/etag" "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/services" @@ -297,6 +298,69 @@ func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { responses.WritePutBucketAclResponse(w, r) } +// PutObjectHandler http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html +func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { + var err error + defer func() { + cctx.SetHandleInf(r, fnName(), err) + }() + + // X-Amz-Copy-Source shouldn't be set for this call. + if _, ok := r.Header[consts.AmzCopySource]; ok { + responses.WriteErrorResponse(w, r, services.RespErrInvalidCopySource) + return + } + + buc, obj, err := requests.ParseBucketAndObject(r) + if err != nil { + responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestParameter) + return + } + + clientETag, err := etag.FromContentMD5(r.Header) + if err != nil { + responses.WriteErrorResponse(w, r, services.RespErrInvalidDigest) + return + } + _ = clientETag + + size := r.ContentLength + // todo: streaming signed + + if size == -1 { + responses.WriteErrorResponse(w, r, services.RespErrMissingContentLength) + return + } + if size == 0 { + responses.WriteErrorResponse(w, r, services.RespErrEntityTooSmall) + return + } + + if size > consts.MaxObjectSize { + responses.WriteErrorResponse(w, r, services.RespErrEntityTooLarge) + return + } + + ctx := r.Context() + ack := cctx.GetAccessKey(r) + + err = h.bucketSvc.CheckACL(ack, buc, s3action.PutObjectAction) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + // todo: convert error + err = s3utils.CheckPutObjectArgs(ctx, buc, obj) + if err != nil { + 
responses.WriteErrorResponse(w, r, err) + return + } + + // todo + fmt.Println("need put object...", buc, obj) +} + func fnName() string { pc := make([]uintptr, 1) runtime.Callers(3, pc) diff --git a/s3/handlers/proto.go b/s3/handlers/proto.go index 4b2c90e24..f5927ff15 100644 --- a/s3/handlers/proto.go +++ b/s3/handlers/proto.go @@ -10,7 +10,7 @@ type Handlerser interface { Auth(handler http.Handler) http.Handler Log(handler http.Handler) http.Handler - // handlers + // bucket PutBucketHandler(w http.ResponseWriter, r *http.Request) HeadBucketHandler(w http.ResponseWriter, r *http.Request) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) @@ -18,5 +18,6 @@ type Handlerser interface { GetBucketAclHandler(w http.ResponseWriter, r *http.Request) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) - //PutObjectHandler(w http.ResponseWriter, r *http.Request) + // object + PutObjectHandler(w http.ResponseWriter, r *http.Request) } diff --git a/s3/requests/parsers_common.go b/s3/requests/parsers_common.go new file mode 100644 index 000000000..2042eb217 --- /dev/null +++ b/s3/requests/parsers_common.go @@ -0,0 +1,41 @@ +package requests + +import ( + "github.com/gorilla/mux" + "net/http" + "net/url" + "path" +) + +func ParseBucketAndObject(r *http.Request) (bucket, object string, err error) { + vars := mux.Vars(r) + bucket = vars["bucket"] + object, err = unescapePath(vars["object"]) + return +} + +// unescapePath is similar to url.PathUnescape or url.QueryUnescape +// depending on input, additionally also handles situations such as +// `//` are normalized as `/`, also removes any `/` prefix before +// returning. 
+func unescapePath(p string) (string, error) { + ep, err := url.PathUnescape(p) + if err != nil { + return "", err + } + return trimLeadingSlash(ep), nil +} + +func trimLeadingSlash(ep string) string { + if len(ep) > 0 && ep[0] == '/' { + // Path ends with '/' preserve it + if ep[len(ep)-1] == '/' && len(ep) > 1 { + ep = path.Clean(ep) + ep += "/" + } else { + ep = path.Clean(ep) + } + ep = ep[1:] + } + return ep +} diff --git a/s3/responses/writers_common.go b/s3/responses/writers_common.go index 1563eedf3..f2db9c0c2 100644 --- a/s3/responses/writers_common.go +++ b/s3/responses/writers_common.go @@ -102,6 +102,7 @@ func WriteXMLResponse(w http.ResponseWriter, r *http.Request, statusCode int, re } func writeResponse(w http.ResponseWriter, r *http.Request, statusCode int, response []byte, mType mimeType) { + fmt.Println(r.Method, r.URL, statusCode) setCommonHeaders(w, r) if response != nil { w.Header().Set(consts.ContentLength, strconv.Itoa(len(response))) diff --git a/s3/routers/routers.go b/s3/routers/routers.go index a07f7b0af..681fccb18 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -30,6 +30,10 @@ func (routers *Routers) Register() http.Handler { ) bucket := root.PathPrefix("/{bucket}").Subrouter() + + //object + bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(routers.handlers.PutObjectHandler) + bucket.Methods(http.MethodGet).HandlerFunc(routers.handlers.GetBucketAclHandler).Queries("acl", "") bucket.Methods(http.MethodPut).HandlerFunc(routers.handlers.PutBucketAclHandler).Queries("acl", "") @@ -39,8 +43,5 @@ func (routers *Routers) Register() http.Handler { root.Methods(http.MethodGet).Path("/").HandlerFunc(routers.handlers.ListBucketsHandler) - //object - //bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(routers.handlers.PutObjectHandler) - return root } diff --git a/s3/services/bucket/service.go b/s3/services/bucket/service.go index ecc603e1d..e20eb60ae 100644 --- a/s3/services/bucket/service.go +++ 
b/s3/services/bucket/service.go @@ -41,7 +41,14 @@ func NewService(providers providers.Providerser, options ...Option) Service { return s } -func (s *service) CheckACL(accessKeyRecord *accesskey.AccessKey, bucketName string, action action.Action) (err error) { +func (s *service) CheckACL(ack *accesskey.AccessKey, bucketName string, act action.Action) (err error) { + if act == action.ListBucketAction { + if ack.Key == "" { + err = services.RespErrAccessDenied + } + return + } + //需要判断bucketName是否为空字符串 if bucketName == "" { return services.RespErrNoSuchBucket @@ -52,7 +59,7 @@ func (s *service) CheckACL(accessKeyRecord *accesskey.AccessKey, bucketName stri return err } - if policy.IsAllowed(bucketMeta.Owner == accessKeyRecord.Key, bucketMeta.Acl, action) == false { + if policy.IsAllowed(bucketMeta.Owner == ack.Key, bucketMeta.Acl, act) == false { return services.RespErrAccessDenied } return From a689141121fecbbead81f2c36f913767c572833b Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Thu, 24 Aug 2023 15:58:15 +0800 Subject: [PATCH 052/139] chore: --- s3/handlers/handlers.go | 32 ++++++++++++-------------------- s3/server.go | 1 + s3/services/bucket/proto.go | 1 + s3/services/bucket/service.go | 10 ++++++++++ 4 files changed, 24 insertions(+), 20 deletions(-) diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index b729f2cc7..7fcaa3229 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -147,14 +147,14 @@ func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ack := cctx.GetAccessKey(r) - err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) - if err != nil { - responses.WriteErrorResponse(w, r, err) + if ok := h.bucketSvc.HasBucket(ctx, req.Bucket); !ok { + responses.WriteErrorResponseHeadersOnly(w, r, services.RespErrNoSuchBucket) return } - if ok := h.bucketSvc.HasBucket(ctx, req.Bucket); !ok { - responses.WriteErrorResponseHeadersOnly(w, r, 
services.RespErrNoSuchBucket) + err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) + if err != nil { + responses.WriteErrorResponse(w, r, err) return } @@ -199,18 +199,9 @@ func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { cctx.SetHandleInf(r, fnName(), err) }() - req := &requests.ListBucketsRequest{} - err = req.Bind(r) - if err != nil { - responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestBody) - return - } - ack := cctx.GetAccessKey(r) - - err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.ListBucketAction) - if err != nil { - responses.WriteErrorResponse(w, r, err) + if ack.Key == "" { + responses.WriteErrorResponse(w, r, services.RespErrNoAccessKey) return } @@ -240,16 +231,17 @@ func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ack := cctx.GetAccessKey(r) + if !h.bucketSvc.HasBucket(ctx, req.Bucket) { + responses.WriteErrorResponseHeadersOnly(w, r, services.RespErrNoSuchBucket) + return + } + err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.GetBucketAclAction) if err != nil { responses.WriteErrorResponse(w, r, err) return } - if !h.bucketSvc.HasBucket(ctx, req.Bucket) { - responses.WriteErrorResponseHeadersOnly(w, r, services.RespErrNoSuchBucket) - return - } //todo check all errors acl, err := h.bucketSvc.GetBucketAcl(ctx, req.Bucket) if err != nil { diff --git a/s3/server.go b/s3/server.go index c8b569256..b762abc50 100644 --- a/s3/server.go +++ b/s3/server.go @@ -36,6 +36,7 @@ func NewServer(storageStore storage.StateStorer) *server.Server { accessKeySvc := accesskey.NewService(ps) authSvc := auth.NewService(ps, accessKeySvc) bucketSvc := bucket.NewService(ps) + bucketSvc.SetEmptyBucket(bucketSvc.EmptyBucket) //todo EmptyBucket参数后续更新为object对象 // handlers hs := handlers.NewHandlers(corsSvc, authSvc, bucketSvc) diff --git a/s3/services/bucket/proto.go b/s3/services/bucket/proto.go index d1ebf2b82..c6599231c 100644 --- 
a/s3/services/bucket/proto.go +++ b/s3/services/bucket/proto.go @@ -17,6 +17,7 @@ type Service interface { GetAllBucketsOfUser(username string) (list []*Bucket, err error) UpdateBucketAcl(ctx context.Context, bucket, acl string) error GetBucketAcl(ctx context.Context, bucket string) (string, error) + EmptyBucket(ctx context.Context, bucket string) (bool, error) } // Bucket contains bucket metadata. diff --git a/s3/services/bucket/service.go b/s3/services/bucket/service.go index e20eb60ae..6da10744e 100644 --- a/s3/services/bucket/service.go +++ b/s3/services/bucket/service.go @@ -200,3 +200,13 @@ func (s *service) GetBucketAcl(ctx context.Context, bucket string) (string, erro } return meta.Acl, nil } + +// EmptyBucket object中后续添加 +func (s *service) EmptyBucket(ctx context.Context, bucket string) (bool, error) { + //loi, err := s.ListObjects(ctx, bucket, "", "", "", 1) + //if err != nil { + // return false, err + //} + //return len(loi.Objects) == 0, nil + return true, nil +} From 98a169f336470224ba6faa952d013fa0c3fbf8ad Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Thu, 24 Aug 2023 16:10:50 +0800 Subject: [PATCH 053/139] mod: mod bucket parse req --- s3/handlers/handlers.go | 11 ++++------- s3/requests/parsers.go | 14 +++++++++----- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 7fcaa3229..0b7387e22 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -91,7 +91,7 @@ func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { cctx.SetHandleInf(r, fnName(), err) }() - req, err := requests.ParsePubBucketRequest(r) + req, err := requests.ParsePutBucketRequest(r) if err != nil { responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestBody) return @@ -167,8 +167,7 @@ func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { cctx.SetHandleInf(r, fnName(), err) }() - req := &requests.DeleteBucketRequest{} - err = 
req.Bind(r) + req, err := requests.ParseDeleteBucketRequest(r) if err != nil { responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestBody) return @@ -221,8 +220,7 @@ func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { cctx.SetHandleInf(r, fnName(), err) }() - req := &requests.GetBucketAclRequest{} - err = req.Bind(r) + req, err := requests.ParseGetBucketAclRequest(r) if err != nil { responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestBody) return @@ -258,8 +256,7 @@ func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { cctx.SetHandleInf(r, fnName(), err) }() - req := &requests.PutBucketAclRequest{} - err = req.Bind(r) + req, err := requests.ParsePutBucketAclRequest(r) if err != nil || len(req.ACL) == 0 || len(req.Bucket) == 0 { responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestBody) return diff --git a/s3/requests/parsers.go b/s3/requests/parsers.go index ca2beccb6..b2f4d629f 100644 --- a/s3/requests/parsers.go +++ b/s3/requests/parsers.go @@ -22,7 +22,7 @@ import ( // return //} -func ParsePubBucketRequest(r *http.Request) (req *PutBucketRequest, err error) { +func ParsePutBucketRequest(r *http.Request) (req *PutBucketRequest, err error) { req = &PutBucketRequest{} vars := mux.Vars(r) @@ -59,11 +59,12 @@ type DeleteBucketRequest struct { Bucket string } -func (req *DeleteBucketRequest) Bind(r *http.Request) (err error) { +func ParseDeleteBucketRequest(r *http.Request) (req *DeleteBucketRequest, err error) { vars := mux.Vars(r) bucket := vars["bucket"] //set request + req = &DeleteBucketRequest{} req.Bucket = bucket return } @@ -73,11 +74,12 @@ type ListBucketsRequest struct { Bucket string } -func (req *ListBucketsRequest) Bind(r *http.Request) (err error) { +func ParseListBucketsRequest(r *http.Request) (req *ListBucketsRequest, err error) { vars := mux.Vars(r) bucket := vars["bucket"] //set request + req = &ListBucketsRequest{} req.Bucket = bucket return } @@ -87,11 
+89,12 @@ type GetBucketAclRequest struct { Bucket string } -func (req *GetBucketAclRequest) Bind(r *http.Request) (err error) { +func ParseGetBucketAclRequest(r *http.Request) (req *GetBucketAclRequest, err error) { vars := mux.Vars(r) bucket := vars["bucket"] //set request + req = &GetBucketAclRequest{} req.Bucket = bucket return } @@ -102,13 +105,14 @@ type PutBucketAclRequest struct { ACL string } -func (req *PutBucketAclRequest) Bind(r *http.Request) (err error) { +func ParsePutBucketAclRequest(r *http.Request) (req *PutBucketAclRequest, err error) { vars := mux.Vars(r) bucket := vars["bucket"] acl := r.Header.Get(consts.AmzACL) //set request + req = &PutBucketAclRequest{} req.Bucket = bucket req.ACL = acl return From 4df4864acceb75fc29f628be39dfe4b96b5badaa Mon Sep 17 00:00:00 2001 From: steve Date: Thu, 24 Aug 2023 20:00:40 +0800 Subject: [PATCH 054/139] optmize: adjust place of response error --- s3/handlers/handlers.go | 59 +-- s3/requests/parsers.go | 6 +- .../response_error.go => responses/errors.go} | 412 +++++++++--------- s3/responses/writers_common.go | 19 +- s3/services/auth/check_handler_auth.go | 14 +- s3/services/auth/signature-v4-parser.go | 52 +-- s3/services/auth/signature-v4-utils.go | 6 +- s3/services/auth/signature-v4.go | 32 +- s3/services/bucket/proto.go | 3 + s3/services/bucket/service.go | 27 +- 10 files changed, 315 insertions(+), 315 deletions(-) rename s3/{services/response_error.go => responses/errors.go} (77%) diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 0b7387e22..d6cae47fd 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -2,12 +2,12 @@ package handlers import ( + "errors" "fmt" "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/etag" "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/services" "github.com/bittorrent/go-btfs/s3/services/auth" 
"github.com/bittorrent/go-btfs/s3/services/bucket" "github.com/bittorrent/go-btfs/s3/services/cors" @@ -93,32 +93,39 @@ func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { req, err := requests.ParsePutBucketRequest(r) if err != nil { - responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) return } ctx := r.Context() + ack := cctx.GetAccessKey(r) + + err = h.bucketSvc.CheckACL(ack, "", s3action.PutBucketAclAction) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrAccessDenied) + return + } if err = s3utils.CheckValidBucketNameStrict(req.Bucket); err != nil { - responses.WriteErrorResponse(w, r, services.RespErrInvalidBucketName) + responses.WriteErrorResponse(w, r, responses.ErrInvalidBucketName) return } if !requests.CheckAclPermissionType(&req.ACL) { - err = services.RespErrNotImplemented - responses.WriteErrorResponse(w, r, services.RespErrNotImplemented) + err = responses.ErrNotImplemented + responses.WriteErrorResponse(w, r, responses.ErrNotImplemented) return } if ok := h.bucketSvc.HasBucket(ctx, req.Bucket); ok { - err = services.RespErrBucketAlreadyExists - responses.WriteErrorResponseHeadersOnly(w, r, services.RespErrBucketAlreadyExists) + err = responses.ErrBucketAlreadyExists + responses.WriteErrorResponseHeadersOnly(w, r, responses.ErrBucketAlreadyExists) return } err = h.bucketSvc.CreateBucket(ctx, req.Bucket, req.Region, cctx.GetAccessKey(r).Key, req.ACL) if err != nil { - responses.WriteErrorResponse(w, r, services.RespErrInternalError) + responses.WriteErrorResponse(w, r, responses.ErrInternalError) return } @@ -140,21 +147,19 @@ func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { req, err := requests.ParseHeadBucketRequest(r) if err != nil { - responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) return } - 
ctx := r.Context() ack := cctx.GetAccessKey(r) - if ok := h.bucketSvc.HasBucket(ctx, req.Bucket); !ok { - responses.WriteErrorResponseHeadersOnly(w, r, services.RespErrNoSuchBucket) + err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } - - err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) if err != nil { - responses.WriteErrorResponse(w, r, err) + responses.WriteErrorResponse(w, r, responses.ErrAccessDenied) return } @@ -169,7 +174,7 @@ func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { req, err := requests.ParseDeleteBucketRequest(r) if err != nil { - responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) return } @@ -200,7 +205,7 @@ func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { ack := cctx.GetAccessKey(r) if ack.Key == "" { - responses.WriteErrorResponse(w, r, services.RespErrNoAccessKey) + responses.WriteErrorResponse(w, r, responses.ErrNoAccessKey) return } @@ -222,7 +227,7 @@ func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { req, err := requests.ParseGetBucketAclRequest(r) if err != nil { - responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) return } @@ -230,7 +235,7 @@ func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { ack := cctx.GetAccessKey(r) if !h.bucketSvc.HasBucket(ctx, req.Bucket) { - responses.WriteErrorResponseHeadersOnly(w, r, services.RespErrNoSuchBucket) + responses.WriteErrorResponseHeadersOnly(w, r, responses.ErrNoSuchBucket) return } @@ -258,7 +263,7 @@ func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { req, err := requests.ParsePutBucketAclRequest(r) if err != nil || 
len(req.ACL) == 0 || len(req.Bucket) == 0 { - responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) return } @@ -272,7 +277,7 @@ func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { } if !requests.CheckAclPermissionType(&req.ACL) { - responses.WriteErrorResponse(w, r, services.RespErrNotImplemented) + responses.WriteErrorResponse(w, r, responses.ErrNotImplemented) return } @@ -296,19 +301,19 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { // X-Amz-Copy-Source shouldn't be set for this call. if _, ok := r.Header[consts.AmzCopySource]; ok { - responses.WriteErrorResponse(w, r, services.RespErrInvalidCopySource) + responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) return } buc, obj, err := requests.ParseBucketAndObject(r) if err != nil { - responses.WriteErrorResponse(w, r, services.RespErrInvalidRequestParameter) + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) return } clientETag, err := etag.FromContentMD5(r.Header) if err != nil { - responses.WriteErrorResponse(w, r, services.RespErrInvalidDigest) + responses.WriteErrorResponse(w, r, responses.ErrInvalidDigest) return } _ = clientETag @@ -317,16 +322,16 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { // todo: streaming signed if size == -1 { - responses.WriteErrorResponse(w, r, services.RespErrMissingContentLength) + responses.WriteErrorResponse(w, r, responses.ErrMissingContentLength) return } if size == 0 { - responses.WriteErrorResponse(w, r, services.RespErrEntityTooSmall) + responses.WriteErrorResponse(w, r, responses.ErrEntityTooSmall) return } if size > consts.MaxObjectSize { - responses.WriteErrorResponse(w, r, services.RespErrEntityTooLarge) + responses.WriteErrorResponse(w, r, responses.ErrEntityTooLarge) return } diff --git a/s3/requests/parsers.go b/s3/requests/parsers.go index 
b2f4d629f..98368fcf1 100644 --- a/s3/requests/parsers.go +++ b/s3/requests/parsers.go @@ -2,7 +2,7 @@ package requests import ( "encoding/xml" - "github.com/bittorrent/go-btfs/s3/services" + "github.com/bittorrent/go-btfs/s3/responses" "net/http" "path" @@ -121,7 +121,7 @@ func ParsePutBucketAclRequest(r *http.Request) (req *PutBucketAclRequest, err er /*********************************/ // Parses location constraint from the incoming reader. -func parseLocationConstraint(r *http.Request) (location string, s3Error *services.ResponseError) { +func parseLocationConstraint(r *http.Request) (location string, s3Error *responses.Error) { // If the request has no body with content-length set to 0, // we do not have to validate location constraint. Bucket will // be created at default region. @@ -129,7 +129,7 @@ func parseLocationConstraint(r *http.Request) (location string, s3Error *service err := utils.XmlDecoder(r.Body, &locationConstraint, r.ContentLength) if err != nil && r.ContentLength != 0 { // Treat all other failures as XML parsing errors. 
- return "", services.RespErrMalformedXML + return "", responses.ErrMalformedXML } // else for both err as nil or io.EOF location = locationConstraint.Location if location == "" { diff --git a/s3/services/response_error.go b/s3/responses/errors.go similarity index 77% rename from s3/services/response_error.go rename to s3/responses/errors.go index c86f5e0cb..1a9858924 100644 --- a/s3/services/response_error.go +++ b/s3/responses/errors.go @@ -1,1028 +1,1028 @@ -package services +package responses import ( "fmt" "net/http" ) -type ResponseError struct { +type Error struct { code string description string httpStatusCode int } -func (err *ResponseError) Code() string { +func (err *Error) Code() string { return err.code } -func (err *ResponseError) Description() string { +func (err *Error) Description() string { return err.description } -func (err *ResponseError) HTTPStatusCode() int { +func (err *Error) HTTPStatusCode() int { return err.httpStatusCode } -func (err *ResponseError) Error() string { +func (err *Error) Error() string { return fmt.Sprintf("[%s]%s", err.code, err.description) } // Errors http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html var ( - RespErrInvalidCopyDest = &ResponseError{ + ErrInvalidCopyDest = &Error{ code: "InvalidRequest", description: "This copy request is illegal because it is trying to copy an object to itself without changing the object's metadata, storage class, website redirect location or encryption attributes.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidCopySource = &ResponseError{ + ErrInvalidCopySource = &Error{ code: "InvalidArgument", description: "Copy Source must mention the source bucket and key: sourcebucket/sourcekey.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidMetadataDirective = &ResponseError{ + ErrInvalidMetadataDirective = &Error{ code: "InvalidArgument", description: "Unknown metadata directive.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidRequestBody = 
&ResponseError{ + ErrInvalidRequestBody = &Error{ code: "InvalidArgument", description: "Body shouldn't be set for this request.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidMaxUploads = &ResponseError{ + ErrInvalidMaxUploads = &Error{ code: "InvalidArgument", description: "Argument max-uploads must be an integer between 0 and 2147483647", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidMaxKeys = &ResponseError{ + ErrInvalidMaxKeys = &Error{ code: "InvalidArgument", description: "Argument maxKeys must be an integer between 0 and 2147483647", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidEncodingMethod = &ResponseError{ + ErrInvalidEncodingMethod = &Error{ code: "InvalidArgument", description: "Invalid Encoding Method specified in Request", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidMaxParts = &ResponseError{ + ErrInvalidMaxParts = &Error{ code: "InvalidArgument", description: "Part number must be an integer between 1 and 10000, inclusive", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidPartNumberMarker = &ResponseError{ + ErrInvalidPartNumberMarker = &Error{ code: "InvalidArgument", description: "Argument partNumberMarker must be an integer.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidPolicyDocument = &ResponseError{ + ErrInvalidPolicyDocument = &Error{ code: "InvalidPolicyDocument", description: "The content of the form does not meet the conditions specified in the policy document.", httpStatusCode: http.StatusBadRequest, } - RespErrAccessDenied = &ResponseError{ + ErrAccessDenied = &Error{ code: "AccessDenied", description: "Access Denied.", httpStatusCode: http.StatusForbidden, } - RespErrBadDigest = &ResponseError{ + ErrBadDigest = &Error{ code: "BadDigest", description: "The Content-Md5 you specified did not match what we received.", httpStatusCode: http.StatusBadRequest, } - RespErrEntityTooSmall = &ResponseError{ + ErrEntityTooSmall = &Error{ code: "EntityTooSmall", description: "Your 
proposed upload is smaller than the minimum allowed object size.", httpStatusCode: http.StatusBadRequest, } - RespErrEntityTooLarge = &ResponseError{ + ErrEntityTooLarge = &Error{ code: "EntityTooLarge", description: "Your proposed upload exceeds the maximum allowed object size.", httpStatusCode: http.StatusBadRequest, } - RespErrIncompleteBody = &ResponseError{ + ErrIncompleteBody = &Error{ code: "IncompleteBody", description: "You did not provide the number of bytes specified by the Content-Length HTTP header.", httpStatusCode: http.StatusBadRequest, } - RespErrInternalError = &ResponseError{ + ErrInternalError = &Error{ code: "InternalError", description: "We encountered an internal error, please try again.", httpStatusCode: http.StatusInternalServerError, } - RespErrInvalidAccessKeyID = &ResponseError{ + ErrInvalidAccessKeyID = &Error{ code: "InvalidAccessKeyId", description: "The Access Key Id you provided does not exist in our records.", httpStatusCode: http.StatusForbidden, } - RespErrAccessKeyDisabled = &ResponseError{ + ErrAccessKeyDisabled = &Error{ code: "InvalidAccessKeyId", description: "Your account is disabled; please contact your administrator.", httpStatusCode: http.StatusForbidden, } - RespErrInvalidBucketName = &ResponseError{ + ErrInvalidBucketName = &Error{ code: "InvalidBucketName", description: "The specified bucket is not valid.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidDigest = &ResponseError{ + ErrInvalidDigest = &Error{ code: "InvalidDigest", description: "The Content-Md5 you specified is not valid.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidRange = &ResponseError{ + ErrInvalidRange = &Error{ code: "InvalidRange", description: "The requested range is not satisfiable", httpStatusCode: http.StatusRequestedRangeNotSatisfiable, } - RespErrInvalidRangePartNumber = &ResponseError{ + ErrInvalidRangePartNumber = &Error{ code: "InvalidRequest", description: "Cannot specify both Range header and partNumber query 
parameter", httpStatusCode: http.StatusBadRequest, } - RespErrMalformedXML = &ResponseError{ + ErrMalformedXML = &Error{ code: "MalformedXML", description: "The XML you provided was not well-formed or did not validate against our published schema.", httpStatusCode: http.StatusBadRequest, } - RespErrMissingContentLength = &ResponseError{ + ErrMissingContentLength = &Error{ code: "MissingContentLength", description: "You must provide the Content-Length HTTP header.", httpStatusCode: http.StatusLengthRequired, } - RespErrMissingContentMD5 = &ResponseError{ + ErrMissingContentMD5 = &Error{ code: "MissingContentMD5", description: "Missing required header for this request: Content-Md5.", httpStatusCode: http.StatusBadRequest, } - RespErrMissingSecurityHeader = &ResponseError{ + ErrMissingSecurityHeader = &Error{ code: "MissingSecurityHeader", description: "Your request was missing a required header", httpStatusCode: http.StatusBadRequest, } - RespErrMissingRequestBodyError = &ResponseError{ + ErrMissingRequestBodyError = &Error{ code: "MissingRequestBodyError", description: "Request body is empty.", httpStatusCode: http.StatusLengthRequired, } - RespErrNoSuchBucket = &ResponseError{ + ErrNoSuchBucket = &Error{ code: "NoSuchBucket", description: "The specified bucket does not exist", httpStatusCode: http.StatusNotFound, } - RespErrNoSuchBucketPolicy = &ResponseError{ + ErrNoSuchBucketPolicy = &Error{ code: "NoSuchBucketPolicy", description: "The bucket policy does not exist", httpStatusCode: http.StatusNotFound, } - RespErrNoSuchLifecycleConfiguration = &ResponseError{ + ErrNoSuchLifecycleConfiguration = &Error{ code: "NoSuchLifecycleConfiguration", description: "The lifecycle configuration does not exist", httpStatusCode: http.StatusNotFound, } - RespErrNoSuchUser = &ResponseError{ + ErrNoSuchUser = &Error{ code: "NoSuchUser", description: "The specified user does not exist", httpStatusCode: http.StatusConflict, } - RespErrUserAlreadyExists = &ResponseError{ + 
ErrUserAlreadyExists = &Error{ code: "UserAlreadyExists", description: "The request was rejected because it attempted to create a resource that already exists .", httpStatusCode: http.StatusConflict, } - RespErrNoSuchUserPolicy = &ResponseError{ + ErrNoSuchUserPolicy = &Error{ code: "NoSuchUserPolicy", description: "The specified user policy does not exist", httpStatusCode: http.StatusConflict, } - RespErrUserPolicyAlreadyExists = &ResponseError{ + ErrUserPolicyAlreadyExists = &Error{ code: "UserPolicyAlreadyExists", description: "The same user policy already exists .", httpStatusCode: http.StatusConflict, } - RespErrNoSuchKey = &ResponseError{ + ErrNoSuchKey = &Error{ code: "NoSuchKey", description: "The specified key does not exist.", httpStatusCode: http.StatusNotFound, } - RespErrNoSuchUpload = &ResponseError{ + ErrNoSuchUpload = &Error{ code: "NoSuchUpload", description: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", httpStatusCode: http.StatusNotFound, } - RespErrInvalidVersionID = &ResponseError{ + ErrInvalidVersionID = &Error{ code: "InvalidArgument", description: "Invalid version id specified", httpStatusCode: http.StatusBadRequest, } - RespErrNoSuchVersion = &ResponseError{ + ErrNoSuchVersion = &Error{ code: "NoSuchVersion", description: "The specified version does not exist.", httpStatusCode: http.StatusNotFound, } - RespErrNotImplemented = &ResponseError{ + ErrNotImplemented = &Error{ code: "NotImplemented", description: "A header you provided implies functionality that is not implemented", httpStatusCode: http.StatusNotImplemented, } - RespErrPreconditionFailed = &ResponseError{ + ErrPreconditionFailed = &Error{ code: "PreconditionFailed", description: "At least one of the pre-conditions you specified did not hold", httpStatusCode: http.StatusPreconditionFailed, } - RespErrRequestTimeTooSkewed = &ResponseError{ + ErrRequestTimeTooSkewed = &Error{ code: 
"RequestTimeTooSkewed", description: "The difference between the request time and the server's time is too large.", httpStatusCode: http.StatusForbidden, } - RespErrSignatureDoesNotMatch = &ResponseError{ + ErrSignatureDoesNotMatch = &Error{ code: "SignatureDoesNotMatch", description: "The request signature we calculated does not match the signature you provided. Check your key and signing method.", httpStatusCode: http.StatusForbidden, } - RespErrMethodNotAllowed = &ResponseError{ + ErrMethodNotAllowed = &Error{ code: "MethodNotAllowed", description: "The specified method is not allowed against this resource.", httpStatusCode: http.StatusMethodNotAllowed, } - RespErrInvalidPart = &ResponseError{ + ErrInvalidPart = &Error{ code: "InvalidPart", description: "One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidPartOrder = &ResponseError{ + ErrInvalidPartOrder = &Error{ code: "InvalidPartOrder", description: "The list of parts was not in ascending order. 
The parts list must be specified in order by part number.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidObjectState = &ResponseError{ + ErrInvalidObjectState = &Error{ code: "InvalidObjectState", description: "The operation is not valid for the current state of the object.", httpStatusCode: http.StatusForbidden, } - RespErrAuthorizationHeaderMalformed = &ResponseError{ + ErrAuthorizationHeaderMalformed = &Error{ code: "AuthorizationHeaderMalformed", description: "The authorization header is malformed; the region is wrong; expecting 'us-east-1'.", httpStatusCode: http.StatusBadRequest, } - RespErrMalformedPOSTRequest = &ResponseError{ + ErrMalformedPOSTRequest = &Error{ code: "MalformedPOSTRequest", description: "The body of your POST request is not well-formed multipart/form-data.", httpStatusCode: http.StatusBadRequest, } - RespErrPOSTFileRequired = &ResponseError{ + ErrPOSTFileRequired = &Error{ code: "InvalidArgument", description: "POST requires exactly one file upload per request.", httpStatusCode: http.StatusBadRequest, } - RespErrSignatureVersionNotSupported = &ResponseError{ + ErrSignatureVersionNotSupported = &Error{ code: "InvalidRequest", description: "The authorization mechanism you have provided is not supported. Please use AWS4-HMAC-SHA256.", httpStatusCode: http.StatusBadRequest, } - RespErrBucketNotEmpty = &ResponseError{ + ErrBucketNotEmpty = &Error{ code: "BucketNotEmpty", description: "The bucket you tried to delete is not empty", httpStatusCode: http.StatusConflict, } - RespErrBucketAlreadyExists = &ResponseError{ + ErrBucketAlreadyExists = &Error{ code: "BucketAlreadyExists", description: "The requested bucket name is not available. The bucket namespace is shared by all users of the system. 
Please select a different name and try again.", httpStatusCode: http.StatusConflict, } - RespErrAllAccessDisabled = &ResponseError{ + ErrAllAccessDisabled = &Error{ code: "AllAccessDisabled", description: "All access to this resource has been disabled.", httpStatusCode: http.StatusForbidden, } - RespErrMalformedPolicy = &ResponseError{ + ErrMalformedPolicy = &Error{ code: "MalformedPolicy", description: "Policy has invalid resource.", httpStatusCode: http.StatusBadRequest, } - RespErrMissingFields = &ResponseError{ // todo + ErrMissingFields = &Error{ // todo code: "InvalidRequest", description: "ErrMissingFields", httpStatusCode: http.StatusBadRequest, } - RespErrMissingCredTag = &ResponseError{ + ErrMissingCredTag = &Error{ code: "InvalidRequest", description: "Missing Credential field for this request.", httpStatusCode: http.StatusBadRequest, } - RespErrCredMalformed = &ResponseError{ // todo + ErrCredMalformed = &Error{ // todo code: "InvalidRequest", description: "ErrCredMalformed", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidRegion = &ResponseError{ + ErrInvalidRegion = &Error{ code: "InvalidRegion", description: "Region does not match.", httpStatusCode: http.StatusBadRequest, } - RespErrMissingSignTag = &ResponseError{ + ErrMissingSignTag = &Error{ code: "AccessDenied", description: "Signature header missing Signature field.", httpStatusCode: http.StatusBadRequest, } - RespErrMissingSignHeadersTag = &ResponseError{ + ErrMissingSignHeadersTag = &Error{ code: "InvalidArgument", description: "Signature header missing SignedHeaders field.", httpStatusCode: http.StatusBadRequest, } - RespErrAuthHeaderEmpty = &ResponseError{ + ErrAuthHeaderEmpty = &Error{ code: "InvalidArgument", description: "Authorization header is invalid -- one and only one ' ' (space) required.", httpStatusCode: http.StatusBadRequest, } - RespErrMissingDateHeader = &ResponseError{ + ErrMissingDateHeader = &Error{ code: "AccessDenied", description: "AWS authentication requires a 
valid Date or x-amz-date header", httpStatusCode: http.StatusBadRequest, } - RespErrExpiredPresignRequest = &ResponseError{ + ErrExpiredPresignRequest = &Error{ code: "AccessDenied", description: "Request has expired", httpStatusCode: http.StatusForbidden, } - RespErrRequestNotReadyYet = &ResponseError{ + ErrRequestNotReadyYet = &Error{ code: "AccessDenied", description: "Request is not valid yet", httpStatusCode: http.StatusForbidden, } - RespErrSlowDown = &ResponseError{ + ErrSlowDown = &Error{ code: "SlowDown", description: "Resource requested is unreadable, please reduce your request rate", httpStatusCode: http.StatusServiceUnavailable, } - RespErrBadRequest = &ResponseError{ + ErrBadRequest = &Error{ code: "BadRequest", description: "400 BadRequest", httpStatusCode: http.StatusBadRequest, } - RespErrKeyTooLongError = &ResponseError{ + ErrKeyTooLongError = &Error{ code: "KeyTooLongError", description: "Your key is too long", httpStatusCode: http.StatusBadRequest, } - RespErrUnsignedHeaders = &ResponseError{ + ErrUnsignedHeaders = &Error{ code: "AccessDenied", description: "There were headers present in the request which were not signed", httpStatusCode: http.StatusBadRequest, } - RespErrBucketAlreadyOwnedByYou = &ResponseError{ + ErrBucketAlreadyOwnedByYou = &Error{ code: "BucketAlreadyOwnedByYou", description: "Your previous request to create the named bucket succeeded and you already own it.", httpStatusCode: http.StatusConflict, } - RespErrInvalidDuration = &ResponseError{ + ErrInvalidDuration = &Error{ code: "InvalidDuration", description: "Duration provided in the request is invalid.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidBucketObjectLockConfiguration = &ResponseError{ + ErrInvalidBucketObjectLockConfiguration = &Error{ code: "InvalidRequest", description: "Bucket is missing ObjectLockConfiguration", httpStatusCode: http.StatusBadRequest, } - RespErrBucketTaggingNotFound = &ResponseError{ + ErrBucketTaggingNotFound = &Error{ code: 
"NoSuchTagSet", description: "The TagSet does not exist", httpStatusCode: http.StatusNotFound, } - RespErrObjectLockConfigurationNotAllowed = &ResponseError{ + ErrObjectLockConfigurationNotAllowed = &Error{ code: "InvalidBucketState", description: "Object Lock configuration cannot be enabled on existing buckets", httpStatusCode: http.StatusConflict, } - RespErrNoSuchCORSConfiguration = &ResponseError{ + ErrNoSuchCORSConfiguration = &Error{ code: "NoSuchCORSConfiguration", description: "The CORS configuration does not exist", httpStatusCode: http.StatusNotFound, } - RespErrNoSuchWebsiteConfiguration = &ResponseError{ + ErrNoSuchWebsiteConfiguration = &Error{ code: "NoSuchWebsiteConfiguration", description: "The specified bucket does not have a website configuration", httpStatusCode: http.StatusNotFound, } - RespErrReplicationConfigurationNotFoundError = &ResponseError{ + ErrReplicationConfigurationNotFoundError = &Error{ code: "ReplicationConfigurationNotFoundError", description: "The replication configuration was not found", httpStatusCode: http.StatusNotFound, } - RespErrReplicationNeedsVersioningError = &ResponseError{ + ErrReplicationNeedsVersioningError = &Error{ code: "InvalidRequest", description: "Versioning must be 'Enabled' on the bucket to apply a replication configuration", httpStatusCode: http.StatusBadRequest, } - RespErrReplicationBucketNeedsVersioningError = &ResponseError{ + ErrReplicationBucketNeedsVersioningError = &Error{ code: "InvalidRequest", description: "Versioning must be 'Enabled' on the bucket to add a replication target", httpStatusCode: http.StatusBadRequest, } - RespErrNoSuchObjectLockConfiguration = &ResponseError{ + ErrNoSuchObjectLockConfiguration = &Error{ code: "NoSuchObjectLockConfiguration", description: "The specified object does not have a ObjectLock configuration", httpStatusCode: http.StatusBadRequest, } - RespErrObjectLocked = &ResponseError{ + ErrObjectLocked = &Error{ code: "InvalidRequest", description: "Object is WORM 
protected and cannot be overwritten", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidRetentionDate = &ResponseError{ + ErrInvalidRetentionDate = &Error{ code: "InvalidRequest", description: "Date must be provided in ISO 8601 format", httpStatusCode: http.StatusBadRequest, } - RespErrPastObjectLockRetainDate = &ResponseError{ + ErrPastObjectLockRetainDate = &Error{ code: "InvalidRequest", description: "the retain until date must be in the future", httpStatusCode: http.StatusBadRequest, } - RespErrUnknownWORMModeDirective = &ResponseError{ + ErrUnknownWORMModeDirective = &Error{ code: "InvalidRequest", description: "unknown wormMode directive", httpStatusCode: http.StatusBadRequest, } - RespErrObjectLockInvalidHeaders = &ResponseError{ + ErrObjectLockInvalidHeaders = &Error{ code: "InvalidRequest", description: "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied", httpStatusCode: http.StatusBadRequest, } - RespErrObjectRestoreAlreadyInProgress = &ResponseError{ + ErrObjectRestoreAlreadyInProgress = &Error{ code: "RestoreAlreadyInProgress", description: "Object restore is already in progress", httpStatusCode: http.StatusConflict, } // Bucket notification related errors. - RespErrEventNotification = &ResponseError{ + ErrEventNotification = &Error{ code: "InvalidArgument", description: "A specified event is not supported for notifications.", httpStatusCode: http.StatusBadRequest, } - RespErrARNNotification = &ResponseError{ + ErrARNNotification = &Error{ code: "InvalidArgument", description: "A specified destination ARN does not exist or is not well-formed. Verify the destination ARN.", httpStatusCode: http.StatusBadRequest, } - RespErrRegionNotification = &ResponseError{ + ErrRegionNotification = &Error{ code: "InvalidArgument", description: "A specified destination is in a different region than the bucket. 
You must use a destination that resides in the same region as the bucket.", httpStatusCode: http.StatusBadRequest, } - RespErrOverlappingFilterNotification = &ResponseError{ + ErrOverlappingFilterNotification = &Error{ code: "InvalidArgument", description: "An object key name filtering rule defined with overlapping prefixes, overlapping suffixes, or overlapping combinations of prefixes and suffixes for the same event types.", httpStatusCode: http.StatusBadRequest, } - RespErrFilterNameInvalid = &ResponseError{ + ErrFilterNameInvalid = &Error{ code: "InvalidArgument", description: "filter rule name must be either prefix or suffix", httpStatusCode: http.StatusBadRequest, } - RespErrFilterNamePrefix = &ResponseError{ + ErrFilterNamePrefix = &Error{ code: "InvalidArgument", description: "Cannot specify more than one prefix rule in a filter.", httpStatusCode: http.StatusBadRequest, } - RespErrFilterNameSuffix = &ResponseError{ + ErrFilterNameSuffix = &Error{ code: "InvalidArgument", description: "Cannot specify more than one suffix rule in a filter.", httpStatusCode: http.StatusBadRequest, } - RespErrFilterValueInvalid = &ResponseError{ + ErrFilterValueInvalid = &Error{ code: "InvalidArgument", description: "Size of filter rule value cannot exceed 1024 bytes in UTF-8 representation", httpStatusCode: http.StatusBadRequest, } - RespErrOverlappingConfigs = &ResponseError{ + ErrOverlappingConfigs = &Error{ code: "InvalidArgument", description: "Configurations overlap. 
Configurations on the same bucket cannot share a common event type.", httpStatusCode: http.StatusBadRequest, } - RespErrContentSHA256Mismatch = &ResponseError{ //todo + ErrContentSHA256Mismatch = &Error{ //todo code: "InvalidArgument", description: "ErrContentSHA256Mismatch", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidCopyPartRange = &ResponseError{ + ErrInvalidCopyPartRange = &Error{ code: "InvalidArgument", description: "The x-amz-copy-source-range value must be of the form bytes=first-last where first and last are the zero-based offsets of the first and last bytes to copy", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidCopyPartRangeSource = &ResponseError{ + ErrInvalidCopyPartRangeSource = &Error{ code: "InvalidArgument", description: "Range specified is not valid for source object", httpStatusCode: http.StatusBadRequest, } - RespErrMetadataTooLarge = &ResponseError{ + ErrMetadataTooLarge = &Error{ code: "MetadataTooLarge", description: "Your metadata headers exceed the maximum allowed metadata size.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidTagDirective = &ResponseError{ + ErrInvalidTagDirective = &Error{ code: "InvalidArgument", description: "Unknown tag directive.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidEncryptionMethod = &ResponseError{ + ErrInvalidEncryptionMethod = &Error{ code: "InvalidRequest", description: "The encryption method specified is not supported", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidQueryParams = &ResponseError{ + ErrInvalidQueryParams = &Error{ code: "AuthorizationQueryParametersError", description: "Query-string authentication version 4 requires the X-Amz-Algorithm, X-Amz-Credential, X-Amz-Signature, X-Amz-Date, X-Amz-SignedHeaders, and X-Amz-Expires parameters.", httpStatusCode: http.StatusBadRequest, } - RespErrNoAccessKey = &ResponseError{ + ErrNoAccessKey = &Error{ code: "AccessDenied", description: "No AWSAccessKey was presented", httpStatusCode: 
http.StatusForbidden, } - RespErrInvalidToken = &ResponseError{ + ErrInvalidToken = &Error{ code: "InvalidTokenId", description: "The security token included in the request is invalid", httpStatusCode: http.StatusForbidden, } // S3 extensions. - RespErrInvalidObjectName = &ResponseError{ + ErrInvalidObjectName = &Error{ code: "InvalidObjectName", description: "Object name contains unsupported characters.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidObjectNamePrefixSlash = &ResponseError{ + ErrInvalidObjectNamePrefixSlash = &Error{ code: "InvalidObjectName", description: "Object name contains a leading slash.", httpStatusCode: http.StatusBadRequest, } - RespErrClientDisconnected = &ResponseError{ + ErrClientDisconnected = &Error{ code: "ClientDisconnected", description: "Client disconnected before response was ready", httpStatusCode: 499, // No official code, use nginx value. } - RespErrOperationTimedOut = &ResponseError{ + ErrOperationTimedOut = &Error{ code: "RequestTimeout", description: "A timeout occurred while trying to lock a resource, please reduce your request rate", httpStatusCode: http.StatusServiceUnavailable, } - RespErrOperationMaxedOut = &ResponseError{ + ErrOperationMaxedOut = &Error{ code: "SlowDown", description: "A timeout exceeded while waiting to proceed with the request, please reduce your request rate", httpStatusCode: http.StatusServiceUnavailable, } - RespErrUnsupportedMetadata = &ResponseError{ + ErrUnsupportedMetadata = &Error{ code: "InvalidArgument", description: "Your metadata headers are not supported.", httpStatusCode: http.StatusBadRequest, } // Generic Invalid-Request error. Should be used for response errors only for unlikely - // corner case errors for which introducing new APIRespErrorcode is not worth it. LogIf() + // corner case errors for which introducing new APIErrorcode is not worth it. LogIf() // should be used to log the error at the source of the error for debugging purposes.
- ErrInvalidRequest = &ResponseError{ + ErrInvalidRequest = &Error{ code: "InvalidRequest", description: "Invalid Request", httpStatusCode: http.StatusBadRequest, } - RespErrIncorrectContinuationToken = &ResponseError{ + ErrIncorrectContinuationToken = &Error{ code: "InvalidArgument", description: "The continuation token provided is incorrect", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidFormatAccessKey = &ResponseError{ + ErrInvalidFormatAccessKey = &Error{ code: "InvalidAccessKeyId", description: "The Access Key Id you provided contains invalid characters.", httpStatusCode: http.StatusBadRequest, } - // S3 Select API RespErrors - ErrEmptyRequestBody = &ResponseError{ + // S3 Select API Errors + ErrEmptyRequestBody = &Error{ code: "EmptyRequestBody", description: "Request body cannot be empty.", httpStatusCode: http.StatusBadRequest, } - RespErrUnsupportedFunction = &ResponseError{ + ErrUnsupportedFunction = &Error{ code: "UnsupportedFunction", description: "Encountered an unsupported SQL function.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidDataSource = &ResponseError{ + ErrInvalidDataSource = &Error{ code: "InvalidDataSource", description: "Invalid data source type. Only CSV and JSON are supported at this time.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidExpressionType = &ResponseError{ + ErrInvalidExpressionType = &Error{ code: "InvalidExpressionType", description: "The ExpressionType is invalid. Only SQL expressions are supported at this time.", httpStatusCode: http.StatusBadRequest, } - RespErrBusy = &ResponseError{ + ErrBusy = &Error{ code: "Busy", description: "The service is unavailable. 
Please retry.", httpStatusCode: http.StatusServiceUnavailable, } - RespErrUnauthorizedAccess = &ResponseError{ + ErrUnauthorizedAccess = &Error{ code: "UnauthorizedAccess", description: "You are not authorized to perform this operation", httpStatusCode: http.StatusUnauthorized, } - RespErrExpressionTooLong = &ResponseError{ + ErrExpressionTooLong = &Error{ code: "ExpressionTooLong", description: "The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB.", httpStatusCode: http.StatusBadRequest, } - RespErrIllegalSQLFunctionArgument = &ResponseError{ + ErrIllegalSQLFunctionArgument = &Error{ code: "IllegalSqlFunctionArgument", description: "Illegal argument was used in the SQL function.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidKeyPath = &ResponseError{ + ErrInvalidKeyPath = &Error{ code: "InvalidKeyPath", description: "Key path in the SQL expression is invalid.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidCompressionFormat = &ResponseError{ + ErrInvalidCompressionFormat = &Error{ code: "InvalidCompressionFormat", description: "The file is not in a supported compression format. Only GZIP is supported at this time.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidFileHeaderInfo = &ResponseError{ + ErrInvalidFileHeaderInfo = &Error{ code: "InvalidFileHeaderInfo", description: "The FileHeaderInfo is invalid. Only NONE, USE, and IGNORE are supported.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidJSONType = &ResponseError{ + ErrInvalidJSONType = &Error{ code: "InvalidJsonType", description: "The JsonType is invalid. Only DOCUMENT and LINES are supported at this time.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidQuoteFields = &ResponseError{ + ErrInvalidQuoteFields = &Error{ code: "InvalidQuoteFields", description: "The QuoteFields is invalid. 
Only ALWAYS and ASNEEDED are supported.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidRequestParameter = &ResponseError{ + ErrInvalidRequestParameter = &Error{ code: "InvalidRequestParameter", description: "The value of a parameter in SelectRequest element is invalid. Check the service API documentation and try again.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidDataType = &ResponseError{ + ErrInvalidDataType = &Error{ code: "InvalidDataType", description: "The SQL expression contains an invalid data type.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidTextEncoding = &ResponseError{ + ErrInvalidTextEncoding = &Error{ code: "InvalidTextEncoding", description: "Invalid encoding type. Only UTF-8 encoding is supported at this time.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidTableAlias = &ResponseError{ + ErrInvalidTableAlias = &Error{ code: "InvalidTableAlias", description: "The SQL expression contains an invalid table alias.", httpStatusCode: http.StatusBadRequest, } - RespErrMissingRequiredParameter = &ResponseError{ + ErrMissingRequiredParameter = &Error{ code: "MissingRequiredParameter", description: "The SelectRequest entity is missing a required parameter. Check the service documentation and try again.", httpStatusCode: http.StatusBadRequest, } - RespErrObjectSerializationConflict = &ResponseError{ + ErrObjectSerializationConflict = &Error{ code: "ObjectSerializationConflict", description: "The SelectRequest entity can only contain one of CSV or JSON. 
Check the service documentation and try again.", httpStatusCode: http.StatusBadRequest, } - RespErrUnsupportedSQLOperation = &ResponseError{ + ErrUnsupportedSQLOperation = &Error{ code: "UnsupportedSqlOperation", description: "Encountered an unsupported SQL operation.", httpStatusCode: http.StatusBadRequest, } - RespErrUnsupportedSQLStructure = &ResponseError{ + ErrUnsupportedSQLStructure = &Error{ code: "UnsupportedSqlStructure", description: "Encountered an unsupported SQL structure. Check the SQL Reference.", httpStatusCode: http.StatusBadRequest, } - RespErrUnsupportedSyntax = &ResponseError{ + ErrUnsupportedSyntax = &Error{ code: "UnsupportedSyntax", description: "Encountered invalid syntax.", httpStatusCode: http.StatusBadRequest, } - RespErrUnsupportedRangeHeader = &ResponseError{ + ErrUnsupportedRangeHeader = &Error{ code: "UnsupportedRangeHeader", description: "Range header is not supported for this operation.", httpStatusCode: http.StatusBadRequest, } - RespErrLexerInvalidChar = &ResponseError{ + ErrLexerInvalidChar = &Error{ code: "LexerInvalidChar", description: "The SQL expression contains an invalid character.", httpStatusCode: http.StatusBadRequest, } - RespErrLexerInvalidOperator = &ResponseError{ + ErrLexerInvalidOperator = &Error{ code: "LexerInvalidOperator", description: "The SQL expression contains an invalid literal.", httpStatusCode: http.StatusBadRequest, } - RespErrLexerInvalidLiteral = &ResponseError{ + ErrLexerInvalidLiteral = &Error{ code: "LexerInvalidLiteral", description: "The SQL expression contains an invalid operator.", httpStatusCode: http.StatusBadRequest, } - RespErrLexerInvalidIONLiteral = &ResponseError{ + ErrLexerInvalidIONLiteral = &Error{ code: "LexerInvalidIONLiteral", description: "The SQL expression contains an invalid operator.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpectedDatePart = &ResponseError{ + ErrParseExpectedDatePart = &Error{ code: "ParseExpectedDatePart", description: "Did not find the 
expected date part in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpectedKeyword = &ResponseError{ + ErrParseExpectedKeyword = &Error{ code: "ParseExpectedKeyword", description: "Did not find the expected keyword in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpectedTokenType = &ResponseError{ + ErrParseExpectedTokenType = &Error{ code: "ParseExpectedTokenType", description: "Did not find the expected token in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpected2TokenTypes = &ResponseError{ + ErrParseExpected2TokenTypes = &Error{ code: "ParseExpected2TokenTypes", description: "Did not find the expected token in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpectedNumber = &ResponseError{ + ErrParseExpectedNumber = &Error{ code: "ParseExpectedNumber", description: "Did not find the expected number in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpectedRightParenBuiltinFunctionCall = &ResponseError{ + ErrParseExpectedRightParenBuiltinFunctionCall = &Error{ code: "ParseExpectedRightParenBuiltinFunctionCall", description: "Did not find the expected right parenthesis character in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpectedTypeName = &ResponseError{ + ErrParseExpectedTypeName = &Error{ code: "ParseExpectedTypeName", description: "Did not find the expected type name in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpectedWhenClause = &ResponseError{ + ErrParseExpectedWhenClause = &Error{ code: "ParseExpectedWhenClause", description: "Did not find the expected WHEN clause in the SQL expression. 
CASE is not supported.", httpStatusCode: http.StatusBadRequest, } - RespErrParseUnsupportedToken = &ResponseError{ + ErrParseUnsupportedToken = &Error{ code: "ParseUnsupportedToken", description: "The SQL expression contains an unsupported token.", httpStatusCode: http.StatusBadRequest, } - RespErrParseUnsupportedLiteralsGroupBy = &ResponseError{ + ErrParseUnsupportedLiteralsGroupBy = &Error{ code: "ParseUnsupportedLiteralsGroupBy", description: "The SQL expression contains an unsupported use of GROUP BY.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpectedMember = &ResponseError{ + ErrParseExpectedMember = &Error{ code: "ParseExpectedMember", description: "The SQL expression contains an unsupported use of MEMBER.", httpStatusCode: http.StatusBadRequest, } - RespErrParseUnsupportedSelect = &ResponseError{ + ErrParseUnsupportedSelect = &Error{ code: "ParseUnsupportedSelect", description: "The SQL expression contains an unsupported use of SELECT.", httpStatusCode: http.StatusBadRequest, } - RespErrParseUnsupportedCase = &ResponseError{ + ErrParseUnsupportedCase = &Error{ code: "ParseUnsupportedCase", description: "The SQL expression contains an unsupported use of CASE.", httpStatusCode: http.StatusBadRequest, } - RespErrParseUnsupportedCaseClause = &ResponseError{ + ErrParseUnsupportedCaseClause = &Error{ code: "ParseUnsupportedCaseClause", description: "The SQL expression contains an unsupported use of CASE.", httpStatusCode: http.StatusBadRequest, } - RespErrParseUnsupportedAlias = &ResponseError{ + ErrParseUnsupportedAlias = &Error{ code: "ParseUnsupportedAlias", description: "The SQL expression contains an unsupported use of ALIAS.", httpStatusCode: http.StatusBadRequest, } - RespErrParseUnsupportedSyntax = &ResponseError{ + ErrParseUnsupportedSyntax = &Error{ code: "ParseUnsupportedSyntax", description: "The SQL expression contains unsupported syntax.", httpStatusCode: http.StatusBadRequest, } - RespErrParseUnknownOperator = &ResponseError{ + 
ErrParseUnknownOperator = &Error{ code: "ParseUnknownOperator", description: "The SQL expression contains an invalid operator.", httpStatusCode: http.StatusBadRequest, } - RespErrParseMissingIdentAfterAt = &ResponseError{ + ErrParseMissingIdentAfterAt = &Error{ code: "ParseMissingIdentAfterAt", description: "Did not find the expected identifier after the @ symbol in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseUnexpectedOperator = &ResponseError{ + ErrParseUnexpectedOperator = &Error{ code: "ParseUnexpectedOperator", description: "The SQL expression contains an unexpected operator.", httpStatusCode: http.StatusBadRequest, } - RespErrParseUnexpectedTerm = &ResponseError{ + ErrParseUnexpectedTerm = &Error{ code: "ParseUnexpectedTerm", description: "The SQL expression contains an unexpected term.", httpStatusCode: http.StatusBadRequest, } - RespErrParseUnexpectedToken = &ResponseError{ + ErrParseUnexpectedToken = &Error{ code: "ParseUnexpectedToken", description: "The SQL expression contains an unexpected token.", httpStatusCode: http.StatusBadRequest, } - RespErrParseUnexpectedKeyword = &ResponseError{ + ErrParseUnexpectedKeyword = &Error{ code: "ParseUnexpectedKeyword", description: "The SQL expression contains an unexpected keyword.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpectedExpression = &ResponseError{ + ErrParseExpectedExpression = &Error{ code: "ParseExpectedExpression", description: "Did not find the expected SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpectedLeftParenAfterCast = &ResponseError{ + ErrParseExpectedLeftParenAfterCast = &Error{ code: "ParseExpectedLeftParenAfterCast", description: "Did not find expected the left parenthesis in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpectedLeftParenValueConstructor = &ResponseError{ + ErrParseExpectedLeftParenValueConstructor = &Error{ code: "ParseExpectedLeftParenValueConstructor", 
description: "Did not find expected the left parenthesis in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpectedLeftParenBuiltinFunctionCall = &ResponseError{ + ErrParseExpectedLeftParenBuiltinFunctionCall = &Error{ code: "ParseExpectedLeftParenBuiltinFunctionCall", description: "Did not find the expected left parenthesis in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpectedArgumentDelimiter = &ResponseError{ + ErrParseExpectedArgumentDelimiter = &Error{ code: "ParseExpectedArgumentDelimiter", description: "Did not find the expected argument delimiter in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseCastArity = &ResponseError{ + ErrParseCastArity = &Error{ code: "ParseCastArity", description: "The SQL expression CAST has incorrect arity.", httpStatusCode: http.StatusBadRequest, } - RespErrParseInvalidTypeParam = &ResponseError{ + ErrParseInvalidTypeParam = &Error{ code: "ParseInvalidTypeParam", description: "The SQL expression contains an invalid parameter value.", httpStatusCode: http.StatusBadRequest, } - RespErrParseEmptySelect = &ResponseError{ + ErrParseEmptySelect = &Error{ code: "ParseEmptySelect", description: "The SQL expression contains an empty SELECT.", httpStatusCode: http.StatusBadRequest, } - RespErrParseSelectMissingFrom = &ResponseError{ + ErrParseSelectMissingFrom = &Error{ code: "ParseSelectMissingFrom", description: "GROUP is not supported in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpectedIdentForGroupName = &ResponseError{ + ErrParseExpectedIdentForGroupName = &Error{ code: "ParseExpectedIdentForGroupName", description: "GROUP is not supported in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpectedIdentForAlias = &ResponseError{ + ErrParseExpectedIdentForAlias = &Error{ code: "ParseExpectedIdentForAlias", description: "Did not find the expected identifier for the alias in the 
SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseUnsupportedCallWithStar = &ResponseError{ + ErrParseUnsupportedCallWithStar = &Error{ code: "ParseUnsupportedCallWithStar", description: "Only COUNT with (*) as a parameter is supported in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseNonUnaryAgregateFunctionCall = &ResponseError{ + ErrParseNonUnaryAgregateFunctionCall = &Error{ code: "ParseNonUnaryAgregateFunctionCall", description: "Only one argument is supported for aggregate functions in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseMalformedJoin = &ResponseError{ + ErrParseMalformedJoin = &Error{ code: "ParseMalformedJoin", description: "JOIN is not supported in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseExpectedIdentForAt = &ResponseError{ + ErrParseExpectedIdentForAt = &Error{ code: "ParseExpectedIdentForAt", description: "Did not find the expected identifier for AT name in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseAsteriskIsNotAloneInSelectList = &ResponseError{ + ErrParseAsteriskIsNotAloneInSelectList = &Error{ code: "ParseAsteriskIsNotAloneInSelectList", description: "Other expressions are not allowed in the SELECT list when '*' is used without dot notation in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseCannotMixSqbAndWildcardInSelectList = &ResponseError{ + ErrParseCannotMixSqbAndWildcardInSelectList = &Error{ code: "ParseCannotMixSqbAndWildcardInSelectList", description: "Cannot mix [] and * in the same expression in a SELECT list in SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrParseInvalidContextForWildcardInSelectList = &ResponseError{ + ErrParseInvalidContextForWildcardInSelectList = &Error{ code: "ParseInvalidContextForWildcardInSelectList", description: "Invalid use of * in SELECT list in the SQL expression.", httpStatusCode: 
http.StatusBadRequest, } - RespErrIncorrectSQLFunctionArgumentType = &ResponseError{ + ErrIncorrectSQLFunctionArgumentType = &Error{ code: "IncorrectSqlFunctionArgumentType", description: "Incorrect type of arguments in function call in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrValueParseFailure = &ResponseError{ + ErrValueParseFailure = &Error{ code: "ValueParseFailure", description: "Time stamp parse failure in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrEvaluatorInvalidArguments = &ResponseError{ + ErrEvaluatorInvalidArguments = &Error{ code: "EvaluatorInvalidArguments", description: "Incorrect number of arguments in the function call in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrIntegerOverflow = &ResponseError{ + ErrIntegerOverflow = &Error{ code: "IntegerOverflow", description: "Int overflow or underflow in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrLikeInvalidInputs = &ResponseError{ + ErrLikeInvalidInputs = &Error{ code: "LikeInvalidInputs", description: "Invalid argument given to the LIKE clause in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrCastFailed = &ResponseError{ + ErrCastFailed = &Error{ code: "CastFailed", description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidCast = &ResponseError{ + ErrInvalidCast = &Error{ code: "InvalidCast", description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrEvaluatorInvalidTimestampFormatPattern = &ResponseError{ + ErrEvaluatorInvalidTimestampFormatPattern = &Error{ code: "EvaluatorInvalidTimestampFormatPattern", description: "Time stamp format pattern requires additional fields in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - 
RespErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing = &ResponseError{ + ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing = &Error{ code: "EvaluatorInvalidTimestampFormatPatternSymbolForParsing", description: "Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrEvaluatorTimestampFormatPatternDuplicateFields = &ResponseError{ + ErrEvaluatorTimestampFormatPatternDuplicateFields = &Error{ code: "EvaluatorTimestampFormatPatternDuplicateFields", description: "Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch = &ResponseError{ + ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch = &Error{ code: "EvaluatorUnterminatedTimestampFormatPatternToken", description: "Time stamp format pattern contains unterminated token in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrEvaluatorUnterminatedTimestampFormatPatternToken = &ResponseError{ + ErrEvaluatorUnterminatedTimestampFormatPatternToken = &Error{ code: "EvaluatorInvalidTimestampFormatPatternToken", description: "Time stamp format pattern contains an invalid token in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrEvaluatorInvalidTimestampFormatPatternToken = &ResponseError{ + ErrEvaluatorInvalidTimestampFormatPatternToken = &Error{ code: "EvaluatorInvalidTimestampFormatPatternToken", description: "Time stamp format pattern contains an invalid token in the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrEvaluatorInvalidTimestampFormatPatternSymbol = &ResponseError{ + ErrEvaluatorInvalidTimestampFormatPatternSymbol = &Error{ code: "EvaluatorInvalidTimestampFormatPatternSymbol", description: "Time stamp format pattern contains an invalid symbol in 
the SQL expression.", httpStatusCode: http.StatusBadRequest, } - RespErrEvaluatorBindingDoesNotExist = &ResponseError{ + ErrEvaluatorBindingDoesNotExist = &Error{ code: "ErrEvaluatorBindingDoesNotExist", description: "A column name or a path provided does not exist in the SQL expression", httpStatusCode: http.StatusBadRequest, } - RespErrMissingHeaders = &ResponseError{ + ErrMissingHeaders = &Error{ code: "MissingHeaders", description: "Some headers in the query are missing from the file. Check the file and try again.", httpStatusCode: http.StatusBadRequest, } - RespErrInvalidColumnIndex = &ResponseError{ + ErrInvalidColumnIndex = &Error{ code: "InvalidColumnIndex", description: "The column index is invalid. Please check the service documentation and try again.", httpStatusCode: http.StatusBadRequest, } - RespErrPostPolicyConditionInvalidFormat = &ResponseError{ + ErrPostPolicyConditionInvalidFormat = &Error{ code: "PostPolicyInvalidKeyName", description: "Invalid according to Policy: Policy Conditions failed", httpStatusCode: http.StatusForbidden, } - RespErrMalformedJSON = &ResponseError{ + ErrMalformedJSON = &Error{ code: "MalformedJSON", description: "The JSON was not well-formed or did not validate against our published format.", httpStatusCode: http.StatusBadRequest, diff --git a/s3/responses/writers_common.go b/s3/responses/writers_common.go index f2db9c0c2..6cb70c879 100644 --- a/s3/responses/writers_common.go +++ b/s3/responses/writers_common.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/services" "github.com/gorilla/mux" logging "github.com/ipfs/go-log/v2" "net/http" @@ -29,7 +28,7 @@ const ( // APIErrorResponse - error response format type APIErrorResponse struct { - XMLName xml.Name `xml:"ResponseError" json:"-"` + XMLName xml.Name `xml:"Error" json:"-"` Code string Message string Resource string @@ -47,7 +46,7 @@ type RESTErrorResponse struct { BucketName string 
`xml:"BucketName,omitempty" json:"BucketName,omitempty"` } -func getRESTErrorResponse(err *services.ResponseError, resource string, bucket, object string) RESTErrorResponse { +func getRESTErrorResponse(err *Error, resource string, bucket, object string) RESTErrorResponse { return RESTErrorResponse{ Code: err.Code(), BucketName: bucket, @@ -59,18 +58,18 @@ func getRESTErrorResponse(err *services.ResponseError, resource string, bucket, } func WriteErrorResponseHeadersOnly(w http.ResponseWriter, r *http.Request, err error) { - var rerr *services.ResponseError + var rerr *Error if !errors.As(err, &rerr) { - rerr = services.RespErrInternalError + rerr = ErrInternalError } writeResponse(w, r, rerr.HTTPStatusCode(), nil, mimeNone) } // WriteErrorResponse write ErrorResponse func WriteErrorResponse(w http.ResponseWriter, r *http.Request, err error) { - var rerr *services.ResponseError + var rerr *Error if !errors.As(err, &rerr) { - rerr = services.RespErrInternalError + rerr = ErrInternalError } vars := mux.Vars(r) bucket := vars["bucket"] @@ -143,9 +142,9 @@ func encodeXMLResponse(response interface{}) []byte { // WriteErrorResponseJSON - writes error response in JSON format; // useful for admin APIs. func WriteErrorResponseJSON(w http.ResponseWriter, err error, reqURL *url.URL, host string) { - var rerr *services.ResponseError + var rerr *Error if !errors.As(err, &rerr) { - rerr = services.RespErrInternalError + rerr = ErrInternalError } // Generate error response. 
errorResponse := getAPIErrorResponse(rerr, reqURL.Path, w.Header().Get(consts.AmzRequestID), host) @@ -155,7 +154,7 @@ func WriteErrorResponseJSON(w http.ResponseWriter, err error, reqURL *url.URL, h // getErrorResponse gets in standard error and resource value and // provides a encodable populated response values -func getAPIErrorResponse(err *services.ResponseError, resource, requestID, hostID string) APIErrorResponse { +func getAPIErrorResponse(err *Error, resource, requestID, hostID string) APIErrorResponse { return APIErrorResponse{ Code: err.Code(), Message: err.Description(), diff --git a/s3/services/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go index 58ad4b835..414e053b9 100644 --- a/s3/services/auth/check_handler_auth.go +++ b/s3/services/auth/check_handler_auth.go @@ -3,7 +3,7 @@ package auth import ( "context" "encoding/hex" - "github.com/bittorrent/go-btfs/s3/services" + "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/services/accesskey" "net/http" @@ -28,7 +28,7 @@ func (s *service) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Re ack, err = s.IsReqAuthenticated(ctx, r, "", ServiceS3) return default: - err = services.RespErrSignatureVersionNotSupported + err = responses.ErrSignatureVersionNotSupported return } } @@ -41,7 +41,7 @@ func (s *service) ReqSignatureV4Verify(r *http.Request, region string, stype ser case isRequestPresignedSignatureV4(r): return s.doesPresignedSignatureMatch(sha256sum, r, region, stype) default: - return nil, services.RespErrAccessDenied + return nil, responses.ErrAccessDenied } } @@ -54,7 +54,7 @@ func (s *service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio clientETag, err := etag.FromContentMD5(r.Header) if err != nil { - err = services.RespErrInvalidDigest + err = responses.ErrInvalidDigest return } @@ -65,14 +65,14 @@ func (s *service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio if sha256Sum, ok := 
r.Form[consts.AmzContentSha256]; ok && len(sha256Sum) > 0 { contentSHA256, err = hex.DecodeString(sha256Sum[0]) if err != nil { - err = services.RespErrContentSHA256Mismatch + err = responses.ErrContentSHA256Mismatch return } } } else if _, ok := r.Header[consts.AmzContentSha256]; !skipSHA256 && ok { contentSHA256, err = hex.DecodeString(r.Header.Get(consts.AmzContentSha256)) if err != nil || len(contentSHA256) == 0 { - err = services.RespErrContentSHA256Mismatch + err = responses.ErrContentSHA256Mismatch return } } @@ -81,7 +81,7 @@ func (s *service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio // The verification happens implicit during reading. reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1) if err != nil { - err = services.RespErrInternalError + err = responses.ErrInternalError return } r.Body = reader diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go index acc36b557..04f7b3d70 100644 --- a/s3/services/auth/signature-v4-parser.go +++ b/s3/services/auth/signature-v4-parser.go @@ -19,7 +19,7 @@ package auth import ( "errors" - "github.com/bittorrent/go-btfs/s3/services" + "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/services/accesskey" "net/http" "net/url" @@ -55,14 +55,14 @@ func (c credentialHeader) getScope() string { func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, err error) { creds := strings.SplitN(strings.TrimSpace(credElement), "=", 2) if len(creds) != 2 { - return ch, services.RespErrMissingFields + return ch, responses.ErrMissingFields } if creds[0] != "Credential" { - return ch, services.RespErrMissingCredTag + return ch, responses.ErrMissingCredTag } credElements := strings.Split(strings.TrimSpace(creds[1]), consts.SlashSeparator) if len(credElements) < 5 { - return ch, services.RespErrCredMalformed + return ch, responses.ErrCredMalformed } 
accessKey := strings.Join(credElements[:len(credElements)-4], consts.SlashSeparator) // The access key may contain one or more `/` //if !IsAccessKeyValid(accessKey) { @@ -76,7 +76,7 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) var e error cred.scope.date, e = time.Parse(yyyymmdd, credElements[0]) if e != nil { - return ch, services.RespErrAuthorizationHeaderMalformed + return ch, responses.ErrAuthorizationHeaderMalformed } cred.scope.region = credElements[1] @@ -91,18 +91,18 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) } // Should validate region, only if region is set. if !isValidRegion(sRegion, region) { - return ch, services.RespErrAuthorizationHeaderMalformed + return ch, responses.ErrAuthorizationHeaderMalformed } if credElements[2] != string(stype) { //switch stype { //case ServiceSTS: // return ch, handlers.ErrcodeAuthorizationHeaderMalformed //} - return ch, services.RespErrAuthorizationHeaderMalformed + return ch, responses.ErrAuthorizationHeaderMalformed } cred.scope.service = credElements[2] if credElements[3] != "aws4_request" { - return ch, services.RespErrAuthorizationHeaderMalformed + return ch, responses.ErrAuthorizationHeaderMalformed } cred.scope.request = credElements[3] return cred, nil @@ -112,13 +112,13 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) func parseSignature(signElement string) (string, error) { signFields := strings.Split(strings.TrimSpace(signElement), "=") if len(signFields) != 2 { - return "", services.RespErrMissingFields + return "", responses.ErrMissingFields } if signFields[0] != "Signature" { - return "", services.RespErrMissingSignTag + return "", responses.ErrMissingSignTag } if signFields[1] == "" { - return "", services.RespErrMissingFields + return "", responses.ErrMissingFields } signature := signFields[1] return signature, nil @@ -128,13 +128,13 @@ func parseSignature(signElement string) (string, error) 
{ func parseSignedHeader(signedHdrElement string) ([]string, error) { signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") if len(signedHdrFields) != 2 { - return nil, services.RespErrMissingFields + return nil, responses.ErrMissingFields } if signedHdrFields[0] != "SignedHeaders" { - return nil, services.RespErrMissingSignHeadersTag + return nil, responses.ErrMissingSignHeadersTag } if signedHdrFields[1] == "" { - return nil, services.RespErrMissingFields + return nil, responses.ErrMissingFields } signedHeaders := strings.Split(signedHdrFields[1], ";") return signedHeaders, nil @@ -168,7 +168,7 @@ func doesV4PresignParamsExist(query url.Values) error { v4PresignQueryParams := []string{consts.AmzAlgorithm, consts.AmzCredential, consts.AmzSignature, consts.AmzDate, consts.AmzSignedHeaders, consts.AmzExpires} for _, v4PresignQueryParam := range v4PresignQueryParams { if _, ok := query[v4PresignQueryParam]; !ok { - return services.RespErrInvalidQueryParams + return responses.ErrInvalidQueryParams } } return nil @@ -184,7 +184,7 @@ func parsePreSignV4(query url.Values, region string, stype serviceType) (psv pre // Verify if the query algorithm is supported or not. if query.Get(consts.AmzAlgorithm) != signV4Algorithm { - return psv, services.RespErrAuthorizationHeaderMalformed + return psv, responses.ErrAuthorizationHeaderMalformed } // Initialize signature version '4' structured header. @@ -199,22 +199,22 @@ func parsePreSignV4(query url.Values, region string, stype serviceType) (psv pre // Save date in native time.Time. preSignV4Values.Date, err = time.Parse(iso8601Format, query.Get(consts.AmzDate)) if err != nil { - return psv, services.RespErrAuthorizationHeaderMalformed + return psv, responses.ErrAuthorizationHeaderMalformed } // Save expires in native time.Duration. 
preSignV4Values.Expires, err = time.ParseDuration(query.Get(consts.AmzExpires) + "s") if err != nil { - return psv, services.RespErrAuthorizationHeaderMalformed + return psv, responses.ErrAuthorizationHeaderMalformed } if preSignV4Values.Expires < 0 { - return psv, services.RespErrAuthorizationHeaderMalformed + return psv, responses.ErrAuthorizationHeaderMalformed } // Check if Expiry time is less than 7 days (value in seconds). if preSignV4Values.Expires.Seconds() > 604800 { - return psv, services.RespErrAuthorizationHeaderMalformed + return psv, responses.ErrAuthorizationHeaderMalformed } // Save signed headers. @@ -245,19 +245,19 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues // to make parsing easier. v4Auth = strings.ReplaceAll(v4Auth, " ", "") if v4Auth == "" { - return sv, services.RespErrAuthHeaderEmpty + return sv, responses.ErrAuthHeaderEmpty } // Verify if the header algorithm is supported or not. if !strings.HasPrefix(v4Auth, signV4Algorithm) { - return sv, services.RespErrSignatureVersionNotSupported + return sv, responses.ErrSignatureVersionNotSupported } // Strip off the Algorithm prefix. v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - return sv, services.RespErrMissingFields + return sv, responses.ErrMissingFields } // Initialize signature version '4' structured header. 
@@ -292,7 +292,7 @@ func (s *service) getReqAccessKeyV4(r *http.Request, region string, stype servic v4Auth := strings.TrimPrefix(r.Header.Get("Authorization"), signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - err = services.RespErrMissingFields + err = responses.ErrMissingFields return } ch, err = parseCredentialHeader(authFields[0], region, stype) @@ -303,14 +303,14 @@ func (s *service) getReqAccessKeyV4(r *http.Request, region string, stype servic ack, err = s.accessKeySvc.Get(ch.accessKey) if errors.Is(err, accesskey.ErrNotFound) { - err = services.RespErrInvalidAccessKeyID + err = responses.ErrInvalidAccessKeyID return } if err != nil { return } if !ack.Enable { - err = services.RespErrAccessKeyDisabled + err = responses.ErrAccessKeyDisabled return } diff --git a/s3/services/auth/signature-v4-utils.go b/s3/services/auth/signature-v4-utils.go index c59f54b96..698f90e89 100644 --- a/s3/services/auth/signature-v4-utils.go +++ b/s3/services/auth/signature-v4-utils.go @@ -18,7 +18,7 @@ package auth import ( - "github.com/bittorrent/go-btfs/s3/services" + "github.com/bittorrent/go-btfs/s3/responses" "net/http" "reflect" "strconv" @@ -66,7 +66,7 @@ func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, // find whether "host" is part of list of signed headers. // if not return ErrcodeUnsignedHeaders. "host" is mandatory. if !contains(signedHeaders, "host") { - return nil, services.RespErrUnsignedHeaders + return nil, responses.ErrUnsignedHeaders } extractedSignedHeaders := make(http.Header) for _, header := range signedHeaders { @@ -116,7 +116,7 @@ func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, // calculation to be compatible with such clients. 
extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) default: - return nil, services.RespErrUnsignedHeaders + return nil, responses.ErrUnsignedHeaders } } return extractedSignedHeaders, nil diff --git a/s3/services/auth/signature-v4.go b/s3/services/auth/signature-v4.go index deebf1ba0..3da162b20 100644 --- a/s3/services/auth/signature-v4.go +++ b/s3/services/auth/signature-v4.go @@ -20,7 +20,7 @@ package auth import ( "crypto/subtle" "errors" - "github.com/bittorrent/go-btfs/s3/services" + "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/services/accesskey" "net/http" "net/url" @@ -73,13 +73,13 @@ func (s *service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Check accesskey ack, err = s.accessKeySvc.Get(pSignValues.Credential.accessKey) if errors.Is(err, accesskey.ErrNotFound) { - err = services.RespErrInvalidAccessKeyID + err = responses.ErrInvalidAccessKeyID } if err != nil { return } if !ack.Enable { - err = services.RespErrAccessKeyDisabled + err = responses.ErrAccessKeyDisabled return } @@ -92,12 +92,12 @@ func (s *service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // If the host which signed the request is slightly ahead in time (by less than MaxSkewTime) the // request should still be allowed. if pSignValues.Date.After(time.Now().UTC().Add(consts.MaxSkewTime)) { - err = services.RespErrRequestNotReadyYet + err = responses.ErrRequestNotReadyYet return } if time.Now().UTC().Sub(pSignValues.Date) > pSignValues.Expires { - err = services.RespErrExpiredPresignRequest + err = responses.ErrExpiredPresignRequest return } @@ -149,26 +149,26 @@ func (s *service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Verify if date query is same. if req.Form.Get(consts.AmzDate) != query.Get(consts.AmzDate) { - err = services.RespErrSignatureDoesNotMatch + err = responses.ErrSignatureDoesNotMatch } // Verify if expires query is same. 
if req.Form.Get(consts.AmzExpires) != query.Get(consts.AmzExpires) { - err = services.RespErrSignatureDoesNotMatch + err = responses.ErrSignatureDoesNotMatch return } // Verify if signed headers query is same. if req.Form.Get(consts.AmzSignedHeaders) != query.Get(consts.AmzSignedHeaders) { - err = services.RespErrSignatureDoesNotMatch + err = responses.ErrSignatureDoesNotMatch return } // Verify if credential query is same. if req.Form.Get(consts.AmzCredential) != query.Get(consts.AmzCredential) { - err = services.RespErrSignatureDoesNotMatch + err = responses.ErrSignatureDoesNotMatch return } // Verify if sha256 payload query is same. if clntHashedPayload != "" && clntHashedPayload != query.Get(consts.AmzContentSha256) { - err = services.RespErrContentSHA256Mismatch + err = responses.ErrContentSHA256Mismatch return } // not check SessionToken. @@ -194,7 +194,7 @@ func (s *service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Verify signature. if !compareSignatureV4(req.Form.Get(consts.AmzSignature), newSignature) { - err = services.RespErrSignatureDoesNotMatch + err = responses.ErrSignatureDoesNotMatch return } @@ -225,13 +225,13 @@ func (s *service) doesSignatureMatch(hashedPayload string, r *http.Request, regi // Check accesskey ack, err = s.accessKeySvc.Get(signV4Values.Credential.accessKey) if errors.Is(err, accesskey.ErrNotFound) { - err = services.RespErrInvalidAccessKeyID + err = responses.ErrInvalidAccessKeyID } if err != nil { return } if !ack.Enable { - err = services.RespErrAccessKeyDisabled + err = responses.ErrAccessKeyDisabled return } @@ -239,7 +239,7 @@ func (s *service) doesSignatureMatch(hashedPayload string, r *http.Request, regi var date string if date = req.Header.Get(consts.AmzDate); date == "" { if date = r.Header.Get(consts.Date); date == "" { - err = services.RespErrMissingDateHeader + err = responses.ErrMissingDateHeader return } } @@ -247,7 +247,7 @@ func (s *service) doesSignatureMatch(hashedPayload string, r 
*http.Request, regi // Parse date header. t, err := time.Parse(iso8601Format, date) if err != nil { - err = services.RespErrAuthorizationHeaderMalformed + err = responses.ErrAuthorizationHeaderMalformed return } @@ -269,7 +269,7 @@ func (s *service) doesSignatureMatch(hashedPayload string, r *http.Request, regi // Verify if signature match. if !compareSignatureV4(newSignature, signV4Values.Signature) { - err = services.RespErrSignatureDoesNotMatch + err = responses.ErrSignatureDoesNotMatch return } diff --git a/s3/services/bucket/proto.go b/s3/services/bucket/proto.go index c6599231c..440363d71 100644 --- a/s3/services/bucket/proto.go +++ b/s3/services/bucket/proto.go @@ -2,11 +2,14 @@ package bucket import ( "context" + "errors" "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/services/accesskey" "time" ) +var ErrNotFound = errors.New("bucket not found") + type Service interface { CheckACL(accessKeyRecord *accesskey.AccessKey, bucketName string, action action.Action) (err error) CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error diff --git a/s3/services/bucket/service.go b/s3/services/bucket/service.go index 6da10744e..36af0441b 100644 --- a/s3/services/bucket/service.go +++ b/s3/services/bucket/service.go @@ -4,7 +4,6 @@ import ( "context" "errors" "github.com/bittorrent/go-btfs/s3/providers" - "github.com/bittorrent/go-btfs/s3/services" "github.com/bittorrent/go-btfs/s3/services/accesskey" "time" @@ -42,25 +41,19 @@ func NewService(providers providers.Providerser, options ...Option) Service { } func (s *service) CheckACL(ack *accesskey.AccessKey, bucketName string, act action.Action) (err error) { - if act == action.ListBucketAction { - if ack.Key == "" { - err = services.RespErrAccessDenied + var bucketMeta Bucket + if act != action.CreateBucketAction && act != action.ListBucketAction { + if bucketName == "" { + return ErrNotFound + } + bucketMeta, err = s.GetBucketMeta(context.Background(), bucketName) + 
if err != nil { + return err } - return - } - - //需要判断bucketName是否为空字符串 - if bucketName == "" { - return services.RespErrNoSuchBucket - } - - bucketMeta, err := s.GetBucketMeta(context.Background(), bucketName) - if err != nil { - return err } if policy.IsAllowed(bucketMeta.Owner == ack.Key, bucketMeta.Acl, act) == false { - return services.RespErrAccessDenied + return errors.New("not allowed") } return } @@ -98,7 +91,7 @@ func (s *service) CreateBucket(ctx context.Context, bucket, region, accessKey, a func (s *service) lockGetBucketMeta(bucket string) (meta Bucket, err error) { err = s.providers.GetStateStore().Get(bucketPrefix+bucket, &meta) if errors.Is(err, providers.ErrStateStoreNotFound) { - err = services.RespErrNoSuchBucket + err = ErrNotFound } return } From 0ecff7941391ecdbba9c682be7e00f8ba720de61 Mon Sep 17 00:00:00 2001 From: steve Date: Thu, 24 Aug 2023 21:15:02 +0800 Subject: [PATCH 055/139] chore: mig sig 01 --- s3/services/auth/auth_type.go | 58 ++- s3/services/auth/check_handler_auth.go | 342 ++++++++++++--- s3/services/auth/service.go | 8 +- s3/services/auth/signature-v2.go | 429 ++++++++++++++++++ s3/services/auth/signature-v4-parser.go | 191 ++++---- s3/services/auth/signature-v4-utils.go | 224 +++++----- s3/services/auth/signature-v4.go | 425 ++++++++---------- s3/services/auth/streaming-signature-v4.go | 481 +++++++++++++++++++++ 8 files changed, 1640 insertions(+), 518 deletions(-) create mode 100644 s3/services/auth/signature-v2.go create mode 100644 s3/services/auth/streaming-signature-v4.go diff --git a/s3/services/auth/auth_type.go b/s3/services/auth/auth_type.go index 5fb74bce4..936eb2aa0 100644 --- a/s3/services/auth/auth_type.go +++ b/s3/services/auth/auth_type.go @@ -1,16 +1,52 @@ package auth import ( + "github.com/bittorrent/go-btfs/s3/consts" "net/http" "net/url" "strings" ) +// Verify if request has JWT. 
+func isRequestJWT(r *http.Request) bool { + return strings.HasPrefix(r.Header.Get("Authorization"), "Bearer") +} + // IsRequestSignatureV4 Verify if request has AWS Signature Version '4'. func IsRequestSignatureV4(r *http.Request) bool { return strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) } +// Verify if request has AWS Signature Version '2'. +func isRequestSignatureV2(r *http.Request) bool { + return !strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) && + strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm) +} + +// Verify if request has AWS PreSign Version '4'. +func isRequestPresignedSignatureV4(r *http.Request) bool { + _, ok := r.URL.Query()["X-Amz-Credential"] + return ok +} + +// Verify request has AWS PreSign Version '2'. +func isRequestPresignedSignatureV2(r *http.Request) bool { + _, ok := r.URL.Query()["AWSAccessKeyId"] + return ok +} + +// Verify if request has AWS Post policy Signature Version '4'. +func isRequestPostPolicySignatureV4(r *http.Request) bool { + return strings.Contains(r.Header.Get("Content-Type"), "multipart/form-data") && + r.Method == http.MethodPost +} + +// Verify if the request has AWS Streaming Signature Version '4'. This is only valid for 'PUT' operation. +func isRequestSignStreamingV4(r *http.Request) bool { + return r.Header.Get("x-amz-content-sha256") == consts.StreamingContentSHA256 && + r.Method == http.MethodPut +} + // AuthType Authorization type. 
type AuthType int @@ -34,14 +70,32 @@ func GetRequestAuthType(r *http.Request) AuthType { var err error r.Form, err = url.ParseQuery(r.URL.RawQuery) if err != nil { - //log.Infof("parse query failed, query: %s, error: %v", r.URL.RawQuery, err) + log.Infof("parse query failed, query: %s, error: %v", r.URL.RawQuery, err) return AuthTypeUnknown } } - if IsRequestSignatureV4(r) { + if isRequestSignatureV2(r) { + return AuthTypeSignedV2 + } else if isRequestPresignedSignatureV2(r) { + return AuthTypePresignedV2 + } else if isRequestSignStreamingV4(r) { + return AuthTypeStreamingSigned + } else if IsRequestSignatureV4(r) { return AuthTypeSigned } else if isRequestPresignedSignatureV4(r) { return AuthTypePresigned + } else if isRequestJWT(r) { + return AuthTypeJWT + } else if isRequestPostPolicySignatureV4(r) { + return AuthTypePostPolicy + } else if _, ok := r.Form[consts.StsAction]; ok { + return AuthTypeSTS + } else if _, ok := r.Header[consts.Authorization]; !ok { + return AuthTypeAnonymous } return AuthTypeUnknown } + +func IsAuthTypeStreamingSigned(atype AuthType) bool { + return atype == AuthTypeStreamingSigned +} diff --git a/s3/services/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go index 414e053b9..7f9fa7518 100644 --- a/s3/services/auth/check_handler_auth.go +++ b/s3/services/auth/check_handler_auth.go @@ -1,61 +1,167 @@ package auth import ( + "bytes" "context" "encoding/hex" - "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/services/accesskey" + s3action "github.com/bittorrent/go-btfs/s3/action" + "io" "net/http" + "net/url" + "strconv" + "time" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/etag" "github.com/bittorrent/go-btfs/s3/utils/hash" ) +// AuthSys auth and sign system +type AuthSys struct { + Iam *IdentityAMSys + PolicySys *iPolicySys + AdminCred auth.Credentials +} + +// NewAuthSys new an AuthSys +func NewAuthSys(db *uleveldb.ULevelDB, adminCred auth.Credentials) 
*AuthSys { + return &AuthSys{ + Iam: NewIdentityAMSys(db), + PolicySys: newIPolicySys(db), + AdminCred: adminCred, + } +} + // CheckRequestAuthTypeCredential Check request auth type verifies the incoming http request // - validates the request signature // - validates the policy action if anonymous tests bucket policies if any, // for authenticated requests validates IAM policies. // -// returns APIErrorcode if any to be replied to the client. +// returns APIErrorCode if any to be replied to the client. // Additionally, returns the accessKey used in the request, and if this request is by an admin. -func (s *service) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request) (ack *accesskey.AccessKey, err error) { +func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request, action s3action.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err apierrors.ErrorCode) { switch GetRequestAuthType(r) { - case AuthTypeAnonymous: - ack = new(accesskey.AccessKey) - return + case AuthTypeUnknown, AuthTypeStreamingSigned: + return cred, owner, apierrors.ErrSignatureVersionNotSupported + case AuthTypePresignedV2, AuthTypeSignedV2: + if s3Err = s.IsReqAuthenticatedV2(r); s3Err != apierrors.ErrNone { + return cred, owner, s3Err + } + cred, owner, s3Err = s.getReqAccessKeyV2(r) case AuthTypeSigned, AuthTypePresigned: - ack, err = s.IsReqAuthenticated(ctx, r, "", ServiceS3) - return - default: - err = responses.ErrSignatureVersionNotSupported - return + region := "" + switch action { + case s3action.GetBucketLocationAction, s3action.ListAllMyBucketsAction: + region = "" + } + if s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3); s3Err != apierrors.ErrNone { + return cred, owner, s3Err + } + cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) + } + if s3Err != apierrors.ErrNone { + return cred, owner, s3Err + } + // TODO: Why should a temporary user be replaced with the parent user's account? 
+ //if cred.IsTemp() { + // cred, _ = s.Iam.GetUser(ctx, cred.ParentUser) + //} + if action == s3action.CreateBucketAction { + // To extract region from XML in request body, get copy of request body. + payload, err := io.ReadAll(io.LimitReader(r.Body, consts.MaxLocationConstraintSize)) + if err != nil { + log.Errorf("ReadAll err:%v", err) + return cred, owner, apierrors.ErrMalformedXML + } + + // Populate payload to extract location constraint. + r.Body = io.NopCloser(bytes.NewReader(payload)) + if s.PolicySys.bmSys.HasBucket(ctx, bucketName) { + return cred, owner, apierrors.ErrBucketAlreadyExists + } + } + + // Anonymous user + if cred.AccessKey == "" { + owner = false + } + + // check bucket policy + if s.PolicySys.isAllowed(ctx, auth.Args{ + AccountName: cred.AccessKey, + Action: action, + BucketName: bucketName, + IsOwner: owner, + ObjectName: objectName, + }) { + // Request is allowed return the appropriate access key. + return cred, owner, apierrors.ErrNone + } + if action == s3action.ListBucketVersionsAction { + // In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission + // verify as a fallback. + if s.PolicySys.isAllowed(ctx, auth.Args{ + AccountName: cred.AccessKey, + Action: s3action.ListBucketAction, + BucketName: bucketName, + IsOwner: owner, + ObjectName: objectName, + }) { + // Request is allowed return the appropriate access key. + return cred, owner, apierrors.ErrNone + } + } + + // check user policy + if bucketName == "" || action == s3action.CreateBucketAction { + if s.Iam.IsAllowed(r.Context(), auth.Args{ + AccountName: cred.AccessKey, + Action: action, + BucketName: bucketName, + Conditions: getConditions(r, cred.AccessKey), + ObjectName: objectName, + IsOwner: owner, + }) { + // Request is allowed return the appropriate access key. 
+ return cred, owner, apierrors.ErrNone + } + } else { + if !s.PolicySys.bmSys.HasBucket(ctx, bucketName) { + return cred, owner, apierrors.ErrNoSuchBucket + } } + + return cred, owner, apierrors.ErrAccessDenied } -func (s *service) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (ack *accesskey.AccessKey, err error) { - sha256sum := getContentSha256Cksum(r, stype) +// Verify if request has valid AWS Signature Version '2'. +func (s *AuthSys) IsReqAuthenticatedV2(r *http.Request) (s3Error apierrors.ErrorCode) { + if isRequestSignatureV2(r) { + return s.doesSignV2Match(r) + } + return s.doesPresignV2SignatureMatch(r) +} + +func (s *AuthSys) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { + sha256sum := GetContentSha256Cksum(r, stype) switch { case IsRequestSignatureV4(r): return s.doesSignatureMatch(sha256sum, r, region, stype) case isRequestPresignedSignatureV4(r): return s.doesPresignedSignatureMatch(sha256sum, r, region, stype) default: - return nil, responses.ErrAccessDenied + return apierrors.ErrAccessDenied } } // IsReqAuthenticated Verify if request has valid AWS Signature Version '4'. 
-func (s *service) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (ack *accesskey.AccessKey, err error) { - ack, err = s.ReqSignatureV4Verify(r, region, stype) - if err != nil { - return +func (s *AuthSys) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { + if errCode := s.ReqSignatureV4Verify(r, region, stype); errCode != apierrors.ErrNone { + return errCode } - clientETag, err := etag.FromContentMD5(r.Header) if err != nil { - err = responses.ErrInvalidDigest - return + return apierrors.ErrInvalidDigest } // Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned) @@ -65,15 +171,13 @@ func (s *service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio if sha256Sum, ok := r.Form[consts.AmzContentSha256]; ok && len(sha256Sum) > 0 { contentSHA256, err = hex.DecodeString(sha256Sum[0]) if err != nil { - err = responses.ErrContentSHA256Mismatch - return + return apierrors.ErrContentSHA256Mismatch } } } else if _, ok := r.Header[consts.AmzContentSha256]; !skipSHA256 && ok { contentSHA256, err = hex.DecodeString(r.Header.Get(consts.AmzContentSha256)) if err != nil || len(contentSHA256) == 0 { - err = responses.ErrContentSHA256Mismatch - return + return apierrors.ErrContentSHA256Mismatch } } @@ -81,45 +185,155 @@ func (s *service) IsReqAuthenticated(ctx context.Context, r *http.Request, regio // The verification happens implicit during reading. 
reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1) if err != nil { - err = responses.ErrInternalError - return + return apierrors.ErrInternalError } r.Body = reader - return + return apierrors.ErrNone } -//// ValidateAdminSignature validate admin Signature -//func (s *service) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (Credentials, map[string]interface{}, bool, handlers.Errorcode) { -// var cred Credentials -// var owner bool -// s3Err := handlers.ErrcodeAccessDenied -// if _, ok := r.Header[consts.AmzContentSha256]; ok && -// GetRequestAuthType(r) == AuthTypeSigned { -// // We only support admin credentials to access admin APIs. -// cred, s3Err = GetReqAccessKeyV4(r, region, ServiceS3) -// if s3Err != handlers.ErrcodeNone { -// return cred, nil, owner, s3Err -// } -// -// // we only support V4 (no presign) with auth body -// s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3) -// } -// if s3Err != handlers.ErrcodeNone { -// return cred, nil, owner, s3Err -// } -// -// return cred, nil, owner, handlers.ErrcodeNone -//} -//// -//func (s *service) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err handlers.Errorcode) { -// switch GetRequestAuthType(r) { -// case AuthTypeUnknown: -// s3Err = handlers.ErrcodeSignatureVersionNotSupported -// case AuthTypeSignedV2, AuthTypePresignedV2: -// cred, owner, s3Err = s.getReqAccessKeyV2(r) -// case AuthTypeStreamingSigned, AuthTypePresigned, AuthTypeSigned: -// region := "" -// cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) -// } -// return -//} +// ValidateAdminSignature validate admin Signature +func (s *AuthSys) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, map[string]interface{}, bool, apierrors.ErrorCode) { + var cred auth.Credentials + var owner bool + s3Err := apierrors.ErrAccessDenied + if _, ok := r.Header[consts.AmzContentSha256]; ok && + 
GetRequestAuthType(r) == AuthTypeSigned { + // We only support admin credentials to access admin APIs. + cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) + if s3Err != apierrors.ErrNone { + return cred, nil, owner, s3Err + } + + // we only support V4 (no presign) with auth body + s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3) + } + if s3Err != apierrors.ErrNone { + return cred, nil, owner, s3Err + } + + return cred, nil, owner, apierrors.ErrNone +} + +func getConditions(r *http.Request, username string) map[string][]string { + currTime := time.Now().UTC() + + principalType := "Anonymous" + if username != "" { + principalType = "User" + } + + at := GetRequestAuthType(r) + var signatureVersion string + switch at { + case AuthTypeSignedV2, AuthTypePresignedV2: + signatureVersion = signV2Algorithm + case AuthTypeSigned, AuthTypePresigned, AuthTypeStreamingSigned, AuthTypePostPolicy: + signatureVersion = signV4Algorithm + } + + var authtype string + switch at { + case AuthTypePresignedV2, AuthTypePresigned: + authtype = "REST-QUERY-STRING" + case AuthTypeSignedV2, AuthTypeSigned, AuthTypeStreamingSigned: + authtype = "REST-HEADER" + case AuthTypePostPolicy: + authtype = "POST" + } + + args := map[string][]string{ + "CurrentTime": {currTime.Format(time.RFC3339)}, + "EpochTime": {strconv.FormatInt(currTime.Unix(), 10)}, + "SecureTransport": {strconv.FormatBool(r.TLS != nil)}, + "UserAgent": {r.UserAgent()}, + "Referer": {r.Referer()}, + "principaltype": {principalType}, + "userid": {username}, + "username": {username}, + "signatureversion": {signatureVersion}, + "authType": {authtype}, + } + + cloneHeader := r.Header.Clone() + + for key, values := range cloneHeader { + if existingValues, found := args[key]; found { + args[key] = append(existingValues, values...) 
+ } else { + args[key] = values + } + } + + cloneURLValues := make(url.Values, len(r.Form)) + for k, v := range r.Form { + cloneURLValues[k] = v + } + + for key, values := range cloneURLValues { + if existingValues, found := args[key]; found { + args[key] = append(existingValues, values...) + } else { + args[key] = values + } + } + + return args +} + +// IsPutActionAllowed - check if PUT operation is allowed on the resource, this +// call verifies bucket policies and IAM policies, supports multi user +// checks etc. +func (s *AuthSys) IsPutActionAllowed(ctx context.Context, r *http.Request, action s3action.Action, bucketName, objectName string) (s3Err apierrors.ErrorCode) { + var cred auth.Credentials + var owner bool + switch GetRequestAuthType(r) { + case AuthTypeUnknown: + return apierrors.ErrSignatureVersionNotSupported + case AuthTypeSignedV2, AuthTypePresignedV2: + cred, owner, s3Err = s.getReqAccessKeyV2(r) + case AuthTypeStreamingSigned, AuthTypePresigned, AuthTypeSigned: + region := "" + cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) + } + if s3Err != apierrors.ErrNone { + return s3Err + } + + // Do not check for PutObjectRetentionAction permission, + // if mode and retain until date are not set. 
+ // Can happen when bucket has default lock config set + if action == s3action.PutObjectRetentionAction && + r.Header.Get(consts.AmzObjectLockMode) == "" && + r.Header.Get(consts.AmzObjectLockRetainUntilDate) == "" { + return apierrors.ErrNone + } + + // check bucket policy + if s.PolicySys.isAllowed(ctx, auth.Args{ + AccountName: cred.AccessKey, + Action: action, + BucketName: bucketName, + IsOwner: owner, + ObjectName: objectName, + }) { + return apierrors.ErrNone + } + + if !s.PolicySys.bmSys.HasBucket(ctx, bucketName) { + return apierrors.ErrNoSuchBucket + } + return apierrors.ErrAccessDenied +} + +func (s *AuthSys) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err apierrors.ErrorCode) { + switch GetRequestAuthType(r) { + case AuthTypeUnknown: + s3Err = apierrors.ErrSignatureVersionNotSupported + case AuthTypeSignedV2, AuthTypePresignedV2: + cred, owner, s3Err = s.getReqAccessKeyV2(r) + case AuthTypeStreamingSigned, AuthTypePresigned, AuthTypeSigned: + region := "" + cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) + } + return +} diff --git a/s3/services/auth/service.go b/s3/services/auth/service.go index ed7102ff3..dfc040024 100644 --- a/s3/services/auth/service.go +++ b/s3/services/auth/service.go @@ -10,15 +10,11 @@ import ( var _ Service = (*service)(nil) type service struct { - providers providers.Providerser - accessKeySvc accesskey.Service + getSecret func(key string) (secret string, disabled bool, err error) } func NewService(providers providers.Providerser, accessKeySvc accesskey.Service, options ...Option) Service { - svc := &service{ - providers: providers, - accessKeySvc: accessKeySvc, - } + svc := &service{} for _, option := range options { option(svc) } diff --git a/s3/services/auth/signature-v2.go b/s3/services/auth/signature-v2.go new file mode 100644 index 000000000..2410ee825 --- /dev/null +++ b/s3/services/auth/signature-v2.go @@ -0,0 +1,429 @@ +/* + * The following code tries to reverse engineer the 
Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package auth + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/subtle" + "encoding/base64" + "fmt" + "github.com/bittorrent/go-btfs/s3/apierrors" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/iam/auth" + "net" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" +) + +// Whitelist resource list that will be used in query string for signature-V2 calculation. +// +// This list should be kept alphabetically sorted, do not hastily edit. +var resourceList = []string{ + "acl", + "cors", + "delete", + "encryption", + "legal-hold", + "lifecycle", + "location", + "logging", + "notification", + "partNumber", + "policy", + "requestPayment", + "response-cache-control", + "response-content-disposition", + "response-content-encoding", + "response-content-language", + "response-content-type", + "response-expires", + "retention", + "select", + "select-type", + "tagging", + "torrent", + "uploadId", + "uploads", + "versionId", + "versioning", + "versions", + "website", +} + +// Signature and API related constants. 
+const ( + signV2Algorithm = "AWS" +) + +// AWS S3 Signature V2 calculation rule is give here: +// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign +func (s *AuthSys) doesPolicySignatureV2Match(formValues http.Header) (auth.Credentials, apierrors.ErrorCode) { + accessKey := formValues.Get(consts.AmzAccessKeyID) + + r := &http.Request{Header: formValues} + cred, _, s3Err := s.checkKeyValid(r, accessKey) + if s3Err != apierrors.ErrNone { + return cred, s3Err + } + policy := formValues.Get("Policy") + signature := formValues.Get(consts.AmzSignatureV2) + if !compareSignatureV2(signature, calculateSignatureV2(policy, cred.SecretKey)) { + return cred, apierrors.ErrSignatureDoesNotMatch + } + return cred, apierrors.ErrNone +} + +// Escape encodedQuery string into unescaped list of query params, returns error +// if any while unescaping the values. +func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) { + for _, query := range strings.Split(encodedQuery, "&") { + var unescapedQuery string + unescapedQuery, err = url.QueryUnescape(query) + if err != nil { + return nil, err + } + unescapedQueries = append(unescapedQueries, unescapedQuery) + } + return unescapedQueries, nil +} + +// doesPresignV2SignatureMatch - Verify query headers with presigned signature +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth +// +// returns apierrors.ErrNone if matches. S3 errors otherwise. +func (s *AuthSys) doesPresignV2SignatureMatch(r *http.Request) apierrors.ErrorCode { + // r.RequestURI will have raw encoded URI as sent by the client. 
+ tokens := strings.SplitN(r.RequestURI, "?", 2) + encodedResource := tokens[0] + encodedQuery := "" + if len(tokens) == 2 { + encodedQuery = tokens[1] + } + + var ( + filteredQueries []string + gotSignature string + expires string + accessKey string + err error + ) + + var unescapedQueries []string + unescapedQueries, err = unescapeQueries(encodedQuery) + if err != nil { + return apierrors.ErrInvalidQueryParams + } + + // Extract the necessary values from presigned query, construct a list of new filtered queries. + for _, query := range unescapedQueries { + keyval := strings.SplitN(query, "=", 2) + if len(keyval) != 2 { + return apierrors.ErrInvalidQueryParams + } + switch keyval[0] { + case consts.AmzAccessKeyID: + accessKey = keyval[1] + case consts.AmzSignatureV2: + gotSignature = keyval[1] + case consts.Expires: + expires = keyval[1] + default: + filteredQueries = append(filteredQueries, query) + } + } + + // Invalid values returns error. + if accessKey == "" || gotSignature == "" || expires == "" { + return apierrors.ErrInvalidQueryParams + } + + cred, _, s3Err := s.checkKeyValid(r, accessKey) + if s3Err != apierrors.ErrNone { + return s3Err + } + + // Make sure the request has not expired. + expiresInt, err := strconv.ParseInt(expires, 10, 64) + if err != nil { + return apierrors.ErrAuthorizationHeaderMalformed + } + + // Check if the presigned URL has expired. 
+ if expiresInt < time.Now().UTC().Unix() { + return apierrors.ErrExpiredPresignRequest + } + + encodedResource, err = getResource(encodedResource, r.Host) + if err != nil { + return apierrors.ErrInvalidRequest + } + + expectedSignature := preSignatureV2(cred, r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires) + if !compareSignatureV2(gotSignature, expectedSignature) { + return apierrors.ErrSignatureDoesNotMatch + } + + r.Form.Del(consts.Expires) + + return apierrors.ErrNone +} + +func (s *AuthSys) getReqAccessKeyV2(r *http.Request) (auth.Credentials, bool, apierrors.ErrorCode) { + if accessKey := r.Form.Get(consts.AmzAccessKeyID); accessKey != "" { + return s.checkKeyValid(r, accessKey) + } + + // below is V2 Signed Auth header format, splitting on `space` (after the `AWS` string). + // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature + authFields := strings.Split(r.Header.Get(consts.Authorization), " ") + if len(authFields) != 2 { + return auth.Credentials{}, false, apierrors.ErrMissingFields + } + + // Then will be splitting on ":", this will seprate `AWSAccessKeyId` and `Signature` string. + keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":") + if len(keySignFields) != 2 { + return auth.Credentials{}, false, apierrors.ErrMissingFields + } + + return s.checkKeyValid(r, keySignFields[0]) +} + +// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; +// Signature = Base64( HMAC-SHA1( YourSecretKey, UTF-8-Encoding-Of( StringToSign ) ) ); +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +// +// CanonicalizedResource = [ consts.SlashSeparator + Bucket ] + +// + +// [ subresource, if present. 
For example "?acl", "?location", "?logging", or "?torrent"]; +// +// CanonicalizedProtocolHeaders = + +// doesSignV2Match - Verify authorization header with calculated header in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html +// returns true if matches, false otherwise. if error is not nil then it is always false + +func (s *AuthSys) validateV2AuthHeader(r *http.Request) (auth.Credentials, apierrors.ErrorCode) { + var cred auth.Credentials + v2Auth := r.Header.Get(consts.Authorization) + if v2Auth == "" { + return cred, apierrors.ErrAuthHeaderEmpty + } + + // Verify if the header algorithm is supported or not. + if !strings.HasPrefix(v2Auth, signV2Algorithm) { + return cred, apierrors.ErrSignatureVersionNotSupported + } + + cred, _, apiErr := s.getReqAccessKeyV2(r) + if apiErr != apierrors.ErrNone { + return cred, apiErr + } + + return cred, apierrors.ErrNone +} + +func (s *AuthSys) doesSignV2Match(r *http.Request) apierrors.ErrorCode { + v2Auth := r.Header.Get(consts.Authorization) + cred, apiError := s.validateV2AuthHeader(r) + if apiError != apierrors.ErrNone { + return apiError + } + + // r.RequestURI will have raw encoded URI as sent by the client. 
+ tokens := strings.SplitN(r.RequestURI, "?", 2) + encodedResource := tokens[0] + encodedQuery := "" + if len(tokens) == 2 { + encodedQuery = tokens[1] + } + + unescapedQueries, err := unescapeQueries(encodedQuery) + if err != nil { + return apierrors.ErrInvalidQueryParams + } + + encodedResource, err = getResource(encodedResource, r.Host) + if err != nil { + return apierrors.ErrInvalidRequest + } + + prefix := fmt.Sprintf("%s %s:", signV2Algorithm, cred.AccessKey) + if !strings.HasPrefix(v2Auth, prefix) { + return apierrors.ErrSignatureDoesNotMatch + } + v2Auth = v2Auth[len(prefix):] + expectedAuth := signatureV2(cred, r.Method, encodedResource, strings.Join(unescapedQueries, "&"), r.Header) + if !compareSignatureV2(v2Auth, expectedAuth) { + return apierrors.ErrSignatureDoesNotMatch + } + return apierrors.ErrNone +} + +func calculateSignatureV2(stringToSign string, secret string) string { + hm := hmac.New(sha1.New, []byte(secret)) + hm.Write([]byte(stringToSign)) + return base64.StdEncoding.EncodeToString(hm.Sum(nil)) +} + +// Return signature-v2 for the presigned request. +func preSignatureV2(cred auth.Credentials, method string, encodedResource string, encodedQuery string, headers http.Header, expires string) string { + stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, expires) + return calculateSignatureV2(stringToSign, cred.SecretKey) +} + +// Return the signature v2 of a given request. +func signatureV2(cred auth.Credentials, method string, encodedResource string, encodedQuery string, headers http.Header) string { + stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, "") + signature := calculateSignatureV2(stringToSign, cred.SecretKey) + return signature +} + +// compareSignatureV2 returns true if and only if both signatures +// are equal. The signatures are expected to be base64 encoded strings +// according to the AWS S3 signature V2 spec. 
+func compareSignatureV2(sig1, sig2 string) bool { + // Decode signature string to binary byte-sequence representation is required + // as Base64 encoding of a value is not unique: + // For example "aGVsbG8=" and "aGVsbG8=\r" will result in the same byte slice. + signature1, err := base64.StdEncoding.DecodeString(sig1) + if err != nil { + return false + } + signature2, err := base64.StdEncoding.DecodeString(sig2) + if err != nil { + return false + } + return subtle.ConstantTimeCompare(signature1, signature2) == 1 +} + +// Return canonical headers. +func canonicalizedAmzHeadersV2(headers http.Header) string { + var keys []string + keyval := make(map[string]string, len(headers)) + for key := range headers { + lkey := strings.ToLower(key) + if !strings.HasPrefix(lkey, "x-amz-") { + continue + } + keys = append(keys, lkey) + keyval[lkey] = strings.Join(headers[key], ",") + } + sort.Strings(keys) + var canonicalHeaders []string + for _, key := range keys { + canonicalHeaders = append(canonicalHeaders, key+":"+keyval[key]) + } + return strings.Join(canonicalHeaders, "\n") +} + +// Return canonical resource string. +func canonicalizedResourceV2(encodedResource, encodedQuery string) string { + queries := strings.Split(encodedQuery, "&") + keyval := make(map[string]string) + for _, query := range queries { + key := query + val := "" + index := strings.Index(query, "=") + if index != -1 { + key = query[:index] + val = query[index+1:] + } + keyval[key] = val + } + + var canonicalQueries []string + for _, key := range resourceList { + val, ok := keyval[key] + if !ok { + continue + } + if val == "" { + canonicalQueries = append(canonicalQueries, key) + continue + } + canonicalQueries = append(canonicalQueries, key+"="+val) + } + + // The queries will be already sorted as resourceList is sorted, if canonicalQueries + // is empty strings.Join returns empty. + canonicalQuery := strings.Join(canonicalQueries, "&") + if canonicalQuery != "" { + return encodedResource + "?" 
+ canonicalQuery + } + return encodedResource +} + +// Return string to sign under two different conditions. +// - if expires string is set then string to sign includes date instead of the Date header. +// - if expires string is empty then string to sign includes date header instead. +func getStringToSignV2(method string, encodedResource, encodedQuery string, headers http.Header, expires string) string { + canonicalHeaders := canonicalizedAmzHeadersV2(headers) + if len(canonicalHeaders) > 0 { + canonicalHeaders += "\n" + } + + date := expires // Date is set to expires date for presign operations. + if date == "" { + // If expires date is empty then request header Date is used. + date = headers.Get(consts.Date) + } + + // From the Amazon docs: + // + // StringToSign = HTTP-Verb + "\n" + + // Content-Md5 + "\n" + + // Content-Type + "\n" + + // Date/Expires + "\n" + + // CanonicalizedProtocolHeaders + + // CanonicalizedResource; + stringToSign := strings.Join([]string{ + method, + headers.Get(consts.ContentMD5), + headers.Get(consts.ContentType), + date, + canonicalHeaders, + }, "\n") + + return stringToSign + canonicalizedResourceV2(encodedResource, encodedQuery) +} + +// Returns "/bucketName/objectName" for path-style or virtual-host-style requests. +func getResource(path string, host string) (string, error) { + + // If virtual-host-style is enabled construct the "resource" properly. 
+ if strings.Contains(host, ":") { + // In bucket.mydomain.com:9000, strip out :9000 + var err error + if host, _, err = net.SplitHostPort(host); err != nil { + return "", err + } + } + return path, nil +} diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go index 04f7b3d70..3851d1ff6 100644 --- a/s3/services/auth/signature-v4-parser.go +++ b/s3/services/auth/signature-v4-parser.go @@ -18,9 +18,6 @@ package auth import ( - "errors" - "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/services/accesskey" "net/http" "net/url" "strings" @@ -51,23 +48,45 @@ func (c credentialHeader) getScope() string { }, consts.SlashSeparator) } +func (s *AuthSys) GetReqAccessKeyV4(r *http.Request, region string, stype serviceType) (auth.Credentials, bool, apierrors.ErrorCode) { + ch, s3Err := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) + if s3Err != apierrors.ErrNone { + // Strip off the Algorithm prefix. + v4Auth := strings.TrimPrefix(r.Header.Get("Authorization"), signV4Algorithm) + authFields := strings.Split(strings.TrimSpace(v4Auth), ",") + if len(authFields) != 3 { + return auth.Credentials{}, false, apierrors.ErrMissingFields + } + ch, s3Err = parseCredentialHeader(authFields[0], region, stype) + if s3Err != apierrors.ErrNone { + return auth.Credentials{}, false, s3Err + } + } + // TODO: Why should a temporary user be replaced with the parent user's account name? + //cerd, _ := s.Iam.GetUser(r.Context(), ch.accessKey) + //if cerd.IsTemp() { + // ch.accessKey = cerd.ParentUser + //} + return s.checkKeyValid(r, ch.accessKey) +} + // parse credentialHeader string into its structured form. 
-func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, err error) { +func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, aec apierrors.ErrorCode) { creds := strings.SplitN(strings.TrimSpace(credElement), "=", 2) if len(creds) != 2 { - return ch, responses.ErrMissingFields + return ch, apierrors.ErrMissingFields } if creds[0] != "Credential" { - return ch, responses.ErrMissingCredTag + return ch, apierrors.ErrMissingCredTag } credElements := strings.Split(strings.TrimSpace(creds[1]), consts.SlashSeparator) if len(credElements) < 5 { - return ch, responses.ErrCredMalformed + return ch, apierrors.ErrCredMalformed } accessKey := strings.Join(credElements[:len(credElements)-4], consts.SlashSeparator) // The access key may contain one or more `/` - //if !IsAccessKeyValid(accessKey) { - // return ch, handlers.ErrcodeInvalidAccessKeyID - //} + if !auth.IsAccessKeyValid(accessKey) { + return ch, apierrors.ErrInvalidAccessKeyID + } // Save access key id. cred := credentialHeader{ accessKey: accessKey, @@ -76,7 +95,7 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) var e error cred.scope.date, e = time.Parse(yyyymmdd, credElements[0]) if e != nil { - return ch, responses.ErrAuthorizationHeaderMalformed + return ch, apierrors.ErrAuthorizationHeaderMalformed } cred.scope.region = credElements[1] @@ -91,53 +110,53 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) } // Should validate region, only if region is set. 
if !isValidRegion(sRegion, region) { - return ch, responses.ErrAuthorizationHeaderMalformed + return ch, apierrors.ErrAuthorizationHeaderMalformed } if credElements[2] != string(stype) { - //switch stype { - //case ServiceSTS: - // return ch, handlers.ErrcodeAuthorizationHeaderMalformed - //} - return ch, responses.ErrAuthorizationHeaderMalformed + switch stype { + case ServiceSTS: + return ch, apierrors.ErrAuthorizationHeaderMalformed + } + return ch, apierrors.ErrAuthorizationHeaderMalformed } cred.scope.service = credElements[2] if credElements[3] != "aws4_request" { - return ch, responses.ErrAuthorizationHeaderMalformed + return ch, apierrors.ErrAuthorizationHeaderMalformed } cred.scope.request = credElements[3] - return cred, nil + return cred, apierrors.ErrNone } // Parse signature from signature tag. -func parseSignature(signElement string) (string, error) { +func parseSignature(signElement string) (string, apierrors.ErrorCode) { signFields := strings.Split(strings.TrimSpace(signElement), "=") if len(signFields) != 2 { - return "", responses.ErrMissingFields + return "", apierrors.ErrMissingFields } if signFields[0] != "Signature" { - return "", responses.ErrMissingSignTag + return "", apierrors.ErrMissingSignTag } if signFields[1] == "" { - return "", responses.ErrMissingFields + return "", apierrors.ErrMissingFields } signature := signFields[1] - return signature, nil + return signature, apierrors.ErrNone } // Parse slice of signed headers from signed headers tag. 
-func parseSignedHeader(signedHdrElement string) ([]string, error) { +func parseSignedHeader(signedHdrElement string) ([]string, apierrors.ErrorCode) { signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") if len(signedHdrFields) != 2 { - return nil, responses.ErrMissingFields + return nil, apierrors.ErrMissingFields } if signedHdrFields[0] != "SignedHeaders" { - return nil, responses.ErrMissingSignHeadersTag + return nil, apierrors.ErrMissingSignHeadersTag } if signedHdrFields[1] == "" { - return nil, responses.ErrMissingFields + return nil, apierrors.ErrMissingFields } signedHeaders := strings.Split(signedHdrFields[1], ";") - return signedHeaders, nil + return signedHeaders, apierrors.ErrNone } // signValues data type represents structured form of AWS Signature V4 header. @@ -164,80 +183,81 @@ type preSignValues struct { // querystring += &X-Amz-Signature=signature // // verifies if any of the necessary query params are missing in the presigned request. -func doesV4PresignParamsExist(query url.Values) error { +func doesV4PresignParamsExist(query url.Values) apierrors.ErrorCode { v4PresignQueryParams := []string{consts.AmzAlgorithm, consts.AmzCredential, consts.AmzSignature, consts.AmzDate, consts.AmzSignedHeaders, consts.AmzExpires} for _, v4PresignQueryParam := range v4PresignQueryParams { if _, ok := query[v4PresignQueryParam]; !ok { - return responses.ErrInvalidQueryParams + return apierrors.ErrInvalidQueryParams } } - return nil + return apierrors.ErrNone } // Parses all the presigned signature values into separate elements. -func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, err error) { +func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, aec apierrors.ErrorCode) { // verify whether the required query params exist. 
- err = doesV4PresignParamsExist(query) - if err != nil { - return psv, err + aec = doesV4PresignParamsExist(query) + if aec != apierrors.ErrNone { + return psv, aec } // Verify if the query algorithm is supported or not. if query.Get(consts.AmzAlgorithm) != signV4Algorithm { - return psv, responses.ErrAuthorizationHeaderMalformed + return psv, apierrors.ErrAuthorizationHeaderMalformed } // Initialize signature version '4' structured header. preSignV4Values := preSignValues{} // Save credential. - preSignV4Values.Credential, err = parseCredentialHeader("Credential="+query.Get(consts.AmzCredential), region, stype) - if err != nil { - return psv, err + preSignV4Values.Credential, aec = parseCredentialHeader("Credential="+query.Get(consts.AmzCredential), region, stype) + if aec != apierrors.ErrNone { + return psv, aec } + var e error // Save date in native time.Time. - preSignV4Values.Date, err = time.Parse(iso8601Format, query.Get(consts.AmzDate)) - if err != nil { - return psv, responses.ErrAuthorizationHeaderMalformed + preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get(consts.AmzDate)) + if e != nil { + return psv, apierrors.ErrAuthorizationHeaderMalformed } // Save expires in native time.Duration. - preSignV4Values.Expires, err = time.ParseDuration(query.Get(consts.AmzExpires) + "s") - if err != nil { - return psv, responses.ErrAuthorizationHeaderMalformed + preSignV4Values.Expires, e = time.ParseDuration(query.Get(consts.AmzExpires) + "s") + if e != nil { + return psv, apierrors.ErrAuthorizationHeaderMalformed } if preSignV4Values.Expires < 0 { - return psv, responses.ErrAuthorizationHeaderMalformed + return psv, apierrors.ErrAuthorizationHeaderMalformed } // Check if Expiry time is less than 7 days (value in seconds). if preSignV4Values.Expires.Seconds() > 604800 { - return psv, responses.ErrAuthorizationHeaderMalformed + return psv, apierrors.ErrAuthorizationHeaderMalformed } // Save signed headers. 
- preSignV4Values.SignedHeaders, err = parseSignedHeader("SignedHeaders=" + query.Get(consts.AmzSignedHeaders)) - if err != nil { - return psv, err + preSignV4Values.SignedHeaders, aec = parseSignedHeader("SignedHeaders=" + query.Get(consts.AmzSignedHeaders)) + if aec != apierrors.ErrNone { + return psv, aec } // Save signature. - preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get(consts.AmzSignature)) - if err != nil { - return psv, err + preSignV4Values.Signature, aec = parseSignature("Signature=" + query.Get(consts.AmzSignature)) + if aec != apierrors.ErrNone { + return psv, aec } // Return structed form of signature query string. - return preSignV4Values, nil + return preSignV4Values, apierrors.ErrNone } // Parses signature version '4' header of the following form. // // Authorization: algorithm Credential=accessKeyID/credScope, \ // SignedHeaders=signedHeaders, Signature=signature -func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, err error) { +func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, aec apierrors.ErrorCode) { // credElement is fetched first to skip replacing the space in access key. credElement := strings.TrimPrefix(strings.Split(strings.TrimSpace(v4Auth), ",")[0], signV4Algorithm) // Replace all spaced strings, some clients can send spaced @@ -245,74 +265,43 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues // to make parsing easier. v4Auth = strings.ReplaceAll(v4Auth, " ", "") if v4Auth == "" { - return sv, responses.ErrAuthHeaderEmpty + return sv, apierrors.ErrAuthHeaderEmpty } // Verify if the header algorithm is supported or not. if !strings.HasPrefix(v4Auth, signV4Algorithm) { - return sv, responses.ErrSignatureVersionNotSupported + return sv, apierrors.ErrSignatureVersionNotSupported } // Strip off the Algorithm prefix. 
v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - return sv, responses.ErrMissingFields + return sv, apierrors.ErrMissingFields } // Initialize signature version '4' structured header. signV4Values := signValues{} + var s3Err apierrors.ErrorCode // Save credentail values. - signV4Values.Credential, err = parseCredentialHeader(strings.TrimSpace(credElement), region, stype) - if err != nil { - return sv, err + signV4Values.Credential, s3Err = parseCredentialHeader(strings.TrimSpace(credElement), region, stype) + if s3Err != apierrors.ErrNone { + return sv, s3Err } // Save signed headers. - signV4Values.SignedHeaders, err = parseSignedHeader(authFields[1]) - if err != nil { - return sv, err + signV4Values.SignedHeaders, s3Err = parseSignedHeader(authFields[1]) + if s3Err != apierrors.ErrNone { + return sv, s3Err } // Save signature. - signV4Values.Signature, err = parseSignature(authFields[2]) - if err != nil { - return sv, err + signV4Values.Signature, s3Err = parseSignature(authFields[2]) + if s3Err != apierrors.ErrNone { + return sv, s3Err } // Return the structure here. - return signV4Values, nil -} - -func (s *service) getReqAccessKeyV4(r *http.Request, region string, stype serviceType) (ack *accesskey.AccessKey, err error) { - ch, err := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) - if err != nil { - // Strip off the Algorithm prefix. 
- v4Auth := strings.TrimPrefix(r.Header.Get("Authorization"), signV4Algorithm) - authFields := strings.Split(strings.TrimSpace(v4Auth), ",") - if len(authFields) != 3 { - err = responses.ErrMissingFields - return - } - ch, err = parseCredentialHeader(authFields[0], region, stype) - if err != nil { - return - } - } - - ack, err = s.accessKeySvc.Get(ch.accessKey) - if errors.Is(err, accesskey.ErrNotFound) { - err = responses.ErrInvalidAccessKeyID - return - } - if err != nil { - return - } - if !ack.Enable { - err = responses.ErrAccessKeyDisabled - return - } - - return + return signV4Values, apierrors.ErrNone } diff --git a/s3/services/auth/signature-v4-utils.go b/s3/services/auth/signature-v4-utils.go index 698f90e89..17d73a777 100644 --- a/s3/services/auth/signature-v4-utils.go +++ b/s3/services/auth/signature-v4-utils.go @@ -18,19 +18,106 @@ package auth import ( - "github.com/bittorrent/go-btfs/s3/responses" + "bytes" + "encoding/hex" + "github.com/bittorrent/go-btfs/s3/consts" + "io" + "io/ioutil" "net/http" "reflect" "strconv" - "strings" - - "github.com/bittorrent/go-btfs/s3/consts" ) // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the // client did not calculate sha256 of the payload. const unsignedPayload = "UNSIGNED-PAYLOAD" +// SkipContentSha256Cksum returns true if caller needs to skip +// payload checksum, false if not. +func SkipContentSha256Cksum(r *http.Request) bool { + var ( + v []string + ok bool + ) + + if isRequestPresignedSignatureV4(r) { + v, ok = r.Form[consts.AmzContentSha256] + if !ok { + v, ok = r.Header[consts.AmzContentSha256] + } + } else { + v, ok = r.Header[consts.AmzContentSha256] + } + + // Skip if no header was set. + if !ok { + return true + } + + // If x-amz-content-sha256 is set and the value is not + // 'UNSIGNED-PAYLOAD' we should validate the content sha256. 
+ switch v[0] { + case unsignedPayload: + return true + case consts.EmptySHA256: + // some broken clients set empty-sha256 + // with > 0 content-length in the body, + // we should skip such clients and allow + // blindly such insecure clients only if + // S3 strict compatibility is disabled. + if r.ContentLength > 0 { + // We return true only in situations when + // deployment has asked MinIO to allow for + // such broken clients and content-length > 0. + return true + } + } + return false +} + +// Returns SHA256 for calculating canonical-request. +func GetContentSha256Cksum(r *http.Request, stype serviceType) string { + if stype == ServiceSTS { + payload, err := ioutil.ReadAll(io.LimitReader(r.Body, consts.StsRequestBodyLimit)) + if err != nil { + log.Errorf("ServiceSTS ReadAll err:%v", err) + } + sum256 := sha256.Sum256(payload) + r.Body = ioutil.NopCloser(bytes.NewReader(payload)) + return hex.EncodeToString(sum256[:]) + } + + var ( + defaultSha256Cksum string + v []string + ok bool + ) + + // For a presigned request we look at the query param for sha256. + if isRequestPresignedSignatureV4(r) { + // X-Amz-Content-Sha256, if not set in presigned requests, checksum + // will default to 'UNSIGNED-PAYLOAD'. + defaultSha256Cksum = unsignedPayload + v, ok = r.Form[consts.AmzContentSha256] + if !ok { + v, ok = r.Header[consts.AmzContentSha256] + } + } else { + // X-Amz-Content-Sha256, if not set in signed requests, checksum + // will default to sha256([]byte("")). + defaultSha256Cksum = consts.EmptySHA256 + v, ok = r.Header[consts.AmzContentSha256] + } + + // We found 'X-Amz-Content-Sha256' return the captured value. + if ok { + return v[0] + } + + // We couldn't find 'X-Amz-Content-Sha256'. + return defaultSha256Cksum +} + // isValidRegion - verify if incoming region value is valid with configured Region. 
func isValidRegion(reqRegion string, confRegion string) bool { if confRegion == "" { @@ -47,6 +134,28 @@ func isValidRegion(reqRegion string, confRegion string) bool { return reqRegion == confRegion } +// check if the access key is valid and recognized, additionally +// also returns if the access key is owner/admin. +func (s *AuthSys) checkKeyValid(r *http.Request, accessKey string) (auth.Credentials, bool, apierrors.ErrorCode) { + + cred := s.AdminCred + if cred.AccessKey != accessKey { + // Check if the access key is part of users credentials. + ucred, ok := s.Iam.GetUser(r.Context(), accessKey) + if !ok { + // Credentials will be invalid but and disabled + // return a different error in such a scenario. + if ucred.Status == auth.AccountOff { + return cred, false, apierrors.ErrAccessKeyDisabled + } + return cred, false, apierrors.ErrInvalidAccessKeyID + } + cred = ucred + } + owner := cred.AccessKey == s.AdminCred.AccessKey + return cred, owner, apierrors.ErrNone +} + func contains(slice interface{}, elem interface{}) bool { v := reflect.ValueOf(slice) if v.Kind() == reflect.Slice { @@ -60,13 +169,13 @@ func contains(slice interface{}, elem interface{}) bool { } // extractSignedHeaders extract signed headers from Authorization header -func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, error) { +func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, apierrors.ErrorCode) { reqHeaders := r.Header reqQueries := r.Form // find whether "host" is part of list of signed headers. - // if not return ErrcodeUnsignedHeaders. "host" is mandatory. + // if not return ErrUnsignedHeaders. "host" is mandatory. 
if !contains(signedHeaders, "host") { - return nil, responses.ErrUnsignedHeaders + return nil, apierrors.ErrUnsignedHeaders } extractedSignedHeaders := make(http.Header) for _, header := range signedHeaders { @@ -116,105 +225,8 @@ func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, // calculation to be compatible with such clients. extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) default: - return nil, responses.ErrUnsignedHeaders - } - } - return extractedSignedHeaders, nil -} - -// Returns SHA256 for calculating canonical-request. -func getContentSha256Cksum(r *http.Request, stype serviceType) string { - //if stype == ServiceSTS { - // payload, err := ioutil.ReadAll(io.LimitReader(r.Body, consts.StsRequestBodyLimit)) - // if err != nil { - // //log.Errorf("ServiceSTS ReadAll err:%v", err) - // } - // sum256 := sha256.Sum256(payload) - // r.Body = ioutil.NopCloser(bytes.NewReader(payload)) - // return hex.EncodeToString(sum256[:]) - //} - - var ( - defaultSha256Cksum string - v []string - ok bool - ) - - // For a presigned request we look at the query param for sha256. - if isRequestPresignedSignatureV4(r) { - // X-Amz-Content-Sha256, if not set in presigned requests, checksum - // will default to 'UNSIGNED-PAYLOAD'. - defaultSha256Cksum = unsignedPayload - v, ok = r.Form[consts.AmzContentSha256] - if !ok { - v, ok = r.Header[consts.AmzContentSha256] - } - } else { - // X-Amz-Content-Sha256, if not set in signed requests, checksum - // will default to sha256([]byte("")). - defaultSha256Cksum = consts.EmptySHA256 - v, ok = r.Header[consts.AmzContentSha256] - } - - // We found 'X-Amz-Content-Sha256' return the captured value. - if ok { - return v[0] - } - - // We couldn't find 'X-Amz-Content-Sha256'. - return defaultSha256Cksum -} - -// isRequestSignatureV4 Verify if request has AWS Signature Version '4'. 
-func isRequestSignatureV4(r *http.Request) bool { - return strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) -} - -// Verify if request has AWS PreSign Version '4'. -func isRequestPresignedSignatureV4(r *http.Request) bool { - _, ok := r.URL.Query()["X-Amz-Credential"] - return ok -} - -// SkipContentSha256Cksum returns true if caller needs to skip -// payload checksum, false if not. -func SkipContentSha256Cksum(r *http.Request) bool { - var ( - v []string - ok bool - ) - - if isRequestPresignedSignatureV4(r) { - v, ok = r.Form[consts.AmzContentSha256] - if !ok { - v, ok = r.Header[consts.AmzContentSha256] - } - } else { - v, ok = r.Header[consts.AmzContentSha256] - } - - // Skip if no header was set. - if !ok { - return true - } - - // If x-amz-content-sha256 is set and the value is not - // 'UNSIGNED-PAYLOAD' we should validate the content sha256. - switch v[0] { - case unsignedPayload: - return true - case consts.EmptySHA256: - // some broken clients set empty-sha256 - // with > 0 content-length in the body, - // we should skip such clients and allow - // blindly such insecure clients only if - // S3 strict compatibility is disabled. - if r.ContentLength > 0 { - // We return true only in situations when - // deployment has asked MinIO to allow for - // such broken clients and content-length > 0. - return true + return nil, apierrors.ErrUnsignedHeaders } } - return false + return extractedSignedHeaders, apierrors.ErrNone } diff --git a/s3/services/auth/signature-v4.go b/s3/services/auth/signature-v4.go index 3da162b20..2f14648ca 100644 --- a/s3/services/auth/signature-v4.go +++ b/s3/services/auth/signature-v4.go @@ -15,274 +15,221 @@ // implied. See the License for the specific language governing // permissions and limitations under the License. 
-package auth +package iam import ( - "crypto/subtle" - "errors" - "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/services/accesskey" + "bytes" + "crypto/sha256" + "encoding/hex" + "github.com/bittorrent/go-btfs/s3/apierrors" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/iam/auth" + "io" + "io/ioutil" "net/http" - "net/url" + "reflect" "strconv" - "time" - - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/set" - "github.com/bittorrent/go-btfs/s3/utils" -) - -// AWS Signature Version '4' constants. -const ( - signV4Algorithm = "AWS4-HMAC-SHA256" - iso8601Format = "20060102T150405Z" - yyyymmdd = "20060102" -) - -type serviceType string - -const ( - ServiceS3 serviceType = "s3" - ////ServiceSTS STS - //ServiceSTS serviceType = "sts" ) -// compareSignatureV4 returns true if and only if both signatures -// are equal. The signatures are expected to be HEX encoded strings -// according to the AWS S3 signature V4 spec. -func compareSignatureV4(sig1, sig2 string) bool { - // The CTC using []byte(str) works because the hex encoding - // is unique for a sequence of bytes. See also compareSignatureV2. - return subtle.ConstantTimeCompare([]byte(sig1), []byte(sig2)) == 1 -} - -// DoesPresignedSignatureMatch - Verify queryString headers with presigned signature -// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html -// -// returns handlers.ErrcodeNone if the signature matches. -func (s *service) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) (ack *accesskey.AccessKey, err error) { - // Copy request - req := *r - - // Parse request query string. - pSignValues, err := parsePreSignV4(req.Form, region, stype) - if err != nil { - return - } +// http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the +// client did not calculate sha256 of the payload. 
+const unsignedPayload = "UNSIGNED-PAYLOAD" - // Check accesskey - ack, err = s.accessKeySvc.Get(pSignValues.Credential.accessKey) - if errors.Is(err, accesskey.ErrNotFound) { - err = responses.ErrInvalidAccessKeyID - } - if err != nil { - return - } - if !ack.Enable { - err = responses.ErrAccessKeyDisabled - return - } - - // Extract all the signed headers along with its values. - extractedSignedHeaders, err := extractSignedHeaders(pSignValues.SignedHeaders, r) - if err != nil { - return - } - - // If the host which signed the request is slightly ahead in time (by less than MaxSkewTime) the - // request should still be allowed. - if pSignValues.Date.After(time.Now().UTC().Add(consts.MaxSkewTime)) { - err = responses.ErrRequestNotReadyYet - return - } +// SkipContentSha256Cksum returns true if caller needs to skip +// payload checksum, false if not. +func SkipContentSha256Cksum(r *http.Request) bool { + var ( + v []string + ok bool + ) - if time.Now().UTC().Sub(pSignValues.Date) > pSignValues.Expires { - err = responses.ErrExpiredPresignRequest - return + if isRequestPresignedSignatureV4(r) { + v, ok = r.Form[consts.AmzContentSha256] + if !ok { + v, ok = r.Header[consts.AmzContentSha256] + } + } else { + v, ok = r.Header[consts.AmzContentSha256] + } + + // Skip if no header was set. + if !ok { + return true + } + + // If x-amz-content-sha256 is set and the value is not + // 'UNSIGNED-PAYLOAD' we should validate the content sha256. + switch v[0] { + case unsignedPayload: + return true + case consts.EmptySHA256: + // some broken clients set empty-sha256 + // with > 0 content-length in the body, + // we should skip such clients and allow + // blindly such insecure clients only if + // S3 strict compatibility is disabled. + if r.ContentLength > 0 { + // We return true only in situations when + // deployment has asked MinIO to allow for + // such broken clients and content-length > 0. + return true + } } + return false +} - // Save the date and expires. 
- t := pSignValues.Date - expireSeconds := int(pSignValues.Expires / time.Second) - - // Construct new query. - query := make(url.Values) - clntHashedPayload := req.Form.Get(consts.AmzContentSha256) - if clntHashedPayload != "" { - query.Set(consts.AmzContentSha256, hashedPayload) +// Returns SHA256 for calculating canonical-request. +func GetContentSha256Cksum(r *http.Request, stype serviceType) string { + if stype == ServiceSTS { + payload, err := ioutil.ReadAll(io.LimitReader(r.Body, consts.StsRequestBodyLimit)) + if err != nil { + log.Errorf("ServiceSTS ReadAll err:%v", err) + } + sum256 := sha256.Sum256(payload) + r.Body = ioutil.NopCloser(bytes.NewReader(payload)) + return hex.EncodeToString(sum256[:]) } - // not check token? - //token := req.Form.Get(consts.AmzSecurityToken) - //if token != "" { - // query.Set(consts.AmzSecurityToken, cred.SessionToken) - //} - - query.Set(consts.AmzAlgorithm, signV4Algorithm) - - // Construct the query. - query.Set(consts.AmzDate, t.Format(iso8601Format)) - query.Set(consts.AmzExpires, strconv.Itoa(expireSeconds)) - query.Set(consts.AmzSignedHeaders, utils.GetSignedHeaders(extractedSignedHeaders)) - query.Set(consts.AmzCredential, ack.Key+consts.SlashSeparator+pSignValues.Credential.getScope()) - - defaultSigParams := set.CreateStringSet( - consts.AmzContentSha256, - //consts.AmzSecurityToken, - consts.AmzAlgorithm, - consts.AmzDate, - consts.AmzExpires, - consts.AmzSignedHeaders, - consts.AmzCredential, - consts.AmzSignature, + var ( + defaultSha256Cksum string + v []string + ok bool ) - // Add missing query parameters if any provided in the request URL - for k, v := range req.Form { - if !defaultSigParams.Contains(k) { - query[k] = v + // For a presigned request we look at the query param for sha256. + if isRequestPresignedSignatureV4(r) { + // X-Amz-Content-Sha256, if not set in presigned requests, checksum + // will default to 'UNSIGNED-PAYLOAD'. 
+ defaultSha256Cksum = unsignedPayload + v, ok = r.Form[consts.AmzContentSha256] + if !ok { + v, ok = r.Header[consts.AmzContentSha256] } + } else { + // X-Amz-Content-Sha256, if not set in signed requests, checksum + // will default to sha256([]byte("")). + defaultSha256Cksum = consts.EmptySHA256 + v, ok = r.Header[consts.AmzContentSha256] } - // Get the encoded query. - encodedQuery := query.Encode() - - // Verify if date query is same. - if req.Form.Get(consts.AmzDate) != query.Get(consts.AmzDate) { - err = responses.ErrSignatureDoesNotMatch - } - // Verify if expires query is same. - if req.Form.Get(consts.AmzExpires) != query.Get(consts.AmzExpires) { - err = responses.ErrSignatureDoesNotMatch - return + // We found 'X-Amz-Content-Sha256' return the captured value. + if ok { + return v[0] } - // Verify if signed headers query is same. - if req.Form.Get(consts.AmzSignedHeaders) != query.Get(consts.AmzSignedHeaders) { - err = responses.ErrSignatureDoesNotMatch - return - } - // Verify if credential query is same. - if req.Form.Get(consts.AmzCredential) != query.Get(consts.AmzCredential) { - err = responses.ErrSignatureDoesNotMatch - return - } - // Verify if sha256 payload query is same. - if clntHashedPayload != "" && clntHashedPayload != query.Get(consts.AmzContentSha256) { - err = responses.ErrContentSHA256Mismatch - return - } - // not check SessionToken. - //// Verify if security token is correct. - //if token != "" && subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 { - // return handlers.ErrInvalidToken - //} - - // Verify finally if signature is same. - - // Get canonical request. - presignedCanonicalReq := utils.GetCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method) - - // Get string to sign from canonical request. - presignedStringToSign := utils.GetStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope()) - - // Get hmac presigned signing key. 
- presignedSigningKey := utils.GetSigningKey(ack.Secret, pSignValues.Credential.scope.date, - pSignValues.Credential.scope.region, string(stype)) - - // Get new signature. - newSignature := utils.GetSignature(presignedSigningKey, presignedStringToSign) - // Verify signature. - if !compareSignatureV4(req.Form.Get(consts.AmzSignature), newSignature) { - err = responses.ErrSignatureDoesNotMatch - return - } - - return + // We couldn't find 'X-Amz-Content-Sha256'. + return defaultSha256Cksum } -// DoesSignatureMatch - Verify authorization header with calculated header in accordance with -// - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html -func (s *service) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) (ack *accesskey.AccessKey, err error) { - // Copy request. - req := *r - - // Save authorization header. - v4Auth := req.Header.Get(consts.Authorization) - - // Parse signature version '4' header. - signV4Values, err := parseSignV4(v4Auth, region, stype) - if err != nil { - return - } - - // Extract all the signed headers along with its values. - extractedSignedHeaders, err := extractSignedHeaders(signV4Values.SignedHeaders, r) - if err != nil { - return - } - - // Check accesskey - ack, err = s.accessKeySvc.Get(signV4Values.Credential.accessKey) - if errors.Is(err, accesskey.ErrNotFound) { - err = responses.ErrInvalidAccessKeyID +// isValidRegion - verify if incoming region value is valid with configured Region. +func isValidRegion(reqRegion string, confRegion string) bool { + if confRegion == "" { + return true } - if err != nil { - return + if confRegion == "US" { + confRegion = consts.DefaultRegion } - if !ack.Enable { - err = responses.ErrAccessKeyDisabled - return + // Some older s3 clients set region as "US" instead of + // globalDefaultRegion, handle it. 
+ if reqRegion == "US" { + reqRegion = consts.DefaultRegion } + return reqRegion == confRegion +} - // Extract date, if not present throw error. - var date string - if date = req.Header.Get(consts.AmzDate); date == "" { - if date = r.Header.Get(consts.Date); date == "" { - err = responses.ErrMissingDateHeader - return +// check if the access key is valid and recognized, additionally +// also returns if the access key is owner/admin. +func (s *AuthSys) checkKeyValid(r *http.Request, accessKey string) (auth.Credentials, bool, apierrors.ErrorCode) { + + cred := s.AdminCred + if cred.AccessKey != accessKey { + // Check if the access key is part of users credentials. + ucred, ok := s.Iam.GetUser(r.Context(), accessKey) + if !ok { + // Credentials will be invalid but and disabled + // return a different error in such a scenario. + if ucred.Status == auth.AccountOff { + return cred, false, apierrors.ErrAccessKeyDisabled + } + return cred, false, apierrors.ErrInvalidAccessKeyID } + cred = ucred } + owner := cred.AccessKey == s.AdminCred.AccessKey + return cred, owner, apierrors.ErrNone +} - // Parse date header. - t, err := time.Parse(iso8601Format, date) - if err != nil { - err = responses.ErrAuthorizationHeaderMalformed - return +func contains(slice interface{}, elem interface{}) bool { + v := reflect.ValueOf(slice) + if v.Kind() == reflect.Slice { + for i := 0; i < v.Len(); i++ { + if v.Index(i).Interface() == elem { + return true + } + } } + return false +} - // Query string. - queryStr := req.URL.Query().Encode() - - // Get canonical request. - canonicalRequest := utils.GetCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method) - - // Get string to sign from canonical request. - stringToSign := utils.GetStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) - - // Get hmac signing key. 
- signingKey := utils.GetSigningKey(ack.Secret, signV4Values.Credential.scope.date, - signV4Values.Credential.scope.region, string(stype)) - - // Calculate signature. - newSignature := utils.GetSignature(signingKey, stringToSign) - - // Verify if signature match. - if !compareSignatureV4(newSignature, signV4Values.Signature) { - err = responses.ErrSignatureDoesNotMatch - return +// extractSignedHeaders extract signed headers from Authorization header +func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, apierrors.ErrorCode) { + reqHeaders := r.Header + reqQueries := r.Form + // find whether "host" is part of list of signed headers. + // if not return ErrUnsignedHeaders. "host" is mandatory. + if !contains(signedHeaders, "host") { + return nil, apierrors.ErrUnsignedHeaders + } + extractedSignedHeaders := make(http.Header) + for _, header := range signedHeaders { + // `host` will not be found in the headers, can be found in r.Host. + // but its alway necessary that the list of signed headers containing host in it. + val, ok := reqHeaders[http.CanonicalHeaderKey(header)] + if !ok { + // try to set headers from Query String + val, ok = reqQueries[header] + } + if ok { + extractedSignedHeaders[http.CanonicalHeaderKey(header)] = val + continue + } + switch header { + case "expect": + // Golang http server strips off 'Expect' header, if the + // client sent this as part of signed headers we need to + // handle otherwise we would see a signature mismatch. + // `aws-cli` sets this as part of signed headers. + // + // According to + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20 + // Expect header is always of form: + // + // Expect = "Expect" ":" 1#expectation + // expectation = "100-continue" | expectation-extension + // + // So it safe to assume that '100-continue' is what would + // be sent, for the time being keep this work around. 
+ // Adding a *TODO* to remove this later when Golang server + // doesn't filter out the 'Expect' header. + extractedSignedHeaders.Set(header, "100-continue") + case "host": + // Go http server removes "host" from Request.Header + + //extractedSignedHeaders.Set(header, r.Host) + // todo use r.Host, or filedag-web deal with + //value := strings.Split(r.Host, ":") + extractedSignedHeaders.Set(header, r.Host) + case "transfer-encoding": + // Go http server removes "host" from Request.Header + extractedSignedHeaders[http.CanonicalHeaderKey(header)] = r.TransferEncoding + case "content-length": + // Signature-V4 spec excludes Content-Length from signed headers list for signature calculation. + // But some clients deviate from this rule. Hence we consider Content-Length for signature + // calculation to be compatible with such clients. + extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) + default: + return nil, apierrors.ErrUnsignedHeaders + } } - - return + return extractedSignedHeaders, apierrors.ErrNone } - -//// getScope generate a string of a specific date, an AWS region, and a service. -//func getScope(t time.Time, region string) string { -// scope := strings.Join([]string{ -// t.Format(yyyymmdd), -// region, -// string(ServiceS3), -// "aws4_request", -// }, consts.SlashSeparator) -// return scope -//} diff --git a/s3/services/auth/streaming-signature-v4.go b/s3/services/auth/streaming-signature-v4.go new file mode 100644 index 000000000..9a85b68bd --- /dev/null +++ b/s3/services/auth/streaming-signature-v4.go @@ -0,0 +1,481 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +// Package cmd This file implements helper functions to validate Streaming AWS +// Signature Version '4' authorization header. +package auth + +import ( + "bufio" + "bytes" + "crypto/sha256" + "encoding/hex" + "errors" + "github.com/bittorrent/go-btfs/s3/apierrors" + "github.com/bittorrent/go-btfs/s3/utils" + "hash" + "io" + "net/http" + "strings" + "time" + + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/iam/auth" + "github.com/dustin/go-humanize" +) + +// Streaming AWS Signature Version '4' constants. +const ( + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD" + streamingContentEncoding = "aws-chunked" +) + +// errSignatureMismatch means signature did not match. +var errSignatureMismatch = errors.New("Signature does not match") + +// getChunkSignature - get chunk signature. +func getChunkSignature(cred auth.Credentials, seedSignature string, region string, date time.Time, hashedChunk string) string { + // Calculate string to sign. + stringToSign := signV4ChunkedAlgorithm + "\n" + + date.Format(iso8601Format) + "\n" + + getScope(date, region) + "\n" + + seedSignature + "\n" + + emptySHA256 + "\n" + + hashedChunk + + // Get hmac signing key. + signingKey := utils.GetSigningKey(cred.SecretKey, date, region, string(ServiceS3)) + + // Calculate signature. 
+ newSignature := utils.GetSignature(signingKey, stringToSign) + + return newSignature +} + +// CalculateSeedSignature - Calculate seed signature in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html +// +// returns signature, error otherwise if the signature mismatches or any other +// error while parsing and validating. +func (s *AuthSys) CalculateSeedSignature(r *http.Request) (cred auth.Credentials, signature string, region string, date time.Time, errCode apierrors.ErrorCode) { + // Copy request. + req := *r + + // Save authorization header. + v4Auth := req.Header.Get(consts.Authorization) + + // Parse signature version '4' header. + signV4Values, errCode := parseSignV4(v4Auth, "", ServiceS3) + if errCode != apierrors.ErrNone { + return cred, "", "", time.Time{}, errCode + } + + // Payload streaming. + payload := streamingContentSHA256 + + // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' + if payload != req.Header.Get(consts.AmzContentSha256) { + return cred, "", "", time.Time{}, apierrors.ErrContentSHA256Mismatch + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) + if errCode != apierrors.ErrNone { + return cred, "", "", time.Time{}, errCode + } + + cred, _, errCode = s.checkKeyValid(r, signV4Values.Credential.accessKey) + if errCode != apierrors.ErrNone { + return cred, "", "", time.Time{}, errCode + } + + // Verify if region is valid. + region = signV4Values.Credential.scope.region + + // Extract date, if not present throw error. + var dateStr string + if dateStr = req.Header.Get("x-amz-date"); dateStr == "" { + if dateStr = r.Header.Get("Date"); dateStr == "" { + return cred, "", "", time.Time{}, apierrors.ErrMissingDateHeader + } + } + + // Parse date header. 
+ var err error + date, err = time.Parse(iso8601Format, dateStr) + if err != nil { + return cred, "", "", time.Time{}, apierrors.ErrMalformedDate + } + + // Query string. + queryStr := req.Form.Encode() + + // Get canonical request. + canonicalRequest := utils.GetCanonicalRequest(extractedSignedHeaders, payload, queryStr, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + stringToSign := utils.GetStringToSign(canonicalRequest, date, signV4Values.Credential.getScope()) + + // Get hmac signing key. + signingKey := utils.GetSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region, string(ServiceS3)) + + // Calculate signature. + newSignature := utils.GetSignature(signingKey, stringToSign) + + // Verify if signature match. + if !compareSignatureV4(newSignature, signV4Values.Signature) { + return cred, "", "", time.Time{}, apierrors.ErrSignatureDoesNotMatch + } + + // Return caculated signature. + return cred, newSignature, region, date, apierrors.ErrNone +} + +const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB + +// lineTooLong is generated as chunk header is bigger than 4KiB. +var errLineTooLong = errors.New("header line too long") + +// malformed encoding is generated when chunk header is wrongly formed. +var errMalformedEncoding = errors.New("malformed chunked encoding") + +// chunk is considered too big if its bigger than > 16MiB. +var errChunkTooBig = errors.New("chunk too big: choose chunk size <= 16MiB") + +// NewSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r +// out of HTTP "chunked" format before returning it. +// The s3ChunkedReader returns io.EOF when the final 0-length chunk is read. +// +// NewChunkedReader is not needed by normal applications. The http package +// automatically decodes chunking when reading response bodies. 
+func NewSignV4ChunkedReader(req *http.Request, s *AuthSys) (io.ReadCloser, apierrors.ErrorCode) { + cred, seedSignature, region, seedDate, errCode := s.CalculateSeedSignature(req) + if errCode != apierrors.ErrNone { + return nil, errCode + } + + return &s3ChunkedReader{ + reader: bufio.NewReader(req.Body), + cred: cred, + seedSignature: seedSignature, + seedDate: seedDate, + region: region, + chunkSHA256Writer: sha256.New(), + buffer: make([]byte, 64*1024), + }, apierrors.ErrNone +} + +// Represents the overall state that is required for decoding a +// AWS Signature V4 chunked reader. +type s3ChunkedReader struct { + reader *bufio.Reader + cred auth.Credentials + seedSignature string + seedDate time.Time + region string + + chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data. + buffer []byte + offset int + err error +} + +func (cr *s3ChunkedReader) Close() (err error) { + return nil +} + +// Now, we read one chunk from the underlying reader. +// A chunk has the following format: +// +// + ";chunk-signature=" + + "\r\n" + + "\r\n" +// +// First, we read the chunk size but fail if it is larger +// than 16 MiB. We must not accept arbitrary large chunks. +// One 16 MiB is a reasonable max limit. +// +// Then we read the signature and payload data. We compute the SHA256 checksum +// of the payload and verify that it matches the expected signature value. +// +// The last chunk is *always* 0-sized. So, we must only return io.EOF if we have encountered +// a chunk with a chunk size = 0. However, this chunk still has a signature and we must +// verify it. +const maxChunkSize = 16 << 20 // 16 MiB + +// Read - implements `io.Reader`, which transparently decodes +// the incoming AWS Signature V4 streaming signature. +func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { + // First, if there is any unread data, copy it to the client + // provided buffer. 
+ if cr.offset > 0 { + n = copy(buf, cr.buffer[cr.offset:]) + if n == len(buf) { + cr.offset += n + return n, nil + } + cr.offset = 0 + buf = buf[n:] + } + + var size int + for { + b, err := cr.reader.ReadByte() + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + cr.err = err + return n, cr.err + } + if b == ';' { // separating character + break + } + + // Manually deserialize the size since AWS specified + // the chunk size to be of variable width. In particular, + // a size of 16 is encoded as `10` while a size of 64 KB + // is `10000`. + switch { + case b >= '0' && b <= '9': + size = size<<4 | int(b-'0') + case b >= 'a' && b <= 'f': + size = size<<4 | int(b-('a'-10)) + case b >= 'A' && b <= 'F': + size = size<<4 | int(b-('A'-10)) + default: + cr.err = errMalformedEncoding + return n, cr.err + } + if size > maxChunkSize { + cr.err = errChunkTooBig + return n, cr.err + } + } + + // Now, we read the signature of the following payload and expect: + // chunk-signature=" + + "\r\n" + // + // The signature is 64 bytes long (hex-encoded SHA256 hash) and + // starts with a 16 byte header: len("chunk-signature=") + 64 == 80. 
+ var signature [80]byte + _, err = io.ReadFull(cr.reader, signature[:]) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + cr.err = err + return n, cr.err + } + if !bytes.HasPrefix(signature[:], []byte("chunk-signature=")) { + cr.err = errMalformedEncoding + return n, cr.err + } + b, err := cr.reader.ReadByte() + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + cr.err = err + return n, cr.err + } + if b != '\r' { + cr.err = errMalformedEncoding + return n, cr.err + } + b, err = cr.reader.ReadByte() + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + cr.err = err + return n, cr.err + } + if b != '\n' { + cr.err = errMalformedEncoding + return n, cr.err + } + + if cap(cr.buffer) < size { + cr.buffer = make([]byte, size) + } else { + cr.buffer = cr.buffer[:size] + } + + // Now, we read the payload and compute its SHA-256 hash. + _, err = io.ReadFull(cr.reader, cr.buffer) + if err == io.EOF && size != 0 { + err = io.ErrUnexpectedEOF + } + if err != nil && err != io.EOF { + cr.err = err + return n, cr.err + } + b, err = cr.reader.ReadByte() + if b != '\r' { + cr.err = errMalformedEncoding + return n, cr.err + } + b, err = cr.reader.ReadByte() + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + cr.err = err + return n, cr.err + } + if b != '\n' { + cr.err = errMalformedEncoding + return n, cr.err + } + + // Once we have read the entire chunk successfully, we verify + // that the received signature matches our computed signature. + cr.chunkSHA256Writer.Write(cr.buffer) + newSignature := getChunkSignature(cr.cred, cr.seedSignature, cr.region, cr.seedDate, hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))) + if !compareSignatureV4(string(signature[16:]), newSignature) { + cr.err = errSignatureMismatch + return n, cr.err + } + cr.seedSignature = newSignature + cr.chunkSHA256Writer.Reset() + + // If the chunk size is zero we return io.EOF. 
As specified by AWS, + // only the last chunk is zero-sized. + if size == 0 { + cr.err = io.EOF + return n, cr.err + } + + cr.offset = copy(buf, cr.buffer) + n += cr.offset + return n, err +} + +// readCRLF - check if reader only has '\r\n' CRLF character. +// returns malformed encoding if it doesn't. +func readCRLF(reader io.Reader) error { + buf := make([]byte, 2) + _, err := io.ReadFull(reader, buf[:2]) + if err != nil { + return err + } + if buf[0] != '\r' || buf[1] != '\n' { + return errMalformedEncoding + } + return nil +} + +// Read a line of bytes (up to \n) from b. +// Give up if the line exceeds maxLineLength. +// The returned bytes are owned by the bufio.Reader +// so they are only valid until the next bufio read. +func readChunkLine(b *bufio.Reader) ([]byte, []byte, error) { + buf, err := b.ReadSlice('\n') + if err != nil { + // We always know when EOF is coming. + // If the caller asked for a line, there should be a line. + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if err == bufio.ErrBufferFull { + err = errLineTooLong + } + return nil, nil, err + } + if len(buf) >= maxLineLength { + return nil, nil, errLineTooLong + } + // Parse s3 specific chunk extension and fetch the values. + hexChunkSize, hexChunkSignature := parseS3ChunkExtension(buf) + return hexChunkSize, hexChunkSignature, nil +} + +// trimTrailingWhitespace - trim trailing white space. +func trimTrailingWhitespace(b []byte) []byte { + for len(b) > 0 && isASCIISpace(b[len(b)-1]) { + b = b[:len(b)-1] + } + return b +} + +// isASCIISpace - is ascii space? +func isASCIISpace(b byte) bool { + return b == ' ' || b == '\t' || b == '\n' || b == '\r' +} + +// Constant s3 chunk encoding signature. +const s3ChunkSignatureStr = ";chunk-signature=" + +// parses3ChunkExtension removes any s3 specific chunk-extension from buf. +// For example, +// +// "10000;chunk-signature=..." => "10000", "chunk-signature=..." 
+func parseS3ChunkExtension(buf []byte) ([]byte, []byte) { + buf = trimTrailingWhitespace(buf) + semi := bytes.Index(buf, []byte(s3ChunkSignatureStr)) + // Chunk signature not found, return the whole buffer. + if semi == -1 { + return buf, nil + } + return buf[:semi], parseChunkSignature(buf[semi:]) +} + +// parseChunkSignature - parse chunk signature. +func parseChunkSignature(chunk []byte) []byte { + chunkSplits := bytes.SplitN(chunk, []byte(s3ChunkSignatureStr), 2) + return chunkSplits[1] +} + +// parse hex to uint64. +func parseHexUint(v []byte) (n uint64, err error) { + for i, b := range v { + switch { + case '0' <= b && b <= '9': + b -= '0' + case 'a' <= b && b <= 'f': + b = b - 'a' + 10 + case 'A' <= b && b <= 'F': + b = b - 'A' + 10 + default: + return 0, errors.New("invalid byte in chunk length") + } + if i == 16 { + return 0, errors.New("http chunk length too large") + } + n <<= 4 + n |= uint64(b) + } + return +} + +// Trims away `aws-chunked` from the content-encoding header if present. +// Streaming signature clients can have custom content-encoding such as +// `aws-chunked,gzip` here we need to only save `gzip`. 
+// For more refer http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html +func TrimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string) { + if contentEnc == "" { + return contentEnc + } + var newEncs []string + for _, enc := range strings.Split(contentEnc, ",") { + if enc != streamingContentEncoding { + newEncs = append(newEncs, enc) + } + } + return strings.Join(newEncs, ",") +} From 1c5ac9d16c16341a2cb7e97921256ba0f221b2ed Mon Sep 17 00:00:00 2001 From: steve Date: Thu, 24 Aug 2023 21:21:18 +0800 Subject: [PATCH 056/139] chore: clear sig --- s3/services/auth/auth_type.go | 101 ----- s3/services/auth/check_handler_auth.go | 339 --------------- s3/services/auth/signature-v2.go | 429 ------------------ s3/services/auth/signature-v4-parser.go | 307 ------------- s3/services/auth/signature-v4-utils.go | 232 ---------- s3/services/auth/signature-v4.go | 235 ---------- s3/services/auth/streaming-signature-v4.go | 481 --------------------- 7 files changed, 2124 deletions(-) delete mode 100644 s3/services/auth/auth_type.go delete mode 100644 s3/services/auth/check_handler_auth.go delete mode 100644 s3/services/auth/signature-v2.go delete mode 100644 s3/services/auth/signature-v4-parser.go delete mode 100644 s3/services/auth/signature-v4-utils.go delete mode 100644 s3/services/auth/signature-v4.go delete mode 100644 s3/services/auth/streaming-signature-v4.go diff --git a/s3/services/auth/auth_type.go b/s3/services/auth/auth_type.go deleted file mode 100644 index 936eb2aa0..000000000 --- a/s3/services/auth/auth_type.go +++ /dev/null @@ -1,101 +0,0 @@ -package auth - -import ( - "github.com/bittorrent/go-btfs/s3/consts" - "net/http" - "net/url" - "strings" -) - -// Verify if request has JWT. -func isRequestJWT(r *http.Request) bool { - return strings.HasPrefix(r.Header.Get("Authorization"), "Bearer") -} - -// IsRequestSignatureV4 Verify if request has AWS Signature Version '4'. 
-func IsRequestSignatureV4(r *http.Request) bool { - return strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) -} - -// Verify if request has AWS Signature Version '2'. -func isRequestSignatureV2(r *http.Request) bool { - return !strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) && - strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm) -} - -// Verify if request has AWS PreSign Version '4'. -func isRequestPresignedSignatureV4(r *http.Request) bool { - _, ok := r.URL.Query()["X-Amz-Credential"] - return ok -} - -// Verify request has AWS PreSign Version '2'. -func isRequestPresignedSignatureV2(r *http.Request) bool { - _, ok := r.URL.Query()["AWSAccessKeyId"] - return ok -} - -// Verify if request has AWS Post policy Signature Version '4'. -func isRequestPostPolicySignatureV4(r *http.Request) bool { - return strings.Contains(r.Header.Get("Content-Type"), "multipart/form-data") && - r.Method == http.MethodPost -} - -// Verify if the request has AWS Streaming Signature Version '4'. This is only valid for 'PUT' operation. -func isRequestSignStreamingV4(r *http.Request) bool { - return r.Header.Get("x-amz-content-sha256") == consts.StreamingContentSHA256 && - r.Method == http.MethodPut -} - -// AuthType Authorization type. -type AuthType int - -// List of all supported auth types. -const ( - AuthTypeUnknown AuthType = iota - AuthTypeAnonymous - AuthTypePresigned - AuthTypePresignedV2 - AuthTypePostPolicy - AuthTypeStreamingSigned - AuthTypeSigned - AuthTypeSignedV2 - AuthTypeJWT - AuthTypeSTS -) - -// GetRequestAuthType Get request authentication type. 
-func GetRequestAuthType(r *http.Request) AuthType { - if r.URL != nil { - var err error - r.Form, err = url.ParseQuery(r.URL.RawQuery) - if err != nil { - log.Infof("parse query failed, query: %s, error: %v", r.URL.RawQuery, err) - return AuthTypeUnknown - } - } - if isRequestSignatureV2(r) { - return AuthTypeSignedV2 - } else if isRequestPresignedSignatureV2(r) { - return AuthTypePresignedV2 - } else if isRequestSignStreamingV4(r) { - return AuthTypeStreamingSigned - } else if IsRequestSignatureV4(r) { - return AuthTypeSigned - } else if isRequestPresignedSignatureV4(r) { - return AuthTypePresigned - } else if isRequestJWT(r) { - return AuthTypeJWT - } else if isRequestPostPolicySignatureV4(r) { - return AuthTypePostPolicy - } else if _, ok := r.Form[consts.StsAction]; ok { - return AuthTypeSTS - } else if _, ok := r.Header[consts.Authorization]; !ok { - return AuthTypeAnonymous - } - return AuthTypeUnknown -} - -func IsAuthTypeStreamingSigned(atype AuthType) bool { - return atype == AuthTypeStreamingSigned -} diff --git a/s3/services/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go deleted file mode 100644 index 7f9fa7518..000000000 --- a/s3/services/auth/check_handler_auth.go +++ /dev/null @@ -1,339 +0,0 @@ -package auth - -import ( - "bytes" - "context" - "encoding/hex" - s3action "github.com/bittorrent/go-btfs/s3/action" - "io" - "net/http" - "net/url" - "strconv" - "time" - - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/etag" - "github.com/bittorrent/go-btfs/s3/utils/hash" -) - -// AuthSys auth and sign system -type AuthSys struct { - Iam *IdentityAMSys - PolicySys *iPolicySys - AdminCred auth.Credentials -} - -// NewAuthSys new an AuthSys -func NewAuthSys(db *uleveldb.ULevelDB, adminCred auth.Credentials) *AuthSys { - return &AuthSys{ - Iam: NewIdentityAMSys(db), - PolicySys: newIPolicySys(db), - AdminCred: adminCred, - } -} - -// CheckRequestAuthTypeCredential Check request auth type verifies the incoming 
http request -// - validates the request signature -// - validates the policy action if anonymous tests bucket policies if any, -// for authenticated requests validates IAM policies. -// -// returns APIErrorCode if any to be replied to the client. -// Additionally, returns the accessKey used in the request, and if this request is by an admin. -func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request, action s3action.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err apierrors.ErrorCode) { - switch GetRequestAuthType(r) { - case AuthTypeUnknown, AuthTypeStreamingSigned: - return cred, owner, apierrors.ErrSignatureVersionNotSupported - case AuthTypePresignedV2, AuthTypeSignedV2: - if s3Err = s.IsReqAuthenticatedV2(r); s3Err != apierrors.ErrNone { - return cred, owner, s3Err - } - cred, owner, s3Err = s.getReqAccessKeyV2(r) - case AuthTypeSigned, AuthTypePresigned: - region := "" - switch action { - case s3action.GetBucketLocationAction, s3action.ListAllMyBucketsAction: - region = "" - } - if s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3); s3Err != apierrors.ErrNone { - return cred, owner, s3Err - } - cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) - } - if s3Err != apierrors.ErrNone { - return cred, owner, s3Err - } - // TODO: Why should a temporary user be replaced with the parent user's account? - //if cred.IsTemp() { - // cred, _ = s.Iam.GetUser(ctx, cred.ParentUser) - //} - if action == s3action.CreateBucketAction { - // To extract region from XML in request body, get copy of request body. - payload, err := io.ReadAll(io.LimitReader(r.Body, consts.MaxLocationConstraintSize)) - if err != nil { - log.Errorf("ReadAll err:%v", err) - return cred, owner, apierrors.ErrMalformedXML - } - - // Populate payload to extract location constraint. 
- r.Body = io.NopCloser(bytes.NewReader(payload)) - if s.PolicySys.bmSys.HasBucket(ctx, bucketName) { - return cred, owner, apierrors.ErrBucketAlreadyExists - } - } - - // Anonymous user - if cred.AccessKey == "" { - owner = false - } - - // check bucket policy - if s.PolicySys.isAllowed(ctx, auth.Args{ - AccountName: cred.AccessKey, - Action: action, - BucketName: bucketName, - IsOwner: owner, - ObjectName: objectName, - }) { - // Request is allowed return the appropriate access key. - return cred, owner, apierrors.ErrNone - } - if action == s3action.ListBucketVersionsAction { - // In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission - // verify as a fallback. - if s.PolicySys.isAllowed(ctx, auth.Args{ - AccountName: cred.AccessKey, - Action: s3action.ListBucketAction, - BucketName: bucketName, - IsOwner: owner, - ObjectName: objectName, - }) { - // Request is allowed return the appropriate access key. - return cred, owner, apierrors.ErrNone - } - } - - // check user policy - if bucketName == "" || action == s3action.CreateBucketAction { - if s.Iam.IsAllowed(r.Context(), auth.Args{ - AccountName: cred.AccessKey, - Action: action, - BucketName: bucketName, - Conditions: getConditions(r, cred.AccessKey), - ObjectName: objectName, - IsOwner: owner, - }) { - // Request is allowed return the appropriate access key. - return cred, owner, apierrors.ErrNone - } - } else { - if !s.PolicySys.bmSys.HasBucket(ctx, bucketName) { - return cred, owner, apierrors.ErrNoSuchBucket - } - } - - return cred, owner, apierrors.ErrAccessDenied -} - -// Verify if request has valid AWS Signature Version '2'. 
-func (s *AuthSys) IsReqAuthenticatedV2(r *http.Request) (s3Error apierrors.ErrorCode) { - if isRequestSignatureV2(r) { - return s.doesSignV2Match(r) - } - return s.doesPresignV2SignatureMatch(r) -} - -func (s *AuthSys) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { - sha256sum := GetContentSha256Cksum(r, stype) - switch { - case IsRequestSignatureV4(r): - return s.doesSignatureMatch(sha256sum, r, region, stype) - case isRequestPresignedSignatureV4(r): - return s.doesPresignedSignatureMatch(sha256sum, r, region, stype) - default: - return apierrors.ErrAccessDenied - } -} - -// IsReqAuthenticated Verify if request has valid AWS Signature Version '4'. -func (s *AuthSys) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { - if errCode := s.ReqSignatureV4Verify(r, region, stype); errCode != apierrors.ErrNone { - return errCode - } - clientETag, err := etag.FromContentMD5(r.Header) - if err != nil { - return apierrors.ErrInvalidDigest - } - - // Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned) - // Do not verify 'X-Amz-Content-Sha256' if skipSHA256. - var contentSHA256 []byte - if skipSHA256 := SkipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) { - if sha256Sum, ok := r.Form[consts.AmzContentSha256]; ok && len(sha256Sum) > 0 { - contentSHA256, err = hex.DecodeString(sha256Sum[0]) - if err != nil { - return apierrors.ErrContentSHA256Mismatch - } - } - } else if _, ok := r.Header[consts.AmzContentSha256]; !skipSHA256 && ok { - contentSHA256, err = hex.DecodeString(r.Header.Get(consts.AmzContentSha256)) - if err != nil || len(contentSHA256) == 0 { - return apierrors.ErrContentSHA256Mismatch - } - } - - // Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present. - // The verification happens implicit during reading. 
- reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1) - if err != nil { - return apierrors.ErrInternalError - } - r.Body = reader - return apierrors.ErrNone -} - -// ValidateAdminSignature validate admin Signature -func (s *AuthSys) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, map[string]interface{}, bool, apierrors.ErrorCode) { - var cred auth.Credentials - var owner bool - s3Err := apierrors.ErrAccessDenied - if _, ok := r.Header[consts.AmzContentSha256]; ok && - GetRequestAuthType(r) == AuthTypeSigned { - // We only support admin credentials to access admin APIs. - cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) - if s3Err != apierrors.ErrNone { - return cred, nil, owner, s3Err - } - - // we only support V4 (no presign) with auth body - s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3) - } - if s3Err != apierrors.ErrNone { - return cred, nil, owner, s3Err - } - - return cred, nil, owner, apierrors.ErrNone -} - -func getConditions(r *http.Request, username string) map[string][]string { - currTime := time.Now().UTC() - - principalType := "Anonymous" - if username != "" { - principalType = "User" - } - - at := GetRequestAuthType(r) - var signatureVersion string - switch at { - case AuthTypeSignedV2, AuthTypePresignedV2: - signatureVersion = signV2Algorithm - case AuthTypeSigned, AuthTypePresigned, AuthTypeStreamingSigned, AuthTypePostPolicy: - signatureVersion = signV4Algorithm - } - - var authtype string - switch at { - case AuthTypePresignedV2, AuthTypePresigned: - authtype = "REST-QUERY-STRING" - case AuthTypeSignedV2, AuthTypeSigned, AuthTypeStreamingSigned: - authtype = "REST-HEADER" - case AuthTypePostPolicy: - authtype = "POST" - } - - args := map[string][]string{ - "CurrentTime": {currTime.Format(time.RFC3339)}, - "EpochTime": {strconv.FormatInt(currTime.Unix(), 10)}, - "SecureTransport": {strconv.FormatBool(r.TLS != nil)}, - "UserAgent": 
{r.UserAgent()}, - "Referer": {r.Referer()}, - "principaltype": {principalType}, - "userid": {username}, - "username": {username}, - "signatureversion": {signatureVersion}, - "authType": {authtype}, - } - - cloneHeader := r.Header.Clone() - - for key, values := range cloneHeader { - if existingValues, found := args[key]; found { - args[key] = append(existingValues, values...) - } else { - args[key] = values - } - } - - cloneURLValues := make(url.Values, len(r.Form)) - for k, v := range r.Form { - cloneURLValues[k] = v - } - - for key, values := range cloneURLValues { - if existingValues, found := args[key]; found { - args[key] = append(existingValues, values...) - } else { - args[key] = values - } - } - - return args -} - -// IsPutActionAllowed - check if PUT operation is allowed on the resource, this -// call verifies bucket policies and IAM policies, supports multi user -// checks etc. -func (s *AuthSys) IsPutActionAllowed(ctx context.Context, r *http.Request, action s3action.Action, bucketName, objectName string) (s3Err apierrors.ErrorCode) { - var cred auth.Credentials - var owner bool - switch GetRequestAuthType(r) { - case AuthTypeUnknown: - return apierrors.ErrSignatureVersionNotSupported - case AuthTypeSignedV2, AuthTypePresignedV2: - cred, owner, s3Err = s.getReqAccessKeyV2(r) - case AuthTypeStreamingSigned, AuthTypePresigned, AuthTypeSigned: - region := "" - cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) - } - if s3Err != apierrors.ErrNone { - return s3Err - } - - // Do not check for PutObjectRetentionAction permission, - // if mode and retain until date are not set. 
- // Can happen when bucket has default lock config set - if action == s3action.PutObjectRetentionAction && - r.Header.Get(consts.AmzObjectLockMode) == "" && - r.Header.Get(consts.AmzObjectLockRetainUntilDate) == "" { - return apierrors.ErrNone - } - - // check bucket policy - if s.PolicySys.isAllowed(ctx, auth.Args{ - AccountName: cred.AccessKey, - Action: action, - BucketName: bucketName, - IsOwner: owner, - ObjectName: objectName, - }) { - return apierrors.ErrNone - } - - if !s.PolicySys.bmSys.HasBucket(ctx, bucketName) { - return apierrors.ErrNoSuchBucket - } - return apierrors.ErrAccessDenied -} - -func (s *AuthSys) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err apierrors.ErrorCode) { - switch GetRequestAuthType(r) { - case AuthTypeUnknown: - s3Err = apierrors.ErrSignatureVersionNotSupported - case AuthTypeSignedV2, AuthTypePresignedV2: - cred, owner, s3Err = s.getReqAccessKeyV2(r) - case AuthTypeStreamingSigned, AuthTypePresigned, AuthTypeSigned: - region := "" - cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) - } - return -} diff --git a/s3/services/auth/signature-v2.go b/s3/services/auth/signature-v2.go deleted file mode 100644 index 2410ee825..000000000 --- a/s3/services/auth/signature-v2.go +++ /dev/null @@ -1,429 +0,0 @@ -/* - * The following code tries to reverse engineer the Amazon S3 APIs, - * and is mostly copied from minio implementation. - */ - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. 
- -package auth - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/subtle" - "encoding/base64" - "fmt" - "github.com/bittorrent/go-btfs/s3/apierrors" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/iam/auth" - "net" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" -) - -// Whitelist resource list that will be used in query string for signature-V2 calculation. -// -// This list should be kept alphabetically sorted, do not hastily edit. -var resourceList = []string{ - "acl", - "cors", - "delete", - "encryption", - "legal-hold", - "lifecycle", - "location", - "logging", - "notification", - "partNumber", - "policy", - "requestPayment", - "response-cache-control", - "response-content-disposition", - "response-content-encoding", - "response-content-language", - "response-content-type", - "response-expires", - "retention", - "select", - "select-type", - "tagging", - "torrent", - "uploadId", - "uploads", - "versionId", - "versioning", - "versions", - "website", -} - -// Signature and API related constants. -const ( - signV2Algorithm = "AWS" -) - -// AWS S3 Signature V2 calculation rule is give here: -// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign -func (s *AuthSys) doesPolicySignatureV2Match(formValues http.Header) (auth.Credentials, apierrors.ErrorCode) { - accessKey := formValues.Get(consts.AmzAccessKeyID) - - r := &http.Request{Header: formValues} - cred, _, s3Err := s.checkKeyValid(r, accessKey) - if s3Err != apierrors.ErrNone { - return cred, s3Err - } - policy := formValues.Get("Policy") - signature := formValues.Get(consts.AmzSignatureV2) - if !compareSignatureV2(signature, calculateSignatureV2(policy, cred.SecretKey)) { - return cred, apierrors.ErrSignatureDoesNotMatch - } - return cred, apierrors.ErrNone -} - -// Escape encodedQuery string into unescaped list of query params, returns error -// if any while unescaping the values. 
-func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) { - for _, query := range strings.Split(encodedQuery, "&") { - var unescapedQuery string - unescapedQuery, err = url.QueryUnescape(query) - if err != nil { - return nil, err - } - unescapedQueries = append(unescapedQueries, unescapedQuery) - } - return unescapedQueries, nil -} - -// doesPresignV2SignatureMatch - Verify query headers with presigned signature -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth -// -// returns apierrors.ErrNone if matches. S3 errors otherwise. -func (s *AuthSys) doesPresignV2SignatureMatch(r *http.Request) apierrors.ErrorCode { - // r.RequestURI will have raw encoded URI as sent by the client. - tokens := strings.SplitN(r.RequestURI, "?", 2) - encodedResource := tokens[0] - encodedQuery := "" - if len(tokens) == 2 { - encodedQuery = tokens[1] - } - - var ( - filteredQueries []string - gotSignature string - expires string - accessKey string - err error - ) - - var unescapedQueries []string - unescapedQueries, err = unescapeQueries(encodedQuery) - if err != nil { - return apierrors.ErrInvalidQueryParams - } - - // Extract the necessary values from presigned query, construct a list of new filtered queries. - for _, query := range unescapedQueries { - keyval := strings.SplitN(query, "=", 2) - if len(keyval) != 2 { - return apierrors.ErrInvalidQueryParams - } - switch keyval[0] { - case consts.AmzAccessKeyID: - accessKey = keyval[1] - case consts.AmzSignatureV2: - gotSignature = keyval[1] - case consts.Expires: - expires = keyval[1] - default: - filteredQueries = append(filteredQueries, query) - } - } - - // Invalid values returns error. - if accessKey == "" || gotSignature == "" || expires == "" { - return apierrors.ErrInvalidQueryParams - } - - cred, _, s3Err := s.checkKeyValid(r, accessKey) - if s3Err != apierrors.ErrNone { - return s3Err - } - - // Make sure the request has not expired. 
- expiresInt, err := strconv.ParseInt(expires, 10, 64) - if err != nil { - return apierrors.ErrAuthorizationHeaderMalformed - } - - // Check if the presigned URL has expired. - if expiresInt < time.Now().UTC().Unix() { - return apierrors.ErrExpiredPresignRequest - } - - encodedResource, err = getResource(encodedResource, r.Host) - if err != nil { - return apierrors.ErrInvalidRequest - } - - expectedSignature := preSignatureV2(cred, r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires) - if !compareSignatureV2(gotSignature, expectedSignature) { - return apierrors.ErrSignatureDoesNotMatch - } - - r.Form.Del(consts.Expires) - - return apierrors.ErrNone -} - -func (s *AuthSys) getReqAccessKeyV2(r *http.Request) (auth.Credentials, bool, apierrors.ErrorCode) { - if accessKey := r.Form.Get(consts.AmzAccessKeyID); accessKey != "" { - return s.checkKeyValid(r, accessKey) - } - - // below is V2 Signed Auth header format, splitting on `space` (after the `AWS` string). - // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature - authFields := strings.Split(r.Header.Get(consts.Authorization), " ") - if len(authFields) != 2 { - return auth.Credentials{}, false, apierrors.ErrMissingFields - } - - // Then will be splitting on ":", this will seprate `AWSAccessKeyId` and `Signature` string. - keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":") - if len(keySignFields) != 2 { - return auth.Credentials{}, false, apierrors.ErrMissingFields - } - - return s.checkKeyValid(r, keySignFields[0]) -} - -// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; -// Signature = Base64( HMAC-SHA1( YourSecretKey, UTF-8-Encoding-Of( StringToSign ) ) ); -// -// StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Date + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; -// -// CanonicalizedResource = [ consts.SlashSeparator + Bucket ] + -// + -// [ subresource, if present. 
For example "?acl", "?location", "?logging", or "?torrent"]; -// -// CanonicalizedProtocolHeaders = - -// doesSignV2Match - Verify authorization header with calculated header in accordance with -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html -// returns true if matches, false otherwise. if error is not nil then it is always false - -func (s *AuthSys) validateV2AuthHeader(r *http.Request) (auth.Credentials, apierrors.ErrorCode) { - var cred auth.Credentials - v2Auth := r.Header.Get(consts.Authorization) - if v2Auth == "" { - return cred, apierrors.ErrAuthHeaderEmpty - } - - // Verify if the header algorithm is supported or not. - if !strings.HasPrefix(v2Auth, signV2Algorithm) { - return cred, apierrors.ErrSignatureVersionNotSupported - } - - cred, _, apiErr := s.getReqAccessKeyV2(r) - if apiErr != apierrors.ErrNone { - return cred, apiErr - } - - return cred, apierrors.ErrNone -} - -func (s *AuthSys) doesSignV2Match(r *http.Request) apierrors.ErrorCode { - v2Auth := r.Header.Get(consts.Authorization) - cred, apiError := s.validateV2AuthHeader(r) - if apiError != apierrors.ErrNone { - return apiError - } - - // r.RequestURI will have raw encoded URI as sent by the client. 
- tokens := strings.SplitN(r.RequestURI, "?", 2) - encodedResource := tokens[0] - encodedQuery := "" - if len(tokens) == 2 { - encodedQuery = tokens[1] - } - - unescapedQueries, err := unescapeQueries(encodedQuery) - if err != nil { - return apierrors.ErrInvalidQueryParams - } - - encodedResource, err = getResource(encodedResource, r.Host) - if err != nil { - return apierrors.ErrInvalidRequest - } - - prefix := fmt.Sprintf("%s %s:", signV2Algorithm, cred.AccessKey) - if !strings.HasPrefix(v2Auth, prefix) { - return apierrors.ErrSignatureDoesNotMatch - } - v2Auth = v2Auth[len(prefix):] - expectedAuth := signatureV2(cred, r.Method, encodedResource, strings.Join(unescapedQueries, "&"), r.Header) - if !compareSignatureV2(v2Auth, expectedAuth) { - return apierrors.ErrSignatureDoesNotMatch - } - return apierrors.ErrNone -} - -func calculateSignatureV2(stringToSign string, secret string) string { - hm := hmac.New(sha1.New, []byte(secret)) - hm.Write([]byte(stringToSign)) - return base64.StdEncoding.EncodeToString(hm.Sum(nil)) -} - -// Return signature-v2 for the presigned request. -func preSignatureV2(cred auth.Credentials, method string, encodedResource string, encodedQuery string, headers http.Header, expires string) string { - stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, expires) - return calculateSignatureV2(stringToSign, cred.SecretKey) -} - -// Return the signature v2 of a given request. -func signatureV2(cred auth.Credentials, method string, encodedResource string, encodedQuery string, headers http.Header) string { - stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, "") - signature := calculateSignatureV2(stringToSign, cred.SecretKey) - return signature -} - -// compareSignatureV2 returns true if and only if both signatures -// are equal. The signatures are expected to be base64 encoded strings -// according to the AWS S3 signature V2 spec. 
-func compareSignatureV2(sig1, sig2 string) bool { - // Decode signature string to binary byte-sequence representation is required - // as Base64 encoding of a value is not unique: - // For example "aGVsbG8=" and "aGVsbG8=\r" will result in the same byte slice. - signature1, err := base64.StdEncoding.DecodeString(sig1) - if err != nil { - return false - } - signature2, err := base64.StdEncoding.DecodeString(sig2) - if err != nil { - return false - } - return subtle.ConstantTimeCompare(signature1, signature2) == 1 -} - -// Return canonical headers. -func canonicalizedAmzHeadersV2(headers http.Header) string { - var keys []string - keyval := make(map[string]string, len(headers)) - for key := range headers { - lkey := strings.ToLower(key) - if !strings.HasPrefix(lkey, "x-amz-") { - continue - } - keys = append(keys, lkey) - keyval[lkey] = strings.Join(headers[key], ",") - } - sort.Strings(keys) - var canonicalHeaders []string - for _, key := range keys { - canonicalHeaders = append(canonicalHeaders, key+":"+keyval[key]) - } - return strings.Join(canonicalHeaders, "\n") -} - -// Return canonical resource string. -func canonicalizedResourceV2(encodedResource, encodedQuery string) string { - queries := strings.Split(encodedQuery, "&") - keyval := make(map[string]string) - for _, query := range queries { - key := query - val := "" - index := strings.Index(query, "=") - if index != -1 { - key = query[:index] - val = query[index+1:] - } - keyval[key] = val - } - - var canonicalQueries []string - for _, key := range resourceList { - val, ok := keyval[key] - if !ok { - continue - } - if val == "" { - canonicalQueries = append(canonicalQueries, key) - continue - } - canonicalQueries = append(canonicalQueries, key+"="+val) - } - - // The queries will be already sorted as resourceList is sorted, if canonicalQueries - // is empty strings.Join returns empty. - canonicalQuery := strings.Join(canonicalQueries, "&") - if canonicalQuery != "" { - return encodedResource + "?" 
+ canonicalQuery - } - return encodedResource -} - -// Return string to sign under two different conditions. -// - if expires string is set then string to sign includes date instead of the Date header. -// - if expires string is empty then string to sign includes date header instead. -func getStringToSignV2(method string, encodedResource, encodedQuery string, headers http.Header, expires string) string { - canonicalHeaders := canonicalizedAmzHeadersV2(headers) - if len(canonicalHeaders) > 0 { - canonicalHeaders += "\n" - } - - date := expires // Date is set to expires date for presign operations. - if date == "" { - // If expires date is empty then request header Date is used. - date = headers.Get(consts.Date) - } - - // From the Amazon docs: - // - // StringToSign = HTTP-Verb + "\n" + - // Content-Md5 + "\n" + - // Content-Type + "\n" + - // Date/Expires + "\n" + - // CanonicalizedProtocolHeaders + - // CanonicalizedResource; - stringToSign := strings.Join([]string{ - method, - headers.Get(consts.ContentMD5), - headers.Get(consts.ContentType), - date, - canonicalHeaders, - }, "\n") - - return stringToSign + canonicalizedResourceV2(encodedResource, encodedQuery) -} - -// Returns "/bucketName/objectName" for path-style or virtual-host-style requests. -func getResource(path string, host string) (string, error) { - - // If virtual-host-style is enabled construct the "resource" properly. - if strings.Contains(host, ":") { - // In bucket.mydomain.com:9000, strip out :9000 - var err error - if host, _, err = net.SplitHostPort(host); err != nil { - return "", err - } - } - return path, nil -} diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go deleted file mode 100644 index 3851d1ff6..000000000 --- a/s3/services/auth/signature-v4-parser.go +++ /dev/null @@ -1,307 +0,0 @@ -/* - * The following code tries to reverse engineer the Amazon S3 APIs, - * and is mostly copied from minio implementation. 
- */ - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - -package auth - -import ( - "net/http" - "net/url" - "strings" - "time" - - "github.com/bittorrent/go-btfs/s3/consts" -) - -// credentialHeader data type represents structured form of Credential -// string from authorization header. -type credentialHeader struct { - accessKey string - scope struct { - date time.Time - region string - service string - request string - } -} - -// Return scope string. -func (c credentialHeader) getScope() string { - return strings.Join([]string{ - c.scope.date.Format(yyyymmdd), - c.scope.region, - c.scope.service, - c.scope.request, - }, consts.SlashSeparator) -} - -func (s *AuthSys) GetReqAccessKeyV4(r *http.Request, region string, stype serviceType) (auth.Credentials, bool, apierrors.ErrorCode) { - ch, s3Err := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) - if s3Err != apierrors.ErrNone { - // Strip off the Algorithm prefix. - v4Auth := strings.TrimPrefix(r.Header.Get("Authorization"), signV4Algorithm) - authFields := strings.Split(strings.TrimSpace(v4Auth), ",") - if len(authFields) != 3 { - return auth.Credentials{}, false, apierrors.ErrMissingFields - } - ch, s3Err = parseCredentialHeader(authFields[0], region, stype) - if s3Err != apierrors.ErrNone { - return auth.Credentials{}, false, s3Err - } - } - // TODO: Why should a temporary user be replaced with the parent user's account name? 
- //cerd, _ := s.Iam.GetUser(r.Context(), ch.accessKey) - //if cerd.IsTemp() { - // ch.accessKey = cerd.ParentUser - //} - return s.checkKeyValid(r, ch.accessKey) -} - -// parse credentialHeader string into its structured form. -func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, aec apierrors.ErrorCode) { - creds := strings.SplitN(strings.TrimSpace(credElement), "=", 2) - if len(creds) != 2 { - return ch, apierrors.ErrMissingFields - } - if creds[0] != "Credential" { - return ch, apierrors.ErrMissingCredTag - } - credElements := strings.Split(strings.TrimSpace(creds[1]), consts.SlashSeparator) - if len(credElements) < 5 { - return ch, apierrors.ErrCredMalformed - } - accessKey := strings.Join(credElements[:len(credElements)-4], consts.SlashSeparator) // The access key may contain one or more `/` - if !auth.IsAccessKeyValid(accessKey) { - return ch, apierrors.ErrInvalidAccessKeyID - } - // Save access key id. - cred := credentialHeader{ - accessKey: accessKey, - } - credElements = credElements[len(credElements)-4:] - var e error - cred.scope.date, e = time.Parse(yyyymmdd, credElements[0]) - if e != nil { - return ch, apierrors.ErrAuthorizationHeaderMalformed - } - - cred.scope.region = credElements[1] - // Verify if region is valid. - sRegion := cred.scope.region - // Region is set to be empty, we use whatever was sent by the - // request and proceed further. This is a work-around to address - // an important problem for ListBuckets() getting signed with - // different regions. - if region == "" { - region = sRegion - } - // Should validate region, only if region is set. 
- if !isValidRegion(sRegion, region) { - return ch, apierrors.ErrAuthorizationHeaderMalformed - } - if credElements[2] != string(stype) { - switch stype { - case ServiceSTS: - return ch, apierrors.ErrAuthorizationHeaderMalformed - } - return ch, apierrors.ErrAuthorizationHeaderMalformed - } - cred.scope.service = credElements[2] - if credElements[3] != "aws4_request" { - return ch, apierrors.ErrAuthorizationHeaderMalformed - } - cred.scope.request = credElements[3] - return cred, apierrors.ErrNone -} - -// Parse signature from signature tag. -func parseSignature(signElement string) (string, apierrors.ErrorCode) { - signFields := strings.Split(strings.TrimSpace(signElement), "=") - if len(signFields) != 2 { - return "", apierrors.ErrMissingFields - } - if signFields[0] != "Signature" { - return "", apierrors.ErrMissingSignTag - } - if signFields[1] == "" { - return "", apierrors.ErrMissingFields - } - signature := signFields[1] - return signature, apierrors.ErrNone -} - -// Parse slice of signed headers from signed headers tag. -func parseSignedHeader(signedHdrElement string) ([]string, apierrors.ErrorCode) { - signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") - if len(signedHdrFields) != 2 { - return nil, apierrors.ErrMissingFields - } - if signedHdrFields[0] != "SignedHeaders" { - return nil, apierrors.ErrMissingSignHeadersTag - } - if signedHdrFields[1] == "" { - return nil, apierrors.ErrMissingFields - } - signedHeaders := strings.Split(signedHdrFields[1], ";") - return signedHeaders, apierrors.ErrNone -} - -// signValues data type represents structured form of AWS Signature V4 header. -type signValues struct { - Credential credentialHeader - SignedHeaders []string - Signature string -} - -// preSignValues data type represents structued form of AWS Signature V4 query string. -type preSignValues struct { - signValues - Date time.Time - Expires time.Duration -} - -// Parses signature version '4' query string of the following form. 
-// -// querystring = X-Amz-Algorithm=algorithm -// querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope) -// querystring += &X-Amz-Date=date -// querystring += &X-Amz-Expires=timeout interval -// querystring += &X-Amz-SignedHeaders=signed_headers -// querystring += &X-Amz-Signature=signature -// -// verifies if any of the necessary query params are missing in the presigned request. -func doesV4PresignParamsExist(query url.Values) apierrors.ErrorCode { - v4PresignQueryParams := []string{consts.AmzAlgorithm, consts.AmzCredential, consts.AmzSignature, consts.AmzDate, consts.AmzSignedHeaders, consts.AmzExpires} - for _, v4PresignQueryParam := range v4PresignQueryParams { - if _, ok := query[v4PresignQueryParam]; !ok { - return apierrors.ErrInvalidQueryParams - } - } - return apierrors.ErrNone -} - -// Parses all the presigned signature values into separate elements. -func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, aec apierrors.ErrorCode) { - // verify whether the required query params exist. - aec = doesV4PresignParamsExist(query) - if aec != apierrors.ErrNone { - return psv, aec - } - - // Verify if the query algorithm is supported or not. - if query.Get(consts.AmzAlgorithm) != signV4Algorithm { - return psv, apierrors.ErrAuthorizationHeaderMalformed - } - - // Initialize signature version '4' structured header. - preSignV4Values := preSignValues{} - - // Save credential. - preSignV4Values.Credential, aec = parseCredentialHeader("Credential="+query.Get(consts.AmzCredential), region, stype) - if aec != apierrors.ErrNone { - return psv, aec - } - - var e error - // Save date in native time.Time. - preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get(consts.AmzDate)) - if e != nil { - return psv, apierrors.ErrAuthorizationHeaderMalformed - } - - // Save expires in native time.Duration. 
- preSignV4Values.Expires, e = time.ParseDuration(query.Get(consts.AmzExpires) + "s") - if e != nil { - return psv, apierrors.ErrAuthorizationHeaderMalformed - } - - if preSignV4Values.Expires < 0 { - return psv, apierrors.ErrAuthorizationHeaderMalformed - } - - // Check if Expiry time is less than 7 days (value in seconds). - if preSignV4Values.Expires.Seconds() > 604800 { - return psv, apierrors.ErrAuthorizationHeaderMalformed - } - - // Save signed headers. - preSignV4Values.SignedHeaders, aec = parseSignedHeader("SignedHeaders=" + query.Get(consts.AmzSignedHeaders)) - if aec != apierrors.ErrNone { - return psv, aec - } - - // Save signature. - preSignV4Values.Signature, aec = parseSignature("Signature=" + query.Get(consts.AmzSignature)) - if aec != apierrors.ErrNone { - return psv, aec - } - - // Return structed form of signature query string. - return preSignV4Values, apierrors.ErrNone -} - -// Parses signature version '4' header of the following form. -// -// Authorization: algorithm Credential=accessKeyID/credScope, \ -// SignedHeaders=signedHeaders, Signature=signature -func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, aec apierrors.ErrorCode) { - // credElement is fetched first to skip replacing the space in access key. - credElement := strings.TrimPrefix(strings.Split(strings.TrimSpace(v4Auth), ",")[0], signV4Algorithm) - // Replace all spaced strings, some clients can send spaced - // parameters and some won't. So we pro-actively remove any spaces - // to make parsing easier. - v4Auth = strings.ReplaceAll(v4Auth, " ", "") - if v4Auth == "" { - return sv, apierrors.ErrAuthHeaderEmpty - } - - // Verify if the header algorithm is supported or not. - if !strings.HasPrefix(v4Auth, signV4Algorithm) { - return sv, apierrors.ErrSignatureVersionNotSupported - } - - // Strip off the Algorithm prefix. 
- v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm) - authFields := strings.Split(strings.TrimSpace(v4Auth), ",") - if len(authFields) != 3 { - return sv, apierrors.ErrMissingFields - } - - // Initialize signature version '4' structured header. - signV4Values := signValues{} - - var s3Err apierrors.ErrorCode - // Save credentail values. - signV4Values.Credential, s3Err = parseCredentialHeader(strings.TrimSpace(credElement), region, stype) - if s3Err != apierrors.ErrNone { - return sv, s3Err - } - - // Save signed headers. - signV4Values.SignedHeaders, s3Err = parseSignedHeader(authFields[1]) - if s3Err != apierrors.ErrNone { - return sv, s3Err - } - - // Save signature. - signV4Values.Signature, s3Err = parseSignature(authFields[2]) - if s3Err != apierrors.ErrNone { - return sv, s3Err - } - - // Return the structure here. - return signV4Values, apierrors.ErrNone -} diff --git a/s3/services/auth/signature-v4-utils.go b/s3/services/auth/signature-v4-utils.go deleted file mode 100644 index 17d73a777..000000000 --- a/s3/services/auth/signature-v4-utils.go +++ /dev/null @@ -1,232 +0,0 @@ -/* - * The following code tries to reverse engineer the Amazon S3 APIs, - * and is mostly copied from minio implementation. - */ - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. 
- -package auth - -import ( - "bytes" - "encoding/hex" - "github.com/bittorrent/go-btfs/s3/consts" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" -) - -// http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the -// client did not calculate sha256 of the payload. -const unsignedPayload = "UNSIGNED-PAYLOAD" - -// SkipContentSha256Cksum returns true if caller needs to skip -// payload checksum, false if not. -func SkipContentSha256Cksum(r *http.Request) bool { - var ( - v []string - ok bool - ) - - if isRequestPresignedSignatureV4(r) { - v, ok = r.Form[consts.AmzContentSha256] - if !ok { - v, ok = r.Header[consts.AmzContentSha256] - } - } else { - v, ok = r.Header[consts.AmzContentSha256] - } - - // Skip if no header was set. - if !ok { - return true - } - - // If x-amz-content-sha256 is set and the value is not - // 'UNSIGNED-PAYLOAD' we should validate the content sha256. - switch v[0] { - case unsignedPayload: - return true - case consts.EmptySHA256: - // some broken clients set empty-sha256 - // with > 0 content-length in the body, - // we should skip such clients and allow - // blindly such insecure clients only if - // S3 strict compatibility is disabled. - if r.ContentLength > 0 { - // We return true only in situations when - // deployment has asked MinIO to allow for - // such broken clients and content-length > 0. - return true - } - } - return false -} - -// Returns SHA256 for calculating canonical-request. -func GetContentSha256Cksum(r *http.Request, stype serviceType) string { - if stype == ServiceSTS { - payload, err := ioutil.ReadAll(io.LimitReader(r.Body, consts.StsRequestBodyLimit)) - if err != nil { - log.Errorf("ServiceSTS ReadAll err:%v", err) - } - sum256 := sha256.Sum256(payload) - r.Body = ioutil.NopCloser(bytes.NewReader(payload)) - return hex.EncodeToString(sum256[:]) - } - - var ( - defaultSha256Cksum string - v []string - ok bool - ) - - // For a presigned request we look at the query param for sha256. 
- if isRequestPresignedSignatureV4(r) { - // X-Amz-Content-Sha256, if not set in presigned requests, checksum - // will default to 'UNSIGNED-PAYLOAD'. - defaultSha256Cksum = unsignedPayload - v, ok = r.Form[consts.AmzContentSha256] - if !ok { - v, ok = r.Header[consts.AmzContentSha256] - } - } else { - // X-Amz-Content-Sha256, if not set in signed requests, checksum - // will default to sha256([]byte("")). - defaultSha256Cksum = consts.EmptySHA256 - v, ok = r.Header[consts.AmzContentSha256] - } - - // We found 'X-Amz-Content-Sha256' return the captured value. - if ok { - return v[0] - } - - // We couldn't find 'X-Amz-Content-Sha256'. - return defaultSha256Cksum -} - -// isValidRegion - verify if incoming region value is valid with configured Region. -func isValidRegion(reqRegion string, confRegion string) bool { - if confRegion == "" { - return true - } - if confRegion == "US" { - confRegion = consts.DefaultRegion - } - // Some older s3 clients set region as "US" instead of - // globalDefaultRegion, handle it. - if reqRegion == "US" { - reqRegion = consts.DefaultRegion - } - return reqRegion == confRegion -} - -// check if the access key is valid and recognized, additionally -// also returns if the access key is owner/admin. -func (s *AuthSys) checkKeyValid(r *http.Request, accessKey string) (auth.Credentials, bool, apierrors.ErrorCode) { - - cred := s.AdminCred - if cred.AccessKey != accessKey { - // Check if the access key is part of users credentials. - ucred, ok := s.Iam.GetUser(r.Context(), accessKey) - if !ok { - // Credentials will be invalid but and disabled - // return a different error in such a scenario. 
- if ucred.Status == auth.AccountOff { - return cred, false, apierrors.ErrAccessKeyDisabled - } - return cred, false, apierrors.ErrInvalidAccessKeyID - } - cred = ucred - } - owner := cred.AccessKey == s.AdminCred.AccessKey - return cred, owner, apierrors.ErrNone -} - -func contains(slice interface{}, elem interface{}) bool { - v := reflect.ValueOf(slice) - if v.Kind() == reflect.Slice { - for i := 0; i < v.Len(); i++ { - if v.Index(i).Interface() == elem { - return true - } - } - } - return false -} - -// extractSignedHeaders extract signed headers from Authorization header -func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, apierrors.ErrorCode) { - reqHeaders := r.Header - reqQueries := r.Form - // find whether "host" is part of list of signed headers. - // if not return ErrUnsignedHeaders. "host" is mandatory. - if !contains(signedHeaders, "host") { - return nil, apierrors.ErrUnsignedHeaders - } - extractedSignedHeaders := make(http.Header) - for _, header := range signedHeaders { - // `host` will not be found in the headers, can be found in r.Host. - // but its alway necessary that the list of signed headers containing host in it. - val, ok := reqHeaders[http.CanonicalHeaderKey(header)] - if !ok { - // try to set headers from Query String - val, ok = reqQueries[header] - } - if ok { - extractedSignedHeaders[http.CanonicalHeaderKey(header)] = val - continue - } - switch header { - case "expect": - // Golang http server strips off 'Expect' header, if the - // client sent this as part of signed headers we need to - // handle otherwise we would see a signature mismatch. - // `aws-cli` sets this as part of signed headers. 
- // - // According to - // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20 - // Expect header is always of form: - // - // Expect = "Expect" ":" 1#expectation - // expectation = "100-continue" | expectation-extension - // - // So it safe to assume that '100-continue' is what would - // be sent, for the time being keep this work around. - // Adding a *TODO* to remove this later when Golang server - // doesn't filter out the 'Expect' header. - extractedSignedHeaders.Set(header, "100-continue") - case "host": - // Go http server removes "host" from Request.Header - - //extractedSignedHeaders.Set(header, r.Host) - // todo use r.Host, or filedag-web deal with - //value := strings.Split(r.Host, ":") - extractedSignedHeaders.Set(header, r.Host) - case "transfer-encoding": - // Go http server removes "host" from Request.Header - extractedSignedHeaders[http.CanonicalHeaderKey(header)] = r.TransferEncoding - case "content-length": - // Signature-V4 spec excludes Content-Length from signed headers list for signature calculation. - // But some clients deviate from this rule. Hence we consider Content-Length for signature - // calculation to be compatible with such clients. - extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) - default: - return nil, apierrors.ErrUnsignedHeaders - } - } - return extractedSignedHeaders, apierrors.ErrNone -} diff --git a/s3/services/auth/signature-v4.go b/s3/services/auth/signature-v4.go deleted file mode 100644 index 2f14648ca..000000000 --- a/s3/services/auth/signature-v4.go +++ /dev/null @@ -1,235 +0,0 @@ -/* - * The following code tries to reverse engineer the Amazon S3 APIs, - * and is mostly copied from minio implementation. - */ - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - -package iam - -import ( - "bytes" - "crypto/sha256" - "encoding/hex" - "github.com/bittorrent/go-btfs/s3/apierrors" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/iam/auth" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" -) - -// http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the -// client did not calculate sha256 of the payload. -const unsignedPayload = "UNSIGNED-PAYLOAD" - -// SkipContentSha256Cksum returns true if caller needs to skip -// payload checksum, false if not. -func SkipContentSha256Cksum(r *http.Request) bool { - var ( - v []string - ok bool - ) - - if isRequestPresignedSignatureV4(r) { - v, ok = r.Form[consts.AmzContentSha256] - if !ok { - v, ok = r.Header[consts.AmzContentSha256] - } - } else { - v, ok = r.Header[consts.AmzContentSha256] - } - - // Skip if no header was set. - if !ok { - return true - } - - // If x-amz-content-sha256 is set and the value is not - // 'UNSIGNED-PAYLOAD' we should validate the content sha256. - switch v[0] { - case unsignedPayload: - return true - case consts.EmptySHA256: - // some broken clients set empty-sha256 - // with > 0 content-length in the body, - // we should skip such clients and allow - // blindly such insecure clients only if - // S3 strict compatibility is disabled. - if r.ContentLength > 0 { - // We return true only in situations when - // deployment has asked MinIO to allow for - // such broken clients and content-length > 0. 
- return true - } - } - return false -} - -// Returns SHA256 for calculating canonical-request. -func GetContentSha256Cksum(r *http.Request, stype serviceType) string { - if stype == ServiceSTS { - payload, err := ioutil.ReadAll(io.LimitReader(r.Body, consts.StsRequestBodyLimit)) - if err != nil { - log.Errorf("ServiceSTS ReadAll err:%v", err) - } - sum256 := sha256.Sum256(payload) - r.Body = ioutil.NopCloser(bytes.NewReader(payload)) - return hex.EncodeToString(sum256[:]) - } - - var ( - defaultSha256Cksum string - v []string - ok bool - ) - - // For a presigned request we look at the query param for sha256. - if isRequestPresignedSignatureV4(r) { - // X-Amz-Content-Sha256, if not set in presigned requests, checksum - // will default to 'UNSIGNED-PAYLOAD'. - defaultSha256Cksum = unsignedPayload - v, ok = r.Form[consts.AmzContentSha256] - if !ok { - v, ok = r.Header[consts.AmzContentSha256] - } - } else { - // X-Amz-Content-Sha256, if not set in signed requests, checksum - // will default to sha256([]byte("")). - defaultSha256Cksum = consts.EmptySHA256 - v, ok = r.Header[consts.AmzContentSha256] - } - - // We found 'X-Amz-Content-Sha256' return the captured value. - if ok { - return v[0] - } - - // We couldn't find 'X-Amz-Content-Sha256'. - return defaultSha256Cksum -} - -// isValidRegion - verify if incoming region value is valid with configured Region. -func isValidRegion(reqRegion string, confRegion string) bool { - if confRegion == "" { - return true - } - if confRegion == "US" { - confRegion = consts.DefaultRegion - } - // Some older s3 clients set region as "US" instead of - // globalDefaultRegion, handle it. - if reqRegion == "US" { - reqRegion = consts.DefaultRegion - } - return reqRegion == confRegion -} - -// check if the access key is valid and recognized, additionally -// also returns if the access key is owner/admin. 
-func (s *AuthSys) checkKeyValid(r *http.Request, accessKey string) (auth.Credentials, bool, apierrors.ErrorCode) { - - cred := s.AdminCred - if cred.AccessKey != accessKey { - // Check if the access key is part of users credentials. - ucred, ok := s.Iam.GetUser(r.Context(), accessKey) - if !ok { - // Credentials will be invalid but and disabled - // return a different error in such a scenario. - if ucred.Status == auth.AccountOff { - return cred, false, apierrors.ErrAccessKeyDisabled - } - return cred, false, apierrors.ErrInvalidAccessKeyID - } - cred = ucred - } - owner := cred.AccessKey == s.AdminCred.AccessKey - return cred, owner, apierrors.ErrNone -} - -func contains(slice interface{}, elem interface{}) bool { - v := reflect.ValueOf(slice) - if v.Kind() == reflect.Slice { - for i := 0; i < v.Len(); i++ { - if v.Index(i).Interface() == elem { - return true - } - } - } - return false -} - -// extractSignedHeaders extract signed headers from Authorization header -func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, apierrors.ErrorCode) { - reqHeaders := r.Header - reqQueries := r.Form - // find whether "host" is part of list of signed headers. - // if not return ErrUnsignedHeaders. "host" is mandatory. - if !contains(signedHeaders, "host") { - return nil, apierrors.ErrUnsignedHeaders - } - extractedSignedHeaders := make(http.Header) - for _, header := range signedHeaders { - // `host` will not be found in the headers, can be found in r.Host. - // but its alway necessary that the list of signed headers containing host in it. 
- val, ok := reqHeaders[http.CanonicalHeaderKey(header)] - if !ok { - // try to set headers from Query String - val, ok = reqQueries[header] - } - if ok { - extractedSignedHeaders[http.CanonicalHeaderKey(header)] = val - continue - } - switch header { - case "expect": - // Golang http server strips off 'Expect' header, if the - // client sent this as part of signed headers we need to - // handle otherwise we would see a signature mismatch. - // `aws-cli` sets this as part of signed headers. - // - // According to - // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20 - // Expect header is always of form: - // - // Expect = "Expect" ":" 1#expectation - // expectation = "100-continue" | expectation-extension - // - // So it safe to assume that '100-continue' is what would - // be sent, for the time being keep this work around. - // Adding a *TODO* to remove this later when Golang server - // doesn't filter out the 'Expect' header. - extractedSignedHeaders.Set(header, "100-continue") - case "host": - // Go http server removes "host" from Request.Header - - //extractedSignedHeaders.Set(header, r.Host) - // todo use r.Host, or filedag-web deal with - //value := strings.Split(r.Host, ":") - extractedSignedHeaders.Set(header, r.Host) - case "transfer-encoding": - // Go http server removes "host" from Request.Header - extractedSignedHeaders[http.CanonicalHeaderKey(header)] = r.TransferEncoding - case "content-length": - // Signature-V4 spec excludes Content-Length from signed headers list for signature calculation. - // But some clients deviate from this rule. Hence we consider Content-Length for signature - // calculation to be compatible with such clients. 
- extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) - default: - return nil, apierrors.ErrUnsignedHeaders - } - } - return extractedSignedHeaders, apierrors.ErrNone -} diff --git a/s3/services/auth/streaming-signature-v4.go b/s3/services/auth/streaming-signature-v4.go deleted file mode 100644 index 9a85b68bd..000000000 --- a/s3/services/auth/streaming-signature-v4.go +++ /dev/null @@ -1,481 +0,0 @@ -/* - * The following code tries to reverse engineer the Amazon S3 APIs, - * and is mostly copied from minio implementation. - */ - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -// Package cmd This file implements helper functions to validate Streaming AWS -// Signature Version '4' authorization header. -package auth - -import ( - "bufio" - "bytes" - "crypto/sha256" - "encoding/hex" - "errors" - "github.com/bittorrent/go-btfs/s3/apierrors" - "github.com/bittorrent/go-btfs/s3/utils" - "hash" - "io" - "net/http" - "strings" - "time" - - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/iam/auth" - "github.com/dustin/go-humanize" -) - -// Streaming AWS Signature Version '4' constants. 
-const ( - emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" - signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD" - streamingContentEncoding = "aws-chunked" -) - -// errSignatureMismatch means signature did not match. -var errSignatureMismatch = errors.New("Signature does not match") - -// getChunkSignature - get chunk signature. -func getChunkSignature(cred auth.Credentials, seedSignature string, region string, date time.Time, hashedChunk string) string { - // Calculate string to sign. - stringToSign := signV4ChunkedAlgorithm + "\n" + - date.Format(iso8601Format) + "\n" + - getScope(date, region) + "\n" + - seedSignature + "\n" + - emptySHA256 + "\n" + - hashedChunk - - // Get hmac signing key. - signingKey := utils.GetSigningKey(cred.SecretKey, date, region, string(ServiceS3)) - - // Calculate signature. - newSignature := utils.GetSignature(signingKey, stringToSign) - - return newSignature -} - -// CalculateSeedSignature - Calculate seed signature in accordance with -// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html -// -// returns signature, error otherwise if the signature mismatches or any other -// error while parsing and validating. -func (s *AuthSys) CalculateSeedSignature(r *http.Request) (cred auth.Credentials, signature string, region string, date time.Time, errCode apierrors.ErrorCode) { - // Copy request. - req := *r - - // Save authorization header. - v4Auth := req.Header.Get(consts.Authorization) - - // Parse signature version '4' header. - signV4Values, errCode := parseSignV4(v4Auth, "", ServiceS3) - if errCode != apierrors.ErrNone { - return cred, "", "", time.Time{}, errCode - } - - // Payload streaming. 
- payload := streamingContentSHA256 - - // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' - if payload != req.Header.Get(consts.AmzContentSha256) { - return cred, "", "", time.Time{}, apierrors.ErrContentSHA256Mismatch - } - - // Extract all the signed headers along with its values. - extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) - if errCode != apierrors.ErrNone { - return cred, "", "", time.Time{}, errCode - } - - cred, _, errCode = s.checkKeyValid(r, signV4Values.Credential.accessKey) - if errCode != apierrors.ErrNone { - return cred, "", "", time.Time{}, errCode - } - - // Verify if region is valid. - region = signV4Values.Credential.scope.region - - // Extract date, if not present throw error. - var dateStr string - if dateStr = req.Header.Get("x-amz-date"); dateStr == "" { - if dateStr = r.Header.Get("Date"); dateStr == "" { - return cred, "", "", time.Time{}, apierrors.ErrMissingDateHeader - } - } - - // Parse date header. - var err error - date, err = time.Parse(iso8601Format, dateStr) - if err != nil { - return cred, "", "", time.Time{}, apierrors.ErrMalformedDate - } - - // Query string. - queryStr := req.Form.Encode() - - // Get canonical request. - canonicalRequest := utils.GetCanonicalRequest(extractedSignedHeaders, payload, queryStr, req.URL.Path, req.Method) - - // Get string to sign from canonical request. - stringToSign := utils.GetStringToSign(canonicalRequest, date, signV4Values.Credential.getScope()) - - // Get hmac signing key. - signingKey := utils.GetSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region, string(ServiceS3)) - - // Calculate signature. - newSignature := utils.GetSignature(signingKey, stringToSign) - - // Verify if signature match. - if !compareSignatureV4(newSignature, signV4Values.Signature) { - return cred, "", "", time.Time{}, apierrors.ErrSignatureDoesNotMatch - } - - // Return caculated signature. 
- return cred, newSignature, region, date, apierrors.ErrNone -} - -const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB - -// lineTooLong is generated as chunk header is bigger than 4KiB. -var errLineTooLong = errors.New("header line too long") - -// malformed encoding is generated when chunk header is wrongly formed. -var errMalformedEncoding = errors.New("malformed chunked encoding") - -// chunk is considered too big if its bigger than > 16MiB. -var errChunkTooBig = errors.New("chunk too big: choose chunk size <= 16MiB") - -// NewSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r -// out of HTTP "chunked" format before returning it. -// The s3ChunkedReader returns io.EOF when the final 0-length chunk is read. -// -// NewChunkedReader is not needed by normal applications. The http package -// automatically decodes chunking when reading response bodies. -func NewSignV4ChunkedReader(req *http.Request, s *AuthSys) (io.ReadCloser, apierrors.ErrorCode) { - cred, seedSignature, region, seedDate, errCode := s.CalculateSeedSignature(req) - if errCode != apierrors.ErrNone { - return nil, errCode - } - - return &s3ChunkedReader{ - reader: bufio.NewReader(req.Body), - cred: cred, - seedSignature: seedSignature, - seedDate: seedDate, - region: region, - chunkSHA256Writer: sha256.New(), - buffer: make([]byte, 64*1024), - }, apierrors.ErrNone -} - -// Represents the overall state that is required for decoding a -// AWS Signature V4 chunked reader. -type s3ChunkedReader struct { - reader *bufio.Reader - cred auth.Credentials - seedSignature string - seedDate time.Time - region string - - chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data. - buffer []byte - offset int - err error -} - -func (cr *s3ChunkedReader) Close() (err error) { - return nil -} - -// Now, we read one chunk from the underlying reader. 
-// A chunk has the following format: -// -// + ";chunk-signature=" + + "\r\n" + + "\r\n" -// -// First, we read the chunk size but fail if it is larger -// than 16 MiB. We must not accept arbitrary large chunks. -// One 16 MiB is a reasonable max limit. -// -// Then we read the signature and payload data. We compute the SHA256 checksum -// of the payload and verify that it matches the expected signature value. -// -// The last chunk is *always* 0-sized. So, we must only return io.EOF if we have encountered -// a chunk with a chunk size = 0. However, this chunk still has a signature and we must -// verify it. -const maxChunkSize = 16 << 20 // 16 MiB - -// Read - implements `io.Reader`, which transparently decodes -// the incoming AWS Signature V4 streaming signature. -func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { - // First, if there is any unread data, copy it to the client - // provided buffer. - if cr.offset > 0 { - n = copy(buf, cr.buffer[cr.offset:]) - if n == len(buf) { - cr.offset += n - return n, nil - } - cr.offset = 0 - buf = buf[n:] - } - - var size int - for { - b, err := cr.reader.ReadByte() - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - if err != nil { - cr.err = err - return n, cr.err - } - if b == ';' { // separating character - break - } - - // Manually deserialize the size since AWS specified - // the chunk size to be of variable width. In particular, - // a size of 16 is encoded as `10` while a size of 64 KB - // is `10000`. 
- switch { - case b >= '0' && b <= '9': - size = size<<4 | int(b-'0') - case b >= 'a' && b <= 'f': - size = size<<4 | int(b-('a'-10)) - case b >= 'A' && b <= 'F': - size = size<<4 | int(b-('A'-10)) - default: - cr.err = errMalformedEncoding - return n, cr.err - } - if size > maxChunkSize { - cr.err = errChunkTooBig - return n, cr.err - } - } - - // Now, we read the signature of the following payload and expect: - // chunk-signature=" + + "\r\n" - // - // The signature is 64 bytes long (hex-encoded SHA256 hash) and - // starts with a 16 byte header: len("chunk-signature=") + 64 == 80. - var signature [80]byte - _, err = io.ReadFull(cr.reader, signature[:]) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - if err != nil { - cr.err = err - return n, cr.err - } - if !bytes.HasPrefix(signature[:], []byte("chunk-signature=")) { - cr.err = errMalformedEncoding - return n, cr.err - } - b, err := cr.reader.ReadByte() - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - if err != nil { - cr.err = err - return n, cr.err - } - if b != '\r' { - cr.err = errMalformedEncoding - return n, cr.err - } - b, err = cr.reader.ReadByte() - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - if err != nil { - cr.err = err - return n, cr.err - } - if b != '\n' { - cr.err = errMalformedEncoding - return n, cr.err - } - - if cap(cr.buffer) < size { - cr.buffer = make([]byte, size) - } else { - cr.buffer = cr.buffer[:size] - } - - // Now, we read the payload and compute its SHA-256 hash. 
- _, err = io.ReadFull(cr.reader, cr.buffer) - if err == io.EOF && size != 0 { - err = io.ErrUnexpectedEOF - } - if err != nil && err != io.EOF { - cr.err = err - return n, cr.err - } - b, err = cr.reader.ReadByte() - if b != '\r' { - cr.err = errMalformedEncoding - return n, cr.err - } - b, err = cr.reader.ReadByte() - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - if err != nil { - cr.err = err - return n, cr.err - } - if b != '\n' { - cr.err = errMalformedEncoding - return n, cr.err - } - - // Once we have read the entire chunk successfully, we verify - // that the received signature matches our computed signature. - cr.chunkSHA256Writer.Write(cr.buffer) - newSignature := getChunkSignature(cr.cred, cr.seedSignature, cr.region, cr.seedDate, hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))) - if !compareSignatureV4(string(signature[16:]), newSignature) { - cr.err = errSignatureMismatch - return n, cr.err - } - cr.seedSignature = newSignature - cr.chunkSHA256Writer.Reset() - - // If the chunk size is zero we return io.EOF. As specified by AWS, - // only the last chunk is zero-sized. - if size == 0 { - cr.err = io.EOF - return n, cr.err - } - - cr.offset = copy(buf, cr.buffer) - n += cr.offset - return n, err -} - -// readCRLF - check if reader only has '\r\n' CRLF character. -// returns malformed encoding if it doesn't. -func readCRLF(reader io.Reader) error { - buf := make([]byte, 2) - _, err := io.ReadFull(reader, buf[:2]) - if err != nil { - return err - } - if buf[0] != '\r' || buf[1] != '\n' { - return errMalformedEncoding - } - return nil -} - -// Read a line of bytes (up to \n) from b. -// Give up if the line exceeds maxLineLength. -// The returned bytes are owned by the bufio.Reader -// so they are only valid until the next bufio read. -func readChunkLine(b *bufio.Reader) ([]byte, []byte, error) { - buf, err := b.ReadSlice('\n') - if err != nil { - // We always know when EOF is coming. - // If the caller asked for a line, there should be a line. 
- if err == io.EOF { - err = io.ErrUnexpectedEOF - } else if err == bufio.ErrBufferFull { - err = errLineTooLong - } - return nil, nil, err - } - if len(buf) >= maxLineLength { - return nil, nil, errLineTooLong - } - // Parse s3 specific chunk extension and fetch the values. - hexChunkSize, hexChunkSignature := parseS3ChunkExtension(buf) - return hexChunkSize, hexChunkSignature, nil -} - -// trimTrailingWhitespace - trim trailing white space. -func trimTrailingWhitespace(b []byte) []byte { - for len(b) > 0 && isASCIISpace(b[len(b)-1]) { - b = b[:len(b)-1] - } - return b -} - -// isASCIISpace - is ascii space? -func isASCIISpace(b byte) bool { - return b == ' ' || b == '\t' || b == '\n' || b == '\r' -} - -// Constant s3 chunk encoding signature. -const s3ChunkSignatureStr = ";chunk-signature=" - -// parses3ChunkExtension removes any s3 specific chunk-extension from buf. -// For example, -// -// "10000;chunk-signature=..." => "10000", "chunk-signature=..." -func parseS3ChunkExtension(buf []byte) ([]byte, []byte) { - buf = trimTrailingWhitespace(buf) - semi := bytes.Index(buf, []byte(s3ChunkSignatureStr)) - // Chunk signature not found, return the whole buffer. - if semi == -1 { - return buf, nil - } - return buf[:semi], parseChunkSignature(buf[semi:]) -} - -// parseChunkSignature - parse chunk signature. -func parseChunkSignature(chunk []byte) []byte { - chunkSplits := bytes.SplitN(chunk, []byte(s3ChunkSignatureStr), 2) - return chunkSplits[1] -} - -// parse hex to uint64. -func parseHexUint(v []byte) (n uint64, err error) { - for i, b := range v { - switch { - case '0' <= b && b <= '9': - b -= '0' - case 'a' <= b && b <= 'f': - b = b - 'a' + 10 - case 'A' <= b && b <= 'F': - b = b - 'A' + 10 - default: - return 0, errors.New("invalid byte in chunk length") - } - if i == 16 { - return 0, errors.New("http chunk length too large") - } - n <<= 4 - n |= uint64(b) - } - return -} - -// Trims away `aws-chunked` from the content-encoding header if present. 
-// Streaming signature clients can have custom content-encoding such as -// `aws-chunked,gzip` here we need to only save `gzip`. -// For more refer http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html -func TrimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string) { - if contentEnc == "" { - return contentEnc - } - var newEncs []string - for _, enc := range strings.Split(contentEnc, ",") { - if enc != streamingContentEncoding { - newEncs = append(newEncs, enc) - } - } - return strings.Join(newEncs, ",") -} From c492fad407cf593fdc7e1f9191e070bf8d9987af Mon Sep 17 00:00:00 2001 From: steve Date: Thu, 24 Aug 2023 21:23:41 +0800 Subject: [PATCH 057/139] chore: mig sig 02 --- s3/services/auth/auth_handler.go | 100 ++++ s3/services/auth/auth_type.go | 101 ++++ s3/services/auth/check_handler_auth.go | 341 +++++++++++++ s3/services/auth/check_handler_auth_test.go | 16 + s3/services/auth/signature-v2.go | 429 ++++++++++++++++ s3/services/auth/signature-v4-parser.go | 308 +++++++++++ s3/services/auth/signature-v4-utils.go | 235 +++++++++ s3/services/auth/signature-v4.go | 260 ++++++++++ s3/services/auth/streaming-signature-v4.go | 481 ++++++++++++++++++ .../auth/streaming-signature-v4_test.go | 198 +++++++ 10 files changed, 2469 insertions(+) create mode 100644 s3/services/auth/auth_handler.go create mode 100644 s3/services/auth/auth_type.go create mode 100644 s3/services/auth/check_handler_auth.go create mode 100644 s3/services/auth/check_handler_auth_test.go create mode 100644 s3/services/auth/signature-v2.go create mode 100644 s3/services/auth/signature-v4-parser.go create mode 100644 s3/services/auth/signature-v4-utils.go create mode 100644 s3/services/auth/signature-v4.go create mode 100644 s3/services/auth/streaming-signature-v4.go create mode 100644 s3/services/auth/streaming-signature-v4_test.go diff --git a/s3/services/auth/auth_handler.go b/s3/services/auth/auth_handler.go new file mode 100644 index 000000000..06a27b938 --- 
/dev/null +++ b/s3/services/auth/auth_handler.go @@ -0,0 +1,100 @@ +package iam + +import ( + "github.com/yann-y/fds/internal/apierrors" + "github.com/yann-y/fds/internal/consts" + "github.com/yann-y/fds/internal/response" + "net/http" + "time" +) + +// SetAuthHandler to validate authorization header for the incoming request. +func SetAuthHandler(h http.Handler) http.Handler { + // handler for validating incoming authorization headers. + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + aType := GetRequestAuthType(r) + if aType == AuthTypeSigned || aType == AuthTypeSignedV2 || aType == AuthTypeStreamingSigned { + // Verify if date headers are set, if not reject the request + amzDate, errCode := parseAmzDateHeader(r) + if errCode != apierrors.ErrNone { + // All our internal APIs are sensitive towards Date + // header, for all requests where Date header is not + // present we will reject such clients. + response.WriteErrorResponse(w, r, errCode) + return + } + // Verify if the request date header is shifted by less than globalMaxSkewTime parameter in the past + // or in the future, reject request otherwise. + curTime := time.Now().UTC() + if curTime.Sub(amzDate) > consts.GlobalMaxSkewTime || amzDate.Sub(curTime) > consts.GlobalMaxSkewTime { + response.WriteErrorResponse(w, r, apierrors.ErrRequestTimeTooSkewed) + return + } + } + if isSupportedS3AuthType(aType) || aType == AuthTypeJWT || aType == AuthTypeSTS { + h.ServeHTTP(w, r) + return + } + response.WriteErrorResponse(w, r, apierrors.ErrSignatureVersionNotSupported) + }) +} + +// Supported amz date formats. +var amzDateFormats = []string{ + // Do not change this order, x-amz-date format is usually in + // iso8601Format rest are meant for relaxed handling of other + // odd SDKs that might be out there. + iso8601Format, + time.RFC1123, + time.RFC1123Z, + // Add new AMZ date formats here. +} + +// Supported Amz date headers. 
+var amzDateHeaders = []string{
+	// Do not change this order, x-amz-date value should be
+	// validated first.
+	"x-amz-date",
+	"date",
+}
+
+// parseAmzDate - parses date string into supported amz date formats.
+func parseAmzDate(amzDateStr string) (amzDate time.Time, apiErr apierrors.ErrorCode) {
+	for _, dateFormat := range amzDateFormats {
+		amzDate, err := time.Parse(dateFormat, amzDateStr)
+		if err == nil {
+			return amzDate, apierrors.ErrNone
+		}
+	}
+	return time.Time{}, apierrors.ErrMalformedDate
+}
+
+// parseAmzDateHeader - parses supported amz date headers, in
+// supported amz date formats.
+func parseAmzDateHeader(req *http.Request) (time.Time, apierrors.ErrorCode) {
+	for _, amzDateHeader := range amzDateHeaders {
+		amzDateStr := req.Header.Get(amzDateHeader)
+		if amzDateStr != "" {
+			return parseAmzDate(amzDateStr)
+		}
+	}
+	// Date header missing.
+	return time.Time{}, apierrors.ErrMissingDateHeader
+}
+
+// List of all support S3 auth types.
+var supportedS3AuthTypes = map[AuthType]struct{}{
+	AuthTypeAnonymous:       {},
+	AuthTypePresigned:       {},
+	AuthTypePresignedV2:     {},
+	AuthTypeSigned:          {},
+	AuthTypeSignedV2:        {},
+	AuthTypePostPolicy:      {},
+	AuthTypeStreamingSigned: {},
+}
+
+// Validate if the authType is valid and supported.
+func isSupportedS3AuthType(aType AuthType) bool {
+	_, ok := supportedS3AuthTypes[aType]
+	return ok
+}
diff --git a/s3/services/auth/auth_type.go b/s3/services/auth/auth_type.go
new file mode 100644
index 000000000..fdf80bf36
--- /dev/null
+++ b/s3/services/auth/auth_type.go
@@ -0,0 +1,101 @@
+package iam
+
+import (
+	"github.com/yann-y/fds/internal/consts"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+// Verify if request has JWT.
+func isRequestJWT(r *http.Request) bool {
+	return strings.HasPrefix(r.Header.Get("Authorization"), "Bearer")
+}
+
+// IsRequestSignatureV4 Verify if request has AWS Signature Version '4'.
+func IsRequestSignatureV4(r *http.Request) bool { + return strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) +} + +// Verify if request has AWS Signature Version '2'. +func isRequestSignatureV2(r *http.Request) bool { + return !strings.HasPrefix(r.Header.Get("Authorization"), signV4Algorithm) && + strings.HasPrefix(r.Header.Get("Authorization"), signV2Algorithm) +} + +// Verify if request has AWS PreSign Version '4'. +func isRequestPresignedSignatureV4(r *http.Request) bool { + _, ok := r.URL.Query()["X-Amz-Credential"] + return ok +} + +// Verify request has AWS PreSign Version '2'. +func isRequestPresignedSignatureV2(r *http.Request) bool { + _, ok := r.URL.Query()["AWSAccessKeyId"] + return ok +} + +// Verify if request has AWS Post policy Signature Version '4'. +func isRequestPostPolicySignatureV4(r *http.Request) bool { + return strings.Contains(r.Header.Get("Content-Type"), "multipart/form-data") && + r.Method == http.MethodPost +} + +// Verify if the request has AWS Streaming Signature Version '4'. This is only valid for 'PUT' operation. +func isRequestSignStreamingV4(r *http.Request) bool { + return r.Header.Get("x-amz-content-sha256") == consts.StreamingContentSHA256 && + r.Method == http.MethodPut +} + +// AuthType Authorization type. +type AuthType int + +// List of all supported auth types. +const ( + AuthTypeUnknown AuthType = iota + AuthTypeAnonymous + AuthTypePresigned + AuthTypePresignedV2 + AuthTypePostPolicy + AuthTypeStreamingSigned + AuthTypeSigned + AuthTypeSignedV2 + AuthTypeJWT + AuthTypeSTS +) + +// GetRequestAuthType Get request authentication type. 
+func GetRequestAuthType(r *http.Request) AuthType { + if r.URL != nil { + var err error + r.Form, err = url.ParseQuery(r.URL.RawQuery) + if err != nil { + log.Infof("parse query failed, query: %s, error: %v", r.URL.RawQuery, err) + return AuthTypeUnknown + } + } + if isRequestSignatureV2(r) { + return AuthTypeSignedV2 + } else if isRequestPresignedSignatureV2(r) { + return AuthTypePresignedV2 + } else if isRequestSignStreamingV4(r) { + return AuthTypeStreamingSigned + } else if IsRequestSignatureV4(r) { + return AuthTypeSigned + } else if isRequestPresignedSignatureV4(r) { + return AuthTypePresigned + } else if isRequestJWT(r) { + return AuthTypeJWT + } else if isRequestPostPolicySignatureV4(r) { + return AuthTypePostPolicy + } else if _, ok := r.Form[consts.StsAction]; ok { + return AuthTypeSTS + } else if _, ok := r.Header[consts.Authorization]; !ok { + return AuthTypeAnonymous + } + return AuthTypeUnknown +} + +func IsAuthTypeStreamingSigned(atype AuthType) bool { + return atype == AuthTypeStreamingSigned +} diff --git a/s3/services/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go new file mode 100644 index 000000000..b4f443789 --- /dev/null +++ b/s3/services/auth/check_handler_auth.go @@ -0,0 +1,341 @@ +package iam + +import ( + "bytes" + "context" + "encoding/hex" + "github.com/yann-y/fds/internal/apierrors" + "github.com/yann-y/fds/internal/consts" + "github.com/yann-y/fds/internal/iam/auth" + "github.com/yann-y/fds/internal/iam/s3action" + "github.com/yann-y/fds/internal/uleveldb" + "github.com/yann-y/fds/internal/utils/hash" + "github.com/yann-y/fds/pkg/etag" + "io" + "net/http" + "net/url" + "strconv" + "time" +) + +// AuthSys auth and sign system +type AuthSys struct { + Iam *IdentityAMSys + PolicySys *iPolicySys + AdminCred auth.Credentials +} + +// NewAuthSys new an AuthSys +func NewAuthSys(db *uleveldb.ULevelDB, adminCred auth.Credentials) *AuthSys { + return &AuthSys{ + Iam: NewIdentityAMSys(db), + PolicySys: newIPolicySys(db), + 
AdminCred: adminCred, + } +} + +// CheckRequestAuthTypeCredential Check request auth type verifies the incoming http request +// - validates the request signature +// - validates the policy action if anonymous tests bucket policies if any, +// for authenticated requests validates IAM policies. +// +// returns APIErrorCode if any to be replied to the client. +// Additionally, returns the accessKey used in the request, and if this request is by an admin. +func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request, action s3action.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err apierrors.ErrorCode) { + switch GetRequestAuthType(r) { + case AuthTypeUnknown, AuthTypeStreamingSigned: + return cred, owner, apierrors.ErrSignatureVersionNotSupported + case AuthTypePresignedV2, AuthTypeSignedV2: + if s3Err = s.IsReqAuthenticatedV2(r); s3Err != apierrors.ErrNone { + return cred, owner, s3Err + } + cred, owner, s3Err = s.getReqAccessKeyV2(r) + case AuthTypeSigned, AuthTypePresigned: + region := "" + switch action { + case s3action.GetBucketLocationAction, s3action.ListAllMyBucketsAction: + region = "" + } + if s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3); s3Err != apierrors.ErrNone { + return cred, owner, s3Err + } + cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) + } + if s3Err != apierrors.ErrNone { + return cred, owner, s3Err + } + // TODO: Why should a temporary user be replaced with the parent user's account? + //if cred.IsTemp() { + // cred, _ = s.Iam.GetUser(ctx, cred.ParentUser) + //} + if action == s3action.CreateBucketAction { + // To extract region from XML in request body, get copy of request body. + payload, err := io.ReadAll(io.LimitReader(r.Body, consts.MaxLocationConstraintSize)) + if err != nil { + log.Errorf("ReadAll err:%v", err) + return cred, owner, apierrors.ErrMalformedXML + } + + // Populate payload to extract location constraint. 
+ r.Body = io.NopCloser(bytes.NewReader(payload)) + if s.PolicySys.bmSys.HasBucket(ctx, bucketName) { + return cred, owner, apierrors.ErrBucketAlreadyExists + } + } + + // Anonymous user + if cred.AccessKey == "" { + owner = false + } + + // check bucket policy + if s.PolicySys.isAllowed(ctx, auth.Args{ + AccountName: cred.AccessKey, + Action: action, + BucketName: bucketName, + IsOwner: owner, + ObjectName: objectName, + }) { + // Request is allowed return the appropriate access key. + return cred, owner, apierrors.ErrNone + } + if action == s3action.ListBucketVersionsAction { + // In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission + // verify as a fallback. + if s.PolicySys.isAllowed(ctx, auth.Args{ + AccountName: cred.AccessKey, + Action: s3action.ListBucketAction, + BucketName: bucketName, + IsOwner: owner, + ObjectName: objectName, + }) { + // Request is allowed return the appropriate access key. + return cred, owner, apierrors.ErrNone + } + } + + // check user policy + if bucketName == "" || action == s3action.CreateBucketAction { + if s.Iam.IsAllowed(r.Context(), auth.Args{ + AccountName: cred.AccessKey, + Action: action, + BucketName: bucketName, + Conditions: getConditions(r, cred.AccessKey), + ObjectName: objectName, + IsOwner: owner, + }) { + // Request is allowed return the appropriate access key. + return cred, owner, apierrors.ErrNone + } + } else { + if !s.PolicySys.bmSys.HasBucket(ctx, bucketName) { + return cred, owner, apierrors.ErrNoSuchBucket + } + } + + return cred, owner, apierrors.ErrAccessDenied +} + +// Verify if request has valid AWS Signature Version '2'. 
+func (s *AuthSys) IsReqAuthenticatedV2(r *http.Request) (s3Error apierrors.ErrorCode) { + if isRequestSignatureV2(r) { + return s.doesSignV2Match(r) + } + return s.doesPresignV2SignatureMatch(r) +} + +func (s *AuthSys) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { + sha256sum := GetContentSha256Cksum(r, stype) + switch { + case IsRequestSignatureV4(r): + return s.doesSignatureMatch(sha256sum, r, region, stype) + case isRequestPresignedSignatureV4(r): + return s.doesPresignedSignatureMatch(sha256sum, r, region, stype) + default: + return apierrors.ErrAccessDenied + } +} + +// IsReqAuthenticated Verify if request has valid AWS Signature Version '4'. +func (s *AuthSys) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { + if errCode := s.ReqSignatureV4Verify(r, region, stype); errCode != apierrors.ErrNone { + return errCode + } + clientETag, err := etag.FromContentMD5(r.Header) + if err != nil { + return apierrors.ErrInvalidDigest + } + + // Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned) + // Do not verify 'X-Amz-Content-Sha256' if skipSHA256. + var contentSHA256 []byte + if skipSHA256 := SkipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) { + if sha256Sum, ok := r.Form[consts.AmzContentSha256]; ok && len(sha256Sum) > 0 { + contentSHA256, err = hex.DecodeString(sha256Sum[0]) + if err != nil { + return apierrors.ErrContentSHA256Mismatch + } + } + } else if _, ok := r.Header[consts.AmzContentSha256]; !skipSHA256 && ok { + contentSHA256, err = hex.DecodeString(r.Header.Get(consts.AmzContentSha256)) + if err != nil || len(contentSHA256) == 0 { + return apierrors.ErrContentSHA256Mismatch + } + } + + // Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present. + // The verification happens implicit during reading. 
+ reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1) + if err != nil { + return apierrors.ErrInternalError + } + r.Body = reader + return apierrors.ErrNone +} + +// ValidateAdminSignature validate admin Signature +func (s *AuthSys) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, map[string]interface{}, bool, apierrors.ErrorCode) { + var cred auth.Credentials + var owner bool + s3Err := apierrors.ErrAccessDenied + if _, ok := r.Header[consts.AmzContentSha256]; ok && + GetRequestAuthType(r) == AuthTypeSigned { + // We only support admin credentials to access admin APIs. + cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) + if s3Err != apierrors.ErrNone { + return cred, nil, owner, s3Err + } + + // we only support V4 (no presign) with auth body + s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3) + } + if s3Err != apierrors.ErrNone { + return cred, nil, owner, s3Err + } + + return cred, nil, owner, apierrors.ErrNone +} + +func getConditions(r *http.Request, username string) map[string][]string { + currTime := time.Now().UTC() + + principalType := "Anonymous" + if username != "" { + principalType = "User" + } + + at := GetRequestAuthType(r) + var signatureVersion string + switch at { + case AuthTypeSignedV2, AuthTypePresignedV2: + signatureVersion = signV2Algorithm + case AuthTypeSigned, AuthTypePresigned, AuthTypeStreamingSigned, AuthTypePostPolicy: + signatureVersion = signV4Algorithm + } + + var authtype string + switch at { + case AuthTypePresignedV2, AuthTypePresigned: + authtype = "REST-QUERY-STRING" + case AuthTypeSignedV2, AuthTypeSigned, AuthTypeStreamingSigned: + authtype = "REST-HEADER" + case AuthTypePostPolicy: + authtype = "POST" + } + + args := map[string][]string{ + "CurrentTime": {currTime.Format(time.RFC3339)}, + "EpochTime": {strconv.FormatInt(currTime.Unix(), 10)}, + "SecureTransport": {strconv.FormatBool(r.TLS != nil)}, + "UserAgent": 
{r.UserAgent()}, + "Referer": {r.Referer()}, + "principaltype": {principalType}, + "userid": {username}, + "username": {username}, + "signatureversion": {signatureVersion}, + "authType": {authtype}, + } + + cloneHeader := r.Header.Clone() + + for key, values := range cloneHeader { + if existingValues, found := args[key]; found { + args[key] = append(existingValues, values...) + } else { + args[key] = values + } + } + + cloneURLValues := make(url.Values, len(r.Form)) + for k, v := range r.Form { + cloneURLValues[k] = v + } + + for key, values := range cloneURLValues { + if existingValues, found := args[key]; found { + args[key] = append(existingValues, values...) + } else { + args[key] = values + } + } + + return args +} + +// IsPutActionAllowed - check if PUT operation is allowed on the resource, this +// call verifies bucket policies and IAM policies, supports multi user +// checks etc. +func (s *AuthSys) IsPutActionAllowed(ctx context.Context, r *http.Request, action s3action.Action, bucketName, objectName string) (s3Err apierrors.ErrorCode) { + var cred auth.Credentials + var owner bool + switch GetRequestAuthType(r) { + case AuthTypeUnknown: + return apierrors.ErrSignatureVersionNotSupported + case AuthTypeSignedV2, AuthTypePresignedV2: + cred, owner, s3Err = s.getReqAccessKeyV2(r) + case AuthTypeStreamingSigned, AuthTypePresigned, AuthTypeSigned: + region := "" + cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) + } + if s3Err != apierrors.ErrNone { + return s3Err + } + + // Do not check for PutObjectRetentionAction permission, + // if mode and retain until date are not set. 
+ // Can happen when bucket has default lock config set + if action == s3action.PutObjectRetentionAction && + r.Header.Get(consts.AmzObjectLockMode) == "" && + r.Header.Get(consts.AmzObjectLockRetainUntilDate) == "" { + return apierrors.ErrNone + } + + // check bucket policy + if s.PolicySys.isAllowed(ctx, auth.Args{ + AccountName: cred.AccessKey, + Action: action, + BucketName: bucketName, + IsOwner: owner, + ObjectName: objectName, + }) { + return apierrors.ErrNone + } + + if !s.PolicySys.bmSys.HasBucket(ctx, bucketName) { + return apierrors.ErrNoSuchBucket + } + return apierrors.ErrAccessDenied +} + +func (s *AuthSys) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err apierrors.ErrorCode) { + switch GetRequestAuthType(r) { + case AuthTypeUnknown: + s3Err = apierrors.ErrSignatureVersionNotSupported + case AuthTypeSignedV2, AuthTypePresignedV2: + cred, owner, s3Err = s.getReqAccessKeyV2(r) + case AuthTypeStreamingSigned, AuthTypePresigned, AuthTypeSigned: + region := "" + cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) + } + return +} diff --git a/s3/services/auth/check_handler_auth_test.go b/s3/services/auth/check_handler_auth_test.go new file mode 100644 index 000000000..f0f4678ef --- /dev/null +++ b/s3/services/auth/check_handler_auth_test.go @@ -0,0 +1,16 @@ +package iam + +//func TestV2CheckRequestAuthType(t *testing.T) { +// var aSys AuthSys +// aSys.Init() +// req := testsign.MustNewSignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t) +// _, _, err := aSys.CheckRequestAuthTypeCredential(context.Background(), req, s3action.ListAllMyBucketsAction, "test", "testobject") +// fmt.Println(apierrors.GetAPIError(err)) +//} +//func TestV4CheckRequestAuthType(t *testing.T) { +// var aSys AuthSys +// aSys.Init() +// req := testsign.MustNewSignedV4Request("GET", "http://127.0.0.1:9000", 0, nil, "test", "test", "s3", t) +// _, _, err := aSys.CheckRequestAuthTypeCredential(context.Background(), req, 
s3action.ListAllMyBucketsAction, "test", "testobject") +// fmt.Println(apierrors.GetAPIError(err)) +//} diff --git a/s3/services/auth/signature-v2.go b/s3/services/auth/signature-v2.go new file mode 100644 index 000000000..a38b06341 --- /dev/null +++ b/s3/services/auth/signature-v2.go @@ -0,0 +1,429 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package iam + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/subtle" + "encoding/base64" + "fmt" + "github.com/yann-y/fds/internal/apierrors" + "github.com/yann-y/fds/internal/consts" + "github.com/yann-y/fds/internal/iam/auth" + "net" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" +) + +// Whitelist resource list that will be used in query string for signature-V2 calculation. +// +// This list should be kept alphabetically sorted, do not hastily edit. 
+var resourceList = []string{ + "acl", + "cors", + "delete", + "encryption", + "legal-hold", + "lifecycle", + "location", + "logging", + "notification", + "partNumber", + "policy", + "requestPayment", + "response-cache-control", + "response-content-disposition", + "response-content-encoding", + "response-content-language", + "response-content-type", + "response-expires", + "retention", + "select", + "select-type", + "tagging", + "torrent", + "uploadId", + "uploads", + "versionId", + "versioning", + "versions", + "website", +} + +// Signature and API related constants. +const ( + signV2Algorithm = "AWS" +) + +// AWS S3 Signature V2 calculation rule is give here: +// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign +func (s *AuthSys) doesPolicySignatureV2Match(formValues http.Header) (auth.Credentials, apierrors.ErrorCode) { + accessKey := formValues.Get(consts.AmzAccessKeyID) + + r := &http.Request{Header: formValues} + cred, _, s3Err := s.checkKeyValid(r, accessKey) + if s3Err != apierrors.ErrNone { + return cred, s3Err + } + policy := formValues.Get("Policy") + signature := formValues.Get(consts.AmzSignatureV2) + if !compareSignatureV2(signature, calculateSignatureV2(policy, cred.SecretKey)) { + return cred, apierrors.ErrSignatureDoesNotMatch + } + return cred, apierrors.ErrNone +} + +// Escape encodedQuery string into unescaped list of query params, returns error +// if any while unescaping the values. 
+func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) { + for _, query := range strings.Split(encodedQuery, "&") { + var unescapedQuery string + unescapedQuery, err = url.QueryUnescape(query) + if err != nil { + return nil, err + } + unescapedQueries = append(unescapedQueries, unescapedQuery) + } + return unescapedQueries, nil +} + +// doesPresignV2SignatureMatch - Verify query headers with presigned signature +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth +// +// returns apierrors.ErrNone if matches. S3 errors otherwise. +func (s *AuthSys) doesPresignV2SignatureMatch(r *http.Request) apierrors.ErrorCode { + // r.RequestURI will have raw encoded URI as sent by the client. + tokens := strings.SplitN(r.RequestURI, "?", 2) + encodedResource := tokens[0] + encodedQuery := "" + if len(tokens) == 2 { + encodedQuery = tokens[1] + } + + var ( + filteredQueries []string + gotSignature string + expires string + accessKey string + err error + ) + + var unescapedQueries []string + unescapedQueries, err = unescapeQueries(encodedQuery) + if err != nil { + return apierrors.ErrInvalidQueryParams + } + + // Extract the necessary values from presigned query, construct a list of new filtered queries. + for _, query := range unescapedQueries { + keyval := strings.SplitN(query, "=", 2) + if len(keyval) != 2 { + return apierrors.ErrInvalidQueryParams + } + switch keyval[0] { + case consts.AmzAccessKeyID: + accessKey = keyval[1] + case consts.AmzSignatureV2: + gotSignature = keyval[1] + case consts.Expires: + expires = keyval[1] + default: + filteredQueries = append(filteredQueries, query) + } + } + + // Invalid values returns error. + if accessKey == "" || gotSignature == "" || expires == "" { + return apierrors.ErrInvalidQueryParams + } + + cred, _, s3Err := s.checkKeyValid(r, accessKey) + if s3Err != apierrors.ErrNone { + return s3Err + } + + // Make sure the request has not expired. 
+	expiresInt, err := strconv.ParseInt(expires, 10, 64)
+	if err != nil {
+		return apierrors.ErrAuthorizationHeaderMalformed
+	}
+
+	// Check if the presigned URL has expired.
+	if expiresInt < time.Now().UTC().Unix() {
+		return apierrors.ErrExpiredPresignRequest
+	}
+
+	encodedResource, err = getResource(encodedResource, r.Host)
+	if err != nil {
+		return apierrors.ErrInvalidRequest
+	}
+
+	expectedSignature := preSignatureV2(cred, r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires)
+	if !compareSignatureV2(gotSignature, expectedSignature) {
+		return apierrors.ErrSignatureDoesNotMatch
+	}
+
+	r.Form.Del(consts.Expires)
+
+	return apierrors.ErrNone
+}
+
+func (s *AuthSys) getReqAccessKeyV2(r *http.Request) (auth.Credentials, bool, apierrors.ErrorCode) {
+	if accessKey := r.Form.Get(consts.AmzAccessKeyID); accessKey != "" {
+		return s.checkKeyValid(r, accessKey)
+	}
+
+	// below is V2 Signed Auth header format, splitting on `space` (after the `AWS` string).
+	// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature
+	authFields := strings.Split(r.Header.Get(consts.Authorization), " ")
+	if len(authFields) != 2 {
+		return auth.Credentials{}, false, apierrors.ErrMissingFields
+	}
+
+	// Then will be splitting on ":", this will separate `AWSAccessKeyId` and `Signature` string.
+	keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":")
+	if len(keySignFields) != 2 {
+		return auth.Credentials{}, false, apierrors.ErrMissingFields
+	}
+
+	return s.checkKeyValid(r, keySignFields[0])
+}
+
+// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
+// Signature = Base64( HMAC-SHA1( YourSecretKey, UTF-8-Encoding-Of( StringToSign ) ) );
+//
+// StringToSign = HTTP-Verb + "\n" +
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Date + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+//
+// CanonicalizedResource = [ consts.SlashSeparator + Bucket ] +
+// <HTTP-Request-URI, from the protocol name up to the query string> +
+// [ subresource, if present.
For example "?acl", "?location", "?logging", or "?torrent"]; +// +// CanonicalizedProtocolHeaders = + +// doesSignV2Match - Verify authorization header with calculated header in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html +// returns true if matches, false otherwise. if error is not nil then it is always false + +func (s *AuthSys) validateV2AuthHeader(r *http.Request) (auth.Credentials, apierrors.ErrorCode) { + var cred auth.Credentials + v2Auth := r.Header.Get(consts.Authorization) + if v2Auth == "" { + return cred, apierrors.ErrAuthHeaderEmpty + } + + // Verify if the header algorithm is supported or not. + if !strings.HasPrefix(v2Auth, signV2Algorithm) { + return cred, apierrors.ErrSignatureVersionNotSupported + } + + cred, _, apiErr := s.getReqAccessKeyV2(r) + if apiErr != apierrors.ErrNone { + return cred, apiErr + } + + return cred, apierrors.ErrNone +} + +func (s *AuthSys) doesSignV2Match(r *http.Request) apierrors.ErrorCode { + v2Auth := r.Header.Get(consts.Authorization) + cred, apiError := s.validateV2AuthHeader(r) + if apiError != apierrors.ErrNone { + return apiError + } + + // r.RequestURI will have raw encoded URI as sent by the client. 
+ tokens := strings.SplitN(r.RequestURI, "?", 2) + encodedResource := tokens[0] + encodedQuery := "" + if len(tokens) == 2 { + encodedQuery = tokens[1] + } + + unescapedQueries, err := unescapeQueries(encodedQuery) + if err != nil { + return apierrors.ErrInvalidQueryParams + } + + encodedResource, err = getResource(encodedResource, r.Host) + if err != nil { + return apierrors.ErrInvalidRequest + } + + prefix := fmt.Sprintf("%s %s:", signV2Algorithm, cred.AccessKey) + if !strings.HasPrefix(v2Auth, prefix) { + return apierrors.ErrSignatureDoesNotMatch + } + v2Auth = v2Auth[len(prefix):] + expectedAuth := signatureV2(cred, r.Method, encodedResource, strings.Join(unescapedQueries, "&"), r.Header) + if !compareSignatureV2(v2Auth, expectedAuth) { + return apierrors.ErrSignatureDoesNotMatch + } + return apierrors.ErrNone +} + +func calculateSignatureV2(stringToSign string, secret string) string { + hm := hmac.New(sha1.New, []byte(secret)) + hm.Write([]byte(stringToSign)) + return base64.StdEncoding.EncodeToString(hm.Sum(nil)) +} + +// Return signature-v2 for the presigned request. +func preSignatureV2(cred auth.Credentials, method string, encodedResource string, encodedQuery string, headers http.Header, expires string) string { + stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, expires) + return calculateSignatureV2(stringToSign, cred.SecretKey) +} + +// Return the signature v2 of a given request. +func signatureV2(cred auth.Credentials, method string, encodedResource string, encodedQuery string, headers http.Header) string { + stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, "") + signature := calculateSignatureV2(stringToSign, cred.SecretKey) + return signature +} + +// compareSignatureV2 returns true if and only if both signatures +// are equal. The signatures are expected to be base64 encoded strings +// according to the AWS S3 signature V2 spec. 
+func compareSignatureV2(sig1, sig2 string) bool { + // Decode signature string to binary byte-sequence representation is required + // as Base64 encoding of a value is not unique: + // For example "aGVsbG8=" and "aGVsbG8=\r" will result in the same byte slice. + signature1, err := base64.StdEncoding.DecodeString(sig1) + if err != nil { + return false + } + signature2, err := base64.StdEncoding.DecodeString(sig2) + if err != nil { + return false + } + return subtle.ConstantTimeCompare(signature1, signature2) == 1 +} + +// Return canonical headers. +func canonicalizedAmzHeadersV2(headers http.Header) string { + var keys []string + keyval := make(map[string]string, len(headers)) + for key := range headers { + lkey := strings.ToLower(key) + if !strings.HasPrefix(lkey, "x-amz-") { + continue + } + keys = append(keys, lkey) + keyval[lkey] = strings.Join(headers[key], ",") + } + sort.Strings(keys) + var canonicalHeaders []string + for _, key := range keys { + canonicalHeaders = append(canonicalHeaders, key+":"+keyval[key]) + } + return strings.Join(canonicalHeaders, "\n") +} + +// Return canonical resource string. +func canonicalizedResourceV2(encodedResource, encodedQuery string) string { + queries := strings.Split(encodedQuery, "&") + keyval := make(map[string]string) + for _, query := range queries { + key := query + val := "" + index := strings.Index(query, "=") + if index != -1 { + key = query[:index] + val = query[index+1:] + } + keyval[key] = val + } + + var canonicalQueries []string + for _, key := range resourceList { + val, ok := keyval[key] + if !ok { + continue + } + if val == "" { + canonicalQueries = append(canonicalQueries, key) + continue + } + canonicalQueries = append(canonicalQueries, key+"="+val) + } + + // The queries will be already sorted as resourceList is sorted, if canonicalQueries + // is empty strings.Join returns empty. + canonicalQuery := strings.Join(canonicalQueries, "&") + if canonicalQuery != "" { + return encodedResource + "?" 
+ canonicalQuery + } + return encodedResource +} + +// Return string to sign under two different conditions. +// - if expires string is set then string to sign includes date instead of the Date header. +// - if expires string is empty then string to sign includes date header instead. +func getStringToSignV2(method string, encodedResource, encodedQuery string, headers http.Header, expires string) string { + canonicalHeaders := canonicalizedAmzHeadersV2(headers) + if len(canonicalHeaders) > 0 { + canonicalHeaders += "\n" + } + + date := expires // Date is set to expires date for presign operations. + if date == "" { + // If expires date is empty then request header Date is used. + date = headers.Get(consts.Date) + } + + // From the Amazon docs: + // + // StringToSign = HTTP-Verb + "\n" + + // Content-Md5 + "\n" + + // Content-Type + "\n" + + // Date/Expires + "\n" + + // CanonicalizedProtocolHeaders + + // CanonicalizedResource; + stringToSign := strings.Join([]string{ + method, + headers.Get(consts.ContentMD5), + headers.Get(consts.ContentType), + date, + canonicalHeaders, + }, "\n") + + return stringToSign + canonicalizedResourceV2(encodedResource, encodedQuery) +} + +// Returns "/bucketName/objectName" for path-style or virtual-host-style requests. +func getResource(path string, host string) (string, error) { + + // If virtual-host-style is enabled construct the "resource" properly. + if strings.Contains(host, ":") { + // In bucket.mydomain.com:9000, strip out :9000 + var err error + if host, _, err = net.SplitHostPort(host); err != nil { + return "", err + } + } + return path, nil +} diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go new file mode 100644 index 000000000..733bcb446 --- /dev/null +++ b/s3/services/auth/signature-v4-parser.go @@ -0,0 +1,308 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. 
+ */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package iam + +import ( + "github.com/yann-y/fds/internal/apierrors" + "github.com/yann-y/fds/internal/consts" + "github.com/yann-y/fds/internal/iam/auth" + "net/http" + "net/url" + "strings" + "time" +) + +// credentialHeader data type represents structured form of Credential +// string from authorization header. +type credentialHeader struct { + accessKey string + scope struct { + date time.Time + region string + service string + request string + } +} + +// Return scope string. +func (c credentialHeader) getScope() string { + return strings.Join([]string{ + c.scope.date.Format(yyyymmdd), + c.scope.region, + c.scope.service, + c.scope.request, + }, consts.SlashSeparator) +} + +func (s *AuthSys) GetReqAccessKeyV4(r *http.Request, region string, stype serviceType) (auth.Credentials, bool, apierrors.ErrorCode) { + ch, s3Err := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) + if s3Err != apierrors.ErrNone { + // Strip off the Algorithm prefix. 
+ v4Auth := strings.TrimPrefix(r.Header.Get("Authorization"), signV4Algorithm) + authFields := strings.Split(strings.TrimSpace(v4Auth), ",") + if len(authFields) != 3 { + return auth.Credentials{}, false, apierrors.ErrMissingFields + } + ch, s3Err = parseCredentialHeader(authFields[0], region, stype) + if s3Err != apierrors.ErrNone { + return auth.Credentials{}, false, s3Err + } + } + // TODO: Why should a temporary user be replaced with the parent user's account name? + //cerd, _ := s.Iam.GetUser(r.Context(), ch.accessKey) + //if cerd.IsTemp() { + // ch.accessKey = cerd.ParentUser + //} + return s.checkKeyValid(r, ch.accessKey) +} + +// parse credentialHeader string into its structured form. +func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, aec apierrors.ErrorCode) { + creds := strings.SplitN(strings.TrimSpace(credElement), "=", 2) + if len(creds) != 2 { + return ch, apierrors.ErrMissingFields + } + if creds[0] != "Credential" { + return ch, apierrors.ErrMissingCredTag + } + credElements := strings.Split(strings.TrimSpace(creds[1]), consts.SlashSeparator) + if len(credElements) < 5 { + return ch, apierrors.ErrCredMalformed + } + accessKey := strings.Join(credElements[:len(credElements)-4], consts.SlashSeparator) // The access key may contain one or more `/` + if !auth.IsAccessKeyValid(accessKey) { + return ch, apierrors.ErrInvalidAccessKeyID + } + // Save access key id. + cred := credentialHeader{ + accessKey: accessKey, + } + credElements = credElements[len(credElements)-4:] + var e error + cred.scope.date, e = time.Parse(yyyymmdd, credElements[0]) + if e != nil { + return ch, apierrors.ErrAuthorizationHeaderMalformed + } + + cred.scope.region = credElements[1] + // Verify if region is valid. + sRegion := cred.scope.region + // Region is set to be empty, we use whatever was sent by the + // request and proceed further. 
This is a work-around to address + // an important problem for ListBuckets() getting signed with + // different regions. + if region == "" { + region = sRegion + } + // Should validate region, only if region is set. + if !isValidRegion(sRegion, region) { + return ch, apierrors.ErrAuthorizationHeaderMalformed + } + if credElements[2] != string(stype) { + switch stype { + case ServiceSTS: + return ch, apierrors.ErrAuthorizationHeaderMalformed + } + return ch, apierrors.ErrAuthorizationHeaderMalformed + } + cred.scope.service = credElements[2] + if credElements[3] != "aws4_request" { + return ch, apierrors.ErrAuthorizationHeaderMalformed + } + cred.scope.request = credElements[3] + return cred, apierrors.ErrNone +} + +// Parse signature from signature tag. +func parseSignature(signElement string) (string, apierrors.ErrorCode) { + signFields := strings.Split(strings.TrimSpace(signElement), "=") + if len(signFields) != 2 { + return "", apierrors.ErrMissingFields + } + if signFields[0] != "Signature" { + return "", apierrors.ErrMissingSignTag + } + if signFields[1] == "" { + return "", apierrors.ErrMissingFields + } + signature := signFields[1] + return signature, apierrors.ErrNone +} + +// Parse slice of signed headers from signed headers tag. +func parseSignedHeader(signedHdrElement string) ([]string, apierrors.ErrorCode) { + signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") + if len(signedHdrFields) != 2 { + return nil, apierrors.ErrMissingFields + } + if signedHdrFields[0] != "SignedHeaders" { + return nil, apierrors.ErrMissingSignHeadersTag + } + if signedHdrFields[1] == "" { + return nil, apierrors.ErrMissingFields + } + signedHeaders := strings.Split(signedHdrFields[1], ";") + return signedHeaders, apierrors.ErrNone +} + +// signValues data type represents structured form of AWS Signature V4 header. 
+type signValues struct {
+	Credential    credentialHeader
+	SignedHeaders []string
+	Signature     string
+}
+
+// preSignValues data type represents structured form of AWS Signature V4 query string.
+type preSignValues struct {
+	signValues
+	Date    time.Time
+	Expires time.Duration
+}
+
+// Parses signature version '4' query string of the following form.
+//
+//	querystring = X-Amz-Algorithm=algorithm
+//	querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope)
+//	querystring += &X-Amz-Date=date
+//	querystring += &X-Amz-Expires=timeout interval
+//	querystring += &X-Amz-SignedHeaders=signed_headers
+//	querystring += &X-Amz-Signature=signature
+//
+// verifies if any of the necessary query params are missing in the presigned request.
+func doesV4PresignParamsExist(query url.Values) apierrors.ErrorCode {
+	v4PresignQueryParams := []string{consts.AmzAlgorithm, consts.AmzCredential, consts.AmzSignature, consts.AmzDate, consts.AmzSignedHeaders, consts.AmzExpires}
+	for _, v4PresignQueryParam := range v4PresignQueryParams {
+		if _, ok := query[v4PresignQueryParam]; !ok {
+			return apierrors.ErrInvalidQueryParams
+		}
+	}
+	return apierrors.ErrNone
+}
+
+// Parses all the presigned signature values into separate elements.
+func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, aec apierrors.ErrorCode) {
+	// verify whether the required query params exist.
+	aec = doesV4PresignParamsExist(query)
+	if aec != apierrors.ErrNone {
+		return psv, aec
+	}
+
+	// Verify if the query algorithm is supported or not.
+	if query.Get(consts.AmzAlgorithm) != signV4Algorithm {
+		return psv, apierrors.ErrAuthorizationHeaderMalformed
+	}
+
+	// Initialize signature version '4' structured header.
+	preSignV4Values := preSignValues{}
+
+	// Save credential.
+	preSignV4Values.Credential, aec = parseCredentialHeader("Credential="+query.Get(consts.AmzCredential), region, stype)
+	if aec != apierrors.ErrNone {
+		return psv, aec
+	}
+
+	var e error
+	// Save date in native time.Time.
+	preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get(consts.AmzDate))
+	if e != nil {
+		return psv, apierrors.ErrAuthorizationHeaderMalformed
+	}
+
+	// Save expires in native time.Duration.
+	preSignV4Values.Expires, e = time.ParseDuration(query.Get(consts.AmzExpires) + "s")
+	if e != nil {
+		return psv, apierrors.ErrAuthorizationHeaderMalformed
+	}
+
+	if preSignV4Values.Expires < 0 {
+		return psv, apierrors.ErrAuthorizationHeaderMalformed
+	}
+
+	// Check if Expiry time is less than 7 days (value in seconds).
+	if preSignV4Values.Expires.Seconds() > 604800 {
+		return psv, apierrors.ErrAuthorizationHeaderMalformed
+	}
+
+	// Save signed headers.
+	preSignV4Values.SignedHeaders, aec = parseSignedHeader("SignedHeaders=" + query.Get(consts.AmzSignedHeaders))
+	if aec != apierrors.ErrNone {
+		return psv, aec
+	}
+
+	// Save signature.
+	preSignV4Values.Signature, aec = parseSignature("Signature=" + query.Get(consts.AmzSignature))
+	if aec != apierrors.ErrNone {
+		return psv, aec
+	}
+
+	// Return structured form of signature query string.
+	return preSignV4Values, apierrors.ErrNone
+}
+
+// Parses signature version '4' header of the following form.
+//
+//	Authorization: algorithm Credential=accessKeyID/credScope, \
+//	    SignedHeaders=signedHeaders, Signature=signature
+func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, aec apierrors.ErrorCode) {
+	// credElement is fetched first to skip replacing the space in access key.
+	credElement := strings.TrimPrefix(strings.Split(strings.TrimSpace(v4Auth), ",")[0], signV4Algorithm)
+	// Replace all spaced strings, some clients can send spaced
+	// parameters and some won't. So we pro-actively remove any spaces
+	// to make parsing easier.
+	v4Auth = strings.ReplaceAll(v4Auth, " ", "")
+	if v4Auth == "" {
+		return sv, apierrors.ErrAuthHeaderEmpty
+	}
+
+	// Verify if the header algorithm is supported or not.
+	if !strings.HasPrefix(v4Auth, signV4Algorithm) {
+		return sv, apierrors.ErrSignatureVersionNotSupported
+	}
+
+	// Strip off the Algorithm prefix.
+	v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm)
+	authFields := strings.Split(strings.TrimSpace(v4Auth), ",")
+	if len(authFields) != 3 {
+		return sv, apierrors.ErrMissingFields
+	}
+
+	// Initialize signature version '4' structured header.
+	signV4Values := signValues{}
+
+	var s3Err apierrors.ErrorCode
+	// Save credential values.
+	signV4Values.Credential, s3Err = parseCredentialHeader(strings.TrimSpace(credElement), region, stype)
+	if s3Err != apierrors.ErrNone {
+		return sv, s3Err
+	}
+
+	// Save signed headers.
+	signV4Values.SignedHeaders, s3Err = parseSignedHeader(authFields[1])
+	if s3Err != apierrors.ErrNone {
+		return sv, s3Err
+	}
+
+	// Save signature.
+	signV4Values.Signature, s3Err = parseSignature(authFields[2])
+	if s3Err != apierrors.ErrNone {
+		return sv, s3Err
+	}
+
+	// Return the structure here.
+	return signV4Values, apierrors.ErrNone
+}
diff --git a/s3/services/auth/signature-v4-utils.go b/s3/services/auth/signature-v4-utils.go
new file mode 100644
index 000000000..005d7944a
--- /dev/null
+++ b/s3/services/auth/signature-v4-utils.go
@@ -0,0 +1,235 @@
+/*
+ * The following code tries to reverse engineer the Amazon S3 APIs,
+ * and is mostly copied from minio implementation.
+ */
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package iam + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "github.com/yann-y/fds/internal/apierrors" + "github.com/yann-y/fds/internal/consts" + "github.com/yann-y/fds/internal/iam/auth" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" +) + +// http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the +// client did not calculate sha256 of the payload. +const unsignedPayload = "UNSIGNED-PAYLOAD" + +// SkipContentSha256Cksum returns true if caller needs to skip +// payload checksum, false if not. +func SkipContentSha256Cksum(r *http.Request) bool { + var ( + v []string + ok bool + ) + + if isRequestPresignedSignatureV4(r) { + v, ok = r.Form[consts.AmzContentSha256] + if !ok { + v, ok = r.Header[consts.AmzContentSha256] + } + } else { + v, ok = r.Header[consts.AmzContentSha256] + } + + // Skip if no header was set. + if !ok { + return true + } + + // If x-amz-content-sha256 is set and the value is not + // 'UNSIGNED-PAYLOAD' we should validate the content sha256. + switch v[0] { + case unsignedPayload: + return true + case consts.EmptySHA256: + // some broken clients set empty-sha256 + // with > 0 content-length in the body, + // we should skip such clients and allow + // blindly such insecure clients only if + // S3 strict compatibility is disabled. + if r.ContentLength > 0 { + // We return true only in situations when + // deployment has asked MinIO to allow for + // such broken clients and content-length > 0. + return true + } + } + return false +} + +// Returns SHA256 for calculating canonical-request. 
+func GetContentSha256Cksum(r *http.Request, stype serviceType) string { + if stype == ServiceSTS { + payload, err := ioutil.ReadAll(io.LimitReader(r.Body, consts.StsRequestBodyLimit)) + if err != nil { + log.Errorf("ServiceSTS ReadAll err:%v", err) + } + sum256 := sha256.Sum256(payload) + r.Body = ioutil.NopCloser(bytes.NewReader(payload)) + return hex.EncodeToString(sum256[:]) + } + + var ( + defaultSha256Cksum string + v []string + ok bool + ) + + // For a presigned request we look at the query param for sha256. + if isRequestPresignedSignatureV4(r) { + // X-Amz-Content-Sha256, if not set in presigned requests, checksum + // will default to 'UNSIGNED-PAYLOAD'. + defaultSha256Cksum = unsignedPayload + v, ok = r.Form[consts.AmzContentSha256] + if !ok { + v, ok = r.Header[consts.AmzContentSha256] + } + } else { + // X-Amz-Content-Sha256, if not set in signed requests, checksum + // will default to sha256([]byte("")). + defaultSha256Cksum = consts.EmptySHA256 + v, ok = r.Header[consts.AmzContentSha256] + } + + // We found 'X-Amz-Content-Sha256' return the captured value. + if ok { + return v[0] + } + + // We couldn't find 'X-Amz-Content-Sha256'. + return defaultSha256Cksum +} + +// isValidRegion - verify if incoming region value is valid with configured Region. +func isValidRegion(reqRegion string, confRegion string) bool { + if confRegion == "" { + return true + } + if confRegion == "US" { + confRegion = consts.DefaultRegion + } + // Some older s3 clients set region as "US" instead of + // globalDefaultRegion, handle it. + if reqRegion == "US" { + reqRegion = consts.DefaultRegion + } + return reqRegion == confRegion +} + +// check if the access key is valid and recognized, additionally +// also returns if the access key is owner/admin. +func (s *AuthSys) checkKeyValid(r *http.Request, accessKey string) (auth.Credentials, bool, apierrors.ErrorCode) { + + cred := s.AdminCred + if cred.AccessKey != accessKey { + // Check if the access key is part of users credentials. 
+		ucred, ok := s.Iam.GetUser(r.Context(), accessKey)
+		if !ok {
+			// The lookup failed; if the credentials exist but the account
+			// is disabled, return a distinct error for that scenario.
+			if ucred.Status == auth.AccountOff {
+				return cred, false, apierrors.ErrAccessKeyDisabled
+			}
+			return cred, false, apierrors.ErrInvalidAccessKeyID
+		}
+		cred = ucred
+	}
+	owner := cred.AccessKey == s.AdminCred.AccessKey
+	return cred, owner, apierrors.ErrNone
+}
+
+func contains(slice interface{}, elem interface{}) bool {
+	v := reflect.ValueOf(slice)
+	if v.Kind() == reflect.Slice {
+		for i := 0; i < v.Len(); i++ {
+			if v.Index(i).Interface() == elem {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// extractSignedHeaders extracts signed headers from Authorization header
+func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, apierrors.ErrorCode) {
+	reqHeaders := r.Header
+	reqQueries := r.Form
+	// find whether "host" is part of list of signed headers.
+	// if not return ErrUnsignedHeaders. "host" is mandatory.
+	if !contains(signedHeaders, "host") {
+		return nil, apierrors.ErrUnsignedHeaders
+	}
+	extractedSignedHeaders := make(http.Header)
+	for _, header := range signedHeaders {
+		// `host` will not be found in the headers, can be found in r.Host.
+		// but it is always necessary that the list of signed headers contains host.
+		val, ok := reqHeaders[http.CanonicalHeaderKey(header)]
+		if !ok {
+			// try to set headers from Query String
+			val, ok = reqQueries[header]
+		}
+		if ok {
+			extractedSignedHeaders[http.CanonicalHeaderKey(header)] = val
+			continue
+		}
+		switch header {
+		case "expect":
+			// Golang http server strips off 'Expect' header, if the
+			// client sent this as part of signed headers we need to
+			// handle otherwise we would see a signature mismatch.
+			// `aws-cli` sets this as part of signed headers.
+ // + // According to + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20 + // Expect header is always of form: + // + // Expect = "Expect" ":" 1#expectation + // expectation = "100-continue" | expectation-extension + // + // So it safe to assume that '100-continue' is what would + // be sent, for the time being keep this work around. + // Adding a *TODO* to remove this later when Golang server + // doesn't filter out the 'Expect' header. + extractedSignedHeaders.Set(header, "100-continue") + case "host": + // Go http server removes "host" from Request.Header + + //extractedSignedHeaders.Set(header, r.Host) + // todo use r.Host, or filedag-web deal with + //value := strings.Split(r.Host, ":") + extractedSignedHeaders.Set(header, r.Host) + case "transfer-encoding": + // Go http server removes "host" from Request.Header + extractedSignedHeaders[http.CanonicalHeaderKey(header)] = r.TransferEncoding + case "content-length": + // Signature-V4 spec excludes Content-Length from signed headers list for signature calculation. + // But some clients deviate from this rule. Hence we consider Content-Length for signature + // calculation to be compatible with such clients. + extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) + default: + return nil, apierrors.ErrUnsignedHeaders + } + } + return extractedSignedHeaders, apierrors.ErrNone +} diff --git a/s3/services/auth/signature-v4.go b/s3/services/auth/signature-v4.go new file mode 100644 index 000000000..1cb4df480 --- /dev/null +++ b/s3/services/auth/signature-v4.go @@ -0,0 +1,260 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. + +package iam + +import ( + "crypto/subtle" + "github.com/yann-y/fds/internal/apierrors" + "github.com/yann-y/fds/internal/consts" + "github.com/yann-y/fds/internal/iam/set" + "github.com/yann-y/fds/internal/utils" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// AWS Signature Version '4' constants. +const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601Format = "20060102T150405Z" + yyyymmdd = "20060102" +) + +type serviceType string + +const ( + ServiceS3 serviceType = "s3" + //ServiceSTS STS + ServiceSTS serviceType = "sts" +) + +// compareSignatureV4 returns true if and only if both signatures +// are equal. The signatures are expected to be HEX encoded strings +// according to the AWS S3 signature V4 spec. +func compareSignatureV4(sig1, sig2 string) bool { + // The CTC using []byte(str) works because the hex encoding + // is unique for a sequence of bytes. See also compareSignatureV2. + return subtle.ConstantTimeCompare([]byte(sig1), []byte(sig2)) == 1 +} + +// doesPresignedSignatureMatch - Verify query headers with presigned signature +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html +// +// returns apierrors.ErrNone if the signature matches. +func (s *AuthSys) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) apierrors.ErrorCode { + // Copy request + req := *r + + // Parse request query string. 
+ pSignValues, err := parsePreSignV4(req.Form, region, stype) + if err != apierrors.ErrNone { + return err + } + + cred, _, s3Err := s.checkKeyValid(r, pSignValues.Credential.accessKey) + if s3Err != apierrors.ErrNone { + return s3Err + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r) + if errCode != apierrors.ErrNone { + return errCode + } + + // If the host which signed the request is slightly ahead in time (by less than MaxSkewTime) the + // request should still be allowed. + if pSignValues.Date.After(time.Now().UTC().Add(consts.MaxSkewTime)) { + return apierrors.ErrRequestNotReadyYet + } + + if time.Now().UTC().Sub(pSignValues.Date) > pSignValues.Expires { + return apierrors.ErrExpiredPresignRequest + } + + // Save the date and expires. + t := pSignValues.Date + expireSeconds := int(pSignValues.Expires / time.Second) + + // Construct new query. + query := make(url.Values) + clntHashedPayload := req.Form.Get(consts.AmzContentSha256) + if clntHashedPayload != "" { + query.Set(consts.AmzContentSha256, hashedPayload) + } + + token := req.Form.Get(consts.AmzSecurityToken) + if token != "" { + query.Set(consts.AmzSecurityToken, cred.SessionToken) + } + + query.Set(consts.AmzAlgorithm, signV4Algorithm) + + // Construct the query. 
+ query.Set(consts.AmzDate, t.Format(iso8601Format)) + query.Set(consts.AmzExpires, strconv.Itoa(expireSeconds)) + query.Set(consts.AmzSignedHeaders, utils.GetSignedHeaders(extractedSignedHeaders)) + query.Set(consts.AmzCredential, cred.AccessKey+consts.SlashSeparator+pSignValues.Credential.getScope()) + + defaultSigParams := set.CreateStringSet( + consts.AmzContentSha256, + consts.AmzSecurityToken, + consts.AmzAlgorithm, + consts.AmzDate, + consts.AmzExpires, + consts.AmzSignedHeaders, + consts.AmzCredential, + consts.AmzSignature, + ) + + // Add missing query parameters if any provided in the request URL + for k, v := range req.Form { + if !defaultSigParams.Contains(k) { + query[k] = v + } + } + + // Get the encoded query. + encodedQuery := query.Encode() + + // Verify if date query is same. + if req.Form.Get(consts.AmzDate) != query.Get(consts.AmzDate) { + return apierrors.ErrSignatureDoesNotMatch + } + // Verify if expires query is same. + if req.Form.Get(consts.AmzExpires) != query.Get(consts.AmzExpires) { + return apierrors.ErrSignatureDoesNotMatch + } + // Verify if signed headers query is same. + if req.Form.Get(consts.AmzSignedHeaders) != query.Get(consts.AmzSignedHeaders) { + return apierrors.ErrSignatureDoesNotMatch + } + // Verify if credential query is same. + if req.Form.Get(consts.AmzCredential) != query.Get(consts.AmzCredential) { + return apierrors.ErrSignatureDoesNotMatch + } + // Verify if sha256 payload query is same. + if clntHashedPayload != "" && clntHashedPayload != query.Get(consts.AmzContentSha256) { + return apierrors.ErrContentSHA256Mismatch + } + // Verify if security token is correct. + if token != "" && subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 { + return apierrors.ErrInvalidToken + } + + // Verify finally if signature is same. + + // Get canonical request. 
+ presignedCanonicalReq := utils.GetCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + presignedStringToSign := utils.GetStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope()) + + // Get hmac presigned signing key. + presignedSigningKey := utils.GetSigningKey(cred.SecretKey, pSignValues.Credential.scope.date, + pSignValues.Credential.scope.region, string(stype)) + + // Get new signature. + newSignature := utils.GetSignature(presignedSigningKey, presignedStringToSign) + + // Verify signature. + if !compareSignatureV4(req.Form.Get(consts.AmzSignature), newSignature) { + return apierrors.ErrSignatureDoesNotMatch + } + return apierrors.ErrNone +} + +// doesSignatureMatch - Verify authorization header with calculated header in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +// +// returns apierrors.ErrNone if signature matches. +func (s *AuthSys) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) apierrors.ErrorCode { + // Copy request. + req := *r + + // Save authorization header. + v4Auth := req.Header.Get(consts.Authorization) + + // Parse signature version '4' header. + signV4Values, err := parseSignV4(v4Auth, region, stype) + if err != apierrors.ErrNone { + return err + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) + if errCode != apierrors.ErrNone { + return errCode + } + + cred, _, s3Err := s.checkKeyValid(r, signV4Values.Credential.accessKey) + if s3Err != apierrors.ErrNone { + return s3Err + } + + // Extract date, if not present throw error. + var date string + if date = req.Header.Get(consts.AmzDate); date == "" { + if date = r.Header.Get(consts.Date); date == "" { + return apierrors.ErrMissingDateHeader + } + } + + // Parse date header. 
+ t, e := time.Parse(iso8601Format, date) + if e != nil { + return apierrors.ErrAuthorizationHeaderMalformed + } + + // Query string. + queryStr := req.URL.Query().Encode() + + // Get canonical request. + canonicalRequest := utils.GetCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method) + + // Get string to sign from canonical request. + stringToSign := utils.GetStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) + + // Get hmac signing key. + signingKey := utils.GetSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, + signV4Values.Credential.scope.region, string(stype)) + + // Calculate signature. + newSignature := utils.GetSignature(signingKey, stringToSign) + + // Verify if signature match. + if !compareSignatureV4(newSignature, signV4Values.Signature) { + return apierrors.ErrSignatureDoesNotMatch + } + + // Return error none. + return apierrors.ErrNone +} + +// getScope generate a string of a specific date, an AWS region, and a service. +func getScope(t time.Time, region string) string { + scope := strings.Join([]string{ + t.Format(yyyymmdd), + region, + string(ServiceS3), + "aws4_request", + }, consts.SlashSeparator) + return scope +} diff --git a/s3/services/auth/streaming-signature-v4.go b/s3/services/auth/streaming-signature-v4.go new file mode 100644 index 000000000..694ab68bd --- /dev/null +++ b/s3/services/auth/streaming-signature-v4.go @@ -0,0 +1,481 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. + */ + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+//
+// This program is distributed in the hope that it will be useful
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+// This file implements helper functions to validate Streaming AWS
+// Signature Version '4' authorization header.
+package iam
+
+import (
+	"bufio"
+	"bytes"
+	"crypto/sha256"
+	"encoding/hex"
+	"errors"
+	"github.com/yann-y/fds/internal/apierrors"
+	"github.com/yann-y/fds/internal/utils"
+	"hash"
+	"io"
+	"net/http"
+	"strings"
+	"time"
+
+	humanize "github.com/dustin/go-humanize"
+	"github.com/yann-y/fds/internal/consts"
+	"github.com/yann-y/fds/internal/iam/auth"
+)
+
+// Streaming AWS Signature Version '4' constants.
+const (
+	emptySHA256              = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+	streamingContentSHA256   = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
+	signV4ChunkedAlgorithm   = "AWS4-HMAC-SHA256-PAYLOAD"
+	streamingContentEncoding = "aws-chunked"
+)
+
+// errSignatureMismatch means signature did not match.
+var errSignatureMismatch = errors.New("Signature does not match")
+
+// getChunkSignature - get chunk signature.
+func getChunkSignature(cred auth.Credentials, seedSignature string, region string, date time.Time, hashedChunk string) string {
+	// Calculate string to sign.
+	stringToSign := signV4ChunkedAlgorithm + "\n" +
+		date.Format(iso8601Format) + "\n" +
+		getScope(date, region) + "\n" +
+		seedSignature + "\n" +
+		emptySHA256 + "\n" +
+		hashedChunk
+
+	// Get hmac signing key.
+	signingKey := utils.GetSigningKey(cred.SecretKey, date, region, string(ServiceS3))
+
+	// Calculate signature.
+ newSignature := utils.GetSignature(signingKey, stringToSign) + + return newSignature +} + +// CalculateSeedSignature - Calculate seed signature in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html +// +// returns signature, error otherwise if the signature mismatches or any other +// error while parsing and validating. +func (s *AuthSys) CalculateSeedSignature(r *http.Request) (cred auth.Credentials, signature string, region string, date time.Time, errCode apierrors.ErrorCode) { + // Copy request. + req := *r + + // Save authorization header. + v4Auth := req.Header.Get(consts.Authorization) + + // Parse signature version '4' header. + signV4Values, errCode := parseSignV4(v4Auth, "", ServiceS3) + if errCode != apierrors.ErrNone { + return cred, "", "", time.Time{}, errCode + } + + // Payload streaming. + payload := streamingContentSHA256 + + // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' + if payload != req.Header.Get(consts.AmzContentSha256) { + return cred, "", "", time.Time{}, apierrors.ErrContentSHA256Mismatch + } + + // Extract all the signed headers along with its values. + extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) + if errCode != apierrors.ErrNone { + return cred, "", "", time.Time{}, errCode + } + + cred, _, errCode = s.checkKeyValid(r, signV4Values.Credential.accessKey) + if errCode != apierrors.ErrNone { + return cred, "", "", time.Time{}, errCode + } + + // Verify if region is valid. + region = signV4Values.Credential.scope.region + + // Extract date, if not present throw error. + var dateStr string + if dateStr = req.Header.Get("x-amz-date"); dateStr == "" { + if dateStr = r.Header.Get("Date"); dateStr == "" { + return cred, "", "", time.Time{}, apierrors.ErrMissingDateHeader + } + } + + // Parse date header. 
+	var err error
+	date, err = time.Parse(iso8601Format, dateStr)
+	if err != nil {
+		return cred, "", "", time.Time{}, apierrors.ErrMalformedDate
+	}
+
+	// Query string.
+	queryStr := req.Form.Encode()
+
+	// Get canonical request.
+	canonicalRequest := utils.GetCanonicalRequest(extractedSignedHeaders, payload, queryStr, req.URL.Path, req.Method)
+
+	// Get string to sign from canonical request.
+	stringToSign := utils.GetStringToSign(canonicalRequest, date, signV4Values.Credential.getScope())
+
+	// Get hmac signing key.
+	signingKey := utils.GetSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region, string(ServiceS3))
+
+	// Calculate signature.
+	newSignature := utils.GetSignature(signingKey, stringToSign)
+
+	// Verify if signature match.
+	if !compareSignatureV4(newSignature, signV4Values.Signature) {
+		return cred, "", "", time.Time{}, apierrors.ErrSignatureDoesNotMatch
+	}
+
+	// Return calculated signature.
+	return cred, newSignature, region, date, apierrors.ErrNone
+}
+
+const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB
+
+// lineTooLong is generated as chunk header is bigger than 4KiB.
+var errLineTooLong = errors.New("header line too long")
+
+// malformed encoding is generated when chunk header is wrongly formed.
+var errMalformedEncoding = errors.New("malformed chunked encoding")
+
+// chunk is considered too big if it is bigger than 16MiB.
+var errChunkTooBig = errors.New("chunk too big: choose chunk size <= 16MiB")
+
+// NewSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r
+// out of HTTP "chunked" format before returning it.
+// The s3ChunkedReader returns io.EOF when the final 0-length chunk is read.
+//
+// NewChunkedReader is not needed by normal applications. The http package
+// automatically decodes chunking when reading response bodies.
+func NewSignV4ChunkedReader(req *http.Request, s *AuthSys) (io.ReadCloser, apierrors.ErrorCode) { + cred, seedSignature, region, seedDate, errCode := s.CalculateSeedSignature(req) + if errCode != apierrors.ErrNone { + return nil, errCode + } + + return &s3ChunkedReader{ + reader: bufio.NewReader(req.Body), + cred: cred, + seedSignature: seedSignature, + seedDate: seedDate, + region: region, + chunkSHA256Writer: sha256.New(), + buffer: make([]byte, 64*1024), + }, apierrors.ErrNone +} + +// Represents the overall state that is required for decoding a +// AWS Signature V4 chunked reader. +type s3ChunkedReader struct { + reader *bufio.Reader + cred auth.Credentials + seedSignature string + seedDate time.Time + region string + + chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data. + buffer []byte + offset int + err error +} + +func (cr *s3ChunkedReader) Close() (err error) { + return nil +} + +// Now, we read one chunk from the underlying reader. +// A chunk has the following format: +// +// + ";chunk-signature=" + + "\r\n" + + "\r\n" +// +// First, we read the chunk size but fail if it is larger +// than 16 MiB. We must not accept arbitrary large chunks. +// One 16 MiB is a reasonable max limit. +// +// Then we read the signature and payload data. We compute the SHA256 checksum +// of the payload and verify that it matches the expected signature value. +// +// The last chunk is *always* 0-sized. So, we must only return io.EOF if we have encountered +// a chunk with a chunk size = 0. However, this chunk still has a signature and we must +// verify it. +const maxChunkSize = 16 << 20 // 16 MiB + +// Read - implements `io.Reader`, which transparently decodes +// the incoming AWS Signature V4 streaming signature. +func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { + // First, if there is any unread data, copy it to the client + // provided buffer. 
+ if cr.offset > 0 { + n = copy(buf, cr.buffer[cr.offset:]) + if n == len(buf) { + cr.offset += n + return n, nil + } + cr.offset = 0 + buf = buf[n:] + } + + var size int + for { + b, err := cr.reader.ReadByte() + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + cr.err = err + return n, cr.err + } + if b == ';' { // separating character + break + } + + // Manually deserialize the size since AWS specified + // the chunk size to be of variable width. In particular, + // a size of 16 is encoded as `10` while a size of 64 KB + // is `10000`. + switch { + case b >= '0' && b <= '9': + size = size<<4 | int(b-'0') + case b >= 'a' && b <= 'f': + size = size<<4 | int(b-('a'-10)) + case b >= 'A' && b <= 'F': + size = size<<4 | int(b-('A'-10)) + default: + cr.err = errMalformedEncoding + return n, cr.err + } + if size > maxChunkSize { + cr.err = errChunkTooBig + return n, cr.err + } + } + + // Now, we read the signature of the following payload and expect: + // chunk-signature=" + + "\r\n" + // + // The signature is 64 bytes long (hex-encoded SHA256 hash) and + // starts with a 16 byte header: len("chunk-signature=") + 64 == 80. 
+ var signature [80]byte + _, err = io.ReadFull(cr.reader, signature[:]) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + cr.err = err + return n, cr.err + } + if !bytes.HasPrefix(signature[:], []byte("chunk-signature=")) { + cr.err = errMalformedEncoding + return n, cr.err + } + b, err := cr.reader.ReadByte() + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + cr.err = err + return n, cr.err + } + if b != '\r' { + cr.err = errMalformedEncoding + return n, cr.err + } + b, err = cr.reader.ReadByte() + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + cr.err = err + return n, cr.err + } + if b != '\n' { + cr.err = errMalformedEncoding + return n, cr.err + } + + if cap(cr.buffer) < size { + cr.buffer = make([]byte, size) + } else { + cr.buffer = cr.buffer[:size] + } + + // Now, we read the payload and compute its SHA-256 hash. + _, err = io.ReadFull(cr.reader, cr.buffer) + if err == io.EOF && size != 0 { + err = io.ErrUnexpectedEOF + } + if err != nil && err != io.EOF { + cr.err = err + return n, cr.err + } + b, err = cr.reader.ReadByte() + if b != '\r' { + cr.err = errMalformedEncoding + return n, cr.err + } + b, err = cr.reader.ReadByte() + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + cr.err = err + return n, cr.err + } + if b != '\n' { + cr.err = errMalformedEncoding + return n, cr.err + } + + // Once we have read the entire chunk successfully, we verify + // that the received signature matches our computed signature. + cr.chunkSHA256Writer.Write(cr.buffer) + newSignature := getChunkSignature(cr.cred, cr.seedSignature, cr.region, cr.seedDate, hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))) + if !compareSignatureV4(string(signature[16:]), newSignature) { + cr.err = errSignatureMismatch + return n, cr.err + } + cr.seedSignature = newSignature + cr.chunkSHA256Writer.Reset() + + // If the chunk size is zero we return io.EOF. 
As specified by AWS, + // only the last chunk is zero-sized. + if size == 0 { + cr.err = io.EOF + return n, cr.err + } + + cr.offset = copy(buf, cr.buffer) + n += cr.offset + return n, err +} + +// readCRLF - check if reader only has '\r\n' CRLF character. +// returns malformed encoding if it doesn't. +func readCRLF(reader io.Reader) error { + buf := make([]byte, 2) + _, err := io.ReadFull(reader, buf[:2]) + if err != nil { + return err + } + if buf[0] != '\r' || buf[1] != '\n' { + return errMalformedEncoding + } + return nil +} + +// Read a line of bytes (up to \n) from b. +// Give up if the line exceeds maxLineLength. +// The returned bytes are owned by the bufio.Reader +// so they are only valid until the next bufio read. +func readChunkLine(b *bufio.Reader) ([]byte, []byte, error) { + buf, err := b.ReadSlice('\n') + if err != nil { + // We always know when EOF is coming. + // If the caller asked for a line, there should be a line. + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if err == bufio.ErrBufferFull { + err = errLineTooLong + } + return nil, nil, err + } + if len(buf) >= maxLineLength { + return nil, nil, errLineTooLong + } + // Parse s3 specific chunk extension and fetch the values. + hexChunkSize, hexChunkSignature := parseS3ChunkExtension(buf) + return hexChunkSize, hexChunkSignature, nil +} + +// trimTrailingWhitespace - trim trailing white space. +func trimTrailingWhitespace(b []byte) []byte { + for len(b) > 0 && isASCIISpace(b[len(b)-1]) { + b = b[:len(b)-1] + } + return b +} + +// isASCIISpace - is ascii space? +func isASCIISpace(b byte) bool { + return b == ' ' || b == '\t' || b == '\n' || b == '\r' +} + +// Constant s3 chunk encoding signature. +const s3ChunkSignatureStr = ";chunk-signature=" + +// parses3ChunkExtension removes any s3 specific chunk-extension from buf. +// For example, +// +// "10000;chunk-signature=..." => "10000", "chunk-signature=..." 
+func parseS3ChunkExtension(buf []byte) ([]byte, []byte) { + buf = trimTrailingWhitespace(buf) + semi := bytes.Index(buf, []byte(s3ChunkSignatureStr)) + // Chunk signature not found, return the whole buffer. + if semi == -1 { + return buf, nil + } + return buf[:semi], parseChunkSignature(buf[semi:]) +} + +// parseChunkSignature - parse chunk signature. +func parseChunkSignature(chunk []byte) []byte { + chunkSplits := bytes.SplitN(chunk, []byte(s3ChunkSignatureStr), 2) + return chunkSplits[1] +} + +// parse hex to uint64. +func parseHexUint(v []byte) (n uint64, err error) { + for i, b := range v { + switch { + case '0' <= b && b <= '9': + b -= '0' + case 'a' <= b && b <= 'f': + b = b - 'a' + 10 + case 'A' <= b && b <= 'F': + b = b - 'A' + 10 + default: + return 0, errors.New("invalid byte in chunk length") + } + if i == 16 { + return 0, errors.New("http chunk length too large") + } + n <<= 4 + n |= uint64(b) + } + return +} + +// Trims away `aws-chunked` from the content-encoding header if present. +// Streaming signature clients can have custom content-encoding such as +// `aws-chunked,gzip` here we need to only save `gzip`. +// For more refer http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html +func TrimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string) { + if contentEnc == "" { + return contentEnc + } + var newEncs []string + for _, enc := range strings.Split(contentEnc, ",") { + if enc != streamingContentEncoding { + newEncs = append(newEncs, enc) + } + } + return strings.Join(newEncs, ",") +} diff --git a/s3/services/auth/streaming-signature-v4_test.go b/s3/services/auth/streaming-signature-v4_test.go new file mode 100644 index 000000000..24cf58c81 --- /dev/null +++ b/s3/services/auth/streaming-signature-v4_test.go @@ -0,0 +1,198 @@ +/* + * The following code tries to reverse engineer the Amazon S3 APIs, + * and is mostly copied from minio implementation. 
+ */ + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package iam + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" + "testing" +) + +// Test read chunk line. +func TestReadChunkLine(t *testing.T) { + type testCase struct { + reader *bufio.Reader + expectedErr error + chunkSize []byte + chunkSignature []byte + } + // List of readers used. + readers := []io.Reader{ + // Test - 1 + bytes.NewReader([]byte("1000;chunk-signature=111123333333333333334444211\r\n")), + // Test - 2 + bytes.NewReader([]byte("1000;")), + // Test - 3 + bytes.NewReader([]byte(fmt.Sprintf("%4097d", 1))), + // Test - 4 + bytes.NewReader([]byte("1000;chunk-signature=111123333333333333334444211\r\n")), + } + testCases := []testCase{ + // Test - 1 - small bufio reader. + { + bufio.NewReaderSize(readers[0], 16), + errLineTooLong, + nil, + nil, + }, + // Test - 2 - unexpected end of the reader. + { + bufio.NewReader(readers[1]), + io.ErrUnexpectedEOF, + nil, + nil, + }, + // Test - 3 - line too long bigger than 4k+1 + { + bufio.NewReader(readers[2]), + errLineTooLong, + nil, + nil, + }, + // Test - 4 - parse the chunk reader properly. + { + bufio.NewReader(readers[3]), + nil, + []byte("1000"), + []byte("111123333333333333334444211"), + }, + } + // Valid test cases for each chunk line. 
+ for i, tt := range testCases { + chunkSize, chunkSignature, err := readChunkLine(tt.reader) + if err != tt.expectedErr { + t.Errorf("Test %d: Expected %s, got %s", i+1, tt.expectedErr, err) + } + if !bytes.Equal(chunkSize, tt.chunkSize) { + t.Errorf("Test %d: Expected %s, got %s", i+1, string(tt.chunkSize), string(chunkSize)) + } + if !bytes.Equal(chunkSignature, tt.chunkSignature) { + t.Errorf("Test %d: Expected %s, got %s", i+1, string(tt.chunkSignature), string(chunkSignature)) + } + } +} + +// Test parsing s3 chunk extension. +func TestParseS3ChunkExtension(t *testing.T) { + type testCase struct { + buf []byte + chunkSize []byte + chunkSign []byte + } + + tests := []testCase{ + // Test - 1 valid case. + { + []byte("10000;chunk-signature=ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648"), + []byte("10000"), + []byte("ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648"), + }, + // Test - 2 no chunk extension, return same buffer. + { + []byte("10000;"), + []byte("10000;"), + nil, + }, + // Test - 3 no chunk size, return error. + { + []byte(";chunk-signature="), + nil, + nil, + }, + // Test - 4 removes trailing slash. + { + []byte("10000;chunk-signature=ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648 \t \n"), + []byte("10000"), + []byte("ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648"), + }, + } + // Validate chunk extension removal. + for i, tt := range tests { + // Extract chunk size and chunk signature after parsing a standard chunk-extension format. + hexChunkSize, hexChunkSignature := parseS3ChunkExtension(tt.buf) + if !bytes.Equal(hexChunkSize, tt.chunkSize) { + t.Errorf("Test %d: Expected %s, got %s", i+1, string(tt.chunkSize), string(hexChunkSize)) + } + if !bytes.Equal(hexChunkSignature, tt.chunkSign) { + t.Errorf("Test %d: Expected %s, got %s", i+1, string(tt.chunkSign), string(hexChunkSignature)) + } + } +} + +// Test read CRLF characters on input reader. 
+func TestReadCRLF(t *testing.T) { + type testCase struct { + reader io.Reader + expectedErr error + } + tests := []testCase{ + // Test - 1 valid buffer with CRLF. + {bytes.NewReader([]byte("\r\n")), nil}, + // Test - 2 invalid buffer with no CRLF. + {bytes.NewReader([]byte("he")), errMalformedEncoding}, + // Test - 3 invalid buffer with more characters. + {bytes.NewReader([]byte("he\r\n")), errMalformedEncoding}, + // Test - 4 smaller buffer than expected. + {bytes.NewReader([]byte("h")), io.ErrUnexpectedEOF}, + } + for i, tt := range tests { + err := readCRLF(tt.reader) + if err != tt.expectedErr { + t.Errorf("Test %d: Expected %s, got %s this", i+1, tt.expectedErr, err) + } + } +} + +// Tests parsing hex number into its uint64 decimal equivalent. +func TestParseHexUint(t *testing.T) { + type testCase struct { + in string + want uint64 + wantErr string + } + tests := []testCase{ + {"x", 0, "invalid byte in chunk length"}, + {"0000000000000000", 0, ""}, + {"0000000000000001", 1, ""}, + {"ffffffffffffffff", 1<<64 - 1, ""}, + {"FFFFFFFFFFFFFFFF", 1<<64 - 1, ""}, + {"000000000000bogus", 0, "invalid byte in chunk length"}, + {"00000000000000000", 0, "http chunk length too large"}, // could accept if we wanted + {"10000000000000000", 0, "http chunk length too large"}, + {"00000000000000001", 0, "http chunk length too large"}, // could accept if we wanted + } + for i := uint64(0); i <= 1234; i++ { + tests = append(tests, testCase{in: fmt.Sprintf("%x", i), want: i}) + } + for _, tt := range tests { + got, err := parseHexUint([]byte(tt.in)) + if tt.wantErr != "" { + if err != nil && !strings.Contains(err.Error(), tt.wantErr) { + t.Errorf("parseHexUint(%q) = %v, %v; want error %q", tt.in, got, err, tt.wantErr) + } + } else { + if err != nil || got != tt.want { + t.Errorf("parseHexUint(%q) = %v, %v; want %v", tt.in, got, err, tt.want) + } + } + } +} From a839315b9b9d64ba8efd522f49d7c805c2e22411 Mon Sep 17 00:00:00 2001 From: steve Date: Thu, 24 Aug 2023 21:28:39 +0800 
Subject: [PATCH 058/139] chore: mig sig 03 --- s3/services/auth/auth_handler.go | 100 ----------------- s3/services/auth/auth_type.go | 4 +- s3/services/auth/check_handler_auth.go | 85 +++++++-------- s3/services/auth/check_handler_auth_test.go | 6 +- s3/services/auth/signature-v2.go | 67 ++++++------ s3/services/auth/signature-v4-parser.go | 103 +++++++++--------- s3/services/auth/signature-v4-utils.go | 23 ++-- s3/services/auth/signature-v4.go | 57 +++++----- s3/services/auth/streaming-signature-v4.go | 33 +++--- .../auth/streaming-signature-v4_test.go | 2 +- 10 files changed, 187 insertions(+), 293 deletions(-) delete mode 100644 s3/services/auth/auth_handler.go diff --git a/s3/services/auth/auth_handler.go b/s3/services/auth/auth_handler.go deleted file mode 100644 index 06a27b938..000000000 --- a/s3/services/auth/auth_handler.go +++ /dev/null @@ -1,100 +0,0 @@ -package iam - -import ( - "github.com/yann-y/fds/internal/apierrors" - "github.com/yann-y/fds/internal/consts" - "github.com/yann-y/fds/internal/response" - "net/http" - "time" -) - -// SetAuthHandler to validate authorization header for the incoming request. -func SetAuthHandler(h http.Handler) http.Handler { - // handler for validating incoming authorization headers. - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - aType := GetRequestAuthType(r) - if aType == AuthTypeSigned || aType == AuthTypeSignedV2 || aType == AuthTypeStreamingSigned { - // Verify if date headers are set, if not reject the request - amzDate, errCode := parseAmzDateHeader(r) - if errCode != apierrors.ErrNone { - // All our internal APIs are sensitive towards Date - // header, for all requests where Date header is not - // present we will reject such clients. - response.WriteErrorResponse(w, r, errCode) - return - } - // Verify if the request date header is shifted by less than globalMaxSkewTime parameter in the past - // or in the future, reject request otherwise. 
- curTime := time.Now().UTC() - if curTime.Sub(amzDate) > consts.GlobalMaxSkewTime || amzDate.Sub(curTime) > consts.GlobalMaxSkewTime { - response.WriteErrorResponse(w, r, apierrors.ErrRequestTimeTooSkewed) - return - } - } - if isSupportedS3AuthType(aType) || aType == AuthTypeJWT || aType == AuthTypeSTS { - h.ServeHTTP(w, r) - return - } - response.WriteErrorResponse(w, r, apierrors.ErrSignatureVersionNotSupported) - }) -} - -// Supported amz date formats. -var amzDateFormats = []string{ - // Do not change this order, x-amz-date format is usually in - // iso8601Format rest are meant for relaxed handling of other - // odd SDKs that might be out there. - iso8601Format, - time.RFC1123, - time.RFC1123Z, - // Add new AMZ date formats here. -} - -// Supported Amz date headers. -var amzDateHeaders = []string{ - // Do not chane this order, x-amz-date value should be - // validated first. - "x-amz-date", - "date", -} - -// parseAmzDate - parses date string into supported amz date formats. -func parseAmzDate(amzDateStr string) (amzDate time.Time, apiErr apierrors.ErrorCode) { - for _, dateFormat := range amzDateFormats { - amzDate, err := time.Parse(dateFormat, amzDateStr) - if err == nil { - return amzDate, apierrors.ErrNone - } - } - return time.Time{}, apierrors.ErrMalformedDate -} - -// parseAmzDateHeader - parses supported amz date headers, in -// supported amz date formats. -func parseAmzDateHeader(req *http.Request) (time.Time, apierrors.ErrorCode) { - for _, amzDateHeader := range amzDateHeaders { - amzDateStr := req.Header.Get(amzDateHeader) - if amzDateStr != "" { - return parseAmzDate(amzDateStr) - } - } - // Date header missing. - return time.Time{}, apierrors.ErrMissingDateHeader -} - -// List of all support S3 auth types. 
-var supportedS3AuthTypes = map[AuthType]struct{}{ - AuthTypeAnonymous: {}, - AuthTypePresigned: {}, - AuthTypePresignedV2: {}, - AuthTypeSigned: {}, - AuthTypeSignedV2: {}, - AuthTypePostPolicy: {}, - AuthTypeStreamingSigned: {}, -} - -// Validate if the authType is valid and supported. -func isSupportedS3AuthType(aType AuthType) bool { - _, ok := supportedS3AuthTypes[aType] - return ok -} diff --git a/s3/services/auth/auth_type.go b/s3/services/auth/auth_type.go index fdf80bf36..936eb2aa0 100644 --- a/s3/services/auth/auth_type.go +++ b/s3/services/auth/auth_type.go @@ -1,7 +1,7 @@ -package iam +package auth import ( - "github.com/yann-y/fds/internal/consts" + "github.com/bittorrent/go-btfs/s3/consts" "net/http" "net/url" "strings" diff --git a/s3/services/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go index b4f443789..aa9e36a56 100644 --- a/s3/services/auth/check_handler_auth.go +++ b/s3/services/auth/check_handler_auth.go @@ -1,15 +1,14 @@ -package iam +package auth import ( "bytes" "context" "encoding/hex" - "github.com/yann-y/fds/internal/apierrors" - "github.com/yann-y/fds/internal/consts" - "github.com/yann-y/fds/internal/iam/auth" - "github.com/yann-y/fds/internal/iam/s3action" - "github.com/yann-y/fds/internal/uleveldb" - "github.com/yann-y/fds/internal/utils/hash" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/iam/auth" + "github.com/bittorrent/go-btfs/s3/iam/s3action" + "github.com/bittorrent/go-btfs/s3/uleveldb" + "github.com/bittorrent/go-btfs/s3/utils/hash" "github.com/yann-y/fds/pkg/etag" "io" "net/http" @@ -41,12 +40,12 @@ func NewAuthSys(db *uleveldb.ULevelDB, adminCred auth.Credentials) *AuthSys { // // returns APIErrorCode if any to be replied to the client. // Additionally, returns the accessKey used in the request, and if this request is by an admin. 
-func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request, action s3action.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err apierrors.ErrorCode) { +func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request, action s3action.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err responses.Error) { switch GetRequestAuthType(r) { case AuthTypeUnknown, AuthTypeStreamingSigned: - return cred, owner, apierrors.ErrSignatureVersionNotSupported + return cred, owner, responses.ErrSignatureVersionNotSupported case AuthTypePresignedV2, AuthTypeSignedV2: - if s3Err = s.IsReqAuthenticatedV2(r); s3Err != apierrors.ErrNone { + if s3Err = s.IsReqAuthenticatedV2(r); s3Err != nil { return cred, owner, s3Err } cred, owner, s3Err = s.getReqAccessKeyV2(r) @@ -56,12 +55,12 @@ func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Re case s3action.GetBucketLocationAction, s3action.ListAllMyBucketsAction: region = "" } - if s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3); s3Err != apierrors.ErrNone { + if s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3); s3Err != nil { return cred, owner, s3Err } cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) } - if s3Err != apierrors.ErrNone { + if s3Err != nil { return cred, owner, s3Err } // TODO: Why should a temporary user be replaced with the parent user's account? @@ -73,13 +72,13 @@ func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Re payload, err := io.ReadAll(io.LimitReader(r.Body, consts.MaxLocationConstraintSize)) if err != nil { log.Errorf("ReadAll err:%v", err) - return cred, owner, apierrors.ErrMalformedXML + return cred, owner, responses.ErrMalformedXML } // Populate payload to extract location constraint. 
r.Body = io.NopCloser(bytes.NewReader(payload)) if s.PolicySys.bmSys.HasBucket(ctx, bucketName) { - return cred, owner, apierrors.ErrBucketAlreadyExists + return cred, owner, responses.ErrBucketAlreadyExists } } @@ -97,7 +96,7 @@ func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Re ObjectName: objectName, }) { // Request is allowed return the appropriate access key. - return cred, owner, apierrors.ErrNone + return cred, owner, nil } if action == s3action.ListBucketVersionsAction { // In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission @@ -110,7 +109,7 @@ func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Re ObjectName: objectName, }) { // Request is allowed return the appropriate access key. - return cred, owner, apierrors.ErrNone + return cred, owner, nil } } @@ -125,26 +124,26 @@ func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Re IsOwner: owner, }) { // Request is allowed return the appropriate access key. - return cred, owner, apierrors.ErrNone + return cred, owner, nil } } else { if !s.PolicySys.bmSys.HasBucket(ctx, bucketName) { - return cred, owner, apierrors.ErrNoSuchBucket + return cred, owner, responses.ErrNoSuchBucket } } - return cred, owner, apierrors.ErrAccessDenied + return cred, owner, responses.ErrAccessDenied } // Verify if request has valid AWS Signature Version '2'. 
-func (s *AuthSys) IsReqAuthenticatedV2(r *http.Request) (s3Error apierrors.ErrorCode) { +func (s *AuthSys) IsReqAuthenticatedV2(r *http.Request) (s3Error responses.Error) { if isRequestSignatureV2(r) { return s.doesSignV2Match(r) } return s.doesPresignV2SignatureMatch(r) } -func (s *AuthSys) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { +func (s *AuthSys) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error responses.Error) { sha256sum := GetContentSha256Cksum(r, stype) switch { case IsRequestSignatureV4(r): @@ -152,18 +151,18 @@ func (s *AuthSys) ReqSignatureV4Verify(r *http.Request, region string, stype ser case isRequestPresignedSignatureV4(r): return s.doesPresignedSignatureMatch(sha256sum, r, region, stype) default: - return apierrors.ErrAccessDenied + return responses.ErrAccessDenied } } // IsReqAuthenticated Verify if request has valid AWS Signature Version '4'. -func (s *AuthSys) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error apierrors.ErrorCode) { - if errCode := s.ReqSignatureV4Verify(r, region, stype); errCode != apierrors.ErrNone { +func (s *AuthSys) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error responses.Error) { + if errCode := s.ReqSignatureV4Verify(r, region, stype); errCode != nil { return errCode } clientETag, err := etag.FromContentMD5(r.Header) if err != nil { - return apierrors.ErrInvalidDigest + return responses.ErrInvalidDigest } // Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned) @@ -173,13 +172,13 @@ func (s *AuthSys) IsReqAuthenticated(ctx context.Context, r *http.Request, regio if sha256Sum, ok := r.Form[consts.AmzContentSha256]; ok && len(sha256Sum) > 0 { contentSHA256, err = hex.DecodeString(sha256Sum[0]) if err != nil { - return apierrors.ErrContentSHA256Mismatch + return 
responses.ErrContentSHA256Mismatch } } } else if _, ok := r.Header[consts.AmzContentSha256]; !skipSHA256 && ok { contentSHA256, err = hex.DecodeString(r.Header.Get(consts.AmzContentSha256)) if err != nil || len(contentSHA256) == 0 { - return apierrors.ErrContentSHA256Mismatch + return responses.ErrContentSHA256Mismatch } } @@ -187,33 +186,33 @@ func (s *AuthSys) IsReqAuthenticated(ctx context.Context, r *http.Request, regio // The verification happens implicit during reading. reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1) if err != nil { - return apierrors.ErrInternalError + return responses.ErrInternalError } r.Body = reader - return apierrors.ErrNone + return nil } // ValidateAdminSignature validate admin Signature -func (s *AuthSys) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, map[string]interface{}, bool, apierrors.ErrorCode) { +func (s *AuthSys) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, map[string]interface{}, bool, responses.Error) { var cred auth.Credentials var owner bool - s3Err := apierrors.ErrAccessDenied + s3Err := responses.ErrAccessDenied if _, ok := r.Header[consts.AmzContentSha256]; ok && GetRequestAuthType(r) == AuthTypeSigned { // We only support admin credentials to access admin APIs. 
cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) - if s3Err != apierrors.ErrNone { + if s3Err != nil { return cred, nil, owner, s3Err } // we only support V4 (no presign) with auth body s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3) } - if s3Err != apierrors.ErrNone { + if s3Err != nil { return cred, nil, owner, s3Err } - return cred, nil, owner, apierrors.ErrNone + return cred, nil, owner, nil } func getConditions(r *http.Request, username string) map[string][]string { @@ -285,19 +284,19 @@ func getConditions(r *http.Request, username string) map[string][]string { // IsPutActionAllowed - check if PUT operation is allowed on the resource, this // call verifies bucket policies and IAM policies, supports multi user // checks etc. -func (s *AuthSys) IsPutActionAllowed(ctx context.Context, r *http.Request, action s3action.Action, bucketName, objectName string) (s3Err apierrors.ErrorCode) { +func (s *AuthSys) IsPutActionAllowed(ctx context.Context, r *http.Request, action s3action.Action, bucketName, objectName string) (s3Err responses.Error) { var cred auth.Credentials var owner bool switch GetRequestAuthType(r) { case AuthTypeUnknown: - return apierrors.ErrSignatureVersionNotSupported + return responses.ErrSignatureVersionNotSupported case AuthTypeSignedV2, AuthTypePresignedV2: cred, owner, s3Err = s.getReqAccessKeyV2(r) case AuthTypeStreamingSigned, AuthTypePresigned, AuthTypeSigned: region := "" cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) } - if s3Err != apierrors.ErrNone { + if s3Err != nil { return s3Err } @@ -307,7 +306,7 @@ func (s *AuthSys) IsPutActionAllowed(ctx context.Context, r *http.Request, actio if action == s3action.PutObjectRetentionAction && r.Header.Get(consts.AmzObjectLockMode) == "" && r.Header.Get(consts.AmzObjectLockRetainUntilDate) == "" { - return apierrors.ErrNone + return nil } // check bucket policy @@ -318,19 +317,19 @@ func (s *AuthSys) IsPutActionAllowed(ctx context.Context, r *http.Request, actio 
IsOwner: owner, ObjectName: objectName, }) { - return apierrors.ErrNone + return nil } if !s.PolicySys.bmSys.HasBucket(ctx, bucketName) { - return apierrors.ErrNoSuchBucket + return responses.ErrNoSuchBucket } - return apierrors.ErrAccessDenied + return responses.ErrAccessDenied } -func (s *AuthSys) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err apierrors.ErrorCode) { +func (s *AuthSys) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err responses.Error) { switch GetRequestAuthType(r) { case AuthTypeUnknown: - s3Err = apierrors.ErrSignatureVersionNotSupported + s3Err = responses.ErrSignatureVersionNotSupported case AuthTypeSignedV2, AuthTypePresignedV2: cred, owner, s3Err = s.getReqAccessKeyV2(r) case AuthTypeStreamingSigned, AuthTypePresigned, AuthTypeSigned: diff --git a/s3/services/auth/check_handler_auth_test.go b/s3/services/auth/check_handler_auth_test.go index f0f4678ef..868dcf00e 100644 --- a/s3/services/auth/check_handler_auth_test.go +++ b/s3/services/auth/check_handler_auth_test.go @@ -1,16 +1,16 @@ -package iam +package auth //func TestV2CheckRequestAuthType(t *testing.T) { // var aSys AuthSys // aSys.Init() // req := testsign.MustNewSignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t) // _, _, err := aSys.CheckRequestAuthTypeCredential(context.Background(), req, s3action.ListAllMyBucketsAction, "test", "testobject") -// fmt.Println(apierrors.GetAPIError(err)) +// fmt.Println(responses.GetAPIError(err)) //} //func TestV4CheckRequestAuthType(t *testing.T) { // var aSys AuthSys // aSys.Init() // req := testsign.MustNewSignedV4Request("GET", "http://127.0.0.1:9000", 0, nil, "test", "test", "s3", t) // _, _, err := aSys.CheckRequestAuthTypeCredential(context.Background(), req, s3action.ListAllMyBucketsAction, "test", "testobject") -// fmt.Println(apierrors.GetAPIError(err)) +// fmt.Println(responses.GetAPIError(err)) //} diff --git a/s3/services/auth/signature-v2.go b/s3/services/auth/signature-v2.go 
index a38b06341..7e9ec912b 100644 --- a/s3/services/auth/signature-v2.go +++ b/s3/services/auth/signature-v2.go @@ -15,7 +15,7 @@ // implied. See the License for the specific language governing // permissions and limitations under the License. -package iam +package auth import ( "crypto/hmac" @@ -23,9 +23,8 @@ import ( "crypto/subtle" "encoding/base64" "fmt" - "github.com/yann-y/fds/internal/apierrors" - "github.com/yann-y/fds/internal/consts" - "github.com/yann-y/fds/internal/iam/auth" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/iam/auth" "net" "net/http" "net/url" @@ -77,20 +76,20 @@ const ( // AWS S3 Signature V2 calculation rule is give here: // http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign -func (s *AuthSys) doesPolicySignatureV2Match(formValues http.Header) (auth.Credentials, apierrors.ErrorCode) { +func (s *AuthSys) doesPolicySignatureV2Match(formValues http.Header) (auth.Credentials, responses.Error) { accessKey := formValues.Get(consts.AmzAccessKeyID) r := &http.Request{Header: formValues} cred, _, s3Err := s.checkKeyValid(r, accessKey) - if s3Err != apierrors.ErrNone { + if s3Err != nil { return cred, s3Err } policy := formValues.Get("Policy") signature := formValues.Get(consts.AmzSignatureV2) if !compareSignatureV2(signature, calculateSignatureV2(policy, cred.SecretKey)) { - return cred, apierrors.ErrSignatureDoesNotMatch + return cred, responses.ErrSignatureDoesNotMatch } - return cred, apierrors.ErrNone + return cred, nil } // Escape encodedQuery string into unescaped list of query params, returns error @@ -110,8 +109,8 @@ func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) // doesPresignV2SignatureMatch - Verify query headers with presigned signature // - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth // -// returns apierrors.ErrNone if matches. S3 errors otherwise. 
-func (s *AuthSys) doesPresignV2SignatureMatch(r *http.Request) apierrors.ErrorCode { +// returns nil if matches. S3 errors otherwise. +func (s *AuthSys) doesPresignV2SignatureMatch(r *http.Request) responses.Error { // r.RequestURI will have raw encoded URI as sent by the client. tokens := strings.SplitN(r.RequestURI, "?", 2) encodedResource := tokens[0] @@ -131,14 +130,14 @@ func (s *AuthSys) doesPresignV2SignatureMatch(r *http.Request) apierrors.ErrorCo var unescapedQueries []string unescapedQueries, err = unescapeQueries(encodedQuery) if err != nil { - return apierrors.ErrInvalidQueryParams + return responses.ErrInvalidQueryParams } // Extract the necessary values from presigned query, construct a list of new filtered queries. for _, query := range unescapedQueries { keyval := strings.SplitN(query, "=", 2) if len(keyval) != 2 { - return apierrors.ErrInvalidQueryParams + return responses.ErrInvalidQueryParams } switch keyval[0] { case consts.AmzAccessKeyID: @@ -154,41 +153,41 @@ func (s *AuthSys) doesPresignV2SignatureMatch(r *http.Request) apierrors.ErrorCo // Invalid values returns error. if accessKey == "" || gotSignature == "" || expires == "" { - return apierrors.ErrInvalidQueryParams + return responses.ErrInvalidQueryParams } cred, _, s3Err := s.checkKeyValid(r, accessKey) - if s3Err != apierrors.ErrNone { + if s3Err != nil { return s3Err } // Make sure the request has not expired. expiresInt, err := strconv.ParseInt(expires, 10, 64) if err != nil { - return apierrors.ErrAuthorizationHeaderMalformed + return responses.ErrAuthorizationHeaderMalformed } // Check if the presigned URL has expired. 
if expiresInt < time.Now().UTC().Unix() { - return apierrors.ErrExpiredPresignRequest + return responses.ErrExpiredPresignRequest } encodedResource, err = getResource(encodedResource, r.Host) if err != nil { - return apierrors.ErrInvalidRequest + return responses.ErrInvalidRequest } expectedSignature := preSignatureV2(cred, r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires) if !compareSignatureV2(gotSignature, expectedSignature) { - return apierrors.ErrSignatureDoesNotMatch + return responses.ErrSignatureDoesNotMatch } r.Form.Del(consts.Expires) - return apierrors.ErrNone + return nil } -func (s *AuthSys) getReqAccessKeyV2(r *http.Request) (auth.Credentials, bool, apierrors.ErrorCode) { +func (s *AuthSys) getReqAccessKeyV2(r *http.Request) (auth.Credentials, bool, responses.Error) { if accessKey := r.Form.Get(consts.AmzAccessKeyID); accessKey != "" { return s.checkKeyValid(r, accessKey) } @@ -197,13 +196,13 @@ func (s *AuthSys) getReqAccessKeyV2(r *http.Request) (auth.Credentials, bool, ap // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature authFields := strings.Split(r.Header.Get(consts.Authorization), " ") if len(authFields) != 2 { - return auth.Credentials{}, false, apierrors.ErrMissingFields + return auth.Credentials{}, false, responses.ErrMissingFields } // Then will be splitting on ":", this will seprate `AWSAccessKeyId` and `Signature` string. keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":") if len(keySignFields) != 2 { - return auth.Credentials{}, false, apierrors.ErrMissingFields + return auth.Credentials{}, false, responses.ErrMissingFields } return s.checkKeyValid(r, keySignFields[0]) @@ -229,30 +228,30 @@ func (s *AuthSys) getReqAccessKeyV2(r *http.Request) (auth.Credentials, bool, ap // - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html // returns true if matches, false otherwise. 
if error is not nil then it is always false -func (s *AuthSys) validateV2AuthHeader(r *http.Request) (auth.Credentials, apierrors.ErrorCode) { +func (s *AuthSys) validateV2AuthHeader(r *http.Request) (auth.Credentials, responses.Error) { var cred auth.Credentials v2Auth := r.Header.Get(consts.Authorization) if v2Auth == "" { - return cred, apierrors.ErrAuthHeaderEmpty + return cred, responses.ErrAuthHeaderEmpty } // Verify if the header algorithm is supported or not. if !strings.HasPrefix(v2Auth, signV2Algorithm) { - return cred, apierrors.ErrSignatureVersionNotSupported + return cred, responses.ErrSignatureVersionNotSupported } cred, _, apiErr := s.getReqAccessKeyV2(r) - if apiErr != apierrors.ErrNone { + if apiErr != nil { return cred, apiErr } - return cred, apierrors.ErrNone + return cred, nil } -func (s *AuthSys) doesSignV2Match(r *http.Request) apierrors.ErrorCode { +func (s *AuthSys) doesSignV2Match(r *http.Request) responses.Error { v2Auth := r.Header.Get(consts.Authorization) cred, apiError := s.validateV2AuthHeader(r) - if apiError != apierrors.ErrNone { + if apiError != nil { return apiError } @@ -266,24 +265,24 @@ func (s *AuthSys) doesSignV2Match(r *http.Request) apierrors.ErrorCode { unescapedQueries, err := unescapeQueries(encodedQuery) if err != nil { - return apierrors.ErrInvalidQueryParams + return responses.ErrInvalidQueryParams } encodedResource, err = getResource(encodedResource, r.Host) if err != nil { - return apierrors.ErrInvalidRequest + return responses.ErrInvalidRequest } prefix := fmt.Sprintf("%s %s:", signV2Algorithm, cred.AccessKey) if !strings.HasPrefix(v2Auth, prefix) { - return apierrors.ErrSignatureDoesNotMatch + return responses.ErrSignatureDoesNotMatch } v2Auth = v2Auth[len(prefix):] expectedAuth := signatureV2(cred, r.Method, encodedResource, strings.Join(unescapedQueries, "&"), r.Header) if !compareSignatureV2(v2Auth, expectedAuth) { - return apierrors.ErrSignatureDoesNotMatch + return responses.ErrSignatureDoesNotMatch } - 
return apierrors.ErrNone + return nil } func calculateSignatureV2(stringToSign string, secret string) string { diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go index 733bcb446..766b9cb13 100644 --- a/s3/services/auth/signature-v4-parser.go +++ b/s3/services/auth/signature-v4-parser.go @@ -15,12 +15,11 @@ // implied. See the License for the specific language governing // permissions and limitations under the License. -package iam +package auth import ( - "github.com/yann-y/fds/internal/apierrors" - "github.com/yann-y/fds/internal/consts" - "github.com/yann-y/fds/internal/iam/auth" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/iam/auth" "net/http" "net/url" "strings" @@ -49,17 +48,17 @@ func (c credentialHeader) getScope() string { }, consts.SlashSeparator) } -func (s *AuthSys) GetReqAccessKeyV4(r *http.Request, region string, stype serviceType) (auth.Credentials, bool, apierrors.ErrorCode) { +func (s *AuthSys) GetReqAccessKeyV4(r *http.Request, region string, stype serviceType) (auth.Credentials, bool, responses.Error) { ch, s3Err := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) - if s3Err != apierrors.ErrNone { + if s3Err != nil { // Strip off the Algorithm prefix. v4Auth := strings.TrimPrefix(r.Header.Get("Authorization"), signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - return auth.Credentials{}, false, apierrors.ErrMissingFields + return auth.Credentials{}, false, responses.ErrMissingFields } ch, s3Err = parseCredentialHeader(authFields[0], region, stype) - if s3Err != apierrors.ErrNone { + if s3Err != nil { return auth.Credentials{}, false, s3Err } } @@ -72,21 +71,21 @@ func (s *AuthSys) GetReqAccessKeyV4(r *http.Request, region string, stype servic } // parse credentialHeader string into its structured form. 
-func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, aec apierrors.ErrorCode) { +func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, aec responses.Error) { creds := strings.SplitN(strings.TrimSpace(credElement), "=", 2) if len(creds) != 2 { - return ch, apierrors.ErrMissingFields + return ch, responses.ErrMissingFields } if creds[0] != "Credential" { - return ch, apierrors.ErrMissingCredTag + return ch, responses.ErrMissingCredTag } credElements := strings.Split(strings.TrimSpace(creds[1]), consts.SlashSeparator) if len(credElements) < 5 { - return ch, apierrors.ErrCredMalformed + return ch, responses.ErrCredMalformed } accessKey := strings.Join(credElements[:len(credElements)-4], consts.SlashSeparator) // The access key may contain one or more `/` if !auth.IsAccessKeyValid(accessKey) { - return ch, apierrors.ErrInvalidAccessKeyID + return ch, responses.ErrInvalidAccessKeyID } // Save access key id. cred := credentialHeader{ @@ -96,7 +95,7 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) var e error cred.scope.date, e = time.Parse(yyyymmdd, credElements[0]) if e != nil { - return ch, apierrors.ErrAuthorizationHeaderMalformed + return ch, responses.ErrAuthorizationHeaderMalformed } cred.scope.region = credElements[1] @@ -111,53 +110,53 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) } // Should validate region, only if region is set. 
if !isValidRegion(sRegion, region) { - return ch, apierrors.ErrAuthorizationHeaderMalformed + return ch, responses.ErrAuthorizationHeaderMalformed } if credElements[2] != string(stype) { switch stype { case ServiceSTS: - return ch, apierrors.ErrAuthorizationHeaderMalformed + return ch, responses.ErrAuthorizationHeaderMalformed } - return ch, apierrors.ErrAuthorizationHeaderMalformed + return ch, responses.ErrAuthorizationHeaderMalformed } cred.scope.service = credElements[2] if credElements[3] != "aws4_request" { - return ch, apierrors.ErrAuthorizationHeaderMalformed + return ch, responses.ErrAuthorizationHeaderMalformed } cred.scope.request = credElements[3] - return cred, apierrors.ErrNone + return cred, nil } // Parse signature from signature tag. -func parseSignature(signElement string) (string, apierrors.ErrorCode) { +func parseSignature(signElement string) (string, responses.Error) { signFields := strings.Split(strings.TrimSpace(signElement), "=") if len(signFields) != 2 { - return "", apierrors.ErrMissingFields + return "", responses.ErrMissingFields } if signFields[0] != "Signature" { - return "", apierrors.ErrMissingSignTag + return "", responses.ErrMissingSignTag } if signFields[1] == "" { - return "", apierrors.ErrMissingFields + return "", responses.ErrMissingFields } signature := signFields[1] - return signature, apierrors.ErrNone + return signature, nil } // Parse slice of signed headers from signed headers tag. 
-func parseSignedHeader(signedHdrElement string) ([]string, apierrors.ErrorCode) { +func parseSignedHeader(signedHdrElement string) ([]string, responses.Error) { signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") if len(signedHdrFields) != 2 { - return nil, apierrors.ErrMissingFields + return nil, responses.ErrMissingFields } if signedHdrFields[0] != "SignedHeaders" { - return nil, apierrors.ErrMissingSignHeadersTag + return nil, responses.ErrMissingSignHeadersTag } if signedHdrFields[1] == "" { - return nil, apierrors.ErrMissingFields + return nil, responses.ErrMissingFields } signedHeaders := strings.Split(signedHdrFields[1], ";") - return signedHeaders, apierrors.ErrNone + return signedHeaders, nil } // signValues data type represents structured form of AWS Signature V4 header. @@ -184,27 +183,27 @@ type preSignValues struct { // querystring += &X-Amz-Signature=signature // // verifies if any of the necessary query params are missing in the presigned request. -func doesV4PresignParamsExist(query url.Values) apierrors.ErrorCode { +func doesV4PresignParamsExist(query url.Values) responses.Error { v4PresignQueryParams := []string{consts.AmzAlgorithm, consts.AmzCredential, consts.AmzSignature, consts.AmzDate, consts.AmzSignedHeaders, consts.AmzExpires} for _, v4PresignQueryParam := range v4PresignQueryParams { if _, ok := query[v4PresignQueryParam]; !ok { - return apierrors.ErrInvalidQueryParams + return responses.ErrInvalidQueryParams } } - return apierrors.ErrNone + return nil } // Parses all the presigned signature values into separate elements. -func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, aec apierrors.ErrorCode) { +func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, aec responses.Error) { // verify whether the required query params exist. 
aec = doesV4PresignParamsExist(query) - if aec != apierrors.ErrNone { + if aec != nil { return psv, aec } // Verify if the query algorithm is supported or not. if query.Get(consts.AmzAlgorithm) != signV4Algorithm { - return psv, apierrors.ErrAuthorizationHeaderMalformed + return psv, responses.ErrAuthorizationHeaderMalformed } // Initialize signature version '4' structured header. @@ -212,7 +211,7 @@ func parsePreSignV4(query url.Values, region string, stype serviceType) (psv pre // Save credential. preSignV4Values.Credential, aec = parseCredentialHeader("Credential="+query.Get(consts.AmzCredential), region, stype) - if aec != apierrors.ErrNone { + if aec != nil { return psv, aec } @@ -220,45 +219,45 @@ func parsePreSignV4(query url.Values, region string, stype serviceType) (psv pre // Save date in native time.Time. preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get(consts.AmzDate)) if e != nil { - return psv, apierrors.ErrAuthorizationHeaderMalformed + return psv, responses.ErrAuthorizationHeaderMalformed } // Save expires in native time.Duration. preSignV4Values.Expires, e = time.ParseDuration(query.Get(consts.AmzExpires) + "s") if e != nil { - return psv, apierrors.ErrAuthorizationHeaderMalformed + return psv, responses.ErrAuthorizationHeaderMalformed } if preSignV4Values.Expires < 0 { - return psv, apierrors.ErrAuthorizationHeaderMalformed + return psv, responses.ErrAuthorizationHeaderMalformed } // Check if Expiry time is less than 7 days (value in seconds). if preSignV4Values.Expires.Seconds() > 604800 { - return psv, apierrors.ErrAuthorizationHeaderMalformed + return psv, responses.ErrAuthorizationHeaderMalformed } // Save signed headers. preSignV4Values.SignedHeaders, aec = parseSignedHeader("SignedHeaders=" + query.Get(consts.AmzSignedHeaders)) - if aec != apierrors.ErrNone { + if aec != nil { return psv, aec } // Save signature. 
preSignV4Values.Signature, aec = parseSignature("Signature=" + query.Get(consts.AmzSignature)) - if aec != apierrors.ErrNone { + if aec != nil { return psv, aec } // Return structed form of signature query string. - return preSignV4Values, apierrors.ErrNone + return preSignV4Values, nil } // Parses signature version '4' header of the following form. // // Authorization: algorithm Credential=accessKeyID/credScope, \ // SignedHeaders=signedHeaders, Signature=signature -func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, aec apierrors.ErrorCode) { +func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, aec responses.Error) { // credElement is fetched first to skip replacing the space in access key. credElement := strings.TrimPrefix(strings.Split(strings.TrimSpace(v4Auth), ",")[0], signV4Algorithm) // Replace all spaced strings, some clients can send spaced @@ -266,43 +265,43 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues // to make parsing easier. v4Auth = strings.ReplaceAll(v4Auth, " ", "") if v4Auth == "" { - return sv, apierrors.ErrAuthHeaderEmpty + return sv, responses.ErrAuthHeaderEmpty } // Verify if the header algorithm is supported or not. if !strings.HasPrefix(v4Auth, signV4Algorithm) { - return sv, apierrors.ErrSignatureVersionNotSupported + return sv, responses.ErrSignatureVersionNotSupported } // Strip off the Algorithm prefix. v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm) authFields := strings.Split(strings.TrimSpace(v4Auth), ",") if len(authFields) != 3 { - return sv, apierrors.ErrMissingFields + return sv, responses.ErrMissingFields } // Initialize signature version '4' structured header. signV4Values := signValues{} - var s3Err apierrors.ErrorCode + var s3Err responses.Error // Save credentail values. 
signV4Values.Credential, s3Err = parseCredentialHeader(strings.TrimSpace(credElement), region, stype) - if s3Err != apierrors.ErrNone { + if s3Err != nil { return sv, s3Err } // Save signed headers. signV4Values.SignedHeaders, s3Err = parseSignedHeader(authFields[1]) - if s3Err != apierrors.ErrNone { + if s3Err != nil { return sv, s3Err } // Save signature. signV4Values.Signature, s3Err = parseSignature(authFields[2]) - if s3Err != apierrors.ErrNone { + if s3Err != nil { return sv, s3Err } // Return the structure here. - return signV4Values, apierrors.ErrNone + return signV4Values, nil } diff --git a/s3/services/auth/signature-v4-utils.go b/s3/services/auth/signature-v4-utils.go index 005d7944a..6f91f91ba 100644 --- a/s3/services/auth/signature-v4-utils.go +++ b/s3/services/auth/signature-v4-utils.go @@ -15,15 +15,14 @@ // implied. See the License for the specific language governing // permissions and limitations under the License. -package iam +package auth import ( "bytes" "crypto/sha256" "encoding/hex" - "github.com/yann-y/fds/internal/apierrors" - "github.com/yann-y/fds/internal/consts" - "github.com/yann-y/fds/internal/iam/auth" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/iam/auth" "io" "io/ioutil" "net/http" @@ -139,7 +138,7 @@ func isValidRegion(reqRegion string, confRegion string) bool { // check if the access key is valid and recognized, additionally // also returns if the access key is owner/admin. -func (s *AuthSys) checkKeyValid(r *http.Request, accessKey string) (auth.Credentials, bool, apierrors.ErrorCode) { +func (s *AuthSys) checkKeyValid(r *http.Request, accessKey string) (auth.Credentials, bool, responses.Error) { cred := s.AdminCred if cred.AccessKey != accessKey { @@ -149,14 +148,14 @@ func (s *AuthSys) checkKeyValid(r *http.Request, accessKey string) (auth.Credent // Credentials will be invalid but and disabled // return a different error in such a scenario. 
if ucred.Status == auth.AccountOff { - return cred, false, apierrors.ErrAccessKeyDisabled + return cred, false, responses.ErrAccessKeyDisabled } - return cred, false, apierrors.ErrInvalidAccessKeyID + return cred, false, responses.ErrInvalidAccessKeyID } cred = ucred } owner := cred.AccessKey == s.AdminCred.AccessKey - return cred, owner, apierrors.ErrNone + return cred, owner, nil } func contains(slice interface{}, elem interface{}) bool { @@ -172,13 +171,13 @@ func contains(slice interface{}, elem interface{}) bool { } // extractSignedHeaders extract signed headers from Authorization header -func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, apierrors.ErrorCode) { +func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, responses.Error) { reqHeaders := r.Header reqQueries := r.Form // find whether "host" is part of list of signed headers. // if not return ErrUnsignedHeaders. "host" is mandatory. if !contains(signedHeaders, "host") { - return nil, apierrors.ErrUnsignedHeaders + return nil, responses.ErrUnsignedHeaders } extractedSignedHeaders := make(http.Header) for _, header := range signedHeaders { @@ -228,8 +227,8 @@ func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, // calculation to be compatible with such clients. extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) default: - return nil, apierrors.ErrUnsignedHeaders + return nil, responses.ErrUnsignedHeaders } } - return extractedSignedHeaders, apierrors.ErrNone + return extractedSignedHeaders, nil } diff --git a/s3/services/auth/signature-v4.go b/s3/services/auth/signature-v4.go index 1cb4df480..9808bef5d 100644 --- a/s3/services/auth/signature-v4.go +++ b/s3/services/auth/signature-v4.go @@ -15,14 +15,13 @@ // implied. See the License for the specific language governing // permissions and limitations under the License. 
-package iam +package auth import ( "crypto/subtle" - "github.com/yann-y/fds/internal/apierrors" - "github.com/yann-y/fds/internal/consts" - "github.com/yann-y/fds/internal/iam/set" - "github.com/yann-y/fds/internal/utils" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/iam/set" + "github.com/bittorrent/go-btfs/s3/utils" "net/http" "net/url" "strconv" @@ -57,36 +56,36 @@ func compareSignatureV4(sig1, sig2 string) bool { // doesPresignedSignatureMatch - Verify query headers with presigned signature // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html // -// returns apierrors.ErrNone if the signature matches. -func (s *AuthSys) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) apierrors.ErrorCode { +// returns nil if the signature matches. +func (s *AuthSys) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) responses.Error { // Copy request req := *r // Parse request query string. pSignValues, err := parsePreSignV4(req.Form, region, stype) - if err != apierrors.ErrNone { + if err != nil { return err } cred, _, s3Err := s.checkKeyValid(r, pSignValues.Credential.accessKey) - if s3Err != apierrors.ErrNone { + if s3Err != nil { return s3Err } // Extract all the signed headers along with its values. extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r) - if errCode != apierrors.ErrNone { + if errCode != nil { return errCode } // If the host which signed the request is slightly ahead in time (by less than MaxSkewTime) the // request should still be allowed. 
if pSignValues.Date.After(time.Now().UTC().Add(consts.MaxSkewTime)) { - return apierrors.ErrRequestNotReadyYet + return responses.ErrRequestNotReadyYet } if time.Now().UTC().Sub(pSignValues.Date) > pSignValues.Expires { - return apierrors.ErrExpiredPresignRequest + return responses.ErrExpiredPresignRequest } // Save the date and expires. @@ -136,27 +135,27 @@ func (s *AuthSys) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Verify if date query is same. if req.Form.Get(consts.AmzDate) != query.Get(consts.AmzDate) { - return apierrors.ErrSignatureDoesNotMatch + return responses.ErrSignatureDoesNotMatch } // Verify if expires query is same. if req.Form.Get(consts.AmzExpires) != query.Get(consts.AmzExpires) { - return apierrors.ErrSignatureDoesNotMatch + return responses.ErrSignatureDoesNotMatch } // Verify if signed headers query is same. if req.Form.Get(consts.AmzSignedHeaders) != query.Get(consts.AmzSignedHeaders) { - return apierrors.ErrSignatureDoesNotMatch + return responses.ErrSignatureDoesNotMatch } // Verify if credential query is same. if req.Form.Get(consts.AmzCredential) != query.Get(consts.AmzCredential) { - return apierrors.ErrSignatureDoesNotMatch + return responses.ErrSignatureDoesNotMatch } // Verify if sha256 payload query is same. if clntHashedPayload != "" && clntHashedPayload != query.Get(consts.AmzContentSha256) { - return apierrors.ErrContentSHA256Mismatch + return responses.ErrContentSHA256Mismatch } // Verify if security token is correct. if token != "" && subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 { - return apierrors.ErrInvalidToken + return responses.ErrInvalidToken } // Verify finally if signature is same. @@ -176,16 +175,16 @@ func (s *AuthSys) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Verify signature. 
if !compareSignatureV4(req.Form.Get(consts.AmzSignature), newSignature) { - return apierrors.ErrSignatureDoesNotMatch + return responses.ErrSignatureDoesNotMatch } - return apierrors.ErrNone + return nil } // doesSignatureMatch - Verify authorization header with calculated header in accordance with // - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html // -// returns apierrors.ErrNone if signature matches. -func (s *AuthSys) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) apierrors.ErrorCode { +// returns nil if signature matches. +func (s *AuthSys) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) responses.Error { // Copy request. req := *r @@ -194,18 +193,18 @@ func (s *AuthSys) doesSignatureMatch(hashedPayload string, r *http.Request, regi // Parse signature version '4' header. signV4Values, err := parseSignV4(v4Auth, region, stype) - if err != apierrors.ErrNone { + if err != nil { return err } // Extract all the signed headers along with its values. extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) - if errCode != apierrors.ErrNone { + if errCode != nil { return errCode } cred, _, s3Err := s.checkKeyValid(r, signV4Values.Credential.accessKey) - if s3Err != apierrors.ErrNone { + if s3Err != nil { return s3Err } @@ -213,14 +212,14 @@ func (s *AuthSys) doesSignatureMatch(hashedPayload string, r *http.Request, regi var date string if date = req.Header.Get(consts.AmzDate); date == "" { if date = r.Header.Get(consts.Date); date == "" { - return apierrors.ErrMissingDateHeader + return responses.ErrMissingDateHeader } } // Parse date header. t, e := time.Parse(iso8601Format, date) if e != nil { - return apierrors.ErrAuthorizationHeaderMalformed + return responses.ErrAuthorizationHeaderMalformed } // Query string. 
@@ -241,11 +240,11 @@ func (s *AuthSys) doesSignatureMatch(hashedPayload string, r *http.Request, regi // Verify if signature match. if !compareSignatureV4(newSignature, signV4Values.Signature) { - return apierrors.ErrSignatureDoesNotMatch + return responses.ErrSignatureDoesNotMatch } // Return error none. - return apierrors.ErrNone + return nil } // getScope generate a string of a specific date, an AWS region, and a service. diff --git a/s3/services/auth/streaming-signature-v4.go b/s3/services/auth/streaming-signature-v4.go index 694ab68bd..10caa3cee 100644 --- a/s3/services/auth/streaming-signature-v4.go +++ b/s3/services/auth/streaming-signature-v4.go @@ -18,7 +18,7 @@ // Package cmd This file implements helper functions to validate Streaming AWS // Signature Version '4' authorization header. -package iam +package auth import ( "bufio" @@ -26,17 +26,16 @@ import ( "crypto/sha256" "encoding/hex" "errors" - "github.com/yann-y/fds/internal/apierrors" - "github.com/yann-y/fds/internal/utils" + "github.com/bittorrent/go-btfs/s3/utils" "hash" "io" "net/http" "strings" "time" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/iam/auth" humanize "github.com/dustin/go-humanize" - "github.com/yann-y/fds/internal/consts" - "github.com/yann-y/fds/internal/iam/auth" ) // Streaming AWS Signature Version '4' constants. @@ -74,7 +73,7 @@ func getChunkSignature(cred auth.Credentials, seedSignature string, region strin // // returns signature, error otherwise if the signature mismatches or any other // error while parsing and validating. -func (s *AuthSys) CalculateSeedSignature(r *http.Request) (cred auth.Credentials, signature string, region string, date time.Time, errCode apierrors.ErrorCode) { +func (s *AuthSys) CalculateSeedSignature(r *http.Request) (cred auth.Credentials, signature string, region string, date time.Time, errCode responses.Error) { // Copy request. 
req := *r @@ -83,7 +82,7 @@ func (s *AuthSys) CalculateSeedSignature(r *http.Request) (cred auth.Credentials // Parse signature version '4' header. signV4Values, errCode := parseSignV4(v4Auth, "", ServiceS3) - if errCode != apierrors.ErrNone { + if errCode != nil { return cred, "", "", time.Time{}, errCode } @@ -92,17 +91,17 @@ func (s *AuthSys) CalculateSeedSignature(r *http.Request) (cred auth.Credentials // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' if payload != req.Header.Get(consts.AmzContentSha256) { - return cred, "", "", time.Time{}, apierrors.ErrContentSHA256Mismatch + return cred, "", "", time.Time{}, responses.ErrContentSHA256Mismatch } // Extract all the signed headers along with its values. extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) - if errCode != apierrors.ErrNone { + if errCode != nil { return cred, "", "", time.Time{}, errCode } cred, _, errCode = s.checkKeyValid(r, signV4Values.Credential.accessKey) - if errCode != apierrors.ErrNone { + if errCode != nil { return cred, "", "", time.Time{}, errCode } @@ -113,7 +112,7 @@ func (s *AuthSys) CalculateSeedSignature(r *http.Request) (cred auth.Credentials var dateStr string if dateStr = req.Header.Get("x-amz-date"); dateStr == "" { if dateStr = r.Header.Get("Date"); dateStr == "" { - return cred, "", "", time.Time{}, apierrors.ErrMissingDateHeader + return cred, "", "", time.Time{}, responses.ErrMissingDateHeader } } @@ -121,7 +120,7 @@ func (s *AuthSys) CalculateSeedSignature(r *http.Request) (cred auth.Credentials var err error date, err = time.Parse(iso8601Format, dateStr) if err != nil { - return cred, "", "", time.Time{}, apierrors.ErrMalformedDate + return cred, "", "", time.Time{}, responses.ErrMalformedDate } // Query string. @@ -141,11 +140,11 @@ func (s *AuthSys) CalculateSeedSignature(r *http.Request) (cred auth.Credentials // Verify if signature match. 
if !compareSignatureV4(newSignature, signV4Values.Signature) { - return cred, "", "", time.Time{}, apierrors.ErrSignatureDoesNotMatch + return cred, "", "", time.Time{}, responses.ErrSignatureDoesNotMatch } // Return caculated signature. - return cred, newSignature, region, date, apierrors.ErrNone + return cred, newSignature, region, date, nil } const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB @@ -165,9 +164,9 @@ var errChunkTooBig = errors.New("chunk too big: choose chunk size <= 16MiB") // // NewChunkedReader is not needed by normal applications. The http package // automatically decodes chunking when reading response bodies. -func NewSignV4ChunkedReader(req *http.Request, s *AuthSys) (io.ReadCloser, apierrors.ErrorCode) { +func NewSignV4ChunkedReader(req *http.Request, s *AuthSys) (io.ReadCloser, responses.Error) { cred, seedSignature, region, seedDate, errCode := s.CalculateSeedSignature(req) - if errCode != apierrors.ErrNone { + if errCode != nil { return nil, errCode } @@ -179,7 +178,7 @@ func NewSignV4ChunkedReader(req *http.Request, s *AuthSys) (io.ReadCloser, apier region: region, chunkSHA256Writer: sha256.New(), buffer: make([]byte, 64*1024), - }, apierrors.ErrNone + }, nil } // Represents the overall state that is required for decoding a diff --git a/s3/services/auth/streaming-signature-v4_test.go b/s3/services/auth/streaming-signature-v4_test.go index 24cf58c81..e6a985753 100644 --- a/s3/services/auth/streaming-signature-v4_test.go +++ b/s3/services/auth/streaming-signature-v4_test.go @@ -16,7 +16,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . 
-package iam +package auth import ( "bufio" From 691e63c7bea4b28196d7306cab402fc7e6fb706b Mon Sep 17 00:00:00 2001 From: steve Date: Thu, 24 Aug 2023 21:34:20 +0800 Subject: [PATCH 059/139] chore: mig sig 04 --- s3/services/auth/auth_type.go | 1 - s3/services/auth/check_handler_auth.go | 37 ++++++--------------- s3/services/auth/check_handler_auth_test.go | 4 +-- s3/services/auth/signature-v2.go | 11 +++--- s3/services/auth/signature-v4-parser.go | 17 +++++----- s3/services/auth/signature-v4-utils.go | 5 +-- s3/services/auth/signature-v4.go | 5 +-- s3/services/auth/streaming-signature-v4.go | 4 +-- 8 files changed, 35 insertions(+), 49 deletions(-) diff --git a/s3/services/auth/auth_type.go b/s3/services/auth/auth_type.go index 936eb2aa0..54d948637 100644 --- a/s3/services/auth/auth_type.go +++ b/s3/services/auth/auth_type.go @@ -70,7 +70,6 @@ func GetRequestAuthType(r *http.Request) AuthType { var err error r.Form, err = url.ParseQuery(r.URL.RawQuery) if err != nil { - log.Infof("parse query failed, query: %s, error: %v", r.URL.RawQuery, err) return AuthTypeUnknown } } diff --git a/s3/services/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go index aa9e36a56..685d5ded9 100644 --- a/s3/services/auth/check_handler_auth.go +++ b/s3/services/auth/check_handler_auth.go @@ -4,12 +4,12 @@ import ( "bytes" "context" "encoding/hex" + s3action "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/etag" "github.com/bittorrent/go-btfs/s3/iam/auth" - "github.com/bittorrent/go-btfs/s3/iam/s3action" - "github.com/bittorrent/go-btfs/s3/uleveldb" + "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/utils/hash" - "github.com/yann-y/fds/pkg/etag" "io" "net/http" "net/url" @@ -17,22 +17,6 @@ import ( "time" ) -// AuthSys auth and sign system -type AuthSys struct { - Iam *IdentityAMSys - PolicySys *iPolicySys - AdminCred auth.Credentials -} - -// NewAuthSys new an AuthSys 
-func NewAuthSys(db *uleveldb.ULevelDB, adminCred auth.Credentials) *AuthSys { - return &AuthSys{ - Iam: NewIdentityAMSys(db), - PolicySys: newIPolicySys(db), - AdminCred: adminCred, - } -} - // CheckRequestAuthTypeCredential Check request auth type verifies the incoming http request // - validates the request signature // - validates the policy action if anonymous tests bucket policies if any, @@ -40,7 +24,7 @@ func NewAuthSys(db *uleveldb.ULevelDB, adminCred auth.Credentials) *AuthSys { // // returns APIErrorCode if any to be replied to the client. // Additionally, returns the accessKey used in the request, and if this request is by an admin. -func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request, action s3action.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err responses.Error) { +func (s *service) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request, action s3action.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err *responses.Error) { switch GetRequestAuthType(r) { case AuthTypeUnknown, AuthTypeStreamingSigned: return cred, owner, responses.ErrSignatureVersionNotSupported @@ -71,7 +55,6 @@ func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Re // To extract region from XML in request body, get copy of request body. payload, err := io.ReadAll(io.LimitReader(r.Body, consts.MaxLocationConstraintSize)) if err != nil { - log.Errorf("ReadAll err:%v", err) return cred, owner, responses.ErrMalformedXML } @@ -136,14 +119,14 @@ func (s *AuthSys) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Re } // Verify if request has valid AWS Signature Version '2'. 
-func (s *AuthSys) IsReqAuthenticatedV2(r *http.Request) (s3Error responses.Error) { +func (s *service) IsReqAuthenticatedV2(r *http.Request) (s3Error *responses.Error) { if isRequestSignatureV2(r) { return s.doesSignV2Match(r) } return s.doesPresignV2SignatureMatch(r) } -func (s *AuthSys) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error responses.Error) { +func (s *service) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error *responses.Error) { sha256sum := GetContentSha256Cksum(r, stype) switch { case IsRequestSignatureV4(r): @@ -156,7 +139,7 @@ func (s *AuthSys) ReqSignatureV4Verify(r *http.Request, region string, stype ser } // IsReqAuthenticated Verify if request has valid AWS Signature Version '4'. -func (s *AuthSys) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error responses.Error) { +func (s *service) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error *responses.Error) { if errCode := s.ReqSignatureV4Verify(r, region, stype); errCode != nil { return errCode } @@ -193,7 +176,7 @@ func (s *AuthSys) IsReqAuthenticated(ctx context.Context, r *http.Request, regio } // ValidateAdminSignature validate admin Signature -func (s *AuthSys) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, map[string]interface{}, bool, responses.Error) { +func (s *service) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, map[string]interface{}, bool, *responses.Error) { var cred auth.Credentials var owner bool s3Err := responses.ErrAccessDenied @@ -284,7 +267,7 @@ func getConditions(r *http.Request, username string) map[string][]string { // IsPutActionAllowed - check if PUT operation is allowed on the resource, this // call verifies bucket policies and IAM policies, supports multi user // checks etc. 
-func (s *AuthSys) IsPutActionAllowed(ctx context.Context, r *http.Request, action s3action.Action, bucketName, objectName string) (s3Err responses.Error) { +func (s *service) IsPutActionAllowed(ctx context.Context, r *http.Request, action s3action.Action, bucketName, objectName string) (s3Err *responses.Error) { var cred auth.Credentials var owner bool switch GetRequestAuthType(r) { @@ -326,7 +309,7 @@ func (s *AuthSys) IsPutActionAllowed(ctx context.Context, r *http.Request, actio return responses.ErrAccessDenied } -func (s *AuthSys) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err responses.Error) { +func (s *service) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err *responses.Error) { switch GetRequestAuthType(r) { case AuthTypeUnknown: s3Err = responses.ErrSignatureVersionNotSupported diff --git a/s3/services/auth/check_handler_auth_test.go b/s3/services/auth/check_handler_auth_test.go index 868dcf00e..cbc58c980 100644 --- a/s3/services/auth/check_handler_auth_test.go +++ b/s3/services/auth/check_handler_auth_test.go @@ -1,14 +1,14 @@ package auth //func TestV2CheckRequestAuthType(t *testing.T) { -// var aSys AuthSys +// var aSys service // aSys.Init() // req := testsign.MustNewSignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t) // _, _, err := aSys.CheckRequestAuthTypeCredential(context.Background(), req, s3action.ListAllMyBucketsAction, "test", "testobject") // fmt.Println(responses.GetAPIError(err)) //} //func TestV4CheckRequestAuthType(t *testing.T) { -// var aSys AuthSys +// var aSys service // aSys.Init() // req := testsign.MustNewSignedV4Request("GET", "http://127.0.0.1:9000", 0, nil, "test", "test", "s3", t) // _, _, err := aSys.CheckRequestAuthTypeCredential(context.Background(), req, s3action.ListAllMyBucketsAction, "test", "testobject") diff --git a/s3/services/auth/signature-v2.go b/s3/services/auth/signature-v2.go index 7e9ec912b..80f7b11d3 100644 --- a/s3/services/auth/signature-v2.go +++ 
b/s3/services/auth/signature-v2.go @@ -25,6 +25,7 @@ import ( "fmt" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/iam/auth" + "github.com/bittorrent/go-btfs/s3/responses" "net" "net/http" "net/url" @@ -76,7 +77,7 @@ const ( // AWS S3 Signature V2 calculation rule is give here: // http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign -func (s *AuthSys) doesPolicySignatureV2Match(formValues http.Header) (auth.Credentials, responses.Error) { +func (s *service) doesPolicySignatureV2Match(formValues http.Header) (auth.Credentials, *responses.Error) { accessKey := formValues.Get(consts.AmzAccessKeyID) r := &http.Request{Header: formValues} @@ -110,7 +111,7 @@ func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) // - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth // // returns nil if matches. S3 errors otherwise. -func (s *AuthSys) doesPresignV2SignatureMatch(r *http.Request) responses.Error { +func (s *service) doesPresignV2SignatureMatch(r *http.Request) *responses.Error { // r.RequestURI will have raw encoded URI as sent by the client. tokens := strings.SplitN(r.RequestURI, "?", 2) encodedResource := tokens[0] @@ -187,7 +188,7 @@ func (s *AuthSys) doesPresignV2SignatureMatch(r *http.Request) responses.Error { return nil } -func (s *AuthSys) getReqAccessKeyV2(r *http.Request) (auth.Credentials, bool, responses.Error) { +func (s *service) getReqAccessKeyV2(r *http.Request) (auth.Credentials, bool, *responses.Error) { if accessKey := r.Form.Get(consts.AmzAccessKeyID); accessKey != "" { return s.checkKeyValid(r, accessKey) } @@ -228,7 +229,7 @@ func (s *AuthSys) getReqAccessKeyV2(r *http.Request) (auth.Credentials, bool, re // - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html // returns true if matches, false otherwise. 
if error is not nil then it is always false -func (s *AuthSys) validateV2AuthHeader(r *http.Request) (auth.Credentials, responses.Error) { +func (s *service) validateV2AuthHeader(r *http.Request) (auth.Credentials, *responses.Error) { var cred auth.Credentials v2Auth := r.Header.Get(consts.Authorization) if v2Auth == "" { @@ -248,7 +249,7 @@ func (s *AuthSys) validateV2AuthHeader(r *http.Request) (auth.Credentials, respo return cred, nil } -func (s *AuthSys) doesSignV2Match(r *http.Request) responses.Error { +func (s *service) doesSignV2Match(r *http.Request) *responses.Error { v2Auth := r.Header.Get(consts.Authorization) cred, apiError := s.validateV2AuthHeader(r) if apiError != nil { diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/auth/signature-v4-parser.go index 766b9cb13..46dc7c68f 100644 --- a/s3/services/auth/signature-v4-parser.go +++ b/s3/services/auth/signature-v4-parser.go @@ -20,6 +20,7 @@ package auth import ( "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/iam/auth" + "github.com/bittorrent/go-btfs/s3/responses" "net/http" "net/url" "strings" @@ -48,7 +49,7 @@ func (c credentialHeader) getScope() string { }, consts.SlashSeparator) } -func (s *AuthSys) GetReqAccessKeyV4(r *http.Request, region string, stype serviceType) (auth.Credentials, bool, responses.Error) { +func (s *service) GetReqAccessKeyV4(r *http.Request, region string, stype serviceType) (auth.Credentials, bool, *responses.Error) { ch, s3Err := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) if s3Err != nil { // Strip off the Algorithm prefix. @@ -71,7 +72,7 @@ func (s *AuthSys) GetReqAccessKeyV4(r *http.Request, region string, stype servic } // parse credentialHeader string into its structured form. 
-func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, aec responses.Error) { +func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, aec *responses.Error) { creds := strings.SplitN(strings.TrimSpace(credElement), "=", 2) if len(creds) != 2 { return ch, responses.ErrMissingFields @@ -128,7 +129,7 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) } // Parse signature from signature tag. -func parseSignature(signElement string) (string, responses.Error) { +func parseSignature(signElement string) (string, *responses.Error) { signFields := strings.Split(strings.TrimSpace(signElement), "=") if len(signFields) != 2 { return "", responses.ErrMissingFields @@ -144,7 +145,7 @@ func parseSignature(signElement string) (string, responses.Error) { } // Parse slice of signed headers from signed headers tag. -func parseSignedHeader(signedHdrElement string) ([]string, responses.Error) { +func parseSignedHeader(signedHdrElement string) ([]string, *responses.Error) { signedHdrFields := strings.Split(strings.TrimSpace(signedHdrElement), "=") if len(signedHdrFields) != 2 { return nil, responses.ErrMissingFields @@ -183,7 +184,7 @@ type preSignValues struct { // querystring += &X-Amz-Signature=signature // // verifies if any of the necessary query params are missing in the presigned request. -func doesV4PresignParamsExist(query url.Values) responses.Error { +func doesV4PresignParamsExist(query url.Values) *responses.Error { v4PresignQueryParams := []string{consts.AmzAlgorithm, consts.AmzCredential, consts.AmzSignature, consts.AmzDate, consts.AmzSignedHeaders, consts.AmzExpires} for _, v4PresignQueryParam := range v4PresignQueryParams { if _, ok := query[v4PresignQueryParam]; !ok { @@ -194,7 +195,7 @@ func doesV4PresignParamsExist(query url.Values) responses.Error { } // Parses all the presigned signature values into separate elements. 
-func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, aec responses.Error) { +func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, aec *responses.Error) { // verify whether the required query params exist. aec = doesV4PresignParamsExist(query) if aec != nil { @@ -257,7 +258,7 @@ func parsePreSignV4(query url.Values, region string, stype serviceType) (psv pre // // Authorization: algorithm Credential=accessKeyID/credScope, \ // SignedHeaders=signedHeaders, Signature=signature -func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, aec responses.Error) { +func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, aec *responses.Error) { // credElement is fetched first to skip replacing the space in access key. credElement := strings.TrimPrefix(strings.Split(strings.TrimSpace(v4Auth), ",")[0], signV4Algorithm) // Replace all spaced strings, some clients can send spaced @@ -283,7 +284,7 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues // Initialize signature version '4' structured header. signV4Values := signValues{} - var s3Err responses.Error + var s3Err *responses.Error // Save credentail values. 
signV4Values.Credential, s3Err = parseCredentialHeader(strings.TrimSpace(credElement), region, stype) if s3Err != nil { diff --git a/s3/services/auth/signature-v4-utils.go b/s3/services/auth/signature-v4-utils.go index 6f91f91ba..6539b7b0e 100644 --- a/s3/services/auth/signature-v4-utils.go +++ b/s3/services/auth/signature-v4-utils.go @@ -23,6 +23,7 @@ import ( "encoding/hex" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/iam/auth" + "github.com/bittorrent/go-btfs/s3/responses" "io" "io/ioutil" "net/http" @@ -138,7 +139,7 @@ func isValidRegion(reqRegion string, confRegion string) bool { // check if the access key is valid and recognized, additionally // also returns if the access key is owner/admin. -func (s *AuthSys) checkKeyValid(r *http.Request, accessKey string) (auth.Credentials, bool, responses.Error) { +func (s *service) checkKeyValid(r *http.Request, accessKey string) (auth.Credentials, bool, *responses.Error) { cred := s.AdminCred if cred.AccessKey != accessKey { @@ -171,7 +172,7 @@ func contains(slice interface{}, elem interface{}) bool { } // extractSignedHeaders extract signed headers from Authorization header -func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, responses.Error) { +func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, *responses.Error) { reqHeaders := r.Header reqQueries := r.Form // find whether "host" is part of list of signed headers. 
diff --git a/s3/services/auth/signature-v4.go b/s3/services/auth/signature-v4.go index 9808bef5d..0557f64f1 100644 --- a/s3/services/auth/signature-v4.go +++ b/s3/services/auth/signature-v4.go @@ -21,6 +21,7 @@ import ( "crypto/subtle" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/iam/set" + "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/utils" "net/http" "net/url" @@ -57,7 +58,7 @@ func compareSignatureV4(sig1, sig2 string) bool { // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html // // returns nil if the signature matches. -func (s *AuthSys) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) responses.Error { +func (s *service) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) *responses.Error { // Copy request req := *r @@ -184,7 +185,7 @@ func (s *AuthSys) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html // // returns nil if signature matches. -func (s *AuthSys) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) responses.Error { +func (s *service) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) *responses.Error { // Copy request. req := *r diff --git a/s3/services/auth/streaming-signature-v4.go b/s3/services/auth/streaming-signature-v4.go index 10caa3cee..a98cf6025 100644 --- a/s3/services/auth/streaming-signature-v4.go +++ b/s3/services/auth/streaming-signature-v4.go @@ -73,7 +73,7 @@ func getChunkSignature(cred auth.Credentials, seedSignature string, region strin // // returns signature, error otherwise if the signature mismatches or any other // error while parsing and validating. 
-func (s *AuthSys) CalculateSeedSignature(r *http.Request) (cred auth.Credentials, signature string, region string, date time.Time, errCode responses.Error) { +func (s *service) CalculateSeedSignature(r *http.Request) (cred auth.Credentials, signature string, region string, date time.Time, errCode *responses.Error) { // Copy request. req := *r @@ -164,7 +164,7 @@ var errChunkTooBig = errors.New("chunk too big: choose chunk size <= 16MiB") // // NewChunkedReader is not needed by normal applications. The http package // automatically decodes chunking when reading response bodies. -func NewSignV4ChunkedReader(req *http.Request, s *AuthSys) (io.ReadCloser, responses.Error) { +func NewSignV4ChunkedReader(req *http.Request, s *service) (io.ReadCloser, *responses.Error) { cred, seedSignature, region, seedDate, errCode := s.CalculateSeedSignature(req) if errCode != nil { return nil, errCode From ed1a50362cff641ad2973738e956ff59f809621d Mon Sep 17 00:00:00 2001 From: steve Date: Fri, 25 Aug 2023 00:33:05 +0800 Subject: [PATCH 060/139] optmize: refractor codes --- s3/cctx/access_key.go | 7 +- s3/handlers/handlers.go | 95 ++-- s3/responses/errors.go | 5 + s3/responses/writers_common.go | 1 - s3/server.go | 15 +- s3/services/auth/check_handler_auth.go | 323 ------------- s3/services/auth/check_handler_auth_test.go | 16 - s3/services/auth/proto.go | 11 - s3/services/auth/service.go | 26 -- s3/services/auth/signature-v2.go | 429 ------------------ .../auth/streaming-signature-v4_test.go | 198 -------- s3/services/bucket/proto.go | 3 +- s3/services/bucket/service.go | 5 +- s3/services/sign/proto.go | 11 + s3/services/sign/service.go | 49 ++ s3/services/{auth => sign}/service_options.go | 2 +- .../signature-auth-type.go} | 6 +- .../{auth => sign}/signature-v4-parser.go | 58 +-- .../{auth => sign}/signature-v4-utils.go | 45 +- s3/services/{auth => sign}/signature-v4.go | 102 +++-- s3/services/sign/signature.go | 70 +++ .../{auth => sign}/streaming-signature-v4.go | 104 ++--- 22 
files changed, 343 insertions(+), 1238 deletions(-) delete mode 100644 s3/services/auth/check_handler_auth.go delete mode 100644 s3/services/auth/check_handler_auth_test.go delete mode 100644 s3/services/auth/proto.go delete mode 100644 s3/services/auth/service.go delete mode 100644 s3/services/auth/signature-v2.go delete mode 100644 s3/services/auth/streaming-signature-v4_test.go create mode 100644 s3/services/sign/proto.go create mode 100644 s3/services/sign/service.go rename s3/services/{auth => sign}/service_options.go (71%) rename s3/services/{auth/auth_type.go => sign/signature-auth-type.go} (96%) rename s3/services/{auth => sign}/signature-v4-parser.go (83%) rename s3/services/{auth => sign}/signature-v4-utils.go (87%) rename s3/services/{auth => sign}/signature-v4.go (77%) create mode 100644 s3/services/sign/signature.go rename s3/services/{auth => sign}/streaming-signature-v4.go (77%) diff --git a/s3/cctx/access_key.go b/s3/cctx/access_key.go index 9e4b600dd..ca4b1b5b8 100644 --- a/s3/cctx/access_key.go +++ b/s3/cctx/access_key.go @@ -1,17 +1,16 @@ package cctx import ( - "github.com/bittorrent/go-btfs/s3/services/accesskey" "net/http" ) -func SetAccessKey(r *http.Request, ack *accesskey.AccessKey) { +func SetAccessKey(r *http.Request, ack string) { set(r, keyOfAccessKey, ack) return } -func GetAccessKey(r *http.Request) (ack *accesskey.AccessKey) { +func GetAccessKey(r *http.Request) (ack string) { v := get(r, keyOfAccessKey) - ack, _ = v.(*accesskey.AccessKey) + ack, _ = v.(string) return } diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index d6cae47fd..03b19a32e 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -5,12 +5,14 @@ import ( "errors" "fmt" "github.com/bittorrent/go-btfs/s3/cctx" + "github.com/bittorrent/go-btfs/s3/ctxmu" "github.com/bittorrent/go-btfs/s3/etag" "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/services/auth" + 
"github.com/bittorrent/go-btfs/s3/services/accesskey" "github.com/bittorrent/go-btfs/s3/services/bucket" "github.com/bittorrent/go-btfs/s3/services/cors" + "github.com/bittorrent/go-btfs/s3/services/sign" "net/http" "runtime" @@ -23,21 +25,20 @@ import ( var _ Handlerser = (*Handlers)(nil) type Handlers struct { - corsSvc cors.Service - authSvc auth.Service - bucketSvc bucket.Service + corsvc cors.Service + acksvc accesskey.Service + sigsvc sign.Service + bucsvc bucket.Service + nslock ctxmu.MultiCtxRWLocker } -func NewHandlers( - corsSvc cors.Service, - authSvc auth.Service, - bucketSvc bucket.Service, - options ...Option, -) (handlers *Handlers) { +func NewHandlers(corsvc cors.Service, acksvc accesskey.Service, sigsvc sign.Service, bucsvc bucket.Service, options ...Option) (handlers *Handlers) { handlers = &Handlers{ - corsSvc: corsSvc, - authSvc: authSvc, - bucketSvc: bucketSvc, + corsvc: corsvc, + acksvc: acksvc, + sigsvc: sigsvc, + bucsvc: bucsvc, + nslock: ctxmu.NewDefaultMultiCtxRWMutex(), } for _, option := range options { option(handlers) @@ -47,33 +48,50 @@ func NewHandlers( func (h *Handlers) Cors(handler http.Handler) http.Handler { return rscors.New(rscors.Options{ - AllowedOrigins: h.corsSvc.GetAllowOrigins(), - AllowedMethods: h.corsSvc.GetAllowMethods(), - AllowedHeaders: h.corsSvc.GetAllowHeaders(), - ExposedHeaders: h.corsSvc.GetAllowHeaders(), + AllowedOrigins: h.corsvc.GetAllowOrigins(), + AllowedMethods: h.corsvc.GetAllowMethods(), + AllowedHeaders: h.corsvc.GetAllowHeaders(), + ExposedHeaders: h.corsvc.GetAllowHeaders(), AllowCredentials: true, }).Handler(handler) } func (h *Handlers) Log(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Printf("[REQ] %4s | %s\n", r.Method, r.URL) + fmt.Printf("[REQ] <%4s> | %s\n", r.Method, r.URL) handler.ServeHTTP(w, r) hname, herr := cctx.GetHandleInf(r) - fmt.Printf("[RSP] %4s | %s | %s | %v\n", r.Method, r.URL, hname, herr) + 
fmt.Printf("[RSP] <%4s> | %s | %s | %v\n", r.Method, r.URL, hname, herr) }) } func (h *Handlers) Auth(handler http.Handler) http.Handler { + h.sigsvc.SetSecretGetter(func(key string) (secret string, exists, enable bool, err error) { + ack, err := h.acksvc.Get(key) + if errors.Is(err, accesskey.ErrNotFound) { + exists = false + enable = true + err = nil + return + } + if err != nil { + return + } + exists = true + secret = ack.Secret + enable = ack.Enable + return + }) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var err error + var err *responses.Error defer func() { if err != nil { cctx.SetHandleInf(r, fnName(), err) } }() - ack, err := h.authSvc.VerifySignature(r.Context(), r) + ack, err := h.sigsvc.VerifyRequestSignature(r) if err != nil { responses.WriteErrorResponse(w, r, err) return @@ -97,15 +115,10 @@ func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { return } + // issue: lock for check ctx := r.Context() ack := cctx.GetAccessKey(r) - err = h.bucketSvc.CheckACL(ack, "", s3action.PutBucketAclAction) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrAccessDenied) - return - } - if err = s3utils.CheckValidBucketNameStrict(req.Bucket); err != nil { responses.WriteErrorResponse(w, r, responses.ErrInvalidBucketName) return @@ -117,13 +130,13 @@ func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { return } - if ok := h.bucketSvc.HasBucket(ctx, req.Bucket); ok { + if ok := h.bucsvc.HasBucket(ctx, req.Bucket); ok { err = responses.ErrBucketAlreadyExists responses.WriteErrorResponseHeadersOnly(w, r, responses.ErrBucketAlreadyExists) return } - err = h.bucketSvc.CreateBucket(ctx, req.Bucket, req.Region, cctx.GetAccessKey(r).Key, req.ACL) + err = h.bucsvc.CreateBucket(ctx, req.Bucket, req.Region, ack, req.ACL) if err != nil { responses.WriteErrorResponse(w, r, responses.ErrInternalError) return @@ -153,7 +166,7 @@ func (h *Handlers) HeadBucketHandler(w 
http.ResponseWriter, r *http.Request) { ack := cctx.GetAccessKey(r) - err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) + err = h.bucsvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) if errors.Is(err, bucket.ErrNotFound) { responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return @@ -181,14 +194,14 @@ func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ack := cctx.GetAccessKey(r) - err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) + err = h.bucsvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) if err != nil { responses.WriteErrorResponse(w, r, err) return } //todo check all errors. - err = h.bucketSvc.DeleteBucket(ctx, req.Bucket) + err = h.bucsvc.DeleteBucket(ctx, req.Bucket) if err != nil { responses.WriteErrorResponse(w, r, err) return @@ -204,13 +217,13 @@ func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { }() ack := cctx.GetAccessKey(r) - if ack.Key == "" { + if ack == "" { responses.WriteErrorResponse(w, r, responses.ErrNoAccessKey) return } //todo check all errors - bucketMetas, err := h.bucketSvc.GetAllBucketsOfUser(ack.Key) + bucketMetas, err := h.bucsvc.GetAllBucketsOfUser(ack) if err != nil { responses.WriteErrorResponse(w, r, err) return @@ -234,25 +247,25 @@ func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ack := cctx.GetAccessKey(r) - if !h.bucketSvc.HasBucket(ctx, req.Bucket) { + if !h.bucsvc.HasBucket(ctx, req.Bucket) { responses.WriteErrorResponseHeadersOnly(w, r, responses.ErrNoSuchBucket) return } - err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.GetBucketAclAction) + err = h.bucsvc.CheckACL(ack, req.Bucket, s3action.GetBucketAclAction) if err != nil { responses.WriteErrorResponse(w, r, err) return } //todo check all errors - acl, err := h.bucketSvc.GetBucketAcl(ctx, req.Bucket) + acl, err := h.bucsvc.GetBucketAcl(ctx, req.Bucket) if err != nil { 
responses.WriteErrorResponse(w, r, err) return } - responses.WriteGetBucketAclResponse(w, r, ack.Key, acl) + responses.WriteGetBucketAclResponse(w, r, ack, acl) } func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { @@ -270,7 +283,7 @@ func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ack := cctx.GetAccessKey(r) - err = h.bucketSvc.CheckACL(ack, req.Bucket, s3action.PutBucketAclAction) + err = h.bucsvc.CheckACL(ack, req.Bucket, s3action.PutBucketAclAction) if err != nil { responses.WriteErrorResponse(w, r, err) return @@ -282,7 +295,7 @@ func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { } //todo check all errors - err = h.bucketSvc.UpdateBucketAcl(ctx, req.Bucket, req.ACL) + err = h.bucsvc.UpdateBucketAcl(ctx, req.Bucket, req.ACL) if err != nil { responses.WriteErrorResponse(w, r, err) return @@ -338,7 +351,7 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ack := cctx.GetAccessKey(r) - err = h.bucketSvc.CheckACL(ack, buc, s3action.PutObjectAction) + err = h.bucsvc.CheckACL(ack, buc, s3action.PutObjectAction) if err != nil { responses.WriteErrorResponse(w, r, err) return diff --git a/s3/responses/errors.go b/s3/responses/errors.go index 1a9858924..037e57d43 100644 --- a/s3/responses/errors.go +++ b/s3/responses/errors.go @@ -264,6 +264,11 @@ var ( description: "The authorization header is malformed; the region is wrong; expecting 'us-east-1'.", httpStatusCode: http.StatusBadRequest, } + ErrMalformedDate = &Error{ // todo + code: "ErrMalformedDate", + description: "ErrMalformedDate", + httpStatusCode: http.StatusBadRequest, + } ErrMalformedPOSTRequest = &Error{ code: "MalformedPOSTRequest", description: "The body of your POST request is not well-formed multipart/form-data.", diff --git a/s3/responses/writers_common.go b/s3/responses/writers_common.go index 6cb70c879..85ee58cff 100644 --- 
a/s3/responses/writers_common.go +++ b/s3/responses/writers_common.go @@ -101,7 +101,6 @@ func WriteXMLResponse(w http.ResponseWriter, r *http.Request, statusCode int, re } func writeResponse(w http.ResponseWriter, r *http.Request, statusCode int, response []byte, mType mimeType) { - fmt.Println(r.Method, r.URL, statusCode) setCommonHeaders(w, r) if response != nil { w.Header().Set(consts.ContentLength, strconv.Itoa(len(response))) diff --git a/s3/server.go b/s3/server.go index b762abc50..7c1c49a15 100644 --- a/s3/server.go +++ b/s3/server.go @@ -6,9 +6,9 @@ import ( "github.com/bittorrent/go-btfs/s3/routers" "github.com/bittorrent/go-btfs/s3/server" "github.com/bittorrent/go-btfs/s3/services/accesskey" - "github.com/bittorrent/go-btfs/s3/services/auth" "github.com/bittorrent/go-btfs/s3/services/bucket" "github.com/bittorrent/go-btfs/s3/services/cors" + "github.com/bittorrent/go-btfs/s3/services/sign" "github.com/bittorrent/go-btfs/transaction/storage" "sync" ) @@ -23,7 +23,6 @@ func GetProviders(storageStore storage.StateStorer) *providers.Providers { sstore := providers.NewStorageStateStoreProxy(storageStore) fstore := providers.NewFileStore() ps = providers.NewProviders(sstore, fstore) - }) return ps } @@ -32,14 +31,14 @@ func NewServer(storageStore storage.StateStorer) *server.Server { _ = GetProviders(storageStore) // services - corsSvc := cors.NewService() - accessKeySvc := accesskey.NewService(ps) - authSvc := auth.NewService(ps, accessKeySvc) - bucketSvc := bucket.NewService(ps) - bucketSvc.SetEmptyBucket(bucketSvc.EmptyBucket) //todo EmptyBucket参数后续更新为object对象 + corsvc := cors.NewService() + acksvc := accesskey.NewService(ps) + sigsvc := sign.NewService() + bucsvc := bucket.NewService(ps) + bucsvc.SetEmptyBucket(bucsvc.EmptyBucket) //todo EmptyBucket参数后续更新为object对象 // handlers - hs := handlers.NewHandlers(corsSvc, authSvc, bucketSvc) + hs := handlers.NewHandlers(corsvc, acksvc, sigsvc, bucsvc) // routers rs := routers.NewRouters(hs) diff --git 
a/s3/services/auth/check_handler_auth.go b/s3/services/auth/check_handler_auth.go deleted file mode 100644 index 685d5ded9..000000000 --- a/s3/services/auth/check_handler_auth.go +++ /dev/null @@ -1,323 +0,0 @@ -package auth - -import ( - "bytes" - "context" - "encoding/hex" - s3action "github.com/bittorrent/go-btfs/s3/action" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/etag" - "github.com/bittorrent/go-btfs/s3/iam/auth" - "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/utils/hash" - "io" - "net/http" - "net/url" - "strconv" - "time" -) - -// CheckRequestAuthTypeCredential Check request auth type verifies the incoming http request -// - validates the request signature -// - validates the policy action if anonymous tests bucket policies if any, -// for authenticated requests validates IAM policies. -// -// returns APIErrorCode if any to be replied to the client. -// Additionally, returns the accessKey used in the request, and if this request is by an admin. 
-func (s *service) CheckRequestAuthTypeCredential(ctx context.Context, r *http.Request, action s3action.Action, bucketName, objectName string) (cred auth.Credentials, owner bool, s3Err *responses.Error) { - switch GetRequestAuthType(r) { - case AuthTypeUnknown, AuthTypeStreamingSigned: - return cred, owner, responses.ErrSignatureVersionNotSupported - case AuthTypePresignedV2, AuthTypeSignedV2: - if s3Err = s.IsReqAuthenticatedV2(r); s3Err != nil { - return cred, owner, s3Err - } - cred, owner, s3Err = s.getReqAccessKeyV2(r) - case AuthTypeSigned, AuthTypePresigned: - region := "" - switch action { - case s3action.GetBucketLocationAction, s3action.ListAllMyBucketsAction: - region = "" - } - if s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3); s3Err != nil { - return cred, owner, s3Err - } - cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) - } - if s3Err != nil { - return cred, owner, s3Err - } - // TODO: Why should a temporary user be replaced with the parent user's account? - //if cred.IsTemp() { - // cred, _ = s.Iam.GetUser(ctx, cred.ParentUser) - //} - if action == s3action.CreateBucketAction { - // To extract region from XML in request body, get copy of request body. - payload, err := io.ReadAll(io.LimitReader(r.Body, consts.MaxLocationConstraintSize)) - if err != nil { - return cred, owner, responses.ErrMalformedXML - } - - // Populate payload to extract location constraint. - r.Body = io.NopCloser(bytes.NewReader(payload)) - if s.PolicySys.bmSys.HasBucket(ctx, bucketName) { - return cred, owner, responses.ErrBucketAlreadyExists - } - } - - // Anonymous user - if cred.AccessKey == "" { - owner = false - } - - // check bucket policy - if s.PolicySys.isAllowed(ctx, auth.Args{ - AccountName: cred.AccessKey, - Action: action, - BucketName: bucketName, - IsOwner: owner, - ObjectName: objectName, - }) { - // Request is allowed return the appropriate access key. 
- return cred, owner, nil - } - if action == s3action.ListBucketVersionsAction { - // In AWS S3 s3:ListBucket permission is same as s3:ListBucketVersions permission - // verify as a fallback. - if s.PolicySys.isAllowed(ctx, auth.Args{ - AccountName: cred.AccessKey, - Action: s3action.ListBucketAction, - BucketName: bucketName, - IsOwner: owner, - ObjectName: objectName, - }) { - // Request is allowed return the appropriate access key. - return cred, owner, nil - } - } - - // check user policy - if bucketName == "" || action == s3action.CreateBucketAction { - if s.Iam.IsAllowed(r.Context(), auth.Args{ - AccountName: cred.AccessKey, - Action: action, - BucketName: bucketName, - Conditions: getConditions(r, cred.AccessKey), - ObjectName: objectName, - IsOwner: owner, - }) { - // Request is allowed return the appropriate access key. - return cred, owner, nil - } - } else { - if !s.PolicySys.bmSys.HasBucket(ctx, bucketName) { - return cred, owner, responses.ErrNoSuchBucket - } - } - - return cred, owner, responses.ErrAccessDenied -} - -// Verify if request has valid AWS Signature Version '2'. -func (s *service) IsReqAuthenticatedV2(r *http.Request) (s3Error *responses.Error) { - if isRequestSignatureV2(r) { - return s.doesSignV2Match(r) - } - return s.doesPresignV2SignatureMatch(r) -} - -func (s *service) ReqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error *responses.Error) { - sha256sum := GetContentSha256Cksum(r, stype) - switch { - case IsRequestSignatureV4(r): - return s.doesSignatureMatch(sha256sum, r, region, stype) - case isRequestPresignedSignatureV4(r): - return s.doesPresignedSignatureMatch(sha256sum, r, region, stype) - default: - return responses.ErrAccessDenied - } -} - -// IsReqAuthenticated Verify if request has valid AWS Signature Version '4'. 
-func (s *service) IsReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error *responses.Error) { - if errCode := s.ReqSignatureV4Verify(r, region, stype); errCode != nil { - return errCode - } - clientETag, err := etag.FromContentMD5(r.Header) - if err != nil { - return responses.ErrInvalidDigest - } - - // Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned) - // Do not verify 'X-Amz-Content-Sha256' if skipSHA256. - var contentSHA256 []byte - if skipSHA256 := SkipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) { - if sha256Sum, ok := r.Form[consts.AmzContentSha256]; ok && len(sha256Sum) > 0 { - contentSHA256, err = hex.DecodeString(sha256Sum[0]) - if err != nil { - return responses.ErrContentSHA256Mismatch - } - } - } else if _, ok := r.Header[consts.AmzContentSha256]; !skipSHA256 && ok { - contentSHA256, err = hex.DecodeString(r.Header.Get(consts.AmzContentSha256)) - if err != nil || len(contentSHA256) == 0 { - return responses.ErrContentSHA256Mismatch - } - } - - // Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present. - // The verification happens implicit during reading. - reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1) - if err != nil { - return responses.ErrInternalError - } - r.Body = reader - return nil -} - -// ValidateAdminSignature validate admin Signature -func (s *service) ValidateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, map[string]interface{}, bool, *responses.Error) { - var cred auth.Credentials - var owner bool - s3Err := responses.ErrAccessDenied - if _, ok := r.Header[consts.AmzContentSha256]; ok && - GetRequestAuthType(r) == AuthTypeSigned { - // We only support admin credentials to access admin APIs. 
- cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) - if s3Err != nil { - return cred, nil, owner, s3Err - } - - // we only support V4 (no presign) with auth body - s3Err = s.IsReqAuthenticated(ctx, r, region, ServiceS3) - } - if s3Err != nil { - return cred, nil, owner, s3Err - } - - return cred, nil, owner, nil -} - -func getConditions(r *http.Request, username string) map[string][]string { - currTime := time.Now().UTC() - - principalType := "Anonymous" - if username != "" { - principalType = "User" - } - - at := GetRequestAuthType(r) - var signatureVersion string - switch at { - case AuthTypeSignedV2, AuthTypePresignedV2: - signatureVersion = signV2Algorithm - case AuthTypeSigned, AuthTypePresigned, AuthTypeStreamingSigned, AuthTypePostPolicy: - signatureVersion = signV4Algorithm - } - - var authtype string - switch at { - case AuthTypePresignedV2, AuthTypePresigned: - authtype = "REST-QUERY-STRING" - case AuthTypeSignedV2, AuthTypeSigned, AuthTypeStreamingSigned: - authtype = "REST-HEADER" - case AuthTypePostPolicy: - authtype = "POST" - } - - args := map[string][]string{ - "CurrentTime": {currTime.Format(time.RFC3339)}, - "EpochTime": {strconv.FormatInt(currTime.Unix(), 10)}, - "SecureTransport": {strconv.FormatBool(r.TLS != nil)}, - "UserAgent": {r.UserAgent()}, - "Referer": {r.Referer()}, - "principaltype": {principalType}, - "userid": {username}, - "username": {username}, - "signatureversion": {signatureVersion}, - "authType": {authtype}, - } - - cloneHeader := r.Header.Clone() - - for key, values := range cloneHeader { - if existingValues, found := args[key]; found { - args[key] = append(existingValues, values...) - } else { - args[key] = values - } - } - - cloneURLValues := make(url.Values, len(r.Form)) - for k, v := range r.Form { - cloneURLValues[k] = v - } - - for key, values := range cloneURLValues { - if existingValues, found := args[key]; found { - args[key] = append(existingValues, values...) 
- } else { - args[key] = values - } - } - - return args -} - -// IsPutActionAllowed - check if PUT operation is allowed on the resource, this -// call verifies bucket policies and IAM policies, supports multi user -// checks etc. -func (s *service) IsPutActionAllowed(ctx context.Context, r *http.Request, action s3action.Action, bucketName, objectName string) (s3Err *responses.Error) { - var cred auth.Credentials - var owner bool - switch GetRequestAuthType(r) { - case AuthTypeUnknown: - return responses.ErrSignatureVersionNotSupported - case AuthTypeSignedV2, AuthTypePresignedV2: - cred, owner, s3Err = s.getReqAccessKeyV2(r) - case AuthTypeStreamingSigned, AuthTypePresigned, AuthTypeSigned: - region := "" - cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) - } - if s3Err != nil { - return s3Err - } - - // Do not check for PutObjectRetentionAction permission, - // if mode and retain until date are not set. - // Can happen when bucket has default lock config set - if action == s3action.PutObjectRetentionAction && - r.Header.Get(consts.AmzObjectLockMode) == "" && - r.Header.Get(consts.AmzObjectLockRetainUntilDate) == "" { - return nil - } - - // check bucket policy - if s.PolicySys.isAllowed(ctx, auth.Args{ - AccountName: cred.AccessKey, - Action: action, - BucketName: bucketName, - IsOwner: owner, - ObjectName: objectName, - }) { - return nil - } - - if !s.PolicySys.bmSys.HasBucket(ctx, bucketName) { - return responses.ErrNoSuchBucket - } - return responses.ErrAccessDenied -} - -func (s *service) GetCredential(r *http.Request) (cred auth.Credentials, owner bool, s3Err *responses.Error) { - switch GetRequestAuthType(r) { - case AuthTypeUnknown: - s3Err = responses.ErrSignatureVersionNotSupported - case AuthTypeSignedV2, AuthTypePresignedV2: - cred, owner, s3Err = s.getReqAccessKeyV2(r) - case AuthTypeStreamingSigned, AuthTypePresigned, AuthTypeSigned: - region := "" - cred, owner, s3Err = s.GetReqAccessKeyV4(r, region, ServiceS3) - } - return -} diff 
--git a/s3/services/auth/check_handler_auth_test.go b/s3/services/auth/check_handler_auth_test.go deleted file mode 100644 index cbc58c980..000000000 --- a/s3/services/auth/check_handler_auth_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package auth - -//func TestV2CheckRequestAuthType(t *testing.T) { -// var aSys service -// aSys.Init() -// req := testsign.MustNewSignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t) -// _, _, err := aSys.CheckRequestAuthTypeCredential(context.Background(), req, s3action.ListAllMyBucketsAction, "test", "testobject") -// fmt.Println(responses.GetAPIError(err)) -//} -//func TestV4CheckRequestAuthType(t *testing.T) { -// var aSys service -// aSys.Init() -// req := testsign.MustNewSignedV4Request("GET", "http://127.0.0.1:9000", 0, nil, "test", "test", "s3", t) -// _, _, err := aSys.CheckRequestAuthTypeCredential(context.Background(), req, s3action.ListAllMyBucketsAction, "test", "testobject") -// fmt.Println(responses.GetAPIError(err)) -//} diff --git a/s3/services/auth/proto.go b/s3/services/auth/proto.go deleted file mode 100644 index 36d7fbb03..000000000 --- a/s3/services/auth/proto.go +++ /dev/null @@ -1,11 +0,0 @@ -package auth - -import ( - "context" - "github.com/bittorrent/go-btfs/s3/services/accesskey" - "net/http" -) - -type Service interface { - VerifySignature(ctx context.Context, r *http.Request) (ack *accesskey.AccessKey, err error) -} diff --git a/s3/services/auth/service.go b/s3/services/auth/service.go deleted file mode 100644 index dfc040024..000000000 --- a/s3/services/auth/service.go +++ /dev/null @@ -1,26 +0,0 @@ -package auth - -import ( - "context" - "github.com/bittorrent/go-btfs/s3/providers" - "github.com/bittorrent/go-btfs/s3/services/accesskey" - "net/http" -) - -var _ Service = (*service)(nil) - -type service struct { - getSecret func(key string) (secret string, disabled bool, err error) -} - -func NewService(providers providers.Providerser, accessKeySvc accesskey.Service, options ...Option) Service { - svc 
:= &service{} - for _, option := range options { - option(svc) - } - return svc -} - -func (s *service) VerifySignature(ctx context.Context, r *http.Request) (accessKeyRecord *accesskey.AccessKey, err error) { - return s.CheckRequestAuthTypeCredential(ctx, r) -} diff --git a/s3/services/auth/signature-v2.go b/s3/services/auth/signature-v2.go deleted file mode 100644 index 80f7b11d3..000000000 --- a/s3/services/auth/signature-v2.go +++ /dev/null @@ -1,429 +0,0 @@ -/* - * The following code tries to reverse engineer the Amazon S3 APIs, - * and is mostly copied from minio implementation. - */ - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - -package auth - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/subtle" - "encoding/base64" - "fmt" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/iam/auth" - "github.com/bittorrent/go-btfs/s3/responses" - "net" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" -) - -// Whitelist resource list that will be used in query string for signature-V2 calculation. -// -// This list should be kept alphabetically sorted, do not hastily edit. 
-var resourceList = []string{ - "acl", - "cors", - "delete", - "encryption", - "legal-hold", - "lifecycle", - "location", - "logging", - "notification", - "partNumber", - "policy", - "requestPayment", - "response-cache-control", - "response-content-disposition", - "response-content-encoding", - "response-content-language", - "response-content-type", - "response-expires", - "retention", - "select", - "select-type", - "tagging", - "torrent", - "uploadId", - "uploads", - "versionId", - "versioning", - "versions", - "website", -} - -// Signature and API related constants. -const ( - signV2Algorithm = "AWS" -) - -// AWS S3 Signature V2 calculation rule is give here: -// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign -func (s *service) doesPolicySignatureV2Match(formValues http.Header) (auth.Credentials, *responses.Error) { - accessKey := formValues.Get(consts.AmzAccessKeyID) - - r := &http.Request{Header: formValues} - cred, _, s3Err := s.checkKeyValid(r, accessKey) - if s3Err != nil { - return cred, s3Err - } - policy := formValues.Get("Policy") - signature := formValues.Get(consts.AmzSignatureV2) - if !compareSignatureV2(signature, calculateSignatureV2(policy, cred.SecretKey)) { - return cred, responses.ErrSignatureDoesNotMatch - } - return cred, nil -} - -// Escape encodedQuery string into unescaped list of query params, returns error -// if any while unescaping the values. 
-func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) { - for _, query := range strings.Split(encodedQuery, "&") { - var unescapedQuery string - unescapedQuery, err = url.QueryUnescape(query) - if err != nil { - return nil, err - } - unescapedQueries = append(unescapedQueries, unescapedQuery) - } - return unescapedQueries, nil -} - -// doesPresignV2SignatureMatch - Verify query headers with presigned signature -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationQueryStringAuth -// -// returns nil if matches. S3 errors otherwise. -func (s *service) doesPresignV2SignatureMatch(r *http.Request) *responses.Error { - // r.RequestURI will have raw encoded URI as sent by the client. - tokens := strings.SplitN(r.RequestURI, "?", 2) - encodedResource := tokens[0] - encodedQuery := "" - if len(tokens) == 2 { - encodedQuery = tokens[1] - } - - var ( - filteredQueries []string - gotSignature string - expires string - accessKey string - err error - ) - - var unescapedQueries []string - unescapedQueries, err = unescapeQueries(encodedQuery) - if err != nil { - return responses.ErrInvalidQueryParams - } - - // Extract the necessary values from presigned query, construct a list of new filtered queries. - for _, query := range unescapedQueries { - keyval := strings.SplitN(query, "=", 2) - if len(keyval) != 2 { - return responses.ErrInvalidQueryParams - } - switch keyval[0] { - case consts.AmzAccessKeyID: - accessKey = keyval[1] - case consts.AmzSignatureV2: - gotSignature = keyval[1] - case consts.Expires: - expires = keyval[1] - default: - filteredQueries = append(filteredQueries, query) - } - } - - // Invalid values returns error. - if accessKey == "" || gotSignature == "" || expires == "" { - return responses.ErrInvalidQueryParams - } - - cred, _, s3Err := s.checkKeyValid(r, accessKey) - if s3Err != nil { - return s3Err - } - - // Make sure the request has not expired. 
- expiresInt, err := strconv.ParseInt(expires, 10, 64) - if err != nil { - return responses.ErrAuthorizationHeaderMalformed - } - - // Check if the presigned URL has expired. - if expiresInt < time.Now().UTC().Unix() { - return responses.ErrExpiredPresignRequest - } - - encodedResource, err = getResource(encodedResource, r.Host) - if err != nil { - return responses.ErrInvalidRequest - } - - expectedSignature := preSignatureV2(cred, r.Method, encodedResource, strings.Join(filteredQueries, "&"), r.Header, expires) - if !compareSignatureV2(gotSignature, expectedSignature) { - return responses.ErrSignatureDoesNotMatch - } - - r.Form.Del(consts.Expires) - - return nil -} - -func (s *service) getReqAccessKeyV2(r *http.Request) (auth.Credentials, bool, *responses.Error) { - if accessKey := r.Form.Get(consts.AmzAccessKeyID); accessKey != "" { - return s.checkKeyValid(r, accessKey) - } - - // below is V2 Signed Auth header format, splitting on `space` (after the `AWS` string). - // Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature - authFields := strings.Split(r.Header.Get(consts.Authorization), " ") - if len(authFields) != 2 { - return auth.Credentials{}, false, responses.ErrMissingFields - } - - // Then will be splitting on ":", this will seprate `AWSAccessKeyId` and `Signature` string. - keySignFields := strings.Split(strings.TrimSpace(authFields[1]), ":") - if len(keySignFields) != 2 { - return auth.Credentials{}, false, responses.ErrMissingFields - } - - return s.checkKeyValid(r, keySignFields[0]) -} - -// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; -// Signature = Base64( HMAC-SHA1( YourSecretKey, UTF-8-Encoding-Of( StringToSign ) ) ); -// -// StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Date + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; -// -// CanonicalizedResource = [ consts.SlashSeparator + Bucket ] + -// + -// [ subresource, if present. 
For example "?acl", "?location", "?logging", or "?torrent"]; -// -// CanonicalizedProtocolHeaders = - -// doesSignV2Match - Verify authorization header with calculated header in accordance with -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/auth-request-sig-v2.html -// returns true if matches, false otherwise. if error is not nil then it is always false - -func (s *service) validateV2AuthHeader(r *http.Request) (auth.Credentials, *responses.Error) { - var cred auth.Credentials - v2Auth := r.Header.Get(consts.Authorization) - if v2Auth == "" { - return cred, responses.ErrAuthHeaderEmpty - } - - // Verify if the header algorithm is supported or not. - if !strings.HasPrefix(v2Auth, signV2Algorithm) { - return cred, responses.ErrSignatureVersionNotSupported - } - - cred, _, apiErr := s.getReqAccessKeyV2(r) - if apiErr != nil { - return cred, apiErr - } - - return cred, nil -} - -func (s *service) doesSignV2Match(r *http.Request) *responses.Error { - v2Auth := r.Header.Get(consts.Authorization) - cred, apiError := s.validateV2AuthHeader(r) - if apiError != nil { - return apiError - } - - // r.RequestURI will have raw encoded URI as sent by the client. 
- tokens := strings.SplitN(r.RequestURI, "?", 2) - encodedResource := tokens[0] - encodedQuery := "" - if len(tokens) == 2 { - encodedQuery = tokens[1] - } - - unescapedQueries, err := unescapeQueries(encodedQuery) - if err != nil { - return responses.ErrInvalidQueryParams - } - - encodedResource, err = getResource(encodedResource, r.Host) - if err != nil { - return responses.ErrInvalidRequest - } - - prefix := fmt.Sprintf("%s %s:", signV2Algorithm, cred.AccessKey) - if !strings.HasPrefix(v2Auth, prefix) { - return responses.ErrSignatureDoesNotMatch - } - v2Auth = v2Auth[len(prefix):] - expectedAuth := signatureV2(cred, r.Method, encodedResource, strings.Join(unescapedQueries, "&"), r.Header) - if !compareSignatureV2(v2Auth, expectedAuth) { - return responses.ErrSignatureDoesNotMatch - } - return nil -} - -func calculateSignatureV2(stringToSign string, secret string) string { - hm := hmac.New(sha1.New, []byte(secret)) - hm.Write([]byte(stringToSign)) - return base64.StdEncoding.EncodeToString(hm.Sum(nil)) -} - -// Return signature-v2 for the presigned request. -func preSignatureV2(cred auth.Credentials, method string, encodedResource string, encodedQuery string, headers http.Header, expires string) string { - stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, expires) - return calculateSignatureV2(stringToSign, cred.SecretKey) -} - -// Return the signature v2 of a given request. -func signatureV2(cred auth.Credentials, method string, encodedResource string, encodedQuery string, headers http.Header) string { - stringToSign := getStringToSignV2(method, encodedResource, encodedQuery, headers, "") - signature := calculateSignatureV2(stringToSign, cred.SecretKey) - return signature -} - -// compareSignatureV2 returns true if and only if both signatures -// are equal. The signatures are expected to be base64 encoded strings -// according to the AWS S3 signature V2 spec. 
-func compareSignatureV2(sig1, sig2 string) bool { - // Decode signature string to binary byte-sequence representation is required - // as Base64 encoding of a value is not unique: - // For example "aGVsbG8=" and "aGVsbG8=\r" will result in the same byte slice. - signature1, err := base64.StdEncoding.DecodeString(sig1) - if err != nil { - return false - } - signature2, err := base64.StdEncoding.DecodeString(sig2) - if err != nil { - return false - } - return subtle.ConstantTimeCompare(signature1, signature2) == 1 -} - -// Return canonical headers. -func canonicalizedAmzHeadersV2(headers http.Header) string { - var keys []string - keyval := make(map[string]string, len(headers)) - for key := range headers { - lkey := strings.ToLower(key) - if !strings.HasPrefix(lkey, "x-amz-") { - continue - } - keys = append(keys, lkey) - keyval[lkey] = strings.Join(headers[key], ",") - } - sort.Strings(keys) - var canonicalHeaders []string - for _, key := range keys { - canonicalHeaders = append(canonicalHeaders, key+":"+keyval[key]) - } - return strings.Join(canonicalHeaders, "\n") -} - -// Return canonical resource string. -func canonicalizedResourceV2(encodedResource, encodedQuery string) string { - queries := strings.Split(encodedQuery, "&") - keyval := make(map[string]string) - for _, query := range queries { - key := query - val := "" - index := strings.Index(query, "=") - if index != -1 { - key = query[:index] - val = query[index+1:] - } - keyval[key] = val - } - - var canonicalQueries []string - for _, key := range resourceList { - val, ok := keyval[key] - if !ok { - continue - } - if val == "" { - canonicalQueries = append(canonicalQueries, key) - continue - } - canonicalQueries = append(canonicalQueries, key+"="+val) - } - - // The queries will be already sorted as resourceList is sorted, if canonicalQueries - // is empty strings.Join returns empty. - canonicalQuery := strings.Join(canonicalQueries, "&") - if canonicalQuery != "" { - return encodedResource + "?" 
+ canonicalQuery - } - return encodedResource -} - -// Return string to sign under two different conditions. -// - if expires string is set then string to sign includes date instead of the Date header. -// - if expires string is empty then string to sign includes date header instead. -func getStringToSignV2(method string, encodedResource, encodedQuery string, headers http.Header, expires string) string { - canonicalHeaders := canonicalizedAmzHeadersV2(headers) - if len(canonicalHeaders) > 0 { - canonicalHeaders += "\n" - } - - date := expires // Date is set to expires date for presign operations. - if date == "" { - // If expires date is empty then request header Date is used. - date = headers.Get(consts.Date) - } - - // From the Amazon docs: - // - // StringToSign = HTTP-Verb + "\n" + - // Content-Md5 + "\n" + - // Content-Type + "\n" + - // Date/Expires + "\n" + - // CanonicalizedProtocolHeaders + - // CanonicalizedResource; - stringToSign := strings.Join([]string{ - method, - headers.Get(consts.ContentMD5), - headers.Get(consts.ContentType), - date, - canonicalHeaders, - }, "\n") - - return stringToSign + canonicalizedResourceV2(encodedResource, encodedQuery) -} - -// Returns "/bucketName/objectName" for path-style or virtual-host-style requests. -func getResource(path string, host string) (string, error) { - - // If virtual-host-style is enabled construct the "resource" properly. - if strings.Contains(host, ":") { - // In bucket.mydomain.com:9000, strip out :9000 - var err error - if host, _, err = net.SplitHostPort(host); err != nil { - return "", err - } - } - return path, nil -} diff --git a/s3/services/auth/streaming-signature-v4_test.go b/s3/services/auth/streaming-signature-v4_test.go deleted file mode 100644 index e6a985753..000000000 --- a/s3/services/auth/streaming-signature-v4_test.go +++ /dev/null @@ -1,198 +0,0 @@ -/* - * The following code tries to reverse engineer the Amazon S3 APIs, - * and is mostly copied from minio implementation. 
- */ - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package auth - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" - "testing" -) - -// Test read chunk line. -func TestReadChunkLine(t *testing.T) { - type testCase struct { - reader *bufio.Reader - expectedErr error - chunkSize []byte - chunkSignature []byte - } - // List of readers used. - readers := []io.Reader{ - // Test - 1 - bytes.NewReader([]byte("1000;chunk-signature=111123333333333333334444211\r\n")), - // Test - 2 - bytes.NewReader([]byte("1000;")), - // Test - 3 - bytes.NewReader([]byte(fmt.Sprintf("%4097d", 1))), - // Test - 4 - bytes.NewReader([]byte("1000;chunk-signature=111123333333333333334444211\r\n")), - } - testCases := []testCase{ - // Test - 1 - small bufio reader. - { - bufio.NewReaderSize(readers[0], 16), - errLineTooLong, - nil, - nil, - }, - // Test - 2 - unexpected end of the reader. - { - bufio.NewReader(readers[1]), - io.ErrUnexpectedEOF, - nil, - nil, - }, - // Test - 3 - line too long bigger than 4k+1 - { - bufio.NewReader(readers[2]), - errLineTooLong, - nil, - nil, - }, - // Test - 4 - parse the chunk reader properly. - { - bufio.NewReader(readers[3]), - nil, - []byte("1000"), - []byte("111123333333333333334444211"), - }, - } - // Valid test cases for each chunk line. 
- for i, tt := range testCases { - chunkSize, chunkSignature, err := readChunkLine(tt.reader) - if err != tt.expectedErr { - t.Errorf("Test %d: Expected %s, got %s", i+1, tt.expectedErr, err) - } - if !bytes.Equal(chunkSize, tt.chunkSize) { - t.Errorf("Test %d: Expected %s, got %s", i+1, string(tt.chunkSize), string(chunkSize)) - } - if !bytes.Equal(chunkSignature, tt.chunkSignature) { - t.Errorf("Test %d: Expected %s, got %s", i+1, string(tt.chunkSignature), string(chunkSignature)) - } - } -} - -// Test parsing s3 chunk extension. -func TestParseS3ChunkExtension(t *testing.T) { - type testCase struct { - buf []byte - chunkSize []byte - chunkSign []byte - } - - tests := []testCase{ - // Test - 1 valid case. - { - []byte("10000;chunk-signature=ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648"), - []byte("10000"), - []byte("ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648"), - }, - // Test - 2 no chunk extension, return same buffer. - { - []byte("10000;"), - []byte("10000;"), - nil, - }, - // Test - 3 no chunk size, return error. - { - []byte(";chunk-signature="), - nil, - nil, - }, - // Test - 4 removes trailing slash. - { - []byte("10000;chunk-signature=ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648 \t \n"), - []byte("10000"), - []byte("ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648"), - }, - } - // Validate chunk extension removal. - for i, tt := range tests { - // Extract chunk size and chunk signature after parsing a standard chunk-extension format. - hexChunkSize, hexChunkSignature := parseS3ChunkExtension(tt.buf) - if !bytes.Equal(hexChunkSize, tt.chunkSize) { - t.Errorf("Test %d: Expected %s, got %s", i+1, string(tt.chunkSize), string(hexChunkSize)) - } - if !bytes.Equal(hexChunkSignature, tt.chunkSign) { - t.Errorf("Test %d: Expected %s, got %s", i+1, string(tt.chunkSign), string(hexChunkSignature)) - } - } -} - -// Test read CRLF characters on input reader. 
-func TestReadCRLF(t *testing.T) { - type testCase struct { - reader io.Reader - expectedErr error - } - tests := []testCase{ - // Test - 1 valid buffer with CRLF. - {bytes.NewReader([]byte("\r\n")), nil}, - // Test - 2 invalid buffer with no CRLF. - {bytes.NewReader([]byte("he")), errMalformedEncoding}, - // Test - 3 invalid buffer with more characters. - {bytes.NewReader([]byte("he\r\n")), errMalformedEncoding}, - // Test - 4 smaller buffer than expected. - {bytes.NewReader([]byte("h")), io.ErrUnexpectedEOF}, - } - for i, tt := range tests { - err := readCRLF(tt.reader) - if err != tt.expectedErr { - t.Errorf("Test %d: Expected %s, got %s this", i+1, tt.expectedErr, err) - } - } -} - -// Tests parsing hex number into its uint64 decimal equivalent. -func TestParseHexUint(t *testing.T) { - type testCase struct { - in string - want uint64 - wantErr string - } - tests := []testCase{ - {"x", 0, "invalid byte in chunk length"}, - {"0000000000000000", 0, ""}, - {"0000000000000001", 1, ""}, - {"ffffffffffffffff", 1<<64 - 1, ""}, - {"FFFFFFFFFFFFFFFF", 1<<64 - 1, ""}, - {"000000000000bogus", 0, "invalid byte in chunk length"}, - {"00000000000000000", 0, "http chunk length too large"}, // could accept if we wanted - {"10000000000000000", 0, "http chunk length too large"}, - {"00000000000000001", 0, "http chunk length too large"}, // could accept if we wanted - } - for i := uint64(0); i <= 1234; i++ { - tests = append(tests, testCase{in: fmt.Sprintf("%x", i), want: i}) - } - for _, tt := range tests { - got, err := parseHexUint([]byte(tt.in)) - if tt.wantErr != "" { - if err != nil && !strings.Contains(err.Error(), tt.wantErr) { - t.Errorf("parseHexUint(%q) = %v, %v; want error %q", tt.in, got, err, tt.wantErr) - } - } else { - if err != nil || got != tt.want { - t.Errorf("parseHexUint(%q) = %v, %v; want %v", tt.in, got, err, tt.want) - } - } - } -} diff --git a/s3/services/bucket/proto.go b/s3/services/bucket/proto.go index 440363d71..4d2907a4e 100644 --- 
a/s3/services/bucket/proto.go +++ b/s3/services/bucket/proto.go @@ -4,14 +4,13 @@ import ( "context" "errors" "github.com/bittorrent/go-btfs/s3/action" - "github.com/bittorrent/go-btfs/s3/services/accesskey" "time" ) var ErrNotFound = errors.New("bucket not found") type Service interface { - CheckACL(accessKeyRecord *accesskey.AccessKey, bucketName string, action action.Action) (err error) + CheckACL(accessKey string, bucketName string, action action.Action) (err error) CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error GetBucketMeta(ctx context.Context, bucket string) (meta Bucket, err error) HasBucket(ctx context.Context, bucket string) bool diff --git a/s3/services/bucket/service.go b/s3/services/bucket/service.go index 36af0441b..b9c17d2bb 100644 --- a/s3/services/bucket/service.go +++ b/s3/services/bucket/service.go @@ -4,7 +4,6 @@ import ( "context" "errors" "github.com/bittorrent/go-btfs/s3/providers" - "github.com/bittorrent/go-btfs/s3/services/accesskey" "time" "github.com/bittorrent/go-btfs/s3/action" @@ -40,7 +39,7 @@ func NewService(providers providers.Providerser, options ...Option) Service { return s } -func (s *service) CheckACL(ack *accesskey.AccessKey, bucketName string, act action.Action) (err error) { +func (s *service) CheckACL(ack string, bucketName string, act action.Action) (err error) { var bucketMeta Bucket if act != action.CreateBucketAction && act != action.ListBucketAction { if bucketName == "" { @@ -52,7 +51,7 @@ func (s *service) CheckACL(ack *accesskey.AccessKey, bucketName string, act acti } } - if policy.IsAllowed(bucketMeta.Owner == ack.Key, bucketMeta.Acl, act) == false { + if policy.IsAllowed(bucketMeta.Owner == ack, bucketMeta.Acl, act) == false { return errors.New("not allowed") } return diff --git a/s3/services/sign/proto.go b/s3/services/sign/proto.go new file mode 100644 index 000000000..a9b17d8fc --- /dev/null +++ b/s3/services/sign/proto.go @@ -0,0 +1,11 @@ +package sign + +import ( + 
"github.com/bittorrent/go-btfs/s3/responses" + "net/http" +) + +type Service interface { + SetSecretGetter(f func(key string) (secret string, exists, enable bool, err error)) + VerifyRequestSignature(r *http.Request) (ack string, rerr *responses.Error) +} diff --git a/s3/services/sign/service.go b/s3/services/sign/service.go new file mode 100644 index 000000000..0a89e22eb --- /dev/null +++ b/s3/services/sign/service.go @@ -0,0 +1,49 @@ +package sign + +import ( + "github.com/bittorrent/go-btfs/s3/responses" + "net/http" + "sync" +) + +var _ Service = (*service)(nil) + +type service struct { + getSecret func(key string) (secret string, exists, enable bool, err error) + once sync.Once +} + +func NewService(options ...Option) Service { + svc := &service{ + getSecret: func(key string) (secret string, exists, enable bool, err error) { + return + }, + once: sync.Once{}, + } + for _, option := range options { + option(svc) + } + return svc +} + +func (s *service) SetSecretGetter(f func(key string) (secret string, exists, enable bool, err error)) { + s.once.Do(func() { + s.getSecret = f + }) +} + +func (s *service) VerifyRequestSignature(r *http.Request) (ack string, rerr *responses.Error) { + switch GetRequestAuthType(r) { + case AuthTypeUnknown: + return + case AuthTypeSigned, AuthTypePresigned: + ack, rerr = s.isReqAuthenticated(r, "", ServiceS3) + return + case AuthTypeStreamingSigned: + ack, rerr = s.setReqBodySignV4ChunkedReader(r, "", ServiceS3) + return + default: + rerr = responses.ErrSignatureVersionNotSupported + return + } +} diff --git a/s3/services/auth/service_options.go b/s3/services/sign/service_options.go similarity index 71% rename from s3/services/auth/service_options.go rename to s3/services/sign/service_options.go index fb9830f04..46f7ad63c 100644 --- a/s3/services/auth/service_options.go +++ b/s3/services/sign/service_options.go @@ -1,3 +1,3 @@ -package auth +package sign type Option func(svc *service) diff --git a/s3/services/auth/auth_type.go 
b/s3/services/sign/signature-auth-type.go similarity index 96% rename from s3/services/auth/auth_type.go rename to s3/services/sign/signature-auth-type.go index 54d948637..70220b2c7 100644 --- a/s3/services/auth/auth_type.go +++ b/s3/services/sign/signature-auth-type.go @@ -1,4 +1,4 @@ -package auth +package sign import ( "github.com/bittorrent/go-btfs/s3/consts" @@ -94,7 +94,3 @@ func GetRequestAuthType(r *http.Request) AuthType { } return AuthTypeUnknown } - -func IsAuthTypeStreamingSigned(atype AuthType) bool { - return atype == AuthTypeStreamingSigned -} diff --git a/s3/services/auth/signature-v4-parser.go b/s3/services/sign/signature-v4-parser.go similarity index 83% rename from s3/services/auth/signature-v4-parser.go rename to s3/services/sign/signature-v4-parser.go index 46dc7c68f..d02da5db0 100644 --- a/s3/services/auth/signature-v4-parser.go +++ b/s3/services/sign/signature-v4-parser.go @@ -15,13 +15,11 @@ // implied. See the License for the specific language governing // permissions and limitations under the License. -package auth +package sign import ( "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/iam/auth" "github.com/bittorrent/go-btfs/s3/responses" - "net/http" "net/url" "strings" "time" @@ -49,30 +47,8 @@ func (c credentialHeader) getScope() string { }, consts.SlashSeparator) } -func (s *service) GetReqAccessKeyV4(r *http.Request, region string, stype serviceType) (auth.Credentials, bool, *responses.Error) { - ch, s3Err := parseCredentialHeader("Credential="+r.Form.Get(consts.AmzCredential), region, stype) - if s3Err != nil { - // Strip off the Algorithm prefix. 
- v4Auth := strings.TrimPrefix(r.Header.Get("Authorization"), signV4Algorithm) - authFields := strings.Split(strings.TrimSpace(v4Auth), ",") - if len(authFields) != 3 { - return auth.Credentials{}, false, responses.ErrMissingFields - } - ch, s3Err = parseCredentialHeader(authFields[0], region, stype) - if s3Err != nil { - return auth.Credentials{}, false, s3Err - } - } - // TODO: Why should a temporary user be replaced with the parent user's account name? - //cerd, _ := s.Iam.GetUser(r.Context(), ch.accessKey) - //if cerd.IsTemp() { - // ch.accessKey = cerd.ParentUser - //} - return s.checkKeyValid(r, ch.accessKey) -} - // parse credentialHeader string into its structured form. -func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, aec *responses.Error) { +func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, rerr *responses.Error) { creds := strings.SplitN(strings.TrimSpace(credElement), "=", 2) if len(creds) != 2 { return ch, responses.ErrMissingFields @@ -85,9 +61,7 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) return ch, responses.ErrCredMalformed } accessKey := strings.Join(credElements[:len(credElements)-4], consts.SlashSeparator) // The access key may contain one or more `/` - if !auth.IsAccessKeyValid(accessKey) { - return ch, responses.ErrInvalidAccessKeyID - } + // Save access key id. cred := credentialHeader{ accessKey: accessKey, @@ -195,11 +169,11 @@ func doesV4PresignParamsExist(query url.Values) *responses.Error { } // Parses all the presigned signature values into separate elements. -func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, aec *responses.Error) { +func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, rerr *responses.Error) { // verify whether the required query params exist. 
- aec = doesV4PresignParamsExist(query) - if aec != nil { - return psv, aec + rerr = doesV4PresignParamsExist(query) + if rerr != nil { + return psv, rerr } // Verify if the query algorithm is supported or not. @@ -211,9 +185,9 @@ func parsePreSignV4(query url.Values, region string, stype serviceType) (psv pre preSignV4Values := preSignValues{} // Save credential. - preSignV4Values.Credential, aec = parseCredentialHeader("Credential="+query.Get(consts.AmzCredential), region, stype) - if aec != nil { - return psv, aec + preSignV4Values.Credential, rerr = parseCredentialHeader("Credential="+query.Get(consts.AmzCredential), region, stype) + if rerr != nil { + return psv, rerr } var e error @@ -239,15 +213,15 @@ func parsePreSignV4(query url.Values, region string, stype serviceType) (psv pre } // Save signed headers. - preSignV4Values.SignedHeaders, aec = parseSignedHeader("SignedHeaders=" + query.Get(consts.AmzSignedHeaders)) - if aec != nil { - return psv, aec + preSignV4Values.SignedHeaders, rerr = parseSignedHeader("SignedHeaders=" + query.Get(consts.AmzSignedHeaders)) + if rerr != nil { + return psv, rerr } // Save signature. - preSignV4Values.Signature, aec = parseSignature("Signature=" + query.Get(consts.AmzSignature)) - if aec != nil { - return psv, aec + preSignV4Values.Signature, rerr = parseSignature("Signature=" + query.Get(consts.AmzSignature)) + if rerr != nil { + return psv, rerr } // Return structed form of signature query string. diff --git a/s3/services/auth/signature-v4-utils.go b/s3/services/sign/signature-v4-utils.go similarity index 87% rename from s3/services/auth/signature-v4-utils.go rename to s3/services/sign/signature-v4-utils.go index 6539b7b0e..1239b9ff6 100644 --- a/s3/services/auth/signature-v4-utils.go +++ b/s3/services/sign/signature-v4-utils.go @@ -15,14 +15,13 @@ // implied. See the License for the specific language governing // permissions and limitations under the License. 
-package auth +package sign import ( "bytes" "crypto/sha256" "encoding/hex" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/iam/auth" "github.com/bittorrent/go-btfs/s3/responses" "io" "io/ioutil" @@ -79,15 +78,15 @@ func SkipContentSha256Cksum(r *http.Request) bool { } // Returns SHA256 for calculating canonical-request. -func GetContentSha256Cksum(r *http.Request, stype serviceType) string { +func GetContentSha256Cksum(r *http.Request, stype serviceType) (string, error) { if stype == ServiceSTS { payload, err := ioutil.ReadAll(io.LimitReader(r.Body, consts.StsRequestBodyLimit)) if err != nil { - log.Errorf("ServiceSTS ReadAll err:%v", err) + return "", err } sum256 := sha256.Sum256(payload) r.Body = ioutil.NopCloser(bytes.NewReader(payload)) - return hex.EncodeToString(sum256[:]) + return hex.EncodeToString(sum256[:]), nil } var ( @@ -114,11 +113,11 @@ func GetContentSha256Cksum(r *http.Request, stype serviceType) string { // We found 'X-Amz-Content-Sha256' return the captured value. if ok { - return v[0] + return v[0], nil } // We couldn't find 'X-Amz-Content-Sha256'. - return defaultSha256Cksum + return defaultSha256Cksum, nil } // isValidRegion - verify if incoming region value is valid with configured Region. @@ -139,24 +138,24 @@ func isValidRegion(reqRegion string, confRegion string) bool { // check if the access key is valid and recognized, additionally // also returns if the access key is owner/admin. -func (s *service) checkKeyValid(r *http.Request, accessKey string) (auth.Credentials, bool, *responses.Error) { +func (s *service) checkKeyValid(ack string) (secret string, rerr *responses.Error) { + secret, exists, enable, err := s.getSecret(ack) + if err != nil { + rerr = responses.ErrInternalError + return + } - cred := s.AdminCred - if cred.AccessKey != accessKey { - // Check if the access key is part of users credentials. 
- ucred, ok := s.Iam.GetUser(r.Context(), accessKey) - if !ok { - // Credentials will be invalid but and disabled - // return a different error in such a scenario. - if ucred.Status == auth.AccountOff { - return cred, false, responses.ErrAccessKeyDisabled - } - return cred, false, responses.ErrInvalidAccessKeyID - } - cred = ucred + if !exists { + rerr = responses.ErrInvalidAccessKeyID + return } - owner := cred.AccessKey == s.AdminCred.AccessKey - return cred, owner, nil + + if !enable { + rerr = responses.ErrAccessKeyDisabled + return + } + + return } func contains(slice interface{}, elem interface{}) bool { diff --git a/s3/services/auth/signature-v4.go b/s3/services/sign/signature-v4.go similarity index 77% rename from s3/services/auth/signature-v4.go rename to s3/services/sign/signature-v4.go index 0557f64f1..9fb12a15b 100644 --- a/s3/services/auth/signature-v4.go +++ b/s3/services/sign/signature-v4.go @@ -15,13 +15,13 @@ // implied. See the License for the specific language governing // permissions and limitations under the License. -package auth +package sign import ( "crypto/subtle" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/iam/set" "github.com/bittorrent/go-btfs/s3/responses" + "github.com/bittorrent/go-btfs/s3/set" "github.com/bittorrent/go-btfs/s3/utils" "net/http" "net/url" @@ -32,6 +32,7 @@ import ( // AWS Signature Version '4' constants. const ( + signV2Algorithm = "AWS" signV4Algorithm = "AWS4-HMAC-SHA256" iso8601Format = "20060102T150405Z" yyyymmdd = "20060102" @@ -58,35 +59,38 @@ func compareSignatureV4(sig1, sig2 string) bool { // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html // // returns nil if the signature matches. 
-func (s *service) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) *responses.Error { +func (s *service) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) (ack string, rerr *responses.Error) { // Copy request req := *r // Parse request query string. - pSignValues, err := parsePreSignV4(req.Form, region, stype) - if err != nil { - return err + pSignValues, rerr := parsePreSignV4(req.Form, region, stype) + if rerr != nil { + return } - cred, _, s3Err := s.checkKeyValid(r, pSignValues.Credential.accessKey) - if s3Err != nil { - return s3Err + ack = pSignValues.Credential.accessKey + secret, rerr := s.checkKeyValid(ack) + if rerr != nil { + return } // Extract all the signed headers along with its values. - extractedSignedHeaders, errCode := extractSignedHeaders(pSignValues.SignedHeaders, r) - if errCode != nil { - return errCode + extractedSignedHeaders, rerr := extractSignedHeaders(pSignValues.SignedHeaders, r) + if rerr != nil { + return } // If the host which signed the request is slightly ahead in time (by less than MaxSkewTime) the // request should still be allowed. if pSignValues.Date.After(time.Now().UTC().Add(consts.MaxSkewTime)) { - return responses.ErrRequestNotReadyYet + rerr = responses.ErrRequestNotReadyYet + return } if time.Now().UTC().Sub(pSignValues.Date) > pSignValues.Expires { - return responses.ErrExpiredPresignRequest + rerr = responses.ErrExpiredPresignRequest + return } // Save the date and expires. 
@@ -102,7 +106,8 @@ func (s *service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ token := req.Form.Get(consts.AmzSecurityToken) if token != "" { - query.Set(consts.AmzSecurityToken, cred.SessionToken) + rerr = responses.ErrSignatureVersionNotSupported + return } query.Set(consts.AmzAlgorithm, signV4Algorithm) @@ -111,7 +116,7 @@ func (s *service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ query.Set(consts.AmzDate, t.Format(iso8601Format)) query.Set(consts.AmzExpires, strconv.Itoa(expireSeconds)) query.Set(consts.AmzSignedHeaders, utils.GetSignedHeaders(extractedSignedHeaders)) - query.Set(consts.AmzCredential, cred.AccessKey+consts.SlashSeparator+pSignValues.Credential.getScope()) + query.Set(consts.AmzCredential, ack+consts.SlashSeparator+pSignValues.Credential.getScope()) defaultSigParams := set.CreateStringSet( consts.AmzContentSha256, @@ -136,27 +141,28 @@ func (s *service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Verify if date query is same. if req.Form.Get(consts.AmzDate) != query.Get(consts.AmzDate) { - return responses.ErrSignatureDoesNotMatch + rerr = responses.ErrSignatureDoesNotMatch + return } // Verify if expires query is same. if req.Form.Get(consts.AmzExpires) != query.Get(consts.AmzExpires) { - return responses.ErrSignatureDoesNotMatch + rerr = responses.ErrSignatureDoesNotMatch + return } // Verify if signed headers query is same. if req.Form.Get(consts.AmzSignedHeaders) != query.Get(consts.AmzSignedHeaders) { - return responses.ErrSignatureDoesNotMatch + rerr = responses.ErrSignatureDoesNotMatch + return } // Verify if credential query is same. if req.Form.Get(consts.AmzCredential) != query.Get(consts.AmzCredential) { - return responses.ErrSignatureDoesNotMatch + rerr = responses.ErrSignatureDoesNotMatch + return } // Verify if sha256 payload query is same. 
if clntHashedPayload != "" && clntHashedPayload != query.Get(consts.AmzContentSha256) { - return responses.ErrContentSHA256Mismatch - } - // Verify if security token is correct. - if token != "" && subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 { - return responses.ErrInvalidToken + rerr = responses.ErrContentSHA256Mismatch + return } // Verify finally if signature is same. @@ -168,7 +174,7 @@ func (s *service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ presignedStringToSign := utils.GetStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope()) // Get hmac presigned signing key. - presignedSigningKey := utils.GetSigningKey(cred.SecretKey, pSignValues.Credential.scope.date, + presignedSigningKey := utils.GetSigningKey(secret, pSignValues.Credential.scope.date, pSignValues.Credential.scope.region, string(stype)) // Get new signature. @@ -176,16 +182,18 @@ func (s *service) doesPresignedSignatureMatch(hashedPayload string, r *http.Requ // Verify signature. if !compareSignatureV4(req.Form.Get(consts.AmzSignature), newSignature) { - return responses.ErrSignatureDoesNotMatch + rerr = responses.ErrSignatureDoesNotMatch + return } - return nil + + return } // doesSignatureMatch - Verify authorization header with calculated header in accordance with // - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html // // returns nil if signature matches. -func (s *service) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) *responses.Error { +func (s *service) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) (ack string, rerr *responses.Error) { // Copy request. req := *r @@ -193,34 +201,37 @@ func (s *service) doesSignatureMatch(hashedPayload string, r *http.Request, regi v4Auth := req.Header.Get(consts.Authorization) // Parse signature version '4' header. 
- signV4Values, err := parseSignV4(v4Auth, region, stype) - if err != nil { - return err + signV4Values, rerr := parseSignV4(v4Auth, region, stype) + if rerr != nil { + return } // Extract all the signed headers along with its values. - extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) - if errCode != nil { - return errCode + extractedSignedHeaders, rerr := extractSignedHeaders(signV4Values.SignedHeaders, r) + if rerr != nil { + return } - cred, _, s3Err := s.checkKeyValid(r, signV4Values.Credential.accessKey) - if s3Err != nil { - return s3Err + ack = signV4Values.Credential.accessKey + secret, rerr := s.checkKeyValid(ack) + if rerr != nil { + return } // Extract date, if not present throw error. var date string if date = req.Header.Get(consts.AmzDate); date == "" { if date = r.Header.Get(consts.Date); date == "" { - return responses.ErrMissingDateHeader + rerr = responses.ErrMissingDateHeader + return } } // Parse date header. - t, e := time.Parse(iso8601Format, date) - if e != nil { - return responses.ErrAuthorizationHeaderMalformed + t, err := time.Parse(iso8601Format, date) + if err != nil { + rerr = responses.ErrAuthorizationHeaderMalformed + return } // Query string. @@ -233,7 +244,7 @@ func (s *service) doesSignatureMatch(hashedPayload string, r *http.Request, regi stringToSign := utils.GetStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) // Get hmac signing key. - signingKey := utils.GetSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, + signingKey := utils.GetSigningKey(secret, signV4Values.Credential.scope.date, signV4Values.Credential.scope.region, string(stype)) // Calculate signature. @@ -241,11 +252,12 @@ func (s *service) doesSignatureMatch(hashedPayload string, r *http.Request, regi // Verify if signature match. 
if !compareSignatureV4(newSignature, signV4Values.Signature) { - return responses.ErrSignatureDoesNotMatch + rerr = responses.ErrSignatureDoesNotMatch + return } // Return error none. - return nil + return } // getScope generate a string of a specific date, an AWS region, and a service. diff --git a/s3/services/sign/signature.go b/s3/services/sign/signature.go new file mode 100644 index 000000000..c176189b7 --- /dev/null +++ b/s3/services/sign/signature.go @@ -0,0 +1,70 @@ +package sign + +import ( + "encoding/hex" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/etag" + "github.com/bittorrent/go-btfs/s3/responses" + "github.com/bittorrent/go-btfs/s3/utils/hash" + "net/http" +) + +// isReqAuthenticated Verify if request has valid AWS Signature Version '4'. +func (s *service) isReqAuthenticated(r *http.Request, region string, stype serviceType) (ack string, rerr *responses.Error) { + ack, rerr = s.reqSignatureV4Verify(r, region, stype) + if rerr != nil { + return + } + clientETag, err := etag.FromContentMD5(r.Header) + if err != nil { + rerr = responses.ErrInvalidDigest + return + } + + // Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned) + // Do not verify 'X-Amz-Content-Sha256' if skipSHA256. + var contentSHA256 []byte + if skipSHA256 := SkipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) { + if sha256Sum, ok := r.Form[consts.AmzContentSha256]; ok && len(sha256Sum) > 0 { + contentSHA256, err = hex.DecodeString(sha256Sum[0]) + if err != nil { + rerr = responses.ErrContentSHA256Mismatch + return + } + } + } else if _, ok := r.Header[consts.AmzContentSha256]; !skipSHA256 && ok { + contentSHA256, err = hex.DecodeString(r.Header.Get(consts.AmzContentSha256)) + if err != nil || len(contentSHA256) == 0 { + rerr = responses.ErrContentSHA256Mismatch + } + } + + // Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present. 
+ // The verification happens implicit during reading. + reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1) + if err != nil { + rerr = responses.ErrInternalError + return + } + + r.Body = reader + + return +} + +func (s *service) reqSignatureV4Verify(r *http.Request, region string, stype serviceType) (ack string, rerr *responses.Error) { + sha256sum, err := GetContentSha256Cksum(r, stype) + if err != nil { + rerr = responses.ErrInternalError + return + } + switch { + case IsRequestSignatureV4(r): + ack, rerr = s.doesSignatureMatch(sha256sum, r, region, stype) + case isRequestPresignedSignatureV4(r): + ack, rerr = s.doesPresignedSignatureMatch(sha256sum, r, region, stype) + default: + rerr = responses.ErrAccessDenied + } + return +} diff --git a/s3/services/auth/streaming-signature-v4.go b/s3/services/sign/streaming-signature-v4.go similarity index 77% rename from s3/services/auth/streaming-signature-v4.go rename to s3/services/sign/streaming-signature-v4.go index a98cf6025..41094ee33 100644 --- a/s3/services/auth/streaming-signature-v4.go +++ b/s3/services/sign/streaming-signature-v4.go @@ -1,24 +1,4 @@ -/* - * The following code tries to reverse engineer the Amazon S3 APIs, - * and is mostly copied from minio implementation. - */ - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
- -// Package cmd This file implements helper functions to validate Streaming AWS -// Signature Version '4' authorization header. -package auth +package sign import ( "bufio" @@ -26,6 +6,7 @@ import ( "crypto/sha256" "encoding/hex" "errors" + "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/utils" "hash" "io" @@ -34,7 +15,6 @@ import ( "time" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/iam/auth" humanize "github.com/dustin/go-humanize" ) @@ -50,7 +30,7 @@ const ( var errSignatureMismatch = errors.New("Signature does not match") // getChunkSignature - get chunk signature. -func getChunkSignature(cred auth.Credentials, seedSignature string, region string, date time.Time, hashedChunk string) string { +func getChunkSignature(secret, seedSignature string, region string, stype serviceType, date time.Time, hashedChunk string) string { // Calculate string to sign. stringToSign := signV4ChunkedAlgorithm + "\n" + date.Format(iso8601Format) + "\n" + @@ -60,7 +40,7 @@ func getChunkSignature(cred auth.Credentials, seedSignature string, region strin hashedChunk // Get hmac signing key. - signingKey := utils.GetSigningKey(cred.SecretKey, date, region, string(ServiceS3)) + signingKey := utils.GetSigningKey(secret, date, region, string(stype)) // Calculate signature. newSignature := utils.GetSignature(signingKey, stringToSign) @@ -68,12 +48,12 @@ func getChunkSignature(cred auth.Credentials, seedSignature string, region strin return newSignature } -// CalculateSeedSignature - Calculate seed signature in accordance with +// calculateSeedSignature - Calculate seed signature in accordance with // - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html // // returns signature, error otherwise if the signature mismatches or any other // error while parsing and validating. 
-func (s *service) CalculateSeedSignature(r *http.Request) (cred auth.Credentials, signature string, region string, date time.Time, errCode *responses.Error) { +func (s *service) calculateSeedSignature(r *http.Request, iregion string, stype serviceType) (ack, sec string, signature string, region string, date time.Time, rerr *responses.Error) { // Copy request. req := *r @@ -81,9 +61,9 @@ func (s *service) CalculateSeedSignature(r *http.Request) (cred auth.Credentials v4Auth := req.Header.Get(consts.Authorization) // Parse signature version '4' header. - signV4Values, errCode := parseSignV4(v4Auth, "", ServiceS3) - if errCode != nil { - return cred, "", "", time.Time{}, errCode + signV4Values, rerr := parseSignV4(v4Auth, "", stype) + if rerr != nil { + return } // Payload streaming. @@ -91,36 +71,43 @@ func (s *service) CalculateSeedSignature(r *http.Request) (cred auth.Credentials // Payload for STREAMING signature should be 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD' if payload != req.Header.Get(consts.AmzContentSha256) { - return cred, "", "", time.Time{}, responses.ErrContentSHA256Mismatch + rerr = responses.ErrContentSHA256Mismatch + return } // Extract all the signed headers along with its values. - extractedSignedHeaders, errCode := extractSignedHeaders(signV4Values.SignedHeaders, r) - if errCode != nil { - return cred, "", "", time.Time{}, errCode + extractedSignedHeaders, rerr := extractSignedHeaders(signV4Values.SignedHeaders, r) + if rerr != nil { + return } - cred, _, errCode = s.checkKeyValid(r, signV4Values.Credential.accessKey) - if errCode != nil { - return cred, "", "", time.Time{}, errCode + ack = signV4Values.Credential.accessKey + + sec, rerr = s.checkKeyValid(ack) + if rerr != nil { + return } // Verify if region is valid. - region = signV4Values.Credential.scope.region + region = iregion + if region == "" { + region = signV4Values.Credential.scope.region + } // Extract date, if not present throw error. 
var dateStr string if dateStr = req.Header.Get("x-amz-date"); dateStr == "" { if dateStr = r.Header.Get("Date"); dateStr == "" { - return cred, "", "", time.Time{}, responses.ErrMissingDateHeader + rerr = responses.ErrMissingDateHeader + return } } // Parse date header. - var err error - date, err = time.Parse(iso8601Format, dateStr) + date, err := time.Parse(iso8601Format, dateStr) if err != nil { - return cred, "", "", time.Time{}, responses.ErrMalformedDate + rerr = responses.ErrMalformedDate + return } // Query string. @@ -133,18 +120,19 @@ func (s *service) CalculateSeedSignature(r *http.Request) (cred auth.Credentials stringToSign := utils.GetStringToSign(canonicalRequest, date, signV4Values.Credential.getScope()) // Get hmac signing key. - signingKey := utils.GetSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region, string(ServiceS3)) + signingKey := utils.GetSigningKey(sec, signV4Values.Credential.scope.date, region, string(stype)) // Calculate signature. newSignature := utils.GetSignature(signingKey, stringToSign) // Verify if signature match. if !compareSignatureV4(newSignature, signV4Values.Signature) { - return cred, "", "", time.Time{}, responses.ErrSignatureDoesNotMatch + rerr = responses.ErrSignatureDoesNotMatch + return } // Return caculated signature. - return cred, newSignature, region, date, nil + return } const maxLineLength = 4 * humanize.KiByte // assumed <= bufio.defaultBufSize 4KiB @@ -158,37 +146,33 @@ var errMalformedEncoding = errors.New("malformed chunked encoding") // chunk is considered too big if its bigger than > 16MiB. var errChunkTooBig = errors.New("chunk too big: choose chunk size <= 16MiB") -// NewSignV4ChunkedReader returns a new s3ChunkedReader that translates the data read from r -// out of HTTP "chunked" format before returning it. -// The s3ChunkedReader returns io.EOF when the final 0-length chunk is read. -// -// NewChunkedReader is not needed by normal applications. 
The http package -// automatically decodes chunking when reading response bodies. -func NewSignV4ChunkedReader(req *http.Request, s *service) (io.ReadCloser, *responses.Error) { - cred, seedSignature, region, seedDate, errCode := s.CalculateSeedSignature(req) - if errCode != nil { - return nil, errCode +func (s *service) setReqBodySignV4ChunkedReader(req *http.Request, region string, stype serviceType) (ack string, rerr *responses.Error) { + ack, sec, seedSignature, region, seedDate, rerr := s.calculateSeedSignature(req, region, stype) + if rerr != nil { + return } - - return &s3ChunkedReader{ + req.Body = &s3ChunkedReader{ reader: bufio.NewReader(req.Body), - cred: cred, + secret: sec, seedSignature: seedSignature, seedDate: seedDate, region: region, + stype: stype, chunkSHA256Writer: sha256.New(), buffer: make([]byte, 64*1024), - }, nil + } + return } // Represents the overall state that is required for decoding a // AWS Signature V4 chunked reader. type s3ChunkedReader struct { reader *bufio.Reader - cred auth.Credentials + secret string seedSignature string seedDate time.Time region string + stype serviceType chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data. buffer []byte @@ -346,7 +330,7 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { // Once we have read the entire chunk successfully, we verify // that the received signature matches our computed signature. 
cr.chunkSHA256Writer.Write(cr.buffer) - newSignature := getChunkSignature(cr.cred, cr.seedSignature, cr.region, cr.seedDate, hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))) + newSignature := getChunkSignature(cr.secret, cr.seedSignature, cr.region, cr.stype, cr.seedDate, hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))) if !compareSignatureV4(string(signature[16:]), newSignature) { cr.err = errSignatureMismatch return n, cr.err From 0a5e0d6af8ef1b2bc60ca2b63723490030d70673 Mon Sep 17 00:00:00 2001 From: steve Date: Fri, 25 Aug 2023 00:34:23 +0800 Subject: [PATCH 061/139] optmize: rename auth to sign --- s3/handlers/handlers.go | 2 +- s3/handlers/proto.go | 2 +- s3/routers/routers.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 03b19a32e..232db21f9 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -65,7 +65,7 @@ func (h *Handlers) Log(handler http.Handler) http.Handler { }) } -func (h *Handlers) Auth(handler http.Handler) http.Handler { +func (h *Handlers) Sign(handler http.Handler) http.Handler { h.sigsvc.SetSecretGetter(func(key string) (secret string, exists, enable bool, err error) { ack, err := h.acksvc.Get(key) if errors.Is(err, accesskey.ErrNotFound) { diff --git a/s3/handlers/proto.go b/s3/handlers/proto.go index f5927ff15..e00cc1528 100644 --- a/s3/handlers/proto.go +++ b/s3/handlers/proto.go @@ -7,7 +7,7 @@ import ( type Handlerser interface { // middlewares Cors(handler http.Handler) http.Handler - Auth(handler http.Handler) http.Handler + Sign(handler http.Handler) http.Handler Log(handler http.Handler) http.Handler // bucket diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 681fccb18..7410ad669 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -26,7 +26,7 @@ func (routers *Routers) Register() http.Handler { root.Use( routers.handlers.Cors, routers.handlers.Log, - routers.handlers.Auth, + routers.handlers.Sign, ) bucket 
:= root.PathPrefix("/{bucket}").Subrouter() From fe0156cfae4000073183d315e00412c8793737b8 Mon Sep 17 00:00:00 2001 From: steve Date: Fri, 25 Aug 2023 00:41:06 +0800 Subject: [PATCH 062/139] optmize: code structure & h.name --- s3/handlers/handlers.go | 224 +-------------------------------- s3/handlers/handlers_bucket.go | 164 ++++++++++++++++++++++++ s3/handlers/handlers_object.go | 76 +++++++++++ 3 files changed, 243 insertions(+), 221 deletions(-) create mode 100644 s3/handlers/handlers_bucket.go create mode 100644 s3/handlers/handlers_object.go diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 232db21f9..604064fac 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -6,7 +6,6 @@ import ( "fmt" "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/ctxmu" - "github.com/bittorrent/go-btfs/s3/etag" "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/services/accesskey" @@ -16,7 +15,6 @@ import ( "net/http" "runtime" - s3action "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/s3utils" rscors "github.com/rs/cors" @@ -87,7 +85,7 @@ func (h *Handlers) Sign(handler http.Handler) http.Handler { var err *responses.Error defer func() { if err != nil { - cctx.SetHandleInf(r, fnName(), err) + cctx.SetHandleInf(r, h.name(), err) } }() @@ -106,7 +104,7 @@ func (h *Handlers) Sign(handler http.Handler) http.Handler { func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { - cctx.SetHandleInf(r, fnName(), err) + cctx.SetHandleInf(r, h.name(), err) }() req, err := requests.ParsePutBucketRequest(r) @@ -152,223 +150,7 @@ func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { return } -func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { - var err error - defer func() { - cctx.SetHandleInf(r, 
fnName(), err) - }() - - req, err := requests.ParseHeadBucketRequest(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) - return - } - - ack := cctx.GetAccessKey(r) - - err = h.bucsvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) - if errors.Is(err, bucket.ErrNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrAccessDenied) - return - } - - responses.WriteHeadBucketResponse(w, r) -} - -func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { - var err error - defer func() { - cctx.SetHandleInf(r, fnName(), err) - }() - - req, err := requests.ParseDeleteBucketRequest(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) - return - } - - ctx := r.Context() - ack := cctx.GetAccessKey(r) - - err = h.bucsvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - //todo check all errors. 
- err = h.bucsvc.DeleteBucket(ctx, req.Bucket) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - responses.WriteDeleteBucketResponse(w) -} - -func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { - var err error - defer func() { - cctx.SetHandleInf(r, fnName(), err) - }() - - ack := cctx.GetAccessKey(r) - if ack == "" { - responses.WriteErrorResponse(w, r, responses.ErrNoAccessKey) - return - } - - //todo check all errors - bucketMetas, err := h.bucsvc.GetAllBucketsOfUser(ack) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - responses.WriteListBucketsResponse(w, r, bucketMetas) -} - -func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { - var err error - defer func() { - cctx.SetHandleInf(r, fnName(), err) - }() - - req, err := requests.ParseGetBucketAclRequest(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) - return - } - - ctx := r.Context() - ack := cctx.GetAccessKey(r) - - if !h.bucsvc.HasBucket(ctx, req.Bucket) { - responses.WriteErrorResponseHeadersOnly(w, r, responses.ErrNoSuchBucket) - return - } - - err = h.bucsvc.CheckACL(ack, req.Bucket, s3action.GetBucketAclAction) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - //todo check all errors - acl, err := h.bucsvc.GetBucketAcl(ctx, req.Bucket) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - responses.WriteGetBucketAclResponse(w, r, ack, acl) -} - -func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { - var err error - defer func() { - cctx.SetHandleInf(r, fnName(), err) - }() - - req, err := requests.ParsePutBucketAclRequest(r) - if err != nil || len(req.ACL) == 0 || len(req.Bucket) == 0 { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) - return - } - - ctx := r.Context() - ack := cctx.GetAccessKey(r) - - err = h.bucsvc.CheckACL(ack, req.Bucket, 
s3action.PutBucketAclAction) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - if !requests.CheckAclPermissionType(&req.ACL) { - responses.WriteErrorResponse(w, r, responses.ErrNotImplemented) - return - } - - //todo check all errors - err = h.bucsvc.UpdateBucketAcl(ctx, req.Bucket, req.ACL) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - //todo check no return? - responses.WritePutBucketAclResponse(w, r) -} - -// PutObjectHandler http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html -func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { - var err error - defer func() { - cctx.SetHandleInf(r, fnName(), err) - }() - - // X-Amz-Copy-Source shouldn't be set for this call. - if _, ok := r.Header[consts.AmzCopySource]; ok { - responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) - return - } - - buc, obj, err := requests.ParseBucketAndObject(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) - return - } - - clientETag, err := etag.FromContentMD5(r.Header) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidDigest) - return - } - _ = clientETag - - size := r.ContentLength - // todo: streaming signed - - if size == -1 { - responses.WriteErrorResponse(w, r, responses.ErrMissingContentLength) - return - } - if size == 0 { - responses.WriteErrorResponse(w, r, responses.ErrEntityTooSmall) - return - } - - if size > consts.MaxObjectSize { - responses.WriteErrorResponse(w, r, responses.ErrEntityTooLarge) - return - } - - ctx := r.Context() - ack := cctx.GetAccessKey(r) - - err = h.bucsvc.CheckACL(ack, buc, s3action.PutObjectAction) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - // todo: convert error - err = s3utils.CheckPutObjectArgs(ctx, buc, obj) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - // todo - fmt.Println("need put object...", buc, 
obj) -} - -func fnName() string { +func (h *Handlers) name() string { pc := make([]uintptr, 1) runtime.Callers(3, pc) f := runtime.FuncForPC(pc[0]) diff --git a/s3/handlers/handlers_bucket.go b/s3/handlers/handlers_bucket.go new file mode 100644 index 000000000..33a84c34b --- /dev/null +++ b/s3/handlers/handlers_bucket.go @@ -0,0 +1,164 @@ +package handlers + +import ( + "errors" + s3action "github.com/bittorrent/go-btfs/s3/action" + "github.com/bittorrent/go-btfs/s3/cctx" + "github.com/bittorrent/go-btfs/s3/requests" + "github.com/bittorrent/go-btfs/s3/responses" + "github.com/bittorrent/go-btfs/s3/services/bucket" + "net/http" +) + +func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { + var err error + defer func() { + cctx.SetHandleInf(r, fnName(), err) + }() + + req, err := requests.ParseDeleteBucketRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) + return + } + + ctx := r.Context() + ack := cctx.GetAccessKey(r) + + err = h.bucsvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + //todo check all errors. 
+ err = h.bucsvc.DeleteBucket(ctx, req.Bucket) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + responses.WriteDeleteBucketResponse(w) +} + +func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { + var err error + defer func() { + cctx.SetHandleInf(r, fnName(), err) + }() + + ack := cctx.GetAccessKey(r) + if ack == "" { + responses.WriteErrorResponse(w, r, responses.ErrNoAccessKey) + return + } + + //todo check all errors + bucketMetas, err := h.bucsvc.GetAllBucketsOfUser(ack) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + responses.WriteListBucketsResponse(w, r, bucketMetas) +} + +func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { + var err error + defer func() { + cctx.SetHandleInf(r, fnName(), err) + }() + + req, err := requests.ParseGetBucketAclRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) + return + } + + ctx := r.Context() + ack := cctx.GetAccessKey(r) + + if !h.bucsvc.HasBucket(ctx, req.Bucket) { + responses.WriteErrorResponseHeadersOnly(w, r, responses.ErrNoSuchBucket) + return + } + + err = h.bucsvc.CheckACL(ack, req.Bucket, s3action.GetBucketAclAction) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + //todo check all errors + acl, err := h.bucsvc.GetBucketAcl(ctx, req.Bucket) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + responses.WriteGetBucketAclResponse(w, r, ack, acl) +} + +func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { + var err error + defer func() { + cctx.SetHandleInf(r, fnName(), err) + }() + + req, err := requests.ParsePutBucketAclRequest(r) + if err != nil || len(req.ACL) == 0 || len(req.Bucket) == 0 { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) + return + } + + ctx := r.Context() + ack := cctx.GetAccessKey(r) + + err = h.bucsvc.CheckACL(ack, req.Bucket, 
s3action.PutBucketAclAction) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + if !requests.CheckAclPermissionType(&req.ACL) { + responses.WriteErrorResponse(w, r, responses.ErrNotImplemented) + return + } + + //todo check all errors + err = h.bucsvc.UpdateBucketAcl(ctx, req.Bucket, req.ACL) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + //todo check no return? + responses.WritePutBucketAclResponse(w, r) +} + +func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { + var err error + defer func() { + cctx.SetHandleInf(r, fnName(), err) + }() + + req, err := requests.ParseHeadBucketRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) + return + } + + ack := cctx.GetAccessKey(r) + + err = h.bucsvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + return + } + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrAccessDenied) + return + } + + responses.WriteHeadBucketResponse(w, r) +} diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go new file mode 100644 index 000000000..6dc44343d --- /dev/null +++ b/s3/handlers/handlers_object.go @@ -0,0 +1,76 @@ +package handlers + +import ( + "fmt" + s3action "github.com/bittorrent/go-btfs/s3/action" + "github.com/bittorrent/go-btfs/s3/cctx" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/etag" + "github.com/bittorrent/go-btfs/s3/requests" + "github.com/bittorrent/go-btfs/s3/responses" + "github.com/bittorrent/go-btfs/s3/s3utils" + "net/http" +) + +// PutObjectHandler http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html +func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { + var err error + defer func() { + cctx.SetHandleInf(r, fnName(), err) + }() + + // X-Amz-Copy-Source shouldn't be set 
for this call. + if _, ok := r.Header[consts.AmzCopySource]; ok { + responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) + return + } + + buc, obj, err := requests.ParseBucketAndObject(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) + return + } + + clientETag, err := etag.FromContentMD5(r.Header) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidDigest) + return + } + _ = clientETag + + size := r.ContentLength + // todo: streaming signed + + if size == -1 { + responses.WriteErrorResponse(w, r, responses.ErrMissingContentLength) + return + } + if size == 0 { + responses.WriteErrorResponse(w, r, responses.ErrEntityTooSmall) + return + } + + if size > consts.MaxObjectSize { + responses.WriteErrorResponse(w, r, responses.ErrEntityTooLarge) + return + } + + ctx := r.Context() + ack := cctx.GetAccessKey(r) + + err = h.bucsvc.CheckACL(ack, buc, s3action.PutObjectAction) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + // todo: convert error + err = s3utils.CheckPutObjectArgs(ctx, buc, obj) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + // todo + fmt.Println("need put object...", buc, obj) +} From 0c135c32ced52ea76cf2f65b8d2d5ca9613aa01b Mon Sep 17 00:00:00 2001 From: steve Date: Fri, 25 Aug 2023 00:42:02 +0800 Subject: [PATCH 063/139] fix: h.name --- s3/handlers/handlers_bucket.go | 10 +++++----- s3/handlers/handlers_object.go | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/s3/handlers/handlers_bucket.go b/s3/handlers/handlers_bucket.go index 33a84c34b..5cf8e2190 100644 --- a/s3/handlers/handlers_bucket.go +++ b/s3/handlers/handlers_bucket.go @@ -13,7 +13,7 @@ import ( func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { - cctx.SetHandleInf(r, fnName(), err) + cctx.SetHandleInf(r, h.name(), err) }() req, err := requests.ParseDeleteBucketRequest(r) @@ 
-44,7 +44,7 @@ func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { - cctx.SetHandleInf(r, fnName(), err) + cctx.SetHandleInf(r, h.name(), err) }() ack := cctx.GetAccessKey(r) @@ -66,7 +66,7 @@ func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { - cctx.SetHandleInf(r, fnName(), err) + cctx.SetHandleInf(r, h.name(), err) }() req, err := requests.ParseGetBucketAclRequest(r) @@ -102,7 +102,7 @@ func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { - cctx.SetHandleInf(r, fnName(), err) + cctx.SetHandleInf(r, h.name(), err) }() req, err := requests.ParsePutBucketAclRequest(r) @@ -139,7 +139,7 @@ func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { - cctx.SetHandleInf(r, fnName(), err) + cctx.SetHandleInf(r, h.name(), err) }() req, err := requests.ParseHeadBucketRequest(r) diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index 6dc44343d..09f04c0b0 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -16,7 +16,7 @@ import ( func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { - cctx.SetHandleInf(r, fnName(), err) + cctx.SetHandleInf(r, h.name(), err) }() // X-Amz-Copy-Source shouldn't be set for this call. 
From 17118db807fd11c5d41db7e6eff006812bf92af3 Mon Sep 17 00:00:00 2001 From: steve Date: Fri, 25 Aug 2023 03:27:45 +0800 Subject: [PATCH 064/139] feat: put-object --- s3/consts/consts.go | 9 +- s3/handlers/handlers.go | 12 +- s3/handlers/handlers_object.go | 75 +++++++---- s3/handlers/handlers_utils.go | 147 +++++++++++++++++++++ s3/responses/errors.go | 5 + s3/responses/wirters.go | 11 +- s3/responses/writers_common.go | 31 +++++ s3/server.go | 4 +- s3/services/object/proto.go | 65 +++++++++ s3/services/object/service.go | 76 +++++++++++ s3/services/object/service_option.go | 3 + s3/services/sign/signature.go | 72 +++++++--- s3/services/sign/streaming-signature-v4.go | 36 ++++- 13 files changed, 489 insertions(+), 57 deletions(-) create mode 100644 s3/handlers/handlers_utils.go create mode 100644 s3/services/object/proto.go create mode 100644 s3/services/object/service.go create mode 100644 s3/services/object/service_option.go diff --git a/s3/consts/consts.go b/s3/consts/consts.go index 2bb2d09a8..7429b39a1 100644 --- a/s3/consts/consts.go +++ b/s3/consts/consts.go @@ -5,7 +5,7 @@ import ( "time" ) -//some const +// some const const ( // Iso8601TimeFormat RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z Iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision. 
@@ -146,7 +146,12 @@ const ( Range = "Range" ) -//object const +// Standard BTFS HTTP response constants +const ( + BTFSHash = "BTFS-Hash" +) + +// object const const ( MaxObjectSize = 5 * humanize.TiByte diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 604064fac..d45d684d5 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -11,6 +11,7 @@ import ( "github.com/bittorrent/go-btfs/s3/services/accesskey" "github.com/bittorrent/go-btfs/s3/services/bucket" "github.com/bittorrent/go-btfs/s3/services/cors" + "github.com/bittorrent/go-btfs/s3/services/object" "github.com/bittorrent/go-btfs/s3/services/sign" "net/http" "runtime" @@ -27,15 +28,24 @@ type Handlers struct { acksvc accesskey.Service sigsvc sign.Service bucsvc bucket.Service + objsvc object.Service nslock ctxmu.MultiCtxRWLocker } -func NewHandlers(corsvc cors.Service, acksvc accesskey.Service, sigsvc sign.Service, bucsvc bucket.Service, options ...Option) (handlers *Handlers) { +func NewHandlers( + corsvc cors.Service, + acksvc accesskey.Service, + sigsvc sign.Service, + bucsvc bucket.Service, + objsvc object.Service, + options ...Option, +) (handlers *Handlers) { handlers = &Handlers{ corsvc: corsvc, acksvc: acksvc, sigsvc: sigsvc, bucsvc: bucsvc, + objsvc: objsvc, nslock: ctxmu.NewDefaultMultiCtxRWMutex(), } for _, option := range options { diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index 09f04c0b0..f7201ae7d 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -1,19 +1,26 @@ package handlers import ( - "fmt" - s3action "github.com/bittorrent/go-btfs/s3/action" + "context" + "errors" + "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/etag" "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/s3utils" + 
"github.com/bittorrent/go-btfs/s3/services/bucket" + "github.com/bittorrent/go-btfs/s3/utils/hash" "net/http" + "time" ) +const lockWaitTimeout = 5 * time.Minute + // PutObjectHandler http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) var err error defer func() { cctx.SetHandleInf(r, h.name(), err) @@ -21,56 +28,76 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { // X-Amz-Copy-Source shouldn't be set for this call. if _, ok := r.Header[consts.AmzCopySource]; ok { + err = errors.New("shouldn't be copy") responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) return } - buc, obj, err := requests.ParseBucketAndObject(r) - if err != nil { + aclHeader := r.Header.Get(consts.AmzACL) + if aclHeader != "" { + err = errors.New("object acl can only set to default") responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) return } - clientETag, err := etag.FromContentMD5(r.Header) + bucname, objname, err := requests.ParseBucketAndObject(r) if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidDigest) + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) return } - _ = clientETag - size := r.ContentLength - // todo: streaming signed + err = s3utils.CheckPutObjectArgs(ctx, bucname, objname) + if err != nil { // todo: convert error + responses.WriteErrorResponse(w, r, err) + return + } + + // lock + runlock, err := h.rlock(ctx, bucname, w, r) + if err != nil { + return + } + defer runlock() - if size == -1 { - responses.WriteErrorResponse(w, r, responses.ErrMissingContentLength) + err = h.bucsvc.CheckACL(ack, bucname, action.PutObjectAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } - if size == 0 { - responses.WriteErrorResponse(w, r, responses.ErrEntityTooSmall) + 
+ hrdr, ok := r.Body.(*hash.Reader) + if !ok { + responses.WriteErrorResponse(w, r, responses.ErrInternalError) return } - if size > consts.MaxObjectSize { - responses.WriteErrorResponse(w, r, responses.ErrEntityTooLarge) + metadata, err := extractMetadata(ctx, r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequest) return } - ctx := r.Context() - ack := cctx.GetAccessKey(r) + obj, err := h.objsvc.StoreObject(ctx, bucname, objname, hrdr, r.ContentLength, metadata) - err = h.bucsvc.CheckACL(ack, buc, s3action.PutObjectAction) if err != nil { responses.WriteErrorResponse(w, r, err) return } - // todo: convert error - err = s3utils.CheckPutObjectArgs(ctx, buc, obj) + responses.WritePutObjectResponse(w, r, obj, false) +} + +func (h *Handlers) rlock(ctx context.Context, key string, w http.ResponseWriter, r *http.Request) (runlock func(), err error) { + ctx, cancel := context.WithTimeout(ctx, lockWaitTimeout) + err = h.nslock.RLock(ctx, key) if err != nil { responses.WriteErrorResponse(w, r, err) + cancel() return } - - // todo - fmt.Println("need put object...", buc, obj) + runlock = func() { + h.nslock.RUnlock(key) + cancel() + } + return } diff --git a/s3/handlers/handlers_utils.go b/s3/handlers/handlers_utils.go new file mode 100644 index 000000000..fb7d2f8ca --- /dev/null +++ b/s3/handlers/handlers_utils.go @@ -0,0 +1,147 @@ +package handlers + +import ( + "context" + "errors" + "github.com/bittorrent/go-btfs/s3/consts" + "net/http" + "net/textproto" + "strings" +) + +const streamingContentEncoding = "aws-chunked" + +// errInvalidArgument means that input argument is invalid. +var errInvalidArgument = errors.New("Invalid arguments specified") + +// Supported headers that needs to be extracted. 
+var supportedHeaders = []string{ + consts.ContentType, + consts.CacheControl, + consts.ContentLength, + consts.ContentEncoding, + consts.ContentDisposition, + consts.AmzStorageClass, + consts.AmzObjectTagging, + consts.Expires, + consts.AmzBucketReplicationStatus, + // Add more supported headers here. +} + +// userMetadataKeyPrefixes contains the prefixes of used-defined metadata keys. +// All values stored with a key starting with one of the following prefixes +// must be extracted from the header. +var userMetadataKeyPrefixes = []string{ + "x-amz-meta-", +} + +// matches k1 with all keys, returns 'true' if one of them matches +func equals(k1 string, keys ...string) bool { + for _, k2 := range keys { + if strings.EqualFold(k1, k2) { + return true + } + } + return false +} + +// extractMetadata extracts metadata from HTTP header and HTTP queryString. +// Note: The key has been converted to lowercase letters +func extractMetadata(ctx context.Context, r *http.Request) (metadata map[string]string, err error) { + query := r.Form + header := r.Header + metadata = make(map[string]string) + // Extract all query values. + err = extractMetadataFromMime(ctx, textproto.MIMEHeader(query), metadata) + if err != nil { + return nil, err + } + + // Extract all header values. + err = extractMetadataFromMime(ctx, textproto.MIMEHeader(header), metadata) + if err != nil { + return nil, err + } + + // Set content-type to default value if it is not set. 
+ if _, ok := metadata[strings.ToLower(consts.ContentType)]; !ok { + metadata[strings.ToLower(consts.ContentType)] = "binary/octet-stream" + } + + // https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w + for k := range metadata { + if equals(k, consts.AmzMetaUnencryptedContentLength, consts.AmzMetaUnencryptedContentMD5) { + delete(metadata, k) + } + } + + if contentEncoding, ok := metadata[strings.ToLower(consts.ContentEncoding)]; ok { + contentEncoding = trimAwsChunkedContentEncoding(contentEncoding) + if contentEncoding != "" { + // Make sure to trim and save the content-encoding + // parameter for a streaming signature which is set + // to a custom value for example: "aws-chunked,gzip". + metadata[strings.ToLower(consts.ContentEncoding)] = contentEncoding + } else { + // Trimmed content encoding is empty when the header + // value is set to "aws-chunked" only. + + // Make sure to delete the content-encoding parameter + // for a streaming signature which is set to value + // for example: "aws-chunked" + delete(metadata, strings.ToLower(consts.ContentEncoding)) + } + } + + // Success. + return metadata, nil +} + +// extractMetadata extracts metadata from map values. +func extractMetadataFromMime(ctx context.Context, v textproto.MIMEHeader, m map[string]string) error { + if v == nil { + return errInvalidArgument + } + + nv := make(textproto.MIMEHeader, len(v)) + for k, kv := range v { + // Canonicalize all headers, to remove any duplicates. + nv[strings.ToLower(k)] = kv + } + + // Save all supported headers. 
+ for _, supportedHeader := range supportedHeaders { + value, ok := nv[strings.ToLower(supportedHeader)] + if ok { + m[strings.ToLower(supportedHeader)] = strings.Join(value, ",") + } + } + + for key := range v { + lowerKey := strings.ToLower(key) + for _, prefix := range userMetadataKeyPrefixes { + if !strings.HasPrefix(lowerKey, strings.ToLower(prefix)) { + continue + } + value, ok := nv[lowerKey] + if ok { + m[lowerKey] = strings.Join(value, ",") + break + } + } + } + return nil +} + +func trimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string) { + if contentEnc == "" { + return contentEnc + } + var newEncs []string + for _, enc := range strings.Split(contentEnc, ",") { + if enc != streamingContentEncoding { + newEncs = append(newEncs, enc) + } + } + return strings.Join(newEncs, ",") +} diff --git a/s3/responses/errors.go b/s3/responses/errors.go index 037e57d43..6e009096e 100644 --- a/s3/responses/errors.go +++ b/s3/responses/errors.go @@ -1032,4 +1032,9 @@ var ( description: "The JSON was not well-formed or did not validate against our published format.", httpStatusCode: http.StatusBadRequest, } + ErrInvalidRequest = &Error{ + code: "InvalidRequest", + description: "InvalidRequest", + httpStatusCode: http.StatusBadRequest, + } ) diff --git a/s3/responses/wirters.go b/s3/responses/wirters.go index 43dce5c38..171c28f6e 100644 --- a/s3/responses/wirters.go +++ b/s3/responses/wirters.go @@ -2,12 +2,12 @@ package responses import ( "fmt" - "github.com/bittorrent/go-btfs/s3/services/bucket" - "net/http" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/services/bucket" + "github.com/bittorrent/go-btfs/s3/services/object" + "net/http" ) func WritePutBucketResponse(w http.ResponseWriter, r *http.Request) { @@ -79,3 +79,8 @@ func WritePutBucketAclResponse(w http.ResponseWriter, r *http.Request) { WriteSuccessResponse(w, r) return } + +func 
WritePutObjectResponse(w http.ResponseWriter, r *http.Request, obj object.Object, delete bool) { + setPutObjHeaders(w, obj, delete) + WriteSuccessResponseHeadersOnly(w, r) +} diff --git a/s3/responses/writers_common.go b/s3/responses/writers_common.go index 85ee58cff..6afc1cf6f 100644 --- a/s3/responses/writers_common.go +++ b/s3/responses/writers_common.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/services/object" "github.com/gorilla/mux" logging "github.com/ipfs/go-log/v2" "net/http" @@ -85,6 +86,11 @@ func WriteErrorResponse(w http.ResponseWriter, r *http.Request, err error) { WriteXMLResponse(w, r, rerr.HTTPStatusCode(), errorResponse) } +// WriteSuccessResponseHeadersOnly write SuccessResponseHeadersOnly +func WriteSuccessResponseHeadersOnly(w http.ResponseWriter, r *http.Request) { + writeResponse(w, r, http.StatusOK, nil, mimeNone) +} + // WriteSuccessResponse write SuccessResponseHeadersOnly func WriteSuccessResponse(w http.ResponseWriter, r *http.Request) { writeResponse(w, r, http.StatusOK, nil, mimeNone) @@ -192,3 +198,28 @@ func writeResponseSimple(w http.ResponseWriter, statusCode int, response []byte, func WriteSuccessNoContent(w http.ResponseWriter) { writeResponseSimple(w, http.StatusNoContent, nil, mimeNone) } + +// setPutObjHeaders sets all the necessary headers returned back +// upon a success Put/Copy/CompleteMultipart/Delete requests +// to activate delete only headers set delete as true +func setPutObjHeaders(w http.ResponseWriter, obj object.Object, delete bool) { + // We must not use the http.Header().Set method here because some (broken) + // clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive). + // Therefore, we have to set the ETag directly as map entry. 
+ if obj.ETag != "" && !delete { + w.Header()[consts.ETag] = []string{`"` + obj.ETag + `"`} + } + + if obj.Cid != "" { + w.Header()[consts.BTFSHash] = []string{obj.Cid} + } + + // Set the relevant version ID as part of the response header. + if obj.VersionID != "" { + w.Header()[consts.AmzVersionID] = []string{obj.VersionID} + // If version is a deleted marker, set this header as well + if obj.DeleteMarker && delete { // only returned during delete object + w.Header()[consts.AmzDeleteMarker] = []string{strconv.FormatBool(obj.DeleteMarker)} + } + } +} diff --git a/s3/server.go b/s3/server.go index 7c1c49a15..f683c5432 100644 --- a/s3/server.go +++ b/s3/server.go @@ -8,6 +8,7 @@ import ( "github.com/bittorrent/go-btfs/s3/services/accesskey" "github.com/bittorrent/go-btfs/s3/services/bucket" "github.com/bittorrent/go-btfs/s3/services/cors" + "github.com/bittorrent/go-btfs/s3/services/object" "github.com/bittorrent/go-btfs/s3/services/sign" "github.com/bittorrent/go-btfs/transaction/storage" "sync" @@ -36,9 +37,10 @@ func NewServer(storageStore storage.StateStorer) *server.Server { sigsvc := sign.NewService() bucsvc := bucket.NewService(ps) bucsvc.SetEmptyBucket(bucsvc.EmptyBucket) //todo EmptyBucket参数后续更新为object对象 + objsvc := object.NewService(ps) // handlers - hs := handlers.NewHandlers(corsvc, acksvc, sigsvc, bucsvc) + hs := handlers.NewHandlers(corsvc, acksvc, sigsvc, bucsvc, objsvc) // routers rs := routers.NewRouters(hs) diff --git a/s3/services/object/proto.go b/s3/services/object/proto.go new file mode 100644 index 000000000..d17d0b43e --- /dev/null +++ b/s3/services/object/proto.go @@ -0,0 +1,65 @@ +package object + +import ( + "context" + "errors" + "github.com/bittorrent/go-btfs/s3/utils/hash" + "time" +) + +var ErrNotFound = errors.New("object not found") + +type Service interface { + StoreObject(ctx context.Context, bucname, objname string, reader *hash.Reader, size int64, meta map[string]string) (obj Object, err error) +} + +type Object struct { + // Name 
of the bucket. + Bucket string + + // Name of the object. + Name string + + // Date and time when the object was last modified. + ModTime time.Time + + // Total object size. + Size int64 + + // IsDir indicates if the object is prefix. + IsDir bool + + // Hex encoded unique entity tag of the object. + ETag string + + // ipfs key + Cid string + Acl string + // Version ID of this object. + VersionID string + + // IsLatest indicates if this is the latest current version + // latest can be true for delete marker or a version. + IsLatest bool + + // DeleteMarker indicates if the versionId corresponds + // to a delete marker on an object. + DeleteMarker bool + + // A standard MIME type describing the format of the object. + ContentType string + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the object referenced + // by the Content-Type header field. + ContentEncoding string + + // Date and time at which the object is no longer able to be cached + Expires time.Time + + // Date and time when the object was last accessed. + AccTime time.Time + + // The mod time of the successor object version if any + SuccessorModTime time.Time +} diff --git a/s3/services/object/service.go b/s3/services/object/service.go new file mode 100644 index 000000000..8b0260e5a --- /dev/null +++ b/s3/services/object/service.go @@ -0,0 +1,76 @@ +package object + +import ( + "context" + "fmt" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/providers" + "github.com/bittorrent/go-btfs/s3/utils/hash" + "net/http" + "strings" + "time" +) + +const ( + objectKeyFormat = "obj/%s/%s" +) + +var _ Service = (*service)(nil) + +// service captures all bucket metadata for a given cluster. +type service struct { + providers providers.Providerser +} + +// NewService - creates new policy system. 
+func NewService(providers providers.Providerser, options ...Option) Service { + s := &service{ + providers: providers, + } + for _, option := range options { + option(s) + } + return s +} + +func (s *service) getObjectKey(buc, obj string) string { + return fmt.Sprintf(objectKeyFormat, buc, obj) +} + +func (s *service) StoreObject(ctx context.Context, bucname, objname string, reader *hash.Reader, size int64, meta map[string]string) (obj Object, err error) { + cid, err := s.providers.GetFileStore().AddWithOpts(reader, true, true) + if err != nil { + return + } + + obj = Object{ + Bucket: bucname, + Name: objname, + ModTime: time.Now().UTC(), + Size: size, + IsDir: false, + ETag: reader.ETag().String(), + Cid: cid, + VersionID: "", + IsLatest: true, + DeleteMarker: false, + Acl: meta[consts.AmzACL], + ContentType: meta[strings.ToLower(consts.ContentType)], + ContentEncoding: meta[strings.ToLower(consts.ContentEncoding)], + SuccessorModTime: time.Now().UTC(), + } + + // Update expires + if exp, ok := meta[strings.ToLower(consts.Expires)]; ok { + if t, e := time.Parse(http.TimeFormat, exp); e == nil { + obj.Expires = t.UTC() + } + } + + err = s.providers.GetStateStore().Put(s.getObjectKey(bucname, objname), obj) + if err != nil { + return + } + + return +} diff --git a/s3/services/object/service_option.go b/s3/services/object/service_option.go new file mode 100644 index 000000000..9109d3188 --- /dev/null +++ b/s3/services/object/service_option.go @@ -0,0 +1,3 @@ +package object + +type Option func(svc *service) diff --git a/s3/services/sign/signature.go b/s3/services/sign/signature.go index c176189b7..6cf160aa1 100644 --- a/s3/services/sign/signature.go +++ b/s3/services/sign/signature.go @@ -15,33 +15,25 @@ func (s *service) isReqAuthenticated(r *http.Request, region string, stype servi if rerr != nil { return } - clientETag, err := etag.FromContentMD5(r.Header) - if err != nil { - rerr = responses.ErrInvalidDigest + + size := r.ContentLength + + if size == -1 { + 
rerr = responses.ErrMissingContentLength return } - // Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned) - // Do not verify 'X-Amz-Content-Sha256' if skipSHA256. - var contentSHA256 []byte - if skipSHA256 := SkipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) { - if sha256Sum, ok := r.Form[consts.AmzContentSha256]; ok && len(sha256Sum) > 0 { - contentSHA256, err = hex.DecodeString(sha256Sum[0]) - if err != nil { - rerr = responses.ErrContentSHA256Mismatch - return - } - } - } else if _, ok := r.Header[consts.AmzContentSha256]; !skipSHA256 && ok { - contentSHA256, err = hex.DecodeString(r.Header.Get(consts.AmzContentSha256)) - if err != nil || len(contentSHA256) == 0 { - rerr = responses.ErrContentSHA256Mismatch - } + if size > consts.MaxObjectSize { + rerr = responses.ErrEntityTooLarge + return + } + + md5Hex, sha256Hex, rerr := s.getClientCheckSum(r) + if rerr != nil { + return } - // Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present. - // The verification happens implicit during reading. 
- reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1) + reader, err := hash.NewReader(r.Body, size, md5Hex, sha256Hex, size) if err != nil { rerr = responses.ErrInternalError return @@ -52,6 +44,42 @@ func (s *service) isReqAuthenticated(r *http.Request, region string, stype servi return } +func (s *service) getClientCheckSum(r *http.Request) (md5TagStr, sha256SumStr string, rerr *responses.Error) { + eTag, err := etag.FromContentMD5(r.Header) + if err != nil { + rerr = responses.ErrInvalidDigest + return + } + md5TagStr = eTag.String() + + skipSHA256 := SkipContentSha256Cksum(r) + if skipSHA256 { + return + } + + var ( + contentSHA256 []byte + sha256Sum []string + ) + + if isRequestPresignedSignatureV4(r) { + sha256Sum = r.Form[consts.AmzContentSha256] + } else { + sha256Sum = r.Header[consts.AmzContentSha256] + } + + if len(sha256Sum) > 0 { + contentSHA256, err = hex.DecodeString(sha256Sum[0]) + if err != nil || len(contentSHA256) == 0 { + rerr = responses.ErrContentSHA256Mismatch + return + } + sha256SumStr = hex.EncodeToString(contentSHA256) + } + + return +} + func (s *service) reqSignatureV4Verify(r *http.Request, region string, stype serviceType) (ack string, rerr *responses.Error) { sha256sum, err := GetContentSha256Cksum(r, stype) if err != nil { diff --git a/s3/services/sign/streaming-signature-v4.go b/s3/services/sign/streaming-signature-v4.go index 41094ee33..99e991bd3 100644 --- a/s3/services/sign/streaming-signature-v4.go +++ b/s3/services/sign/streaming-signature-v4.go @@ -8,6 +8,7 @@ import ( "errors" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/utils" + s3hash "github.com/bittorrent/go-btfs/s3/utils/hash" "hash" "io" "net/http" @@ -146,13 +147,14 @@ var errMalformedEncoding = errors.New("malformed chunked encoding") // chunk is considered too big if its bigger than > 16MiB. 
var errChunkTooBig = errors.New("chunk too big: choose chunk size <= 16MiB") -func (s *service) setReqBodySignV4ChunkedReader(req *http.Request, region string, stype serviceType) (ack string, rerr *responses.Error) { - ack, sec, seedSignature, region, seedDate, rerr := s.calculateSeedSignature(req, region, stype) +func (s *service) setReqBodySignV4ChunkedReader(r *http.Request, region string, stype serviceType) (ack string, rerr *responses.Error) { + ack, sec, seedSignature, region, seedDate, rerr := s.calculateSeedSignature(r, region, stype) if rerr != nil { return } - req.Body = &s3ChunkedReader{ - reader: bufio.NewReader(req.Body), + + crdr := &s3ChunkedReader{ + reader: bufio.NewReader(r.Body), secret: sec, seedSignature: seedSignature, seedDate: seedDate, @@ -161,6 +163,32 @@ func (s *service) setReqBodySignV4ChunkedReader(req *http.Request, region string chunkSHA256Writer: sha256.New(), buffer: make([]byte, 64*1024), } + + size := r.ContentLength + + if size == -1 { + rerr = responses.ErrMissingContentLength + return + } + + if size > consts.MaxObjectSize { + rerr = responses.ErrEntityTooLarge + return + } + + md5Hex, sha256Hex, rerr := s.getClientCheckSum(r) + if rerr != nil { + return + } + + hrdr, err := s3hash.NewReader(crdr, size, md5Hex, sha256Hex, size) + if err != nil { + rerr = responses.ErrInternalError + return + } + + r.Body = hrdr + return } From be278a4d50de755d116c45dd2fa2305fe690d716 Mon Sep 17 00:00:00 2001 From: steve Date: Fri, 25 Aug 2023 06:38:24 +0800 Subject: [PATCH 065/139] feat: multipart --- s3/handlers/handlers.go | 152 ++++------- s3/handlers/handlers_bucket.go | 51 ++++ s3/handlers/handlers_middlewares.go | 68 +++++ s3/handlers/handlers_multipart.go | 334 +++++++++++++++++++++++ s3/handlers/handlers_object.go | 43 +-- s3/handlers/proto.go | 6 + s3/responses/types.go | 78 ++++-- s3/responses/wirters.go | 24 +- s3/responses/writers_common.go | 27 +- s3/routers/routers.go | 38 ++- s3/services/multipart/proto.go | 4 - 
s3/services/multipart/service.go | 29 -- s3/services/multipart/service_options.go | 3 - s3/services/object/proto.go | 64 ++++- s3/services/object/service.go | 273 +++++++++++++++++- 15 files changed, 974 insertions(+), 220 deletions(-) create mode 100644 s3/handlers/handlers_middlewares.go create mode 100644 s3/handlers/handlers_multipart.go delete mode 100644 s3/services/multipart/proto.go delete mode 100644 s3/services/multipart/service.go delete mode 100644 s3/services/multipart/service_options.go diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index d45d684d5..98fef93e8 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -2,11 +2,9 @@ package handlers import ( - "errors" - "fmt" - "github.com/bittorrent/go-btfs/s3/cctx" + "context" + "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/ctxmu" - "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/services/accesskey" "github.com/bittorrent/go-btfs/s3/services/bucket" @@ -14,13 +12,13 @@ import ( "github.com/bittorrent/go-btfs/s3/services/object" "github.com/bittorrent/go-btfs/s3/services/sign" "net/http" + "net/url" "runtime" - - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/s3utils" - rscors "github.com/rs/cors" + "strconv" ) +const lockPrefix = "s3:lock/" + var _ Handlerser = (*Handlers)(nil) type Handlers struct { @@ -54,115 +52,63 @@ func NewHandlers( return } -func (h *Handlers) Cors(handler http.Handler) http.Handler { - return rscors.New(rscors.Options{ - AllowedOrigins: h.corsvc.GetAllowOrigins(), - AllowedMethods: h.corsvc.GetAllowMethods(), - AllowedHeaders: h.corsvc.GetAllowHeaders(), - ExposedHeaders: h.corsvc.GetAllowHeaders(), - AllowCredentials: true, - }).Handler(handler) -} - -func (h *Handlers) Log(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Printf("[REQ] <%4s> | 
%s\n", r.Method, r.URL) - handler.ServeHTTP(w, r) - hname, herr := cctx.GetHandleInf(r) - fmt.Printf("[RSP] <%4s> | %s | %s | %v\n", r.Method, r.URL, hname, herr) - }) -} - -func (h *Handlers) Sign(handler http.Handler) http.Handler { - h.sigsvc.SetSecretGetter(func(key string) (secret string, exists, enable bool, err error) { - ack, err := h.acksvc.Get(key) - if errors.Is(err, accesskey.ErrNotFound) { - exists = false - enable = true - err = nil - return - } - if err != nil { - return - } - exists = true - secret = ack.Secret - enable = ack.Enable - return - }) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var err *responses.Error - defer func() { - if err != nil { - cctx.SetHandleInf(r, h.name(), err) - } - }() - - ack, err := h.sigsvc.VerifyRequestSignature(r) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - cctx.SetAccessKey(r, ack) - - handler.ServeHTTP(w, r) - }) +func (h *Handlers) name() string { + pc := make([]uintptr, 1) + runtime.Callers(3, pc) + f := runtime.FuncForPC(pc[0]) + return f.Name() } -func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - req, err := requests.ParsePutBucketRequest(r) +func (h *Handlers) rlock(ctx context.Context, key string, w http.ResponseWriter, r *http.Request) (runlock func(), err error) { + ctx, cancel := context.WithTimeout(ctx, lockWaitTimeout) + err = h.nslock.RLock(ctx, lockPrefix+key) if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) + responses.WriteErrorResponse(w, r, err) + cancel() return } - - // issue: lock for check - ctx := r.Context() - ack := cctx.GetAccessKey(r) - - if err = s3utils.CheckValidBucketNameStrict(req.Bucket); err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidBucketName) - return + runlock = func() { + h.nslock.RUnlock(key) + cancel() } + return +} - if 
!requests.CheckAclPermissionType(&req.ACL) { - err = responses.ErrNotImplemented - responses.WriteErrorResponse(w, r, responses.ErrNotImplemented) +func (h *Handlers) lock(ctx context.Context, key string, w http.ResponseWriter, r *http.Request) (unlock func(), err error) { + ctx, cancel := context.WithTimeout(ctx, lockWaitTimeout) + err = h.nslock.Lock(ctx, lockPrefix+key) + if err != nil { + responses.WriteErrorResponse(w, r, err) + cancel() return } - - if ok := h.bucsvc.HasBucket(ctx, req.Bucket); ok { - err = responses.ErrBucketAlreadyExists - responses.WriteErrorResponseHeadersOnly(w, r, responses.ErrBucketAlreadyExists) - return + unlock = func() { + h.nslock.Unlock(key) + cancel() } + return +} - err = h.bucsvc.CreateBucket(ctx, req.Bucket, req.Region, ack, req.ACL) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInternalError) - return +// Parse object url queries +func (h *Handlers) getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string, rerr *responses.Error) { + var err error + if values.Get("max-parts") != "" { + if maxParts, err = strconv.Atoi(values.Get("max-parts")); err != nil { + rerr = responses.ErrInvalidMaxParts + return + } + } else { + maxParts = consts.MaxPartsList } - // Make sure to add Location information here only for bucket - if cp := requests.PathClean(r.URL.Path); cp != "" { - w.Header().Set(consts.Location, cp) // Clean any trailing slashes. 
+ if values.Get("part-number-marker") != "" { + if partNumberMarker, err = strconv.Atoi(values.Get("part-number-marker")); err != nil { + rerr = responses.ErrInvalidPartNumberMarker + return + } } - responses.WritePutBucketResponse(w, r) - + uploadID = values.Get("uploadId") + encodingType = values.Get("encoding-type") return } - -func (h *Handlers) name() string { - pc := make([]uintptr, 1) - runtime.Callers(3, pc) - f := runtime.FuncForPC(pc[0]) - return f.Name() -} diff --git a/s3/handlers/handlers_bucket.go b/s3/handlers/handlers_bucket.go index 5cf8e2190..39bab2981 100644 --- a/s3/handlers/handlers_bucket.go +++ b/s3/handlers/handlers_bucket.go @@ -4,12 +4,63 @@ import ( "errors" s3action "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/cctx" + "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" + "github.com/bittorrent/go-btfs/s3/s3utils" "github.com/bittorrent/go-btfs/s3/services/bucket" "net/http" ) +func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + req, err := requests.ParsePutBucketRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) + return + } + + // issue: lock for check + ctx := r.Context() + ack := cctx.GetAccessKey(r) + + if err = s3utils.CheckValidBucketNameStrict(req.Bucket); err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidBucketName) + return + } + + if !requests.CheckAclPermissionType(&req.ACL) { + err = responses.ErrNotImplemented + responses.WriteErrorResponse(w, r, responses.ErrNotImplemented) + return + } + + if ok := h.bucsvc.HasBucket(ctx, req.Bucket); ok { + err = responses.ErrBucketAlreadyExists + responses.WriteErrorResponseHeadersOnly(w, r, responses.ErrBucketAlreadyExists) + return + } + + err = h.bucsvc.CreateBucket(ctx, req.Bucket, req.Region, ack, 
req.ACL) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInternalError) + return + } + + // Make sure to add Location information here only for bucket + if cp := requests.PathClean(r.URL.Path); cp != "" { + w.Header().Set(consts.Location, cp) // Clean any trailing slashes. + } + + responses.WritePutBucketResponse(w, r) + + return +} + func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { diff --git a/s3/handlers/handlers_middlewares.go b/s3/handlers/handlers_middlewares.go new file mode 100644 index 000000000..522b40d7a --- /dev/null +++ b/s3/handlers/handlers_middlewares.go @@ -0,0 +1,68 @@ +package handlers + +import ( + "errors" + "fmt" + "github.com/bittorrent/go-btfs/s3/cctx" + "github.com/bittorrent/go-btfs/s3/responses" + "github.com/bittorrent/go-btfs/s3/services/accesskey" + rscors "github.com/rs/cors" + "net/http" +) + +func (h *Handlers) Cors(handler http.Handler) http.Handler { + return rscors.New(rscors.Options{ + AllowedOrigins: h.corsvc.GetAllowOrigins(), + AllowedMethods: h.corsvc.GetAllowMethods(), + AllowedHeaders: h.corsvc.GetAllowHeaders(), + ExposedHeaders: h.corsvc.GetAllowHeaders(), + AllowCredentials: true, + }).Handler(handler) +} + +func (h *Handlers) Log(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Printf("[REQ] <%4s> | %s\n", r.Method, r.URL) + handler.ServeHTTP(w, r) + hname, herr := cctx.GetHandleInf(r) + fmt.Printf("[RSP] <%4s> | %s | %s | %v\n", r.Method, r.URL, hname, herr) + }) +} + +func (h *Handlers) Sign(handler http.Handler) http.Handler { + h.sigsvc.SetSecretGetter(func(key string) (secret string, exists, enable bool, err error) { + ack, err := h.acksvc.Get(key) + if errors.Is(err, accesskey.ErrNotFound) { + exists = false + enable = true + err = nil + return + } + if err != nil { + return + } + exists = true + secret = ack.Secret + enable = ack.Enable + return + }) + + return 
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var err *responses.Error + defer func() { + if err != nil { + cctx.SetHandleInf(r, h.name(), err) + } + }() + + ack, err := h.sigsvc.VerifyRequestSignature(r) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + cctx.SetAccessKey(r, ack) + + handler.ServeHTTP(w, r) + }) +} diff --git a/s3/handlers/handlers_multipart.go b/s3/handlers/handlers_multipart.go new file mode 100644 index 000000000..99566cfaa --- /dev/null +++ b/s3/handlers/handlers_multipart.go @@ -0,0 +1,334 @@ +package handlers + +import ( + "errors" + "github.com/bittorrent/go-btfs/s3/action" + "github.com/bittorrent/go-btfs/s3/cctx" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/requests" + "github.com/bittorrent/go-btfs/s3/responses" + "github.com/bittorrent/go-btfs/s3/s3utils" + "github.com/bittorrent/go-btfs/s3/services/bucket" + "github.com/bittorrent/go-btfs/s3/services/object" + "github.com/bittorrent/go-btfs/s3/utils" + "github.com/bittorrent/go-btfs/s3/utils/hash" + "net/http" + "sort" + "strconv" +) + +func (h *Handlers) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + bucname, objname, err := requests.ParseBucketAndObject(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) + return + } + + err = s3utils.CheckNewMultipartArgs(ctx, bucname, objname) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + meta, err := extractMetadata(ctx, r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequest) + return + } + + // rlock bucket + runlock, err := h.rlock(ctx, bucname, w, r) + if err != nil { + return + } + defer runlock() + + // lock object + unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) + if err != nil { + return + } + 
defer unlock() + + err = h.bucsvc.CheckACL(ack, bucname, action.CreateMultipartUploadAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + return + } + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + mtp, err := h.objsvc.CreateMultipartUpload(ctx, bucname, objname, meta) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + responses.WriteCreateMultipartUploadResponse(w, r, bucname, objname, mtp.UploadID) + + return +} + +func (h *Handlers) UploadPartHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + // X-Amz-Copy-Source shouldn't be set for this call. + if _, ok := r.Header[consts.AmzCopySource]; ok { + err = errors.New("shouldn't be copy") + responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) + return + } + + bucname, objname, err := requests.ParseBucketAndObject(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) + return + } + + err = s3utils.CheckPutObjectPartArgs(ctx, bucname, objname) + if err != nil { // todo: convert error + responses.WriteErrorResponse(w, r, err) + return + } + + uploadID := r.Form.Get(consts.UploadID) + partIDString := r.Form.Get(consts.PartNumber) + partID, err := strconv.Atoi(partIDString) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidPart) + return + } + if partID > consts.MaxPartID { + responses.WriteErrorResponse(w, r, responses.ErrInvalidMaxParts) + return + } + + if r.ContentLength == 0 { + responses.WriteErrorResponse(w, r, responses.ErrEntityTooSmall) + return + } + + if r.ContentLength > consts.MaxPartSize { + responses.WriteErrorResponse(w, r, responses.ErrEntityTooLarge) + return + } + + hrdr, ok := r.Body.(*hash.Reader) + if !ok { + responses.WriteErrorResponse(w, r, responses.ErrInternalError) + 
return + } + + mtp, err := h.objsvc.GetMultipart(ctx, bucname, objname, uploadID) + if errors.Is(err, object.ErrUploadNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchUpload) + return + } + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + // rlock bucket + runlock, err := h.rlock(ctx, bucname, w, r) + if err != nil { + return + } + defer runlock() + + // lock object + unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) + if err != nil { + return + } + defer unlock() + + err = h.bucsvc.CheckACL(ack, bucname, action.PutObjectAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + return + } + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + part, err := h.objsvc.UploadPart(ctx, bucname, objname, uploadID, partID, hrdr, r.ContentLength, mtp.MetaData) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + responses.WriteUploadPartResponse(w, r, part) + + return +} + +func (h *Handlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + bucname, objname, err := requests.ParseBucketAndObject(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) + return + } + + err = s3utils.CheckAbortMultipartArgs(ctx, bucname, objname) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + uploadID, _, _, _, rerr := h.getObjectResources(r.Form) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, err) + return + } + + // rlock bucket + runlock, err := h.rlock(ctx, bucname, w, r) + if err != nil { + return + } + defer runlock() + + // rlock object + unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) + if err != nil { + return + } + defer unlock() + + err = h.bucsvc.CheckACL(ack, bucname, 
action.AbortMultipartUploadAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + return + } + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + err = h.objsvc.AbortMultipartUpload(ctx, bucname, objname, uploadID) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + responses.WriteAbortMultipartUploadResponse(w, r) + + return +} + +func (h *Handlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + bucname, objname, err := requests.ParseBucketAndObject(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) + return + } + + err = s3utils.CheckCompleteMultipartArgs(ctx, bucname, objname) + if err != nil { // todo: convert error + responses.WriteErrorResponse(w, r, err) + return + } + + // Content-Length is required and should be non-zero + if r.ContentLength <= 0 { + responses.WriteErrorResponse(w, r, responses.ErrMissingContentLength) + return + } + + // Get upload id. 
+ uploadID, _, _, _, rerr := h.getObjectResources(r.Form) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) + return + } + + complMultipartUpload := &object.CompleteMultipartUpload{} + if err = utils.XmlDecoder(r.Body, complMultipartUpload, r.ContentLength); err != nil { + responses.WriteErrorResponse(w, r, responses.ErrMalformedXML) + return + } + if len(complMultipartUpload.Parts) == 0 { + responses.WriteErrorResponse(w, r, responses.ErrMalformedXML) + return + } + if !sort.IsSorted(object.CompletedParts(complMultipartUpload.Parts)) { + responses.WriteErrorResponse(w, r, responses.ErrInvalidPartOrder) + return + } + + // rlock bucket + runlock, err := h.rlock(ctx, bucname, w, r) + if err != nil { + return + } + defer runlock() + + // rlock object + unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) + if err != nil { + return + } + defer unlock() + + err = h.bucsvc.CheckACL(ack, bucname, action.CompleteMultipartUploadAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + return + } + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + obj, err := h.objsvc.CompleteMultiPartUpload(ctx, bucname, objname, uploadID, complMultipartUpload.Parts) + if errors.Is(err, object.ErrUploadNotFound) { + rerr = responses.ErrNoSuchUpload + return + } + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + buc, err := h.bucsvc.GetBucketMeta(ctx, bucname) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + responses.WriteCompleteMultipartUploadResponse(w, r, bucname, objname, buc.Region, obj) + + return +} diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index f7201ae7d..9af768abd 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -1,7 +1,6 @@ package handlers import ( - "context" "errors" "github.com/bittorrent/go-btfs/s3/action" 
"github.com/bittorrent/go-btfs/s3/cctx" @@ -17,7 +16,6 @@ import ( const lockWaitTimeout = 5 * time.Minute -// PutObjectHandler http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ack := cctx.GetAccessKey(r) @@ -52,16 +50,14 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { return } - // lock - runlock, err := h.rlock(ctx, bucname, w, r) + meta, err := extractMetadata(ctx, r) if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequest) return } - defer runlock() - err = h.bucsvc.CheckACL(ack, bucname, action.PutObjectAction) - if errors.Is(err, bucket.ErrNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + if r.ContentLength == 0 { + responses.WriteErrorResponse(w, r, responses.ErrEntityTooSmall) return } @@ -71,33 +67,38 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { return } - metadata, err := extractMetadata(ctx, r) + // rlock bucket + runlock, err := h.rlock(ctx, bucname, w, r) if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequest) return } + defer runlock() - obj, err := h.objsvc.StoreObject(ctx, bucname, objname, hrdr, r.ContentLength, metadata) + // lock object + unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) + if err != nil { + return + } + defer unlock() + err = h.bucsvc.CheckACL(ack, bucname, action.PutObjectAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + return + } if err != nil { responses.WriteErrorResponse(w, r, err) return } - responses.WritePutObjectResponse(w, r, obj, false) -} + obj, err := h.objsvc.PutObject(ctx, bucname, objname, hrdr, r.ContentLength, meta) -func (h *Handlers) rlock(ctx context.Context, key string, w http.ResponseWriter, r *http.Request) (runlock func(), err error) { - ctx, cancel := 
context.WithTimeout(ctx, lockWaitTimeout) - err = h.nslock.RLock(ctx, key) if err != nil { responses.WriteErrorResponse(w, r, err) - cancel() return } - runlock = func() { - h.nslock.RUnlock(key) - cancel() - } + + responses.WritePutObjectResponse(w, r, obj) + return } diff --git a/s3/handlers/proto.go b/s3/handlers/proto.go index e00cc1528..32c956fc9 100644 --- a/s3/handlers/proto.go +++ b/s3/handlers/proto.go @@ -20,4 +20,10 @@ type Handlerser interface { // object PutObjectHandler(w http.ResponseWriter, r *http.Request) + + // multipart + CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) + UploadPartHandler(w http.ResponseWriter, r *http.Request) + AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) + CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) } diff --git a/s3/responses/types.go b/s3/responses/types.go index 1ca496d00..3f63653e0 100644 --- a/s3/responses/types.go +++ b/s3/responses/types.go @@ -3,26 +3,29 @@ package responses import ( "encoding/xml" "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/services/object" ) type GetBucketAclResponse AccessControlPolicy // AccessControlPolicy -// -// 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a -// CustomersName@amazon.com -// -// -// -// -// 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a -// CustomersName@amazon.com -// -// FULL_CONTROL -// -// -// +// +// +// 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a +// CustomersName@amazon.com +// +// +// +// +// 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a +// CustomersName@amazon.com +// +// FULL_CONTROL +// +// +// +// type AccessControlPolicy struct { Owner canonicalUser `xml:"Owner"` AccessControlList accessControlList `xml:"AccessControlList"` @@ -36,13 +39,13 @@ type canonicalUser struct { DisplayName string `xml:"DisplayName,omitempty"` } -//Grant grant +// Grant grant type Grant struct { Grantee Grantee 
`xml:"Grantee"` Permission Permission `xml:"Permission"` } -//Grantee grant +// Grantee grant type Grantee struct { XMLNS string `xml:"xmlns:xsi,attr"` XMLXSI string `xml:"xsi:type,attr"` @@ -189,3 +192,44 @@ func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error { type CommonPrefix struct { Prefix string } + +type InitiateMultipartUploadResponse struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult" json:"-"` + + Bucket string + Key string + UploadID string `xml:"UploadId"` +} + +func GenerateInitiateMultipartUploadResponse(bucname, objname, uploadID string) InitiateMultipartUploadResponse { + return InitiateMultipartUploadResponse{ + Bucket: bucname, + Key: objname, + UploadID: uploadID, + } +} + +type CompleteMultipartUploadResponse struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult" json:"-"` + + Location string + Bucket string + Key string + ETag string + + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string +} + +func GenerateCompleteMultipartUploadResponse(bucname, objname, location string, obj object.Object) CompleteMultipartUploadResponse { + c := CompleteMultipartUploadResponse{ + Location: location, + Bucket: bucname, + Key: objname, + // AWS S3 quotes the ETag in XML, make sure we are compatible here. 
+ ETag: "\"" + obj.ETag + "\"", + } + return c +} diff --git a/s3/responses/wirters.go b/s3/responses/wirters.go index 171c28f6e..81e130be7 100644 --- a/s3/responses/wirters.go +++ b/s3/responses/wirters.go @@ -80,7 +80,27 @@ func WritePutBucketAclResponse(w http.ResponseWriter, r *http.Request) { return } -func WritePutObjectResponse(w http.ResponseWriter, r *http.Request, obj object.Object, delete bool) { - setPutObjHeaders(w, obj, delete) +func WritePutObjectResponse(w http.ResponseWriter, r *http.Request, obj object.Object) { + setPutObjHeaders(w, obj.ETag, obj.Cid, false) WriteSuccessResponseHeadersOnly(w, r) } + +func WriteCreateMultipartUploadResponse(w http.ResponseWriter, r *http.Request, bucname, objname, uploadID string) { + resp := GenerateInitiateMultipartUploadResponse(bucname, objname, uploadID) + WriteSuccessResponseXML(w, r, resp) +} + +func WriteAbortMultipartUploadResponse(w http.ResponseWriter, r *http.Request) { + WriteSuccessNoContent(w) +} + +func WriteUploadPartResponse(w http.ResponseWriter, r *http.Request, part object.ObjectPart) { + setPutObjHeaders(w, part.ETag, part.Cid, false) + WriteSuccessResponseHeadersOnly(w, r) +} + +func WriteCompleteMultipartUploadResponse(w http.ResponseWriter, r *http.Request, bucname, objname, region string, obj object.Object) { + resp := GenerateCompleteMultipartUploadResponse(bucname, objname, region, obj) + setPutObjHeaders(w, obj.ETag, obj.Cid, false) + WriteSuccessResponseXML(w, r, resp) +} diff --git a/s3/responses/writers_common.go b/s3/responses/writers_common.go index 6afc1cf6f..bdb0ca27f 100644 --- a/s3/responses/writers_common.go +++ b/s3/responses/writers_common.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/services/object" "github.com/gorilla/mux" logging "github.com/ipfs/go-log/v2" "net/http" @@ -199,27 +198,11 @@ func WriteSuccessNoContent(w http.ResponseWriter) { writeResponseSimple(w, http.StatusNoContent, nil, 
mimeNone) } -// setPutObjHeaders sets all the necessary headers returned back -// upon a success Put/Copy/CompleteMultipart/Delete requests -// to activate delete only headers set delete as true -func setPutObjHeaders(w http.ResponseWriter, obj object.Object, delete bool) { - // We must not use the http.Header().Set method here because some (broken) - // clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive). - // Therefore, we have to set the ETag directly as map entry. - if obj.ETag != "" && !delete { - w.Header()[consts.ETag] = []string{`"` + obj.ETag + `"`} +func setPutObjHeaders(w http.ResponseWriter, etag, cid string, delete bool) { + if etag != "" && !delete { + w.Header()[consts.ETag] = []string{`"` + etag + `"`} } - - if obj.Cid != "" { - w.Header()[consts.BTFSHash] = []string{obj.Cid} - } - - // Set the relevant version ID as part of the response header. - if obj.VersionID != "" { - w.Header()[consts.AmzVersionID] = []string{obj.VersionID} - // If version is a deleted marker, set this header as well - if obj.DeleteMarker && delete { // only returned during delete object - w.Header()[consts.AmzDeleteMarker] = []string{strconv.FormatBool(obj.DeleteMarker)} - } + if cid != "" { + w.Header()[consts.BTFSHash] = []string{cid} } } diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 7410ad669..466a3764f 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -21,8 +21,9 @@ func NewRouters(handlers handlers.Handlerser, options ...Option) (routers *Route } func (routers *Routers) Register() http.Handler { - root := mux.NewRouter() + hs := routers.handlers + root := mux.NewRouter() root.Use( routers.handlers.Cors, routers.handlers.Log, @@ -31,17 +32,30 @@ func (routers *Routers) Register() http.Handler { bucket := root.PathPrefix("/{bucket}").Subrouter() - //object - bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(routers.handlers.PutObjectHandler) - - 
bucket.Methods(http.MethodGet).HandlerFunc(routers.handlers.GetBucketAclHandler).Queries("acl", "") - bucket.Methods(http.MethodPut).HandlerFunc(routers.handlers.PutBucketAclHandler).Queries("acl", "") - - bucket.Methods(http.MethodPut).HandlerFunc(routers.handlers.PutBucketHandler) - bucket.Methods(http.MethodHead).HandlerFunc(routers.handlers.HeadBucketHandler) - bucket.Methods(http.MethodDelete).HandlerFunc(routers.handlers.DeleteBucketHandler) - - root.Methods(http.MethodGet).Path("/").HandlerFunc(routers.handlers.ListBucketsHandler) + // CreateMultipart + bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(hs.CreateMultipartUploadHandler).Queries("uploads", "") + // UploadPart + bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(hs.UploadPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") + // CompleteMultipartUpload + bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(hs.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + // AbortMultipart + bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + + // PutObject + bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(hs.PutObjectHandler) + + // GetBucketAcl + bucket.Methods(http.MethodGet).HandlerFunc(hs.GetBucketAclHandler).Queries("acl", "") + // PutBucketAcl + bucket.Methods(http.MethodPut).HandlerFunc(hs.PutBucketAclHandler).Queries("acl", "") + // PutBucket + bucket.Methods(http.MethodPut).HandlerFunc(hs.PutBucketHandler) + // HeadBucket + bucket.Methods(http.MethodHead).HandlerFunc(hs.HeadBucketHandler) + // DeleteBucket + bucket.Methods(http.MethodDelete).HandlerFunc(hs.DeleteBucketHandler) + // ListBuckets + root.Methods(http.MethodGet).Path("/").HandlerFunc(hs.ListBucketsHandler) return root } diff --git a/s3/services/multipart/proto.go b/s3/services/multipart/proto.go deleted file mode 100644 index 
2673587e2..000000000 --- a/s3/services/multipart/proto.go +++ /dev/null @@ -1,4 +0,0 @@ -package multipart - -type Service interface { -} diff --git a/s3/services/multipart/service.go b/s3/services/multipart/service.go deleted file mode 100644 index 62fb2ccb3..000000000 --- a/s3/services/multipart/service.go +++ /dev/null @@ -1,29 +0,0 @@ -package multipart - -import ( - "github.com/bittorrent/go-btfs/s3/services" - "io" -) - -var _ services.MultipartService = (*service)(nil) - -type service struct { -} - -func NewService(options ...Option) Service { - svc := &service{} - for _, option := range options { - option(svc) - } - return svc -} - -func (svc *service) multiReader() io.Reader { - var ( - r1 io.Reader - r2 io.Reader - r3 io.Reader - ) - - return io.MultiReader(r1, r2, r3) -} diff --git a/s3/services/multipart/service_options.go b/s3/services/multipart/service_options.go deleted file mode 100644 index e3dcdf9c2..000000000 --- a/s3/services/multipart/service_options.go +++ /dev/null @@ -1,3 +0,0 @@ -package multipart - -type Option func(svc *service) diff --git a/s3/services/object/proto.go b/s3/services/object/proto.go index d17d0b43e..b7e714cc7 100644 --- a/s3/services/object/proto.go +++ b/s3/services/object/proto.go @@ -7,10 +7,20 @@ import ( "time" ) -var ErrNotFound = errors.New("object not found") +var ( + ErrObjectNotFound = errors.New("object not found") + ErrUploadNotFound = errors.New("upload not found") +) type Service interface { - StoreObject(ctx context.Context, bucname, objname string, reader *hash.Reader, size int64, meta map[string]string) (obj Object, err error) + PutObject(ctx context.Context, bucname, objname string, reader *hash.Reader, size int64, meta map[string]string) (obj Object, err error) + + // martipart + CreateMultipartUpload(ctx context.Context, bucname string, objname string, meta map[string]string) (mtp Multipart, err error) + AbortMultipartUpload(ctx context.Context, bucname string, objname string, uploadID string) (err 
error) + UploadPart(ctx context.Context, bucname string, objname string, uploadID string, partID int, reader *hash.Reader, size int64, meta map[string]string) (part ObjectPart, err error) + CompleteMultiPartUpload(ctx context.Context, bucname string, objname string, uploadID string, parts []CompletePart) (obj Object, err error) + GetMultipart(ctx context.Context, bucname string, objname string, uploadID string) (mtp Multipart, err error) } type Object struct { @@ -63,3 +73,53 @@ type Object struct { // The mod time of the successor object version if any SuccessorModTime time.Time } + +type Multipart struct { + Bucket string + Object string + UploadID string + Initiated time.Time + MetaData map[string]string + // List of individual parts, maximum size of upto 10,000 + Parts []ObjectPart +} + +// objectPartInfo Info of each part kept in the multipart metadata +// file after CompleteMultipartUpload() is called. +type ObjectPart struct { + ETag string `json:"etag,omitempty"` + Cid string `json:"cid,omitempty"` + Number int `json:"number"` + Size int64 `json:"size"` + ModTime time.Time `json:"mod_time"` +} + +// CompletePart - represents the part that was completed, this is sent by the client +// during CompleteMultipartUpload request. +type CompletePart struct { + // Part number identifying the part. This is a positive integer between 1 and + // 10,000 + PartNumber int + + // Entity tag returned when the part was uploaded. + ETag string + + // Checksum values. Optional. + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string +} + +// CompletedParts - is a collection satisfying sort.Interface. 
+type CompletedParts []CompletePart + +func (a CompletedParts) Len() int { return len(a) } +func (a CompletedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a CompletedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } + +// CompleteMultipartUpload - represents list of parts which are completed, this is sent by the +// client during CompleteMultipartUpload request. +type CompleteMultipartUpload struct { + Parts []CompletePart `xml:"Part"` +} diff --git a/s3/services/object/service.go b/s3/services/object/service.go index 8b0260e5a..33a88737e 100644 --- a/s3/services/object/service.go +++ b/s3/services/object/service.go @@ -2,19 +2,49 @@ package object import ( "context" + "encoding/hex" + "errors" "fmt" "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/etag" "github.com/bittorrent/go-btfs/s3/providers" + "github.com/bittorrent/go-btfs/s3/s3utils" "github.com/bittorrent/go-btfs/s3/utils/hash" + "github.com/dustin/go-humanize" + "github.com/google/uuid" + "io" "net/http" + "regexp" "strings" "time" ) const ( - objectKeyFormat = "obj/%s/%s" + // bigFileThreshold is the point where we add readahead to put operations. + bigFileThreshold = 64 * humanize.MiByte + // equals unixfsChunkSize + chunkSize int = 1 << 20 + + objectKeyFormat = "obj/%s/%s" + allObjectPrefixFormat = "obj/%s/%s" + allObjectSeekKeyFormat = "obj/%s/%s" + + uploadKeyFormat = "uploadObj/%s/%s/%s" + allUploadPrefixFormat = "uploadObj/%s/%s" + allUploadSeekKeyFormat = "uploadObj/%s/%s/%s" + + deleteKeyFormat = "delObj/%s" + allDeletePrefixFormat = "delObj/" + + globalOperationTimeout = 5 * time.Minute + deleteOperationTimeout = 1 * time.Minute + + maxCpuPercent = 60 + maxUsedMemoryPercent = 80 ) +var etagRegex = regexp.MustCompile("\"*?([^\"]*?)\"*?$") + var _ Service = (*service)(nil) // service captures all bucket metadata for a given cluster. 
@@ -33,11 +63,15 @@ func NewService(providers providers.Providerser, options ...Option) Service { return s } -func (s *service) getObjectKey(buc, obj string) string { - return fmt.Sprintf(objectKeyFormat, buc, obj) +func getObjectKey(bucname, objname string) string { + return fmt.Sprintf(objectKeyFormat, bucname, objname) +} + +func getUploadKey(bucname, objname, uploadID string) string { + return fmt.Sprintf(uploadKeyFormat, bucname, objname, uploadID) } -func (s *service) StoreObject(ctx context.Context, bucname, objname string, reader *hash.Reader, size int64, meta map[string]string) (obj Object, err error) { +func (s *service) PutObject(ctx context.Context, bucname, objname string, reader *hash.Reader, size int64, meta map[string]string) (obj Object, err error) { cid, err := s.providers.GetFileStore().AddWithOpts(reader, true, true) if err != nil { return @@ -67,10 +101,239 @@ func (s *service) StoreObject(ctx context.Context, bucname, objname string, read } } - err = s.providers.GetStateStore().Put(s.getObjectKey(bucname, objname), obj) + err = s.providers.GetStateStore().Put(getObjectKey(bucname, objname), obj) + if err != nil { + return + } + + return +} + +func (s *service) CreateMultipartUpload(ctx context.Context, bucname string, objname string, meta map[string]string) (mtp Multipart, err error) { + uploadId := uuid.NewString() + mtp = Multipart{ + Bucket: bucname, + Object: objname, + UploadID: uploadId, + MetaData: meta, + Initiated: time.Now().UTC(), + } + + err = s.providers.GetStateStore().Put(getUploadKey(bucname, objname, uploadId), mtp) if err != nil { return } return } + +func (s *service) UploadPart(ctx context.Context, bucname string, objname string, uploadID string, partID int, reader *hash.Reader, size int64, meta map[string]string) (part ObjectPart, err error) { + cid, err := s.providers.GetFileStore().AddWithOpts(reader, true, true) + if err != nil { + return + } + + part = ObjectPart{ + Number: partID, + ETag: reader.ETag().String(), + 
Cid: cid, + Size: size, + ModTime: time.Now().UTC(), + } + + mtp, err := s.getMultipart(ctx, bucname, objname, uploadID) + if err != nil { + return + } + + mtp.Parts = append(mtp.Parts, part) + err = s.providers.GetStateStore().Put(getUploadKey(bucname, objname, uploadID), mtp) + if err != nil { + return part, err + } + + return +} + +func (s *service) AbortMultipartUpload(ctx context.Context, bucname string, objname string, uploadID string) (err error) { + mtp, err := s.getMultipart(ctx, bucname, objname, uploadID) + if err != nil { + return + } + + for _, part := range mtp.Parts { + ok := s.providers.GetFileStore().Remove(part.Cid) + if !ok { + err = errors.New("remove file failed") + return + } + } + + err = s.removeMultipart(ctx, bucname, objname, uploadID) + if err != nil { + return + } + + return +} + +func (s *service) CompleteMultiPartUpload(ctx context.Context, bucname string, objname string, uploadID string, parts []CompletePart) (obj Object, err error) { + mi, err := s.getMultipart(ctx, bucname, objname, uploadID) + if err != nil { + return + } + + var ( + readers []io.Reader + objectSize int64 + ) + + defer func() { + for _, rdr := range readers { + _ = rdr.(io.ReadCloser).Close() + } + }() + + idxMap := objectPartIndexMap(mi.Parts) + for i, part := range parts { + partIndex, ok := idxMap[part.PartNumber] + if !ok { + err = s3utils.InvalidPart{ + PartNumber: part.PartNumber, + GotETag: part.ETag, + } + return + } + + gotPart := mi.Parts[partIndex] + + part.ETag = canonicalizeETag(part.ETag) + if gotPart.ETag != part.ETag { + err = s3utils.InvalidPart{ + PartNumber: part.PartNumber, + ExpETag: gotPart.ETag, + GotETag: part.ETag, + } + return + } + + // All parts except the last part has to be at least 5MB. + if (i < len(parts)-1) && !(gotPart.Size >= consts.MinPartSize) { + err = s3utils.PartTooSmall{ + PartNumber: part.PartNumber, + PartSize: gotPart.Size, + PartETag: part.ETag, + } + return + } + + // Save for total objname size. 
+ objectSize += gotPart.Size + + var rdr io.ReadCloser + rdr, err = s.providers.GetFileStore().Cat(gotPart.Cid) + if err != nil { + return + } + + readers = append(readers, rdr) + } + + cid, err := s.providers.GetFileStore().AddWithOpts(io.MultiReader(readers...), true, true) + if err != nil { + return + } + + obj = Object{ + Bucket: bucname, + Name: objname, + ModTime: time.Now().UTC(), + Size: objectSize, + IsDir: false, + ETag: computeCompleteMultipartMD5(parts), + Cid: cid, + VersionID: "", + IsLatest: true, + DeleteMarker: false, + ContentType: mi.MetaData[strings.ToLower(consts.ContentType)], + ContentEncoding: mi.MetaData[strings.ToLower(consts.ContentEncoding)], + SuccessorModTime: time.Now().UTC(), + } + + if exp, ok := mi.MetaData[strings.ToLower(consts.Expires)]; ok { + if t, e := time.Parse(http.TimeFormat, exp); e == nil { + obj.Expires = t.UTC() + } + } + + err = s.providers.GetStateStore().Put(getObjectKey(bucname, objname), obj) + if err != nil { + return + } + + err = s.removeMultipartInfo(ctx, bucname, objname, uploadID) + if err != nil { + return + } + + return +} + +func (s *service) GetMultipart(ctx context.Context, bucname string, objname string, uploadID string) (mtp Multipart, err error) { + return s.getMultipart(ctx, bucname, objname, uploadID) +} + +func (s *service) getMultipart(ctx context.Context, bucname string, objname string, uploadID string) (mtp Multipart, err error) { + err = s.providers.GetStateStore().Get(getUploadKey(bucname, objname, uploadID), &mtp) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = ErrUploadNotFound + return + } + return +} + +func (s *service) removeMultipart(ctx context.Context, bucname string, objname string, uploadID string) (err error) { + err = s.providers.GetStateStore().Delete(getUploadKey(bucname, objname, uploadID)) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = ErrUploadNotFound + return + } + return +} + +func (s *service) removeMultipartInfo(ctx context.Context, 
bucname string, objname string, uploadID string) (err error) { + err = s.providers.GetStateStore().Delete(getUploadKey(bucname, objname, uploadID)) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = ErrUploadNotFound + return + } + return +} + +func objectPartIndexMap(parts []ObjectPart) map[int]int { + mp := make(map[int]int) + for i, part := range parts { + mp[part.Number] = i + } + return mp +} + +// canonicalizeETag returns ETag with leading and trailing double-quotes removed, +// if any present +func canonicalizeETag(etag string) string { + return etagRegex.ReplaceAllString(etag, "$1") +} + +func computeCompleteMultipartMD5(parts []CompletePart) string { + var finalMD5Bytes []byte + for _, part := range parts { + md5Bytes, err := hex.DecodeString(canonicalizeETag(part.ETag)) + if err != nil { + finalMD5Bytes = append(finalMD5Bytes, []byte(part.ETag)...) + } else { + finalMD5Bytes = append(finalMD5Bytes, md5Bytes...) + } + } + s3MD5 := fmt.Sprintf("%s-%d", etag.Multipart(finalMD5Bytes), len(parts)) + return s3MD5 +} From 5fa266ebe871d6ea1b2e6be52a3c23231ea5ee69 Mon Sep 17 00:00:00 2001 From: steve Date: Fri, 25 Aug 2023 06:47:06 +0800 Subject: [PATCH 066/139] fix: nslock key --- s3/handlers/handlers.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 98fef93e8..2735a61e1 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -60,8 +60,9 @@ func (h *Handlers) name() string { } func (h *Handlers) rlock(ctx context.Context, key string, w http.ResponseWriter, r *http.Request) (runlock func(), err error) { + key = lockPrefix + key ctx, cancel := context.WithTimeout(ctx, lockWaitTimeout) - err = h.nslock.RLock(ctx, lockPrefix+key) + err = h.nslock.RLock(ctx, key) if err != nil { responses.WriteErrorResponse(w, r, err) cancel() @@ -75,8 +76,9 @@ func (h *Handlers) rlock(ctx context.Context, key string, w http.ResponseWriter, } func (h *Handlers) lock(ctx 
context.Context, key string, w http.ResponseWriter, r *http.Request) (unlock func(), err error) { + key = lockPrefix + key ctx, cancel := context.WithTimeout(ctx, lockWaitTimeout) - err = h.nslock.Lock(ctx, lockPrefix+key) + err = h.nslock.Lock(ctx, key) if err != nil { responses.WriteErrorResponse(w, r, err) cancel() From fbe0ec5f9e7db0f56a0e1ce09d5b094bcfa95ae5 Mon Sep 17 00:00:00 2001 From: steve Date: Fri, 25 Aug 2023 06:51:38 +0800 Subject: [PATCH 067/139] chore: rename s3 constructor file name --- s3/{server.go => s3.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename s3/{server.go => s3.go} (100%) diff --git a/s3/server.go b/s3/s3.go similarity index 100% rename from s3/server.go rename to s3/s3.go From cb34bf144ea0898bab2d648dbe4c0b2bd3632588 Mon Sep 17 00:00:00 2001 From: Steve Date: Sat, 26 Aug 2023 22:16:56 +0800 Subject: [PATCH 068/139] fix: tidy example go-ipfs-as-a-library go mod --- docs/examples/go-ipfs-as-a-library/go.mod | 18 +- docs/examples/go-ipfs-as-a-library/go.sum | 2849 +++++++++++++++++---- 2 files changed, 2299 insertions(+), 568 deletions(-) diff --git a/docs/examples/go-ipfs-as-a-library/go.mod b/docs/examples/go-ipfs-as-a-library/go.mod index dbb7f3e2c..0e83c1ea3 100644 --- a/docs/examples/go-ipfs-as-a-library/go.mod +++ b/docs/examples/go-ipfs-as-a-library/go.mod @@ -3,17 +3,13 @@ module github.com/ipfs/go-ipfs/examples/go-ipfs-as-a-library go 1.14 require ( - github.com/TRON-US/go-btfs v1.5.0 - github.com/TRON-US/go-btfs-config v0.11.5 - github.com/TRON-US/go-btfs-files v0.2.0 - github.com/TRON-US/interface-go-btfs-core v0.7.0 - github.com/ipfs/go-ipfs v0.7.0 - github.com/ipfs/go-ipfs-config v0.9.0 - github.com/ipfs/go-ipfs-files v0.0.8 - github.com/TRON-US/interface-go-btfs-core v0.4.0 - github.com/libp2p/go-libp2p-core v0.9.0 - github.com/libp2p/go-libp2p-peerstore v0.2.7 - github.com/multiformats/go-multiaddr v0.3.3 + github.com/bittorrent/go-btfs v0.0.0-20230626064024-58978cbfe949 + github.com/bittorrent/go-btfs-config 
v0.12.3 + github.com/bittorrent/go-btfs-files v0.3.1 + github.com/bittorrent/interface-go-btfs-core v0.8.2 + github.com/klauspost/cpuid v1.2.4 // indirect + github.com/libp2p/go-libp2p v0.24.2 + github.com/multiformats/go-multiaddr v0.8.0 ) replace github.com/ipfs/go-ipfs => ./../../.. diff --git a/docs/examples/go-ipfs-as-a-library/go.sum b/docs/examples/go-ipfs-as-a-library/go.sum index 887738c6c..d1726045d 100644 --- a/docs/examples/go-ipfs-as-a-library/go.sum +++ b/docs/examples/go-ipfs-as-a-library/go.sum @@ -1,5 +1,7 @@ -bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc h1:utDghgcjE8u+EBjHOgYT+dJPcnDF05KqWMBcjuJy510= +bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= +bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512 h1:SRsZGA7aFnCZETmov57jwPrWuTmaZK6+4R4v5FUe1/c= +bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -8,6 +10,7 @@ cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSR cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -15,32 +18,409 @@ 
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gc cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod 
h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/appengine v1.4.0/go.mod 
h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod 
h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod 
h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod 
h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= 
+cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod 
h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/errorreporting v0.3.0/go.mod 
h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod 
h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod 
h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod 
h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= 
+cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= 
+cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= 
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter 
v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage 
v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= 
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= 
+cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9fpw1KeYcjrnC1J8B+JKjsZyRQ= +crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797/go.mod h1:sXBiorCo8c46JlQV3oXPKINnZ8mcqnye1EkVkqsectk= +crawshaw.io/sqlite v0.3.2/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4= +crawshaw.io/sqlite v0.3.3-0.20210127221821-98b1f83c5508/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 
h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM= +github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo= github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= @@ -61,77 +441,233 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= 
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= +github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= +github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w= +github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= +github.com/CloudyKit/jet/v6 v6.1.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP/Lzo7Ro4= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/FactomProject/basen v0.0.0-20150613233007-fe3947df716e/go.mod h1:kGUqhHd//musdITWjFvNTHn90WG9bMLBEPQZ17Cmlpw= github.com/FactomProject/btcutilecc v0.0.0-20130527213604-d3a63a5752ec/go.mod h1:CD8UlnlLDiqb36L110uqiP2iSflVjx9g/3U9hCI4q2U= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= +github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= +github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2 
h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w= +github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= +github.com/RoaringBitmap/roaring v0.4.18/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= +github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= +github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= +github.com/RoaringBitmap/roaring v0.5.5/go.mod h1:puNo5VdzwbaIQxSiDIwfXl4Hnc+fbovcX4IW/dSTtUk= +github.com/RoaringBitmap/roaring v0.6.0/go.mod h1:WZ83fjBF/7uBHi6QoFyfGL4+xuV4Qn+xFkm4+vSzrhE= +github.com/RoaringBitmap/roaring v0.9.4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA= +github.com/RoaringBitmap/roaring v1.0.1-0.20220510143707-3f418c4f42a4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA= +github.com/RoaringBitmap/roaring v1.2.1/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA= +github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= +github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06/go.mod h1:7erjKLwalezA0k99cWs5L11HWOAPNjdUZ6RxH1BXbbM= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= 
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/Stebalien/go-bitfield v0.0.0-20180330043415-076a62f9ce6e/go.mod h1:3oM7gXIttpYDAJXpVNnSCiUMYBLIZ6cb1t+Ip982MRo= github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo= github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= -github.com/TRON-US/go-btfs v1.5.0 h1:XrvhGK9+imGikGf/sSWdhyST4nV6FKZpwJl3SxGDUGw= -github.com/TRON-US/go-btfs v1.5.0/go.mod h1:x/uFzBwyxNs+2Srn5cH3BzEzC9EV0MRsivK0/pU8B0A= -github.com/TRON-US/go-btfs-api v0.3.0/go.mod h1:surmr8ztnpbVY7y2H7dbb7npNXfdaV0UH6cFwhtJPw0= -github.com/TRON-US/go-btfs-chunker v0.3.0/go.mod h1:m0xvt42kqLskWsLF6SQ51AA9cqPzWoweydOcDgSDX/U= -github.com/TRON-US/go-btfs-cmds v0.2.7/go.mod h1:GLPyQ0EX9JiL41IZd6yRr42RlxNHovp4V4gXZNE2Rfk= -github.com/TRON-US/go-btfs-cmds v0.2.13/go.mod h1:GLPyQ0EX9JiL41IZd6yRr42RlxNHovp4V4gXZNE2Rfk= -github.com/TRON-US/go-btfs-config v0.6.0/go.mod h1:82nKCMRhsgY0I8DCasIUpSr6ZP9iHLsZJSMUxytMpEw= -github.com/TRON-US/go-btfs-config v0.7.0 h1:v1O6ggE71k3a6KuwfUgbqFFPMJkZymvyORXDquQTKl8= -github.com/TRON-US/go-btfs-config v0.7.0/go.mod h1:9y6osJENDCjulSNJjSSt1J8OK+ADRatBdYPXRDewbko= -github.com/TRON-US/go-btfs-config v0.11.2 h1:3rDK/jyeEj5hKAlj+iMBy7SB4wuy8ti5aDjaG8cxAt4= -github.com/TRON-US/go-btfs-config v0.11.2/go.mod h1:9y6osJENDCjulSNJjSSt1J8OK+ADRatBdYPXRDewbko= -github.com/TRON-US/go-btfs-config v0.11.5 h1:Gc1TPaG3xTDojwBRsyumOv3le0NrYiWCBmB9No++Axg= -github.com/TRON-US/go-btfs-config v0.11.5/go.mod h1:9y6osJENDCjulSNJjSSt1J8OK+ADRatBdYPXRDewbko= -github.com/TRON-US/go-btfs-files v0.1.1/go.mod h1:tD2vOKLcLCDNMn9rrA27n2VbNpHdKewGzEguIFY+EJ0= -github.com/TRON-US/go-btfs-files v0.2.0/go.mod h1:Qx+rTOIC0xl3ZkosGcEoB4hqExZmTONErPys8K5suEc= -github.com/ipfs/go-ipfs-pinner 
v0.1.1/go.mod h1:4CVxKEQNNTbuW92BIYwiH9/W63eDNtlt7bK0fCnXSag= -github.com/TRON-US/go-btns v0.1.1 h1:19rUEc9QK5aIz5Z278lQh6omXbQ3Ha40ecQX1DnGnT8= -github.com/TRON-US/go-btns v0.1.1/go.mod h1:PWfgUinn65Xzar61xNunkadza7h+v+cYFCOXpuVsixY= -github.com/TRON-US/go-eccrypto v0.0.1/go.mod h1:QZqTUSKP9MdYh+0LPsnVKvXV/Q2f9Qb6V4ejvUmHVvI= -github.com/TRON-US/go-mfs v0.3.1 h1:5foDPPlIcF4bPXZ18Qd+lHv3WPBQTJlAHTLSAeFK/rY= -github.com/TRON-US/go-mfs v0.3.1/go.mod h1:hXLxeLnJp50uu+Ibg7Tf7BzaC49m8RSTRA/eDl0wx1s= -github.com/TRON-US/go-unixfs v0.6.0/go.mod h1:U3+FopU5+8rwrr05MJOwDB1E9vAwKGsb/GII0LkXZ8k= -github.com/TRON-US/go-unixfs v0.6.1 h1:7KFhJdt+XsapVSmxEq+mfUFOEPS8SyoaWJkkrr59N4A= -github.com/TRON-US/go-unixfs v0.6.1/go.mod h1:U3+FopU5+8rwrr05MJOwDB1E9vAwKGsb/GII0LkXZ8k= -github.com/TRON-US/interface-go-btfs-core v0.7.0 h1:IV72YgoLbd86df6lXdUum4wO+msB6+NC0504vtD69gs= -github.com/TRON-US/interface-go-btfs-core v0.7.0/go.mod h1:cpdcK3UWZGA78cte11zoUn8kpoilKKVMIPXKk8e75Z8= github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= +github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/Workiva/go-datastructures v1.0.52/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0/go.mod h1:q37NoqncT41qKc048STsifIt69LfUJ8SrWWcz/yam5k= +github.com/alangpierce/go-forceexport v0.0.0-20160317203124-8f1d6941cd75/go.mod 
h1:uAXEEpARkRhCZfEvy/y0Jcc888f9tHCc1W7/UeEtreE= +github.com/alecthomas/assert/v2 v2.0.0-alpha3/go.mod h1:+zD0lmDXTeQj7TgDgCt0ePWxb0hMC1G+PGTsTCv1B9o= +github.com/alecthomas/atomic v0.1.0-alpha2/go.mod h1:zD6QGEyw49HIq19caJDc2NMXAy8rNi9ROrxtMXATfyI= +github.com/alecthomas/kingpin/v2 v2.3.1/go.mod h1:oYL5vtsvEHZGHxU7DMp32Dvx+qL+ptGn6lWaot2vCNE= +github.com/alecthomas/repr v0.0.0-20210801044451-80ca428c5142/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 h1:iW0a5ljuFxkLGPNem5Ui+KBjFJzKg4Fv2fnxe4dvzpM= github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5/go.mod h1:Y2QMoi1vgtOIfc+6DhrMOGkLoGzqSV2rKp4Sm+opsyA= +github.com/alexflint/go-arg v1.1.0/go.mod h1:3Rj4baqzWaGGmZA2+bVTV8zQOZEjBQAPBnL5xLT+ftY= 
+github.com/alexflint/go-arg v1.2.0/go.mod h1:3Rj4baqzWaGGmZA2+bVTV8zQOZEjBQAPBnL5xLT+ftY= +github.com/alexflint/go-arg v1.3.0/go.mod h1:9iRbDxne7LcR/GSvEr7ma++GLpdIU1zrghf2y2768kM= +github.com/alexflint/go-arg v1.4.2/go.mod h1:9iRbDxne7LcR/GSvEr7ma++GLpdIU1zrghf2y2768kM= +github.com/alexflint/go-arg v1.4.3/go.mod h1:3PZ/wp/8HuqRZMUUgu7I+e1qcpUbvmS258mRXkFH4IA= +github.com/alexflint/go-scalar v1.0.0/go.mod h1:GpHzbCOZXEKMEcygYQ5n/aa4Aq84zbxjy3MxYW0gjYw= +github.com/alexflint/go-scalar v1.1.0/go.mod h1:LoFvNMqS1CPrMVltza4LvnGKhaSpc3oyLEBUZVhhS2o= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/anacrolix/args v0.3.0/go.mod h1:41JBnF8sKExNVLHPkCdL74jkZc3dSxAkGsk1TuKOUFI= +github.com/anacrolix/args v0.4.1-0.20211104085705-59f0fe94eb8f/go.mod h1:41JBnF8sKExNVLHPkCdL74jkZc3dSxAkGsk1TuKOUFI= +github.com/anacrolix/args v0.5.0/go.mod h1:Fj/N2PehEwTBE5t/V/9xgTcxDkuYQ+5IBoFw/8gkldI= +github.com/anacrolix/args v0.5.1-0.20220509024600-c3b77d0b61ac/go.mod h1:Fj/N2PehEwTBE5t/V/9xgTcxDkuYQ+5IBoFw/8gkldI= +github.com/anacrolix/bargle v0.0.0-20220630015206-d7a4d433886a/go.mod h1:9xUiZbkh+94FbiIAL1HXpAIBa832f3Mp07rRPl5c5RQ= +github.com/anacrolix/chansync v0.0.0-20210524073341-a336ebc2de92/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k= +github.com/anacrolix/chansync v0.1.0/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k= +github.com/anacrolix/chansync v0.2.1-0.20210910114620-14955c95ded9/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k= +github.com/anacrolix/chansync v0.3.0-0.0.20211007004133-3f72684c4a93/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k= +github.com/anacrolix/chansync v0.3.0/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k= +github.com/anacrolix/confluence v1.7.1-0.20210221224747-9cb14aa2c53a/go.mod h1:T0JHvSaf9UfoiUdCtCOUuRroHm/tauUJTbLc6/vd5YA= +github.com/anacrolix/confluence v1.7.1-0.20210221225853-90405640e928/go.mod 
h1:NoLcfoRet+kYttjLXJRmh4qBVrylJsfIItik5GGj21A= +github.com/anacrolix/confluence v1.7.1-0.20210311004351-d642adb8546c/go.mod h1:KCZ3eObqKECNeZg0ekAoJVakHMP3gAdR8i0bQ26IkzM= +github.com/anacrolix/confluence v1.8.0/go.mod h1:GsPP6ikA8h/CU7ExbuMOswpzZpPdf1efDPu4rVXL43g= +github.com/anacrolix/confluence v1.9.0/go.mod h1:O5uS+WVgip+3SOcV1K7E/jE3m4DtK7Jk6QJTnU2VS5s= +github.com/anacrolix/dht v0.0.0-20180412060941-24cbf25b72a4/go.mod h1:hQfX2BrtuQsLQMYQwsypFAab/GvHg8qxwVi4OJdR1WI= +github.com/anacrolix/dht/v2 v2.0.1/go.mod h1:GbTT8BaEtfqab/LPd5tY41f3GvYeii3mmDUK300Ycyo= +github.com/anacrolix/dht/v2 v2.2.1-0.20191103020011-1dba080fb358/go.mod h1:d7ARx3WpELh9uOEEr0+8wvQeVTOkPse4UU6dKpv4q0E= +github.com/anacrolix/dht/v2 v2.3.2-0.20200103043204-8dce00767ebd/go.mod h1:cgjKyErDnKS6Mej5D1fEqBKg3KwFF2kpFZJp3L6/fGI= +github.com/anacrolix/dht/v2 v2.5.1-0.20200317023935-129f05e9b752/go.mod h1:7RLvyOjm+ZPA7vgFRP+1eRjFzrh27p/nF0VCk5LcjoU= +github.com/anacrolix/dht/v2 v2.8.0/go.mod h1:RjeKbveVwjnaVj5os4y/NQwqEoDWHigo5rdge9MP52k= +github.com/anacrolix/dht/v2 v2.8.1-0.20210221225335-7a6713a749f9/go.mod h1:p7fLHxqc1mtrFGXfJ226Fo2akG3Pv8ngCTnYAzVJXa4= +github.com/anacrolix/dht/v2 v2.8.1-0.20210311003418-13622df072ae/go.mod h1:wLmYr78fBu4KfUUkFZyGFFwDPDw9EHL5x8c632XCZzs= +github.com/anacrolix/dht/v2 v2.9.1/go.mod h1:ZyYcIQinN/TE3oKONCchQOLjhYR786Jaxz3jsBtih4A= +github.com/anacrolix/dht/v2 v2.10.0/go.mod h1:KC51tqylRYBu82RM5pEYf+g1n7db+F0tOJqSbCjjZWc= +github.com/anacrolix/dht/v2 v2.10.5-0.20210902001729-06cc4fe90e53/go.mod h1:zHjijcebN+L7JbzxW0mOraHis+I81EIgsJAAtiw8bQ8= +github.com/anacrolix/dht/v2 v2.10.6-0.20211007004332-99263ec9c1c8/go.mod h1:WID4DexLrucfnwzv1OV8REzgoCpyVDwEczxIOrUeFrY= +github.com/anacrolix/dht/v2 v2.14.1-0.20211220010335-4062f7927abf/go.mod h1:zJgaiAU2yhzmchZE2mY8WyZ64LK/F/D9MAeN0ct73qQ= +github.com/anacrolix/dht/v2 v2.15.2-0.20220123034220-0538803801cb/go.mod h1:GCylVI6WTvbxvhY7pBoHiE5dmjfDWkhqbobDpjND01A= +github.com/anacrolix/dht/v2 
v2.16.2-0.20220311024416-dd658f18fd51/go.mod h1:osiyaNrMLG9dw7wUtVMaII/NdCjlXeHjUcYzXnmop68= +github.com/anacrolix/dht/v2 v2.19.0/go.mod h1:0h83KnnAQ2AUYhpQ/CkoZP45K41pjDAlPR9zGHgFjQE= +github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c= +github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c= +github.com/anacrolix/envpprof v1.0.1/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4= +github.com/anacrolix/envpprof v1.1.0/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4= +github.com/anacrolix/envpprof v1.1.1/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4= +github.com/anacrolix/envpprof v1.2.1/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4= +github.com/anacrolix/fuse v0.2.0/go.mod h1:Kfu02xBwnySDpH3N23BmrP3MDfwAQGRLUCj6XyeOvBQ= +github.com/anacrolix/generics v0.0.0-20220618083756-f99e35403a60/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8= +github.com/anacrolix/go-libutp v0.0.0-20180522111405-6baeb806518d/go.mod h1:beQSaSxwH2d9Eeu5ijrEnHei5Qhk+J6cDm1QkWFru4E= +github.com/anacrolix/go-libutp v1.0.2/go.mod h1:uIH0A72V++j0D1nnmTjjZUiH/ujPkFxYWkxQ02+7S0U= +github.com/anacrolix/go-libutp v1.0.4/go.mod h1:8vSGX5g0b4eebsDBNVQHUXSCwYaN18Lnkse0hUW8/5w= +github.com/anacrolix/go-libutp v1.1.0/go.mod h1:so9zroOUhFPGnIkddyflaCCl+xdTsRSq97/AOQ2/Hjk= +github.com/anacrolix/go-libutp v1.2.0/go.mod h1:RrJ3KcaDcf9Jqp33YL5V/5CBEc6xMc7aJL8wXfuWL50= +github.com/anacrolix/log v0.0.0-20180412014343-2323884b361d/go.mod h1:sf/7c2aTldL6sRQj/4UKyjgVZBu2+M2z9wf7MmwPiew= +github.com/anacrolix/log v0.3.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= +github.com/anacrolix/log v0.3.1-0.20190913000754-831e4ffe0174/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= +github.com/anacrolix/log v0.3.1-0.20191001111012-13cede988bcd/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= +github.com/anacrolix/log v0.4.0/go.mod 
h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= +github.com/anacrolix/log v0.5.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= +github.com/anacrolix/log v0.6.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= +github.com/anacrolix/log v0.6.1-0.20200416071330-f58a030e6149/go.mod h1:s5yBP/j046fm9odtUTbHOfDUq/zh1W8OkPpJtnX0oQI= +github.com/anacrolix/log v0.7.1-0.20200604014615-c244de44fd2d/go.mod h1:s5yBP/j046fm9odtUTbHOfDUq/zh1W8OkPpJtnX0oQI= +github.com/anacrolix/log v0.8.0/go.mod h1:s5yBP/j046fm9odtUTbHOfDUq/zh1W8OkPpJtnX0oQI= +github.com/anacrolix/log v0.9.0/go.mod h1:s5yBP/j046fm9odtUTbHOfDUq/zh1W8OkPpJtnX0oQI= +github.com/anacrolix/log v0.10.0/go.mod h1:s5yBP/j046fm9odtUTbHOfDUq/zh1W8OkPpJtnX0oQI= +github.com/anacrolix/log v0.10.1-0.20220123034749-3920702c17f8/go.mod h1:GmnE2c0nvz8pOIPUSC9Rawgefy1sDXqposC2wgtBZE4= +github.com/anacrolix/log v0.11.0/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68= +github.com/anacrolix/log v0.13.1/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68= +github.com/anacrolix/log v0.13.2-0.20220426014722-7b7d13a55d55/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68= +github.com/anacrolix/log v0.13.2-0.20220711050817-613cb738ef30/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68= +github.com/anacrolix/lsan v0.0.0-20211126052245-807000409a62/go.mod h1:66cFKPCO7Sl4vbFnAaSq7e4OXtdMhRSBagJGWgmpJbM= +github.com/anacrolix/missinggo v0.0.0-20180522035225-b4a5853e62ff/go.mod h1:b0p+7cn+rWMIphK1gDH2hrDuwGOcbB6V4VXeSsEfHVk= +github.com/anacrolix/missinggo v0.0.0-20180725070939-60ef2fbf63df/go.mod h1:kwGiTUTZ0+p4vAz3VbAI5a30t2YbvemcmspjKwrAz5s= +github.com/anacrolix/missinggo v0.2.1-0.20190310234110-9fbdc9f242a8/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo= +github.com/anacrolix/missinggo v1.1.0/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo= +github.com/anacrolix/missinggo v1.1.2-0.20190815015349-b888af804467/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo= +github.com/anacrolix/missinggo 
v1.2.1/go.mod h1:J5cMhif8jPmFoC3+Uvob3OXXNIhOUikzMt+uUjeM21Y= +github.com/anacrolix/missinggo v1.3.0/go.mod h1:bqHm8cE8xr+15uVfMG3BFui/TxyB6//H5fwlq/TeqMc= +github.com/anacrolix/missinggo/perf v1.0.0/go.mod h1:ljAFWkBuzkO12MQclXzZrosP5urunoLS0Cbvb4V0uMQ= +github.com/anacrolix/missinggo/v2 v2.2.0/go.mod h1:o0jgJoYOyaoYQ4E2ZMISVa9c88BbUBVQQW4QeRkNCGY= +github.com/anacrolix/missinggo/v2 v2.2.1-0.20191103010835-12360f38ced0/go.mod h1:ZzG3/cc3t+5zcYWAgYrJW0MBsSwNwOkTlNquBbP51Bc= +github.com/anacrolix/missinggo/v2 v2.3.0/go.mod h1:ZzG3/cc3t+5zcYWAgYrJW0MBsSwNwOkTlNquBbP51Bc= +github.com/anacrolix/missinggo/v2 v2.3.1/go.mod h1:3XNH0OEmyMUZuvXmYdl+FDfXd0vvSZhvOLy8CFx8tLg= +github.com/anacrolix/missinggo/v2 v2.4.1-0.20200227072623-f02f6484f997/go.mod h1:KY+ij+mWvwGuqSuecLjjPv5LFw5ICUc1UvRems3VAZE= +github.com/anacrolix/missinggo/v2 v2.5.0/go.mod h1:HYuCbwvJXY3XbcmcIcTgZXHleoDXawxPWx/YiPzFzV0= +github.com/anacrolix/missinggo/v2 v2.5.1-0.20210520011502-b3d95d6b1d02/go.mod h1:WEjqh2rmKECd0t1VhQkLGTdIWXO6f6NLjp5GlMZ+6FA= +github.com/anacrolix/missinggo/v2 v2.5.1/go.mod h1:WEjqh2rmKECd0t1VhQkLGTdIWXO6f6NLjp5GlMZ+6FA= +github.com/anacrolix/missinggo/v2 v2.5.2-0.20210623112532-e21e4ddc477d/go.mod h1:WEjqh2rmKECd0t1VhQkLGTdIWXO6f6NLjp5GlMZ+6FA= +github.com/anacrolix/missinggo/v2 v2.5.2/go.mod h1:yNvsLrtZYRYCOI+KRH/JM8TodHjtIE/bjOGhQaLOWIE= +github.com/anacrolix/missinggo/v2 v2.7.0/go.mod h1:2IZIvmRTizALNYFYXsPR7ofXPzJgyBpKZ4kMqMEICkI= +github.com/anacrolix/mmsg v0.0.0-20180515031531-a4a3ba1fc8bb/go.mod h1:x2/ErsYUmT77kezS63+wzZp8E3byYB0gzirM/WMBLfw= +github.com/anacrolix/mmsg v1.0.0/go.mod h1:x8kRaJY/dCrY9Al0PEcj1mb/uFHwP6GCJ9fLl4thEPc= +github.com/anacrolix/multiless v0.0.0-20191223025854-070b7994e841/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= +github.com/anacrolix/multiless v0.0.0-20200413040533-acfd16f65d5d/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= +github.com/anacrolix/multiless v0.0.0-20210222022749-ef43011a77ec/go.mod 
h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= +github.com/anacrolix/multiless v0.1.1-0.20210520040635-10ee7b5f3cff/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= +github.com/anacrolix/multiless v0.1.1-0.20210529082330-de2f6cf29619/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= +github.com/anacrolix/multiless v0.2.0/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= +github.com/anacrolix/multiless v0.2.1-0.20211218050420-533661eef5dc/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= +github.com/anacrolix/multiless v0.3.0/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= +github.com/anacrolix/publicip v0.2.0/go.mod h1:67G1lVkLo8UjdEcJkwScWVTvlJ35OCDsRJoWXl/wi4g= +github.com/anacrolix/squirrel v0.1.0/go.mod h1:YzgVvikMdFD441oTWlNG189bpKabO9Sbf3uCSVgca04= +github.com/anacrolix/squirrel v0.1.1-0.20210914065657-81bc5ecdc43a/go.mod h1:YzgVvikMdFD441oTWlNG189bpKabO9Sbf3uCSVgca04= +github.com/anacrolix/squirrel v0.2.1-0.20211119092713-2efaee06d169/go.mod h1:dJyE7VefQvX0KAKMkOQDGOVEs91a+LvXQ2BjEKl/DIw= +github.com/anacrolix/squirrel v0.4.0/go.mod h1:dJyE7VefQvX0KAKMkOQDGOVEs91a+LvXQ2BjEKl/DIw= +github.com/anacrolix/squirrel v0.4.1-0.20220122230132-14b040773bac/go.mod h1:YzgVvikMdFD441oTWlNG189bpKabO9Sbf3uCSVgca04= +github.com/anacrolix/stm v0.1.0/go.mod h1:ZKz7e7ERWvP0KgL7WXfRjBXHNRhlVRlbBQecqFtPq+A= +github.com/anacrolix/stm v0.1.1-0.20191106051447-e749ba3531cf/go.mod h1:zoVQRvSiGjGoTmbM0vSLIiaKjWtNPeTvXUSdJQA4hsg= +github.com/anacrolix/stm v0.2.0/go.mod h1:zoVQRvSiGjGoTmbM0vSLIiaKjWtNPeTvXUSdJQA4hsg= +github.com/anacrolix/stm v0.2.1-0.20201002073511-c35a2c748c6a/go.mod h1:spImf/rXwiAUoYYJK1YCZeWkpaHZ3kzjGFjwK5OStfU= +github.com/anacrolix/stm v0.2.1-0.20210310231625-45c211559de6/go.mod h1:spImf/rXwiAUoYYJK1YCZeWkpaHZ3kzjGFjwK5OStfU= +github.com/anacrolix/stm v0.3.0-alpha/go.mod h1:spImf/rXwiAUoYYJK1YCZeWkpaHZ3kzjGFjwK5OStfU= +github.com/anacrolix/stm v0.3.0/go.mod h1:spImf/rXwiAUoYYJK1YCZeWkpaHZ3kzjGFjwK5OStfU= +github.com/anacrolix/stm 
v0.4.0/go.mod h1:GCkwqWoAsP7RfLW+jw+Z0ovrt2OO7wRzcTtFYMYY5t8= +github.com/anacrolix/sync v0.0.0-20171108081538-eee974e4f8c1/go.mod h1:+u91KiUuf0lyILI6x3n/XrW7iFROCZCG+TjgK8nW52w= +github.com/anacrolix/sync v0.0.0-20180611022320-3c4cb11f5a01/go.mod h1:+u91KiUuf0lyILI6x3n/XrW7iFROCZCG+TjgK8nW52w= +github.com/anacrolix/sync v0.0.0-20180808010631-44578de4e778/go.mod h1:s735Etp3joe/voe2sdaXLcqDdJSay1O0OPnM0ystjqk= +github.com/anacrolix/sync v0.2.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g= +github.com/anacrolix/sync v0.3.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g= +github.com/anacrolix/sync v0.4.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g= +github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= +github.com/anacrolix/tagflag v0.0.0-20180605133421-f477c8c2f14c/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= +github.com/anacrolix/tagflag v0.0.0-20180803105420-3a8ff5428f76/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= +github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= +github.com/anacrolix/tagflag v1.0.1/go.mod h1:gb0fiMQ02qU1djCSqaxGmruMvZGrMwSReidMB0zjdxo= +github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= +github.com/anacrolix/tagflag v1.1.1-0.20200411025953-9bb5209d56c2/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= +github.com/anacrolix/tagflag v1.2.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= +github.com/anacrolix/tagflag v1.3.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= +github.com/anacrolix/torrent v0.0.0-20180622074351-fefeef4ee9eb/go.mod h1:3vcFVxgOASslNXHdivT8spyMRBanMCenHRpe0u5vpBs= +github.com/anacrolix/torrent v1.7.1/go.mod h1:uvOcdpOjjrAq3uMP/u1Ide35f6MJ/o8kMnFG8LV3y6g= +github.com/anacrolix/torrent v1.9.0/go.mod h1:jJJ6lsd2LD1eLHkUwFOhy7I0FcLYH0tHKw2K7ZYMHCs= +github.com/anacrolix/torrent v1.11.0/go.mod 
h1:FwBai7SyOFlflvfEOaM88ag/jjcBWxTOqD6dVU/lKKA= +github.com/anacrolix/torrent v1.15.0/go.mod h1:MFc6KcbpAyfwGqOyRkdarUK9QnKA/FkVg0usFk1OQxU= +github.com/anacrolix/torrent v1.22.0/go.mod h1:GWTwQkOAilf0LR3C6A74XEkWPg0ejfFD9GcEIe57ess= +github.com/anacrolix/torrent v1.23.0/go.mod h1:737rU+al1LBWEs3IHBystZvsbg24iSP+8Gb25Vc/s5U= +github.com/anacrolix/torrent v1.25.1-0.20210221061757-051093ca31f5/go.mod h1:737rU+al1LBWEs3IHBystZvsbg24iSP+8Gb25Vc/s5U= +github.com/anacrolix/torrent v1.25.1-0.20210224024805-693c30dd889e/go.mod h1:d4V6QqkInfQidWVk8b8hMv8mtciswNitI1A2BiRSQV0= +github.com/anacrolix/torrent v1.28.1-0.20210622065255-582f0ccd48a0/go.mod h1:15VRIA5/DwqbqETbKo3fzlC4aSbB0iMoo10ng3mzAbE= +github.com/anacrolix/torrent v1.29.0/go.mod h1:40Hf2bWxFqTbTWbrdig57JnmYLCjShbWWjdbB3VN5n4= +github.com/anacrolix/torrent v1.30.2/go.mod h1:vbNxKfaGiNq8edcCaQI1oSNJwh4GMqtMUMF9qOdZ6C0= +github.com/anacrolix/torrent v1.31.1-0.20210910222643-d957502528e0/go.mod h1:akZJHHFN8aWH2lcPZQ0I3etujnenwYpUvj36HV9uvAI= +github.com/anacrolix/torrent v1.35.1-0.20211104090255-eaeb38b18c6a/go.mod h1:97nxJW8NIeUyGdBvMOAl9cmcxi8xPez3nlE0RwSZcL0= +github.com/anacrolix/torrent v1.40.0/go.mod h1:ejOLXT7fZE8ONHsoIYD7WS4/l6SEwtRGxcsP3dEu/Fk= +github.com/anacrolix/torrent v1.41.1-0.20220309095723-02b6ee995497/go.mod h1:GZlCftqtWLz7EGJ3Oo+JP35jrU5zENxgvTCJ0w+SUSU= +github.com/anacrolix/torrent v1.46.0/go.mod h1:3DE+VA4AgyfKDPjZcIo70D3VFZRo3bfdEBn70CGjca4= +github.com/anacrolix/torrent v1.47.0/go.mod h1:SYPxEUjMwqhDr3kWGzyQLkFMuAb1bgJ57JRMpuD3ZzE= +github.com/anacrolix/upnp v0.1.1/go.mod h1:LXsbsp5h+WGN7YR+0A7iVXm5BL1LYryDev1zuJMWYQo= +github.com/anacrolix/upnp v0.1.2-0.20200416075019-5e9378ed1425/go.mod h1:Pz94W3kl8rf+wxH3IbCa9Sq+DTJr8OSbV2Q3/y51vYs= +github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96/go.mod h1:Wa6n8cYIdaG35x15aH3Zy6d03f7P728QfdcDeD/IEOs= +github.com/anacrolix/utp v0.0.0-20180219060659-9e0e1d1d0572/go.mod h1:MDwc+vsGEq7RMw6lr2GKOEqjWny5hO5OZXRVNaBJ2Dk= +github.com/anacrolix/utp 
v0.1.0/go.mod h1:MDwc+vsGEq7RMw6lr2GKOEqjWny5hO5OZXRVNaBJ2Dk= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/brotli v0.0.0-20190621154722-5f990b63d2d6/go.mod h1:+lx6/Aqd1kLJ1GQfkvOnaZ1WGmLpMpbprPuIOOZX30U= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= +github.com/arl/statsviz v0.4.0/go.mod h1:+5inUy/dxy11x/KSmicG3ZrEEy0Yr81AFm3dn4QC04M= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -151,20 +687,59 @@ github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7 github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0= github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM= github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= -github.com/benbjohnson/clock v1.0.1/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.0.2 
h1:Z0CN0Yb4ig9sGPXkvAQcGJfnrrMQ5QYLCMPRi9iD7YE= -github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= +github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= +github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= +github.com/benbjohnson/immutable v0.3.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bits-and-blooms/bitset v1.2.2/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bittorrent/go-btfs v0.0.0-20230626064024-58978cbfe949 h1:1FBiTP+5+D6zATxnCZjnVU6+zukuGkuSCgBM7/2w/Xg= +github.com/bittorrent/go-btfs v0.0.0-20230626064024-58978cbfe949/go.mod h1:h5PjtZkd2YY0X5/vkaBm7yDQbLzyzMV3v5O2QggxSSA= 
+github.com/bittorrent/go-btfs-api v0.5.0/go.mod h1:JnVWIMvRK3Rhv8WsW5y0KkJUbw/Vgt247Y8C8Tn/SPE= +github.com/bittorrent/go-btfs-chunker v0.4.0 h1:ruX0vPwJdj0KP4SAtxNzZm593MUA+A3LLV+l9j2w09c= +github.com/bittorrent/go-btfs-chunker v0.4.0/go.mod h1:1xf90c9gOKrHf2tyFIfB5GgFoTkEd1r/5m73ts+WW9A= +github.com/bittorrent/go-btfs-cmds v0.3.0 h1:xpCBgk3zIm84Ne6EjeJgi8WLB5YJJUIFMjK9L9RfL5k= +github.com/bittorrent/go-btfs-cmds v0.3.0/go.mod h1:Fbac/Rou32G0jpoa6wLrNNDxcGOZbGfk+GiG0r3uEIU= +github.com/bittorrent/go-btfs-common v0.9.0 h1:jHcFvYQmvmA4IdvVtkI5d/S/HW65Qz21C6oxeyK812w= +github.com/bittorrent/go-btfs-common v0.9.0/go.mod h1:OG1n3DfcTxQYfLd5zco54LfL3IiDDaw3s7Igahu0Rj0= +github.com/bittorrent/go-btfs-config v0.12.0/go.mod h1:DNaHVC9wU84KLKoC4HkvdoFJKVZ7TF530qzfYu30fCI= +github.com/bittorrent/go-btfs-config v0.12.3 h1:Zi/GTwHo/PJV+90+w45P7axkWsUpOB/XFhgvNk+TwRs= +github.com/bittorrent/go-btfs-config v0.12.3/go.mod h1:DNaHVC9wU84KLKoC4HkvdoFJKVZ7TF530qzfYu30fCI= +github.com/bittorrent/go-btfs-files v0.3.0/go.mod h1:ylMf73m6oK94hL7VPblY1ZZpePsr6XbPV4BaNUwGZR0= +github.com/bittorrent/go-btfs-files v0.3.1 h1:esq3j+6FtZ+SlaxKjVtiYgvXk/SWUiTcv0Q1MeJoPnQ= +github.com/bittorrent/go-btfs-files v0.3.1/go.mod h1:ylMf73m6oK94hL7VPblY1ZZpePsr6XbPV4BaNUwGZR0= +github.com/bittorrent/go-btns v0.2.0 h1:OMpxUiRbtb/PRTK/z/flxcwOfTvNKMsTLOubYFhKy1s= +github.com/bittorrent/go-btns v0.2.0/go.mod h1:+Cinr/1Jl7V/Pqgz+vbOdHXkLVFbMqjypmbAv8QiQPs= +github.com/bittorrent/go-common/v2 v2.4.0 h1:u0jldKnQteTPQDNKj5GUBOUj2Tswn0+GfWN7yq2QAaY= +github.com/bittorrent/go-common/v2 v2.4.0/go.mod h1:DVJCWPoehldR7u0K1n9UeKKsQL28mYiY7XMShjGfB3I= +github.com/bittorrent/go-eccrypto v0.1.0 h1:sNosO+VGuh8IRQvrm9BJ4FeEatRp8ToMfpRTYaNqe7g= +github.com/bittorrent/go-eccrypto v0.1.0/go.mod h1:1kX5RLI52B+1l0VwwBtv+6h28Gu8XojZUu0wc/Iw6GU= +github.com/bittorrent/go-mfs v0.4.0 h1:xb7Bxp65LQP8yhflx47ZMuXzIMSSo9ZrasVhroCvRxs= +github.com/bittorrent/go-mfs v0.4.0/go.mod h1:w7XQuaSCDsL0MhcMP02ViFJQHYg2tLf+/v0w/m7wMfM= 
+github.com/bittorrent/go-unixfs v0.7.0 h1:2SPuQcAmubJUl+zuKoGWdculoZRn7D0zkDnTZ9pupqo= +github.com/bittorrent/go-unixfs v0.7.0/go.mod h1:0UNGV0k5MFsMGOeNjOJFtURcXDFz8bjtyfhcom+vW7A= +github.com/bittorrent/interface-go-btfs-core v0.8.2 h1:iTStlXLoandcKyFruq4U0uVSR3CQU7ey9Lwf8Mu3jw0= +github.com/bittorrent/interface-go-btfs-core v0.8.2/go.mod h1:tQ3d3uI2gH+AO7ikbBwlulRgff0/dzobz9H3SL00yYo= +github.com/bittorrent/protobuf v1.4.0 h1:3AW4SZUud3/8/orb8O/957CdspwxWjX/qprvF49aQ70= +github.com/bittorrent/protobuf v1.4.0/go.mod h1:k2fZczatqZOyvWUezE02Xt5uFcVqdUd1tNeZwXjELCk= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo= +github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo= +github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8/go.mod h1:spo1JLcs67NmW1aVLEgtA8Yy1elc+X8y5SRW1sFW4Og= github.com/bren2010/proquint v0.0.0-20160323162903-38337c27106d h1:QgeLLoPD3kRVmeu/1al9iIpIANMi9O1zXFm8BnYGCJg= github.com/bren2010/proquint v0.0.0-20160323162903-38337c27106d/go.mod h1:Jbj8eKecMNwf0KFI75skSUZqMB4UCRcndUScVBTWyUI= github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= @@ -172,13 +747,22 @@ github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dm github.com/btcsuite/btcd 
v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA= +github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= +github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= +github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= +github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= 
+github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ= github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= @@ -192,18 +776,29 @@ github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOC github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U= +github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= @@ -211,63 +806,128 @@ github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3h github.com/cmars/basen v0.0.0-20150613233007-fe3947df716e/go.mod h1:P13beTBKr5Q18lJe1rIoLUqjM+CB1zYrRg44ZqGuQSA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go 
v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4= +github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM= +github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac= +github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/cockroachdb/logtags 
v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM= +github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/codemodus/kace v0.5.1/go.mod h1:coddaHoX1ku1YFSe4Ip0mL9kQjJvKkzb9CfIdG1YR04= github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= +github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q= +github.com/consensys/gnark-crypto v0.9.1-0.20230105202408-1a7a29904a7c/go.mod h1:CkbdF9hbRidRJYMRzmfX8TMOr95I2pYXRHF18MzRrvA= +github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= +github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod 
h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.2.1-0.20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod 
h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/crate-crypto/go-ipa v0.0.0-20220523130400-f11357ae11c7/go.mod h1:gFnFS95y8HstDP6P9pPwzrxOOC5TRDkwbM+ao15ChAI= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= +github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018 h1:6xT9KW8zLC5IlbaIF5Q7JNieBoACT7iW0YTxQHR0in0= github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= -github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f h1:BOaYiTvg8p9vBUXpklC22XSK/mifLF7lG9jtmYYi3Tc= -github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f/go.mod 
h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= +github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= +github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= +github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= +github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgraph-io/badger v1.6.1 h1:w9pSFNSdq/JPM1N12Fz/F/bzo993Is1W+Q7HjPzi7yg= github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= -github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po= +github.com/dgraph-io/badger v1.6.2 
h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= +github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+XJPdBE= github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/docker 
v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.6.2/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dop251/goja v0.0.0-20200219165308-d1232e640a87/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= +github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja v0.0.0-20230122112309-96b1610dd4f7/go.mod h1:yRkwfj0CBpOGre+TwBsqPV0IH0Pk73e4PXJOeNDboGs= +github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= +github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 
h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= @@ -276,58 +936,108 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= +github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= +github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elgris/jsondiff v0.0.0-20160530203242-765b5c24c302/go.mod h1:qBlWZqWeVx9BjvqBsnC/8RUlAYpIFmPvgROcw0n1scE= +github.com/elliotchance/orderedmap v1.2.0/go.mod h1:8hdSl6jmveQw8ScByd3AaNHNk51RhbTazdqtTty+NFw= +github.com/elliotchance/orderedmap v1.3.0/go.mod h1:8hdSl6jmveQw8ScByd3AaNHNk51RhbTazdqtTty+NFw= 
+github.com/elliotchance/orderedmap v1.4.0/go.mod h1:wsDwEaX5jEoyhbs7x93zk2H/qv0zwuhg4inXhDkYqys= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.9.17/go.mod h1:kihoiSg74VC4dZAXMkmoWp70oQabz48BJg1tuzricFc= -github.com/ethereum/go-ethereum v1.9.24 h1:6AK+ORt3EMDO+FTjzXy/AQwHMbu52J2nYHIjyQX9azQ= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod 
h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/ethereum/go-ethereum v1.9.24/go.mod h1:JIfVb6esrqALTExdz9hRYvrP0xBDf6wCncIu1hNwHpM= -github.com/ethereum/go-ethereum v1.10.3/go.mod h1:99onQmSd1GRGOziyGldI41YQb7EESX3Q4H41IfJgIQQ= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg= +github.com/ethereum/go-ethereum v1.11.1 h1:EMymmWFzpS7G9l9NvVN8G73cgdUIqDPNRf2YTSGBXlk= +github.com/ethereum/go-ethereum v1.11.1/go.mod h1:DuefStAgaxoaYGLR0FueVcVbehmn5n9QUcVrMCuOvuc= +github.com/ethersphere/go-sw3-abi v0.4.0/go.mod h1:BmpsvJ8idQZdYEtWnvxA8POYQ8Rl/NhyCdF0zLMOOJU= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= +github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY= github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod 
h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= +github.com/flosch/pongo2/v4 v4.0.2/go.mod h1:B5ObFANs/36VwxxlgKpdchIJHMvHB562PW+BWPhwZD8= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as= github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= +github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fomichev/secp256k1 v0.0.0-20180413221153-00116ff8c62f h1:YbIfHI+s+tauzUAdNw+lF8a9o056PoIPaLGodgh5D7g= github.com/fomichev/secp256k1 v0.0.0-20180413221153-00116ff8c62f/go.mod h1:X4BmRxczPduAy11nSLYwnR11VuvnbG7ozOTDKLHhx70= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.9.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.13.1/go.mod 
h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/gabriel-vasile/mimetype v1.1.0/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To= -github.com/gabriel-vasile/mimetype v1.1.1/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To= -github.com/gabriel-vasile/mimetype v1.1.2/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gabriel-vasile/mimetype v1.4.1/go.mod h1:05Vi0w3Y9c/lNvJOdmIwvrrAhX3rYhfQQCaf9VJcv7M= +github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732/go.mod h1:o/XfIXWi4/GqbQirfRm5uTbXMG5NpqxkxblnbZ+QM9I= +github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= 
+github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= +github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/go-unsnap-stream v0.0.0-20210130063903-47dfef350d96/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/go-bindata/go-bindata 
v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -335,191 +1045,279 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= 
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod 
h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-pg/migrations/v7 v7.1.6/go.mod h1:ycN6RqhOqa3km5KVLvRyESYP+lvqhrGYZxAIQ5HPPMM= -github.com/go-pg/migrations/v7 v7.1.9/go.mod h1:ycN6RqhOqa3km5KVLvRyESYP+lvqhrGYZxAIQ5HPPMM= github.com/go-pg/migrations/v7 v7.1.11/go.mod h1:v/v7SfckdB2IGmUyopKyASTzjmN30HnDucLLZcCvBWU= github.com/go-pg/pg/v9 v9.0.0-beta.14/go.mod h1:T2Sr6bpTCOr2lUqOUMiXLMJqZHSUBKk1LdgSqjwhZfA= github.com/go-pg/pg/v9 v9.0.0/go.mod h1:Tm/Q3Vt6gdQOH6TTN1H/xLlIXc+Qrka7TZ6uREtu/eA= -github.com/go-pg/pg/v9 v9.0.1/go.mod h1:Tm/Q3Vt6gdQOH6TTN1H/xLlIXc+Qrka7TZ6uREtu/eA= github.com/go-pg/pg/v9 v9.0.3/go.mod h1:Tm/Q3Vt6gdQOH6TTN1H/xLlIXc+Qrka7TZ6uREtu/eA= -github.com/go-pg/pg/v9 v9.1.5/go.mod h1:QM13HBLkdml4zcKOfUfGLymM6hb72aKTJLrmaH8rsFg= github.com/go-pg/pg/v9 v9.1.6/go.mod h1:QM13HBLkdml4zcKOfUfGLymM6hb72aKTJLrmaH8rsFg= github.com/go-pg/pg/v9 v9.2.0/go.mod h1:fG8qbL+ei4e/fCZLHK+Z+/7b9B+pliZtbpaucG4/YNQ= +github.com/go-pg/pg/v9 v9.2.1/go.mod h1:fG8qbL+ei4e/fCZLHK+Z+/7b9B+pliZtbpaucG4/YNQ= github.com/go-pg/urlstruct v0.1.0/go.mod h1:2Nag+BIny6G/KYCkdt++ZnqU/VinzimGapKfs4kwlN0= -github.com/go-pg/urlstruct v0.1.4/go.mod h1:2Nag+BIny6G/KYCkdt++ZnqU/VinzimGapKfs4kwlN0= github.com/go-pg/urlstruct v0.2.6/go.mod h1:dxENwVISWSOX+k87hDt0ueEJadD+gZWv3tHzwfmZPu8= github.com/go-pg/urlstruct v0.3.0/go.mod h1:/XKyiUOUUS3onjF+LJxbfmSywYAdl6qMfVbX33Q8rgg= github.com/go-pg/urlstruct v0.4.0/go.mod h1:/XKyiUOUUS3onjF+LJxbfmSywYAdl6qMfVbX33Q8rgg= github.com/go-pg/zerochecker v0.1.1/go.mod h1:NJZ4wKL0NmTtz0GKCoJ8kym6Xn/EQzXRl2OnAe7MmDo= github.com/go-pg/zerochecker v0.2.0/go.mod h1:NJZ4wKL0NmTtz0GKCoJ8kym6Xn/EQzXRl2OnAe7MmDo= -github.com/go-redis/redis/v7 v7.0.0-beta.4/go.mod h1:xhhSbUMTsleRPur+Vgx9sUHtyN33bdjxY+9/0n9Ig8s= -github.com/go-redis/redis/v7 v7.2.0/go.mod 
h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= +github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= +github.com/go-redis/redis/v7 v7.4.1/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= +github.com/gobwas/pool v0.2.0/go.mod 
h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gobwas/ws v1.1.0/go.mod h1:nzvNcVha5eUziGrbxFCo6qFIojQHjJV5cLYIbezhfL0= +github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod 
h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/gddo v0.0.0-20190419222130-af0f2af80721/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod 
h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26 h1:lMm2hD9Fy0ynom5+85/pbdkiYcBqM1JWmhpAXLmy0fw= github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
-github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp 
v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gopacket v1.1.17 h1:rMrlX2ZY2UbvT+sdz3+6J+pp2z+msCq9MxTU6ymxbBY= github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= -github.com/google/gopacket v1.1.18 h1:lum7VRA9kdlvBi7/v2p7/zcbkduHaCH/SVVyurs7OpY= -github.com/google/gopacket v1.1.18/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= 
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20221203041831-ce31453925ec h1:fR20TYVVwhK4O7r7y+McjRYyaTH6/vjwJOajE+XhlzM= +github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= 
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= +github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= -github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 
h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gosuri/uilive v0.0.0-20170323041506-ac356e6e42cd/go.mod h1:qkLSc0A5EXSP6B04TrN4oQoxqFI7A8XvoXSlJi8cwk8= +github.com/gosuri/uilive v0.0.3/go.mod h1:qkLSc0A5EXSP6B04TrN4oQoxqFI7A8XvoXSlJi8cwk8= +github.com/gosuri/uiprogress v0.0.0-20170224063937-d0567a9d84a1/go.mod h1:C1RTYn4Sc7iEyf6j8ft5dyoZ4212h8G1ol9QQluh5+0= +github.com/gosuri/uiprogress v0.0.1/go.mod h1:C1RTYn4Sc7iEyf6j8ft5dyoZ4212h8G1ol9QQluh5+0= github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= -github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod 
h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= -github.com/gxed/pubsub v0.0.0-20180201040156-26ebdf44f824/go.mod h1:OiEWyHgK+CWrmOlVquHaIK1vhpUJydC9m0Je6mhaiNE= github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8= github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY= github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack 
v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -530,90 +1328,116 @@ github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= 
+github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw= +github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.1.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= +github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= +github.com/huandu/xstrings v1.2.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag= -github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp v1.0.1-0.20210310174557-0ca763054c88/go.mod h1:nNs7wvRfN1eKaMknBydLNQU6146XQim8t4h+q90biWo= +github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= +github.com/huin/goupnp v1.0.3/go.mod 
h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= +github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/hypnoglow/go-pg-monitor v0.1.0/go.mod h1:qe/oofabOXAvIn2iv/eLrtSUaHgNBeR0Dwbgf62fgbQ= github.com/hypnoglow/go-pg-monitor/gopgv9 v0.1.0/go.mod h1:0Mj+MFtASobV/5qHb68nxBdoGjr1QXTDU/9ZKPi8UF0= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= +github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= 
github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= -github.com/ip2location/ip2location-go v8.2.0+incompatible/go.mod h1:3JUY1TBjTx1GdA7oRT7Zeqfc0bg3lMMuU5lXmzdpuME= github.com/ip2location/ip2location-go/v9 v9.0.0/go.mod h1:s5SV6YZL10TpfPpXw//7fEJC65G/yH7Oh+Tjq9JcQEQ= github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/go-bitswap v0.0.3/go.mod h1:jadAZYsP/tcRMl47ZhFxhaNuDQoXawT8iHMg+iFoQbg= +github.com/ipfs/go-bitfield v1.0.0/go.mod h1:N/UiujQy+K+ceU1EF5EkVd1TNqevLrCQMIcAEPrdtus= +github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= +github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= github.com/ipfs/go-bitswap 
v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= -github.com/ipfs/go-bitswap v0.1.3/go.mod h1:YEQlFy0kkxops5Vy+OxWdRSEZIoS7I7KDIwoa5Chkps= github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= -github.com/ipfs/go-bitswap v0.2.19 h1:EhgRz8gqWQIBADY9gpqJOrfs5E1MtVfQFy1Vq8Z+Fq8= -github.com/ipfs/go-bitswap v0.2.19/go.mod h1:C7TwBgHnu89Q8sHsTJP7IhUqF9XYLe71P4tT5adgmYo= -github.com/ipfs/go-bitswap v0.2.20 h1:Zfi5jDUoqxDThORUznqdeL77DdGniAzlccNJ4vr+Itc= -github.com/ipfs/go-bitswap v0.2.20/go.mod h1:C7TwBgHnu89Q8sHsTJP7IhUqF9XYLe71P4tT5adgmYo= +github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI= +github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= +github.com/ipfs/go-bitswap v0.6.0/go.mod h1:Hj3ZXdOC5wBJvENtdqsixmzzRukqd8EHLxZLZc3mzRA= +github.com/ipfs/go-bitswap v0.8.0/go.mod h1:/h8sBij8UVEaNWl8ABzpLRA5Y1cttdNUnpeGo2AA/LQ= +github.com/ipfs/go-bitswap v0.11.0 h1:j1WVvhDX1yhG32NTC9xfxnqycqYIlhzEzLXG/cU1HyQ= +github.com/ipfs/go-bitswap v0.11.0/go.mod h1:05aE8H3XOU+LXpTedeAS0OZpcO1WFsj5niYQH9a1Tmk= github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= -github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= -github.com/ipfs/go-blockservice v0.0.3/go.mod h1:/NNihwTi6V2Yr6g8wBI+BSwPuURpBRMtYNGrlxZ8KuI= +github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= +github.com/ipfs/go-block-format v0.1.2 h1:GAjkfhVx1f4YTODS6Esrj1wt2HhrtwTnhEr+DyPUaJo= +github.com/ipfs/go-block-format v0.1.2/go.mod h1:mACVcrxarQKstUU3Yf/RdwbC4DzPV6++rO2a3d+a/KE= github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= -github.com/ipfs/go-blockservice 
v0.1.1/go.mod h1:t+411r7psEUhLueM8C7aPA7cxCclv4O3VsUVxt9kz2I= -github.com/ipfs/go-blockservice v0.1.2/go.mod h1:t+411r7psEUhLueM8C7aPA7cxCclv4O3VsUVxt9kz2I= -github.com/ipfs/go-blockservice v0.1.3 h1:9XgsPMwwWJSC9uVr2pMDsW2qFTBSkxpGMhmna8mIjPM= -github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= +github.com/ipfs/go-blockservice v0.1.4/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= +github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= +github.com/ipfs/go-blockservice v0.3.0/go.mod h1:P5ppi8IHDC7O+pA0AlGTF09jruB2h+oP3wVVaZl8sfk= +github.com/ipfs/go-blockservice v0.4.0/go.mod h1:kRjO3wlGW9mS1aKuiCeGhx9K1DagQ10ACpVO59qgAx4= +github.com/ipfs/go-blockservice v0.5.0 h1:B2mwhhhVQl2ntW2EIpaWPwSCxSuqr5fFA93Ms4bYLEY= +github.com/ipfs/go-blockservice v0.5.0/go.mod h1:W6brZ5k20AehbmERplmERn8o2Ni3ZZubvAxaIUeaT6w= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= -github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= -github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cidutil v0.0.2 h1:CNOboQf1t7Qp0nuNh8QMmhJs0+Q//bRL1axtCnIB1Yo= -github.com/ipfs/go-cidutil v0.0.2/go.mod h1:ewllrvrxG6AMYStla3GD7Cqn+XYSLqjK0vc+086tB6s= +github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= 
+github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= +github.com/ipfs/go-cid v0.3.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= +github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= +github.com/ipfs/go-cid v0.4.0 h1:a4pdZq0sx6ZSxbCizebnKiMCx/xI/aBBFlB73IgH4rA= +github.com/ipfs/go-cid v0.4.0/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= +github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= +github.com/ipfs/go-cidutil v0.1.0/go.mod h1:e7OEVBMIv9JaOxt9zaGEmAoSlXW9jdFZ5lP/0PwcfpA= github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.3.0/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.5 h1:cwOUcGMLdLPWgu3SlrCckCMznaGADbPqE0r8h768/Dg= github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= +github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= +github.com/ipfs/go-datastore v0.6.0/go.mod 
h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= +github.com/ipfs/go-delegated-routing v0.7.0 h1:43FyMnKA+8XnyX68Fwg6aoGkqrf8NS5aG7p644s26PU= +github.com/ipfs/go-delegated-routing v0.7.0/go.mod h1:u4zxjUWIe7APUW5ds9CfD0tJX3vM9JhIeNqA8kE4vHE= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= @@ -621,148 +1445,171 @@ github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaH github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= -github.com/ipfs/go-ds-badger v0.2.4 h1:UPGB0y7luFHk+mY/tUZrif/272M8o+hFsW+avLUeWrM= -github.com/ipfs/go-ds-badger v0.2.4/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= -github.com/ipfs/go-ds-flatfs v0.4.4 h1:DmGZ4qOYQLNgu8Mltuz1DtUHpm+BjWMcVN3F3H3VJzQ= -github.com/ipfs/go-ds-flatfs v0.4.4/go.mod h1:e4TesLyZoA8k1gV/yCuBTnt2PJtypn4XUlB5n8KQMZY= -github.com/ipfs/go-ds-flatfs v0.4.5 h1:4QceuKEbH+HVZ2ZommstJMi3o3II+dWS3IhLaD7IGHs= -github.com/ipfs/go-ds-flatfs v0.4.5/go.mod h1:e4TesLyZoA8k1gV/yCuBTnt2PJtypn4XUlB5n8KQMZY= +github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= +github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= +github.com/ipfs/go-ds-flatfs v0.5.1 h1:ZCIO/kQOS/PSh3vcF1H6a8fkRGS7pOfwfPdx4n/KJH4= +github.com/ipfs/go-ds-flatfs v0.5.1/go.mod h1:RWTV7oZD/yZYBKdbVIFXTX2fdY2Tbvl94NsWqmoyAX4= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ds-leveldb 
v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-leveldb v0.4.2 h1:QmQoAJ9WkPMUfBLnu1sBVy0xWWlJPg0m4kRAiJL9iaw= github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-measure v0.1.0 h1:vE4TyY4aeLeVgnnPBC5QzKIjKrqzha0NCujTfgvVbVQ= -github.com/ipfs/go-ds-measure v0.1.0/go.mod h1:1nDiFrhLlwArTME1Ees2XaBOl49OoCgd2A3f8EchMSY= -github.com/ipfs/go-filestore v0.0.3 h1:MhZ1jT5K3NewZwim6rS/akcJLm1xM+r6nz6foeB9EwE= -github.com/ipfs/go-filestore v0.0.3/go.mod h1:dvXRykFzyyXN2CdNlRGzDAkXMDPyI+D7JE066SiKLSE= -github.com/ipfs/go-fs-lock v0.0.5 h1:nlKE27N7hlvsTXT3CSDkM6KRqgXpnaDjvyCHjAiZyK8= -github.com/ipfs/go-fs-lock v0.0.5/go.mod h1:fq8gXFwbi1on9KScveHuVJ2wjuqo5jaDgCtZdKLuCO8= -github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= -github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= -github.com/ipfs/go-graphsync v0.1.1 h1:bFDAYS0Z48yd8ROPI6f/zIVmJxaDLA6m8cVuJPKC5fE= -github.com/ipfs/go-graphsync v0.1.1/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= -github.com/ipfs/go-graphsync v0.2.0 h1:x94MvHLNuRwBlZzVal7tR1RYK7T7H6bqQLPopxDbIF0= -github.com/ipfs/go-graphsync v0.2.0/go.mod h1:gEBvJUNelzMkaRPJTpg/jaKN4AQW/7wDWu0K92D8o10= +github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= +github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= +github.com/ipfs/go-ds-measure v0.2.0 h1:sG4goQe0KDTccHMyT45CY1XyUbxe5VwTKpg2LjApYyQ= +github.com/ipfs/go-ds-measure v0.2.0/go.mod h1:SEUD/rE2PwRa4IQEC5FuNAmjJCyYObZr9UvVh8V3JxE= +github.com/ipfs/go-fetcher v1.5.0/go.mod h1:5pDZ0393oRF/fHiLmtFZtpMNBQfHOYNPtryWedVuSWE= +github.com/ipfs/go-fetcher v1.6.1 h1:UFuRVYX5AIllTiRhi5uK/iZkfhSpBCGX7L70nSZEmK8= +github.com/ipfs/go-fetcher v1.6.1/go.mod h1:27d/xMV8bodjVs9pugh/RCjjK2OZ68UgAMspMdingNo= +github.com/ipfs/go-filestore v1.2.0 
h1:O2wg7wdibwxkEDcl7xkuQsPvJFRBVgVSsOJ/GP6z3yU= +github.com/ipfs/go-filestore v1.2.0/go.mod h1:HLJrCxRXquTeEEpde4lTLMaE/MYJZD7WHLkp9z6+FF8= +github.com/ipfs/go-fs-lock v0.0.7 h1:6BR3dajORFrFTkb5EpCUFIAypsoxpGpDSVUdFwzgL9U= +github.com/ipfs/go-fs-lock v0.0.7/go.mod h1:Js8ka+FNYmgQRLrRXzU3CB/+Csr1BwrRilEcvYrHhhc= +github.com/ipfs/go-graphsync v0.14.0 h1:f5KYkc8GpwwE1BrjBOWxIkRivXIw7fVqGZlnILpvbSc= +github.com/ipfs/go-graphsync v0.14.0/go.mod h1:1LDVVnNHjit8ddJOtw3Jq9epP792xWFXXL3dJWIBIkM= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= -github.com/ipfs/go-ipfs-blockstore v0.1.4 h1:2SGI6U1B44aODevza8Rde3+dY30Pb+lbcObe1LETxOQ= github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= +github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= +github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= +github.com/ipfs/go-ipfs-blockstore v1.2.0 h1:n3WTeJ4LdICWs/0VSfjHrlqpPpl6MZ+ySd3j8qz0ykw= +github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= -github.com/ipfs/go-ipfs-cmds v0.3.0 h1:mi9oYrSCox5aBhutqAYqw6/9crlyGbw4E/aJtwS4zI4= -github.com/ipfs/go-ipfs-cmds v0.3.0/go.mod h1:ZgYiWVnCk43ChwoH8hAmI1IRbuVtq3GSTHwtRB/Kqhk= -github.com/ipfs/go-ipfs-config v0.9.0 h1:qTXJ9CyOyQv1LFJUMysxz8fi6RxxnP9QqcmiobuANvw= 
-github.com/ipfs/go-ipfs-config v0.9.0/go.mod h1:GQUxqb0NfkZmEU92PxqqqLVVFTLpoGGUlBaTyDaAqrE= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= -github.com/ipfs/go-ipfs-ds-help v0.1.1 h1:IW/bXGeaAZV2VH0Kuok+Ohva/zHkHmeLFBxC1k7mNPc= github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= -github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= +github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= +github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= -github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= +github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= +github.com/ipfs/go-ipfs-exchange-interface v0.2.0 h1:8lMSJmKogZYNo2jjhUs0izT+dck05pqUw4mWNW9Pw6Y= +github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSOuVDhqF9JtTrO3eptSAiW2/Y= github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= -github.com/ipfs/go-ipfs-files v0.0.2/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= +github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= +github.com/ipfs/go-ipfs-exchange-offline v0.2.0/go.mod h1:HjwBeW0dvZvfOMwDP0TSKXIHf2s+ksdP4E3MLDRtLKY= +github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA= 
+github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s= github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.8 h1:8o0oFJkJ8UkO/ABl8T6ac6tKF3+NIpj67aAB6ZpusRg= github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= -github.com/ipfs/go-ipfs-flags v0.0.1/go.mod h1:RnXBb9WV53GSfTrSDVK61NLTFKvWc60n+K9EgCDh+rA= -github.com/ipfs/go-ipfs-pinner v0.0.4 h1:EmxhS3vDsCK/rZrsgxX0Le9m2drBcGlUd7ah/VyFYVE= -github.com/ipfs/go-ipfs-pinner v0.0.4/go.mod h1:s4kFZWLWGDudN8Jyd/GTpt222A12C2snA2+OTdy/7p8= +github.com/ipfs/go-ipfs-pinner v0.2.1 h1:kw9hiqh2p8TatILYZ3WAfQQABby7SQARdrdA+5Z5QfY= +github.com/ipfs/go-ipfs-pinner v0.2.1/go.mod h1:l1AtLL5bovb7opnG77sh4Y10waINz3Y1ni6CvTzx7oo= github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= -github.com/ipfs/go-ipfs-provider v0.4.3 h1:k54OHXZcFBkhL6l3GnPS9PfpaLeLqZjVASG1bgfBdfQ= -github.com/ipfs/go-ipfs-provider v0.4.3/go.mod h1:rcQBVqfblDQRk5LaCtf2uxuKxMJxvKmF5pLS0pO4au4= +github.com/ipfs/go-ipfs-provider v0.8.0 h1:4YTe9IdX99NUZEEzOsooPNxQozI+lY5x6SDWjUYhPiM= +github.com/ipfs/go-ipfs-provider v0.8.0/go.mod h1:qCpwpoohIRVXvNzkygzsM3qdqP/sXlrogtA5I45tClc= +github.com/ipfs/go-ipfs-redirects-file v0.1.1/go.mod h1:tAwRjCV0RjLTjH8DR/AU7VYvfQECg+lpUy2Mdzv7gyk= github.com/ipfs/go-ipfs-routing v0.0.1/go.mod h1:k76lf20iKFxQTjcJokbPM9iBXVXVZhcOwc360N4nuKs= -github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= github.com/ipfs/go-ipfs-routing v0.1.0/go.mod 
h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= -github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= +github.com/ipfs/go-ipfs-routing v0.3.0 h1:9W/W3N+g+y4ZDeffSgqhgo7BsBSJwPMcyssET9OWevc= +github.com/ipfs/go-ipfs-routing v0.3.0/go.mod h1:dKqtTFIql7e1zYsEuWLyuOU+E0WJWW8JjbTPLParDWo= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= -github.com/ipfs/go-ipld-cbor v0.0.1/go.mod h1:RXHr8s4k0NE0TKhnrxqZC9M888QfsBN9rhS5NjfKzY8= github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= -github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= -github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.5 h1:ovz4CHKogtG2KB/h1zUp5U0c/IzZrL435rCh5+K/5G8= +github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= -github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= -github.com/ipfs/go-ipld-git v0.0.3 h1:/YjkjCyo5KYRpW+suby8Xh9Cm/iH9dAgGV6qyZ1dGus= -github.com/ipfs/go-ipld-git v0.0.3/go.mod h1:RuvMXa9qtJpDbqngyICCU/d+cmLFXxLsbIclmD0Lcr0= -github.com/ipfs/go-ipns v0.0.2 h1:oq4ErrV4hNQ2Eim257RTYRgfOSV/s8BDaf9iIl4NwFs= -github.com/ipfs/go-ipns v0.0.2/go.mod 
h1:WChil4e0/m9cIINWLxZe1Jtf77oz5L05rO2ei/uKJ5U= +github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= +github.com/ipfs/go-ipld-format v0.4.0 h1:yqJSaJftjmjc9jEOFYlpkwOLVKv68OD27jFLlSghBlQ= +github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= +github.com/ipfs/go-ipld-git v0.1.1 h1:TWGnZjS0htmEmlMFEkA3ogrNCqWjIxwr16x1OsdhG+Y= +github.com/ipfs/go-ipld-git v0.1.1/go.mod h1:+VyMqF5lMcJh4rwEppV0e6g4nCCHXThLYYDpKUkJubI= +github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= +github.com/ipfs/go-ipld-legacy v0.1.1 h1:BvD8PEuqwBHLTKqlGFTHSwrwFOMkVESEvwIYwR2cdcc= +github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg= +github.com/ipfs/go-ipns v0.2.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24= +github.com/ipfs/go-ipns v0.3.0 h1:ai791nTgVo+zTuq2bLvEGmWP1M0A6kGTXUsgv/Yq67A= +github.com/ipfs/go-ipns v0.3.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= -github.com/ipfs/go-log v1.0.1/go.mod h1:HuWlQttfN6FWNHRhlY5yMk/lW7evQC0HHGOxEwMRR8I= github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= -github.com/ipfs/go-log v1.0.3 h1:Gg7SUYSZ7BrqaKMwM+hRgcAkKv4QLfzP4XPQt5Sx/OI= github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= -github.com/ipfs/go-log v1.0.4 h1:6nLQdX4W8P9yZZFH7mO+X/PzjN8Laozm/lMJ6esdgzY= github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= +github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= -github.com/ipfs/go-log/v2 v2.0.1/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.3 
h1:Q2gXcBoCALyLN/pUQlz1qgu0x3uFV6FzP9oXhpfyJpc= github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.5 h1:fL4YI+1g5V/b1Yxr1qAiXTMg1H8z9vx/VmJxBuQMHvU= github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= -github.com/ipfs/go-log/v2 v2.0.8 h1:3b3YNopMHlj4AvyhWAx0pDxqSQWYi4/WuWO7yRV6/Qg= -github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= -github.com/ipfs/go-log/v2 v2.1.1 h1:G4TtqN+V9y9HY9TA6BwbCVyyBZ2B9MbCjR2MtGx8FR0= github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA= +github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= +github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= +github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= -github.com/ipfs/go-merkledag v0.1.0/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= -github.com/ipfs/go-merkledag v0.3.0/go.mod h1:4pymaZLhSLNVuiCITYrpViD6vmfZ/Ws4n/L9tfNv3S4= -github.com/ipfs/go-merkledag v0.3.1 h1:3UqWINBEr3/N+r6OwgFXAddDP/8zpQX/8J7IGVOCqRQ= -github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= -github.com/ipfs/go-merkledag v0.3.2 h1:MRqj40QkrWkvPswXs4EfSslhZ4RVPRbxwX11js0t1xY= github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4= 
+github.com/ipfs/go-merkledag v0.6.0/go.mod h1:9HSEwRd5sV+lbykiYP+2NC/3o6MZbKNaa4hfNcH5iH0= +github.com/ipfs/go-merkledag v0.8.1 h1:N3yrqSre/ffvdwtHL4MXy0n7XH+VzN8DlzDrJySPa94= +github.com/ipfs/go-merkledag v0.8.1/go.mod h1:uYUlWE34GhbcTjGuUDEcdPzsEtOdnOupL64NgSRjmWI= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-metrics-prometheus v0.0.2/go.mod h1:ELLU99AQQNi+zX6GCGm2lAgnzdSH3u5UVlCdqSXnEks= -github.com/ipfs/go-mfs v0.1.2 h1:DlelNSmH+yz/Riy0RjPKlooPg0KML4lXGdLw7uZkfAg= -github.com/ipfs/go-mfs v0.1.2/go.mod h1:T1QBiZPEpkPLzDqEJLNnbK55BVKVlNi2a+gVm4diFo0= -github.com/ipfs/go-path v0.0.3/go.mod h1:zIRQUez3LuQIU25zFjC2hpBTHimWx7VK5bjZgRLbbdo= -github.com/ipfs/go-path v0.0.7 h1:H06hKMquQ0aYtHiHryOMLpQC1qC3QwXwkahcEVD51Ho= github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno= -github.com/ipfs/go-path v0.0.8 h1:R0k6t9x/pa+g8qzl5apQIPurJFozXhopks3iw3MX+jU= -github.com/ipfs/go-path v0.0.8/go.mod h1:VpDkSBKQ9EFQOUgi54Tq/O/tGi8n1RfYNks13M3DEs8= +github.com/ipfs/go-path v0.2.1/go.mod h1:NOScsVgxfC/eIw4nz6OiGwK42PjaSJ4Y/ZFPn1Xe07I= +github.com/ipfs/go-path v0.3.1 h1:wkeaCWE/NTuuPGlEkLTsED5UkzfKYZpxaFFPgk8ZVLE= +github.com/ipfs/go-path v0.3.1/go.mod h1:eNLsxJEEMxn/CDzUJ6wuNl+6No6tEUhOZcPKsZsYX0E= github.com/ipfs/go-peertaskqueue v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ= github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.2.0 h1:2cSr7exUGKYyDeUyQ7P/nHPs9P7Ht/B+ROrpN1EJOjc= github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= -github.com/ipfs/go-unixfs v0.0.4/go.mod h1:eIo/p9ADu/MFOuyxzwU+Th8D6xoxU//r590vUpWyfz8= -github.com/ipfs/go-unixfs v0.1.0/go.mod 
h1:lysk5ELhOso8+Fed9U1QTGey2ocsfaZ18h0NCO2Fj9s= -github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo= +github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= +github.com/ipfs/go-peertaskqueue v0.8.0 h1:JyNO144tfu9bx6Hpo119zvbEL9iQ760FHOiJYsUjqaU= +github.com/ipfs/go-peertaskqueue v0.8.0/go.mod h1:cz8hEnnARq4Du5TGqiWKgMr/BOSQ5XOgMOh1K5YYKKM= github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= -github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= +github.com/ipfs/go-unixfs v0.3.1 h1:LrfED0OGfG98ZEegO4/xiprx2O+yS+krCMQSp7zLVv8= +github.com/ipfs/go-unixfs v0.3.1/go.mod h1:h4qfQYzghiIc8ZNFKiLMFWOTzrWIAtzYQ59W/pCFf1o= +github.com/ipfs/go-unixfsnode v1.1.2/go.mod h1:5dcE2x03pyjHk4JjamXmunTMzz+VUtqvPwZjIEkfV6s= +github.com/ipfs/go-unixfsnode v1.4.0 h1:9BUxHBXrbNi8mWHc6j+5C580WJqtVw9uoeEKn4tMhwA= +github.com/ipfs/go-unixfsnode v1.4.0/go.mod h1:qc7YFFZ8tABc58p62HnIYbUMwj9chhUuFWmxSokfePo= github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= -github.com/TRON-US/interface-go-btfs-core v0.3.0/go.mod h1:Tihp8zxGpUeE3Tokr94L6zWZZdkRQvG5TL6i9MuNE+s= -github.com/TRON-US/interface-go-btfs-core v0.4.0 h1:+mUiamyHIwedqP8ZgbCIwpy40oX7QcXUbo4CZOeJVJg= -github.com/TRON-US/interface-go-btfs-core v0.4.0/go.mod h1:UJBcU6iNennuI05amq3FQ7g0JHUkibHFAfhfUIy927o= -github.com/ipld/go-car v0.1.1-0.20200429200904-c222d793c339/go.mod h1:eajxljm6I8o3LitnFeVEmucwZmz7+yLSiKce9yYMefg= -github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e h1:ZISbJlM0urTANR9KRfRaqlBmyOj5uUtxs2r4Up9IXsA= -github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= -github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f h1:XpOuNQ5GbXxUcSukbQcW9jkE7REpaFGJU2/T00fo9kA= -github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod 
h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1 h1:K1Ysr7kgIlo7YQkPqdkA6H7BVdIugvuAz7OQUTJxLdE= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6 h1:6Mq+tZGSEMEoJJ1NbJRhddeelkXZcU8yfH/ZRYUo/Es= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= -github.com/jackpal/gateway v1.0.4/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= +github.com/ipfs/go-verifcid v0.0.2 h1:XPnUv0XmdH+ZIhLGKg6U2vaPaRDXb9urMyNVCE7uvTs= +github.com/ipfs/go-verifcid v0.0.2/go.mod h1:40cD9x1y4OWnFXbLNJYRe7MpNvWlMn3LZAG5Wb4xnPU= +github.com/ipld/edelweiss v0.2.0 h1:KfAZBP8eeJtrLxLhi7r3N0cBCo7JmwSRhOJp3WSpNjk= +github.com/ipld/edelweiss v0.2.0/go.mod h1:FJAzJRCep4iI8FOFlRriN9n0b7OuX3T/S9++NpBDmA4= +github.com/ipld/go-car v0.4.0 h1:U6W7F1aKF/OJMHovnOVdst2cpQE5GhmHibQkAixgNcQ= +github.com/ipld/go-car v0.4.0/go.mod h1:Uslcn4O9cBKK9wqHm/cLTFacg6RAPv6LZx2mxd2Ypl4= +github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= +github.com/ipld/go-car/v2 v2.4.0 h1:8jI6/iKlyLqRZzLz31jFWTqKvslaVzFsin305sOuqNQ= +github.com/ipld/go-car/v2 v2.4.0/go.mod h1:zjpRf0Jew9gHqSvjsKVyoq9OY9SWoEKdYCQUKVaaPT0= +github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= +github.com/ipld/go-codec-dagpb v1.3.1/go.mod h1:ErNNglIi5KMur/MfFE/svtgQthzVvf+43MrzLbpcIZY= +github.com/ipld/go-codec-dagpb v1.4.1 h1:CUQJaOPRgSZ27OUPgUWtvdvvd2d17/IGGAIMOo4yYp0= +github.com/ipld/go-codec-dagpb v1.4.1/go.mod h1:XdXTO/TUD/ra9RcK/NfmwBfr1JpFxM2uRKaB9oe4LxE= +github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= 
+github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= +github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM= +github.com/ipld/go-ipld-prime v0.16.0/go.mod h1:axSCuOCBPqrH+gvXr2w9uAOulJqBPhHPT2PjoiiU1qA= +github.com/ipld/go-ipld-prime v0.17.0/go.mod h1:aYcKm5TIvGfY8P3QBKz/2gKcLxzJ1zDaD+o0bOowhgs= +github.com/ipld/go-ipld-prime v0.17.1-0.20220624062450-534ccf82237d/go.mod h1:aYcKm5TIvGfY8P3QBKz/2gKcLxzJ1zDaD+o0bOowhgs= +github.com/ipld/go-ipld-prime v0.18.0/go.mod h1:735yXW548CKrLwVCYXzqx90p5deRJMVVxM9eJ4Qe+qE= +github.com/ipld/go-ipld-prime v0.19.0 h1:5axC7rJmPc17Emw6TelxGwnzALk0PdupZ2oj2roDj04= +github.com/ipld/go-ipld-prime v0.19.0/go.mod h1:Q9j3BaVXwaA3o5JUDNvptDDr/x8+F7FG6XJ8WI3ILg4= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73 h1:TsyATB2ZRRQGTwafJdgEUQkmjOExRV0DNokcihZxbnQ= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= +github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= +github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= +github.com/iris-contrib/httpexpect/v2 v2.3.1/go.mod h1:ICTf89VBKSD3KB0fsyyHviKF8G8hyepP0dOXJPWz3T0= +github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI= +github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk= +github.com/iris-contrib/jade v1.1.4/go.mod h1:EDqR+ur9piDl6DUgs6qRrlfzmlx/D5UybogqrXvJTBE= +github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g= +github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= +github.com/iris-contrib/schema v0.0.6/go.mod h1:iYszG0IOsuIsfzjymw1kMzTL8YQcCWlm65f3wX8J5iA= github.com/jackpal/gateway v1.0.5/go.mod 
h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= @@ -786,33 +1633,60 @@ github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1C github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go 
v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= +github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= +github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/kami-zh/go-capturer 
v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/kataras/blocks v0.0.6/go.mod h1:UK+Iwk0Oxpc0GdoJja7sEildotAUKK1LYeYcVF0COWc= +github.com/kataras/blocks v0.0.7/go.mod h1:UJIU97CluDo0f+zEjbnbkeMRlvYORtmc1304EeyXf4I= +github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk= +github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8= +github.com/kataras/golog v0.1.7/go.mod h1:jOSQ+C5fUqsNSwurB/oAHq1IFSb0KI3l6GMa7xB6dZA= +github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U= +github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE= +github.com/kataras/iris/v12 v12.2.0-beta5/go.mod h1:q26aoWJ0Knx/00iPKg5iizDK7oQQSPjbD8np0XDh6dc= +github.com/kataras/jwt v0.1.8/go.mod h1:Q5j2IkcIHnfwy+oNY3TVWuEBJNw0ADgCcXK9CaZwV4o= +github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw= +github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE= +github.com/kataras/neffos v0.0.20/go.mod h1:srdvC/Uo8mgrApWW0AYtiiLgMbyNPf69qPsd2FhE6MQ= +github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0= +github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro= +github.com/kataras/pio v0.0.10/go.mod h1:gS3ui9xSD+lAUpbYnjOGiQyY7sUMJO+EHpiRzhtZ5no= +github.com/kataras/pio v0.0.11/go.mod h1:38hH6SWH6m4DKSYmRhlrCJ5WItwWgCVrTNU62XZyUvI= +github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8= +github.com/kataras/sitemap v0.0.6/go.mod h1:dW4dOCNs896OR1HmG+dMLdT7JjDk7mYBzoIRwuj5jA4= +github.com/kataras/tunnel v0.0.4/go.mod 
h1:9FkU4LaeifdMWqZu7o20ojmW4B7hdhv2CMLwfnHGpYw= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -820,121 +1694,127 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.10/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= +github.com/klauspost/compress 
v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.4 h1:EBfaK0SWSwk+fgk6efYFWdzl8MwRWoOO1gkmiaTXPW4= github.com/klauspost/cpuid v1.2.4/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.1.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= +github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/klauspost/reedsolomon v1.9.2/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ/pcNv7fu+8Un4= -github.com/klauspost/reedsolomon v1.9.9/go.mod h1:O7yFFHiQwDR6b2t63KPUpccPtNdp5ADgh1gg4fd12wo= +github.com/klauspost/reedsolomon v1.9.14 h1:vkPCIhFMn2VdktLUcugqsU4vcLXN3dAhVd1uWA+TDD8= +github.com/klauspost/reedsolomon v1.9.14/go.mod h1:eqPAcE7xar5CIzcdfwydOEdcmchAKAP/qs14y4GCBOk= 
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= -github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8= +github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text 
v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= +github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= +github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= +github.com/labstack/echo/v4 v4.9.0/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/libp2p/go-addr-util v0.0.1 h1:TpTQm9cXVRVSKsYbgQ7GKc3KbbHVTnbostgGaDEP+88= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= -github.com/libp2p/go-addr-util v0.0.2 h1:7cWK5cdA5x72jX0g8iLrQWm5TRJZ6CzGdPEhWj7plWU= github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= -github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= 
github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40JQWnayTvNMgD/vyk= -github.com/libp2p/go-conn-security-multistream v0.0.1/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= -github.com/libp2p/go-conn-security-multistream v0.1.0 h1:aqGmto+ttL/uJgX0JtQI0tD21CIEy5eYd1Hlp0juHY0= github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= -github.com/libp2p/go-conn-security-multistream v0.2.0 h1:uNiDjS58vrvJTg9jO6bySd1rMKejieG7v45ekqHbZ1M= github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= -github.com/libp2p/go-eventbus v0.1.0 h1:mlawomSAjjkk97QnYiEmHsLu7E136+2oCWSHRUvMfzQ= +github.com/libp2p/go-doh-resolver v0.4.0 h1:gUBa1f1XsPwtpE1du0O+nnZCUqtG7oYi7Bb+0S7FQqw= +github.com/libp2p/go-doh-resolver v0.4.0/go.mod h1:v1/jwsFusgsWIGX/c6vCRrnJ60x7bhTiq/fs2qt0cAg= github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= -github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8= github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= github.com/libp2p/go-flow-metrics v0.0.2/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= -github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM= github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= -github.com/libp2p/go-libp2p v0.0.2/go.mod h1:Qu8bWqFXiocPloabFGUcVG4kk94fLvfC8mWTDdFC9wE= +github.com/libp2p/go-flow-metrics v0.1.0 
h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= +github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= github.com/libp2p/go-libp2p v0.0.30/go.mod h1:XWT8FGHlhptAv1+3V/+J5mEpzyui/5bvFsNuWYs611A= github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= -github.com/libp2p/go-libp2p v0.6.0/go.mod h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt043oWeqwg= github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= -github.com/libp2p/go-libp2p v0.7.4 h1:xVj1oSlN0C+FlxqiLuHC8WruMvq24xxfeVxmNhTG0r0= github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= -github.com/libp2p/go-libp2p v0.8.2/go.mod h1:NQDA/F/qArMHGe0J7sDScaKjW8Jh4y/ozQqBbYJ+BnA= -github.com/libp2p/go-libp2p v0.8.3 h1:IFWeNzxkBaNO1N8stN9ayFGdC6RmVuSsKd5bou7qpK0= -github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= -github.com/libp2p/go-libp2p v0.10.0 h1:7ooOvK1wi8eLpyTppy8TeH43UHy5uI75GAHGJxenUi0= -github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= -github.com/libp2p/go-libp2p v0.11.0 h1:jb5mqdqYEBAybTEhD8io43Cz5LzVKuWxOK7znSN69jE= -github.com/libp2p/go-libp2p v0.11.0/go.mod h1:3/ogJDXsbbepEfqtZKBR/DedzxJXCeK17t2Z9RE9bEE= -github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U= -github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= -github.com/libp2p/go-libp2p-autonat v0.0.2/go.mod h1:fs71q5Xk+pdnKU014o2iq1RhMs9/PMaG5zXRFNnIIT4= +github.com/libp2p/go-libp2p v0.13.0/go.mod 
h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo= +github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= +github.com/libp2p/go-libp2p v0.22.0/go.mod h1:UDolmweypBSjQb2f7xutPnwZ/fxioLbMBxSjRksxxU4= +github.com/libp2p/go-libp2p v0.23.2/go.mod h1:s9DEa5NLR4g+LZS+md5uGU4emjMWFiqkZr6hBTY8UxI= +github.com/libp2p/go-libp2p v0.23.4/go.mod h1:s9DEa5NLR4g+LZS+md5uGU4emjMWFiqkZr6hBTY8UxI= +github.com/libp2p/go-libp2p v0.24.2 h1:iMViPIcLY0D6zr/f+1Yq9EavCZu2i7eDstsr1nEwSAk= +github.com/libp2p/go-libp2p v0.24.2/go.mod h1:WuxtL2V8yGjam03D93ZBC19tvOUiPpewYv1xdFGWu1k= +github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= +github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= -github.com/libp2p/go-libp2p-autonat v0.2.1 h1:T0CRQhrvTBKfBSYw6Xo2K3ixtNpAnRCraxof3AAfgQA= github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= -github.com/libp2p/go-libp2p-autonat v0.2.2 h1:4dlgcEEugTFWSvdG2UIFxhnOMpX76QaZSRAtXmYB8n4= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= -github.com/libp2p/go-libp2p-autonat v0.2.3 h1:w46bKK3KTOUWDe5mDYMRjJu1uryqBp8HCNDp/TWMqKw= -github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= -github.com/libp2p/go-libp2p-autonat v0.3.2 h1:OhDSwVVaq7liTaRIsFFYvsaPp0pn2yi0WazejZ4DUmo= -github.com/libp2p/go-libp2p-autonat v0.3.2/go.mod h1:0OzOi1/cVc7UcxfOddemYD5vzEqi4fwRbnZcJGLi68U= +github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod 
h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= -github.com/libp2p/go-libp2p-blankhost v0.1.4 h1:I96SWjR4rK9irDHcHq3XHN6hawCRTPUADzkJacgZLvk= github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= -github.com/libp2p/go-libp2p-blankhost v0.1.6 h1:CkPp1/zaCrCnBo0AdsQA0O1VkUYoUOtyHOnoa8gKIcE= -github.com/libp2p/go-libp2p-blankhost v0.1.6/go.mod h1:jONCAJqEP+Z8T6EQviGL4JsQcLx1LgTGtVqFNY8EMfQ= -github.com/libp2p/go-libp2p-blankhost v0.2.0 h1:3EsGAi0CBGcZ33GwRuXEYJLLPoVWyXJ1bcJzAJjINkk= github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= -github.com/libp2p/go-libp2p-circuit v0.0.1/go.mod h1:Dqm0s/BiV63j8EEAs8hr1H5HudqvCAeXxDyic59lCwE= github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU= github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= -github.com/libp2p/go-libp2p-circuit v0.2.1 h1:BDiBcQxX/ZJJ/yDl3sqZt1bjj4PkZCEi7IEpwxXr13k= github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= -github.com/libp2p/go-libp2p-circuit v0.2.2 h1:87RLabJ9lrhoiSDDZyCJ80ZlI5TLJMwfyoGAaWXzWqA= -github.com/libp2p/go-libp2p-circuit v0.2.2/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= -github.com/libp2p/go-libp2p-circuit v0.2.3 h1:3Uw1fPHWrp1tgIhBz0vSOxRUmnKL8L/NGUyEd5WfSGM= -github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= -github.com/libp2p/go-libp2p-circuit v0.3.1 h1:69ENDoGnNN45BNDnBd+8SXSetDuw0eJFcGmOvvtOgBw= 
-github.com/libp2p/go-libp2p-circuit v0.3.1/go.mod h1:8RMIlivu1+RxhebipJwFDA45DasLx+kkrp4IlJj53F4= github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= -github.com/libp2p/go-libp2p-connmgr v0.2.3/go.mod h1:Gqjg29zI8CwXX21zRxy6gOg8VYu3zVerJRt2KyktzH4= -github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w= -github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0= github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= -github.com/libp2p/go-libp2p-core v0.0.6/go.mod h1:0d9xmaYAVY5qmbp/fcgxHT3ZJsLjYeYPMJAUKpaCHrE= github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= @@ -943,88 +1823,61 @@ github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= -github.com/libp2p/go-libp2p-core v0.5.1 h1:6Cu7WljPQtGY2krBlMoD8L/zH3tMUsCbqNFH7cZwCoI= github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= -github.com/libp2p/go-libp2p-core v0.5.2 h1:hevsCcdLiazurKBoeNn64aPYTVOPdY4phaEGeLtHOAs= -github.com/libp2p/go-libp2p-core v0.5.2/go.mod 
h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= -github.com/libp2p/go-libp2p-core v0.5.3/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqeHCopzbYKZdRjM= github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.6.0 h1:u03qofNYTBN+yVg08PuAKylZogVf0xcTEeM8skGf+ak= github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.6.1 h1:XS+Goh+QegCDojUZp00CaPMfiEADCrLjNZskWE7pvqs= -github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.9.0 h1:t97Mv0LIBZlP2FXVRNKKVzHJCIjbIWGxYptGId4+htU= -github.com/libp2p/go-libp2p-core v0.9.0/go.mod h1:ESsbz31oC3C1AvMJoGx26RTuCkNhmkSRCqZ0kQtJ2/8= +github.com/libp2p/go-libp2p-core v0.19.0/go.mod h1:AkA+FUKQfYt1FLNef5fOPlo/naAWjKy/RCjkcPjqzYg= +github.com/libp2p/go-libp2p-core v0.20.1/go.mod h1:6zR8H7CvQWgYLsbG4on6oLNSGcyKaYFSEYyDt51+bIY= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= -github.com/libp2p/go-libp2p-crypto v0.1.0 
h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= -github.com/libp2p/go-libp2p-discovery v0.0.1/go.mod h1:ZkkF9xIFRLA1xCc7bstYFkd80gBGK8Fc1JqGoU2i+zI= github.com/libp2p/go-libp2p-discovery v0.0.5/go.mod h1:YtF20GUxjgoKZ4zmXj8j3Nb2TUSBHFlOCetzYdbZL5I= github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= -github.com/libp2p/go-libp2p-discovery v0.3.0 h1:+JnYBRLzZQtRq0mK3xhyjBwHytLmJXMTZkQfbw+UrGA= github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= -github.com/libp2p/go-libp2p-discovery v0.4.0 h1:dK78UhopBk48mlHtRCzbdLm3q/81g77FahEBTjcqQT8= -github.com/libp2p/go-libp2p-discovery v0.4.0/go.mod h1:bZ0aJSrFc/eX2llP0ryhb1kpgkPyTo23SJ5b7UQCMh4= -github.com/libp2p/go-libp2p-discovery v0.5.0 h1:Qfl+e5+lfDgwdrXdu4YNCWyEo3fWuP+WgN9mN0iWviQ= github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-discovery v0.5.1/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-gostream v0.2.1/go.mod h1:1Mjp3LDmkqICe5tH9yLVNCqFaRTy6OwBvuJV6j1b9Nk= +github.com/libp2p/go-libp2p-gostream v0.5.0/go.mod h1:rXrb0CqfcRRxa7m3RSKORQiKiWgk3IPeXWda66ZXKsA= github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= -github.com/libp2p/go-libp2p-http v0.1.5/go.mod h1:2YfPjsQxUlBGFQl2u461unkQ7ukwiSs7NX2eSslOJiU= +github.com/libp2p/go-libp2p-http v0.4.0/go.mod h1:92tmLGrlBliQFDlZRpBXT3BJM7rGFONy0vsNrG/bMPg= github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod 
h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= -github.com/libp2p/go-libp2p-kad-dht v0.8.2 h1:s7y38B+hdj1AkNR3PCTpvNqBsZHxOf7hoUy7+fNlSZQ= -github.com/libp2p/go-libp2p-kad-dht v0.8.2/go.mod h1:u3rbYbp3CSraAHD5s81CJ3hHozKTud/UOXfAgh93Gek= -github.com/libp2p/go-libp2p-kad-dht v0.9.0 h1:AKeFYZvfAa/32Sgm0LrPDxGXB62AUtU8MRqqMobBfUM= -github.com/libp2p/go-libp2p-kad-dht v0.9.0/go.mod h1:LEKcCFHxnvypOPaqZ0m6h0fLQ9Y8t1iZMOg7a0aQDD4= -github.com/libp2p/go-libp2p-kbucket v0.4.2 h1:wg+VPpCtY61bCasGRexCuXOmEmdKjN+k1w+JtTwu9gA= -github.com/libp2p/go-libp2p-kbucket v0.4.2/go.mod h1:7sCeZx2GkNK1S6lQnGUW5JYZCFPnXzAZCCBBS70lytY= -github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70= -github.com/libp2p/go-libp2p-kbucket v0.4.7/go.mod h1:XyVo99AfQH0foSf176k4jY1xUJ2+jUJIZCSDm7r2YKk= +github.com/libp2p/go-libp2p-kad-dht v0.20.0 h1:1bcMa74JFwExCHZMFEmjtHzxX5DovhJ07EtR6UOTEpc= +github.com/libp2p/go-libp2p-kad-dht v0.20.0/go.mod h1:qPIXdiZsLczhV4/+4EO1jE8ae0YCW4ZOogc4WVIyTEU= +github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= +github.com/libp2p/go-libp2p-kbucket v0.5.0 h1:g/7tVm8ACHDxH29BGrpsQlnNeu+6OF1A9bno/4/U1oA= +github.com/libp2p/go-libp2p-kbucket v0.5.0/go.mod h1:zGzGCpQd78b5BNTDGHNDLaTt9aDK/A02xeZp9QeFC4U= github.com/libp2p/go-libp2p-loggables v0.0.1/go.mod h1:lDipDlBNYbpyqyPX/KcoO+eq0sJYEVR2JgOexcivchg= -github.com/libp2p/go-libp2p-loggables v0.1.0 h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8= github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= github.com/libp2p/go-libp2p-metrics v0.0.1/go.mod h1:jQJ95SXXA/K1VZi13h52WZMa9ja78zjyy5rspMsC/08= -github.com/libp2p/go-libp2p-metrics v0.1.0/go.mod h1:rpoJmXWFxnj7qs5sJ02sxSzrhaZvpqBn8GCG6Sx6E1k= 
github.com/libp2p/go-libp2p-mplex v0.1.1/go.mod h1:KUQWpGkCzfV7UIpi8SKsAVxyBgz1c9R5EvxgnwLsb/I= github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= -github.com/libp2p/go-libp2p-mplex v0.2.3 h1:2zijwaJvpdesST2MXpI5w9wWFRgYtMcpRX7rrw0jmOo= github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= -github.com/libp2p/go-libp2p-mplex v0.2.4 h1:XFFXaN4jhqnIuJVjYOR3k6bnRj0mFfJOlIuDVww+4Zo= -github.com/libp2p/go-libp2p-mplex v0.2.4/go.mod h1:mI7iOezdWFOisvUwaYd3IDrJ4oVmgoXK8H331ui39CE= github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= -github.com/libp2p/go-libp2p-nat v0.0.2/go.mod h1:QrjXQSD5Dj4IJOdEcjHRkWTSomyxRo6HnUkf/TfQpLQ= github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= -github.com/libp2p/go-libp2p-nat v0.0.6 h1:wMWis3kYynCbHoyKLPBEMu4YRLltbm8Mk08HGSfvTkU= github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= -github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= -github.com/libp2p/go-libp2p-noise v0.1.1 h1:vqYQWvnIcHpIoWJKC7Al4D6Hgj0H012TuXRhPwSMGpQ= 
github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= -github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= github.com/libp2p/go-libp2p-peerstore v0.0.1/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= github.com/libp2p/go-libp2p-peerstore v0.0.6/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= @@ -1034,260 +1887,287 @@ github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVd github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-peerstore v0.2.3 h1:MofRq2l3c15vQpEygTetV+zRRrncz+ktiXW7H2EKoEQ= -github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw= -github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-peerstore v0.2.6 h1:2ACefBX23iMdJU9Ke+dcXt3w86MIryes9v7In4+Qq3U= github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod 
h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= -github.com/libp2p/go-libp2p-pubsub v0.3.0/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= -github.com/libp2p/go-libp2p-pubsub v0.3.1 h1:7Hyv2d8BK/x1HGRJTZ8X++VQEP+WqDTSwpUSZGTVLYA= -github.com/libp2p/go-libp2p-pubsub v0.3.1/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= -github.com/libp2p/go-libp2p-pubsub v0.3.2/go.mod h1:Uss7/Cfz872KggNb+doCVPHeCDmXB7z500m/R8DaAUk= -github.com/libp2p/go-libp2p-pubsub v0.3.5 h1:iF75GWpcxKEUQU8tTkgLy69qIQvfhL+t6U6ndQrB6ho= -github.com/libp2p/go-libp2p-pubsub v0.3.5/go.mod h1:DTMSVmZZfXodB/pvdTGrY2eHPZ9W2ev7hzTH83OKHrI= -github.com/libp2p/go-libp2p-pubsub-router v0.3.0 h1:ghpHApTMXN+aZ+InYvpJa/ckBW4orypzNI0aWQDth3s= -github.com/libp2p/go-libp2p-pubsub-router v0.3.0/go.mod h1:6kZb1gGV1yGzXTfyNsi4p+hyt1JnA1OMGHeExTOJR3A= -github.com/libp2p/go-libp2p-pubsub-router v0.3.2 h1:BGC4irCUXlwmlCSxnA2DVDNY8JqhfAUUaiq3CZvcddw= -github.com/libp2p/go-libp2p-pubsub-router v0.3.2/go.mod h1:G4MAvYzPxhoR0LEBluS9Ow+Nnr/8iDalUN+RNwVgNkY= -github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= -github.com/libp2p/go-libp2p-quic-transport v0.7.1 h1:X6Ond9GANspXpgwJlSR9yxcMMD6SLBnGKRtwjBG5awc= -github.com/libp2p/go-libp2p-quic-transport v0.7.1/go.mod h1:TD31to4E5exogR/GWHClXCfkktigjAl5rXSt7HoxNvY= -github.com/libp2p/go-libp2p-quic-transport v0.8.0 h1:mHA94K2+TD0e9XtjWx/P5jGGZn0GdQ4OFYwNllagv4E= -github.com/libp2p/go-libp2p-quic-transport v0.8.0/go.mod h1:F2FG/6Bzz0U6essUVxDzE0s9CrY4XGLbl7QEmDNvU7A= +github.com/libp2p/go-libp2p-pubsub v0.8.1 h1:hSw09NauFUaA0FLgQPBJp6QOy0a2n+HSkb8IeOx8OnY= +github.com/libp2p/go-libp2p-pubsub v0.8.1/go.mod h1:e4kT+DYjzPUYGZeWk4I+oxCSYTXizzXii5LDRRhjKSw= +github.com/libp2p/go-libp2p-pubsub-router v0.6.0 h1:D30iKdlqDt5ZmLEYhHELCMRj8b4sFAqrUcshIUvVP/s= +github.com/libp2p/go-libp2p-pubsub-router v0.6.0/go.mod 
h1:FY/q0/RBTKsLA7l4vqC2cbRbOvyDotg8PJQ7j8FDudE= github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= -github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= -github.com/libp2p/go-libp2p-record v0.1.2 h1:M50VKzWnmUrk/M5/Dz99qO9Xh4vs8ijsK+7HkJvRP+0= github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk= -github.com/libp2p/go-libp2p-record v0.1.3 h1:R27hoScIhQf/A8XJZ8lYpnqh9LatJ5YbHs28kCIfql0= -github.com/libp2p/go-libp2p-record v0.1.3/go.mod h1:yNUff/adKIfPnYQXgp6FQmNu3gLJ6EMg7+/vv2+9pY4= +github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= +github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= -github.com/libp2p/go-libp2p-routing-helpers v0.2.3 h1:xY61alxJ6PurSi+MXbywZpelvuU4U4p/gPTxjqCqTzY= -github.com/libp2p/go-libp2p-routing-helpers v0.2.3/go.mod h1:795bh+9YeoFl99rMASoiVgHdi5bjack0N1+AFAdbvBw= -github.com/libp2p/go-libp2p-secio v0.0.1/go.mod h1:IdG6iQybdcYmbTzxp4J5dwtUEDTOvZrT0opIDVNPrJs= +github.com/libp2p/go-libp2p-routing-helpers v0.4.0 h1:b7y4aixQ7AwbqYfcOQ6wTw8DQvuRZeTAA0Od3YYN5yc= +github.com/libp2p/go-libp2p-routing-helpers v0.4.0/go.mod h1:dYEAgkVhqho3/YKxfOEGdFMIcWfAFNlZX8iAIihYA2E= github.com/libp2p/go-libp2p-secio v0.0.3/go.mod h1:hS7HQ00MgLhRO/Wyu1bTX6ctJKhVpm+j2/S2A5UqYb0= github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= github.com/libp2p/go-libp2p-secio v0.2.1/go.mod 
h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= -github.com/libp2p/go-libp2p-secio v0.2.2 h1:rLLPvShPQAcY6eNurKNZq3eZjPWfU9kXF2eI9jIYdrg= github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncHG0uVwGrwvjYlNY= -github.com/libp2p/go-libp2p-swarm v0.0.1/go.mod h1:mh+KZxkbd3lQnveQ3j2q60BM1Cw2mX36XXQqwfPOShs= github.com/libp2p/go-libp2p-swarm v0.0.6/go.mod h1:s5GZvzg9xXe8sbeESuFpjt8CJPTCa8mhEusweJqyFy8= github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= -github.com/libp2p/go-libp2p-swarm v0.2.3 h1:uVkCb8Blfg7HQ/f30TyHn1g/uCwXsAET7pU0U59gx/A= github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= -github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h/GGZes8Wku/M5Y= -github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= -github.com/libp2p/go-libp2p-swarm v0.2.8 h1:cIUUvytBzNQmGSjnXFlI6UpoBGsaud82mJPIJVfkDlg= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= +github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw= github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= 
-github.com/libp2p/go-libp2p-testing v0.1.1 h1:U03z3HnGI7Ni8Xx6ONVZvUFOAzWYmolWf5W5jAOPNmU= github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= -github.com/libp2p/go-libp2p-testing v0.2.0/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= -github.com/libp2p/go-libp2p-tls v0.1.3 h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM= +github.com/libp2p/go-libp2p-testing v0.11.0/go.mod h1:qG4sF27dfKFoK9KlVzK2y52LQKhp0VEmLjV5aDqr1Hg= +github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= -github.com/libp2p/go-libp2p-transport v0.0.4/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= -github.com/libp2p/go-libp2p-transport-upgrader v0.0.1/go.mod h1:NJpUAgQab/8K6K0m+JmZCe5RUXG10UMEx4kWe9Ipj5c= github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= -github.com/libp2p/go-libp2p-transport-upgrader v0.2.0 h1:5EhPgQhXZNyfL22ERZTUoVp9UVVbNowWNVtELQaKCHk= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= -github.com/libp2p/go-libp2p-transport-upgrader v0.3.0 
h1:q3ULhsknEQ34eVDhv4YwKS8iet69ffs9+Fir6a7weN4= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s= github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= +github.com/libp2p/go-libp2p-xor v0.1.0 h1:hhQwT4uGrBcuAkUGXADuPltalOdpf9aag9kaYNT2tLA= +github.com/libp2p/go-libp2p-xor v0.1.0/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= -github.com/libp2p/go-libp2p-yamux v0.2.7 h1:vzKu0NVtxvEIDGCv6mjKRcK0gipSgaXmJZ6jFv0d/dk= github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= -github.com/libp2p/go-libp2p-yamux v0.2.8 h1:0s3ELSLu2O7hWKfX1YjzudBKCP0kZ+m9e2+0veXzkn4= github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= +github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= 
github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= -github.com/libp2p/go-maddr-filter v0.0.5 h1:CW3AgbMO6vUvT4kf87y4N+0P8KUl2aqLYhrGyDUbLSg= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= -github.com/libp2p/go-mplex v0.0.1/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-mplex v0.1.2 h1:qOg1s+WdGLlpkrczDqmhYzyk3vCfsQ8+RxRTQjOZWwI= github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= -github.com/libp2p/go-msgio v0.0.1/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-mplex v0.7.0 h1:BDhFZdlk5tbr0oyFq/xv/NPGfjbnrsDam1EvutpBDbY= +github.com/libp2p/go-mplex v0.7.0/go.mod h1:rW8ThnRcYWft/Jb2jeORBmPd6xuG3dGxWN/W168L9EU= github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.4 h1:agEFehY3zWJFUHK6SEMR7UYmk2z6kC3oeCM7ybLhguA= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.6 h1:lQ7Uc0kS1wb1EfRxO2Eir/RJoHkHn7t6o+EiwsYIKJA= github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= +github.com/libp2p/go-msgio v0.2.0 
h1:W6shmB+FeynDrUVl2dgFQvzfBZcXiyqY4VmpQLu9FqU= +github.com/libp2p/go-msgio v0.2.0/go.mod h1:dBVM1gW3Jk9XqHkU4eKdGvVHdLa51hoGfll6jMJMSlY= github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= -github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= -github.com/libp2p/go-netroute v0.1.2 h1:UHhB35chwgvcRI392znJA3RCBtZ3MpE3ahNCN5MR4Xg= +github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= +github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= -github.com/libp2p/go-netroute v0.1.3 h1:1ngWRx61us/EpaKkdqkMjKk/ufr/JlIFYQAxV2XX8Ig= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= +github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= +github.com/libp2p/go-netroute v0.2.0/go.mod h1:Vio7LTzZ+6hoT4CMZi5/6CpY3Snzh2vgZhWgxMNwlQI= +github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= +github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.4 h1:d27YZvLoTyMhIN4njrkr8zMDOM4lfpHIp6A+TK9fovg= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.5 h1:pQkejVhF0xp08D4CQUcw8t+BFJeXowja6RVcb5p++EA= github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= 
github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw= +github.com/libp2p/go-openssl v0.1.0 h1:LBkKEcUv6vtZIQLVTegAil8jbNpJErQ9AnT+bWV+Ooo= +github.com/libp2p/go-openssl v0.1.0/go.mod h1:OiOxwPpL3n4xlenjx2h7AwSGaFSC/KZvf6gNdOBQMtc= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= -github.com/libp2p/go-reuseport v0.0.2 h1:XSG94b1FJfGA01BUrT82imejHQyTxO4jEWqheyCXYvU= github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= -github.com/libp2p/go-reuseport-transport v0.0.1/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= +github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= +github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= -github.com/libp2p/go-reuseport-transport v0.0.3 h1:zzOeXnTooCkRvoH+bSXEfXhn76+LAiwoneM0gnXjF2M= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= -github.com/libp2p/go-reuseport-transport v0.0.4 h1:OZGz0RB620QDGpv300n1zaOcKGGAoGVf8h9txtt/1uM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= -github.com/libp2p/go-sockaddr v0.0.2 h1:tCuXfpA9rq7llM/v834RKc/Xvovy/AqM9kHvTV/jY/Q= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.0 h1:Y4s3/jNoryVRKEBrkJ576F17CPOaMIzUeCsg7dlTDj0= github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-socket-activation v0.0.2/go.mod h1:KP44C+yZ7gA8sTxavgaD0b8vXVFJwam2CEW0s7+f094= +github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= 
+github.com/libp2p/go-socket-activation v0.1.0/go.mod h1:gzda2dNkMG5Ti2OfWNNwW0FDIbj0g/aJJU320FcLfhk= github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ= github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw= -github.com/libp2p/go-stream-muxer-multistream v0.2.0 h1:714bRJ4Zy9mdhyTLJ+ZKiROmAFwUHpeRidG+q7LTQOg= github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= -github.com/libp2p/go-stream-muxer-multistream v0.3.0 h1:TqnSHPJEIqDEO7h1wZZ0p3DXdvDSiLHQidKKUGZtiOY= github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= -github.com/libp2p/go-tcp-transport v0.0.1/go.mod h1:mnjg0o0O5TmXUaUIanYPUqkW4+u6mK0en8rlpA6BBTs= github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19K427vCzQ+xHKH/o= github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= -github.com/libp2p/go-tcp-transport v0.2.0 h1:YoThc549fzmNJIh7XjHVtMIFaEDRtIrtWciG5LyYAPo= github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= -github.com/libp2p/go-tcp-transport v0.2.1 h1:ExZiVQV+h+qL16fzCWtd1HSzPsqWottJ8KXwWaVi8Ns= github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= -github.com/libp2p/go-ws-transport v0.0.1/go.mod h1:p3bKjDWHEgtuKKj+2OdPYs5dAPIjtpQGHF2tJfGz7Ww= github.com/libp2p/go-ws-transport 
v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU= github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= -github.com/libp2p/go-ws-transport v0.3.0 h1:mjo6pL5aVR9rCjl9wNq3DupbaQlyR61pzoOT2MdtxaA= github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-ws-transport v0.3.1 h1:ZX5rWB8nhRRJVaPO6tmkGI/Xx8XNboYX20PW5hXIscw= -github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.5 h1:ibuz4naPAully0pN6J/kmUARiqLpnDQIzI/8GCOrljg= github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.7 h1:v40A1eSPJDIZwz2AvrV3cxpTZEGDP11QJbukmEhYyQI= github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= +github.com/libp2p/go-yamux/v3 
v3.1.2/go.mod h1:jeLEQgLXqE2YqX1ilAClIfCMDY+0uXQUKmmb/qp0gT4= +github.com/libp2p/go-yamux/v4 v4.0.0 h1:+Y80dV2Yx/kv7Y7JKu0LECyVdMXm1VUoko+VQ9rBfZQ= +github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= +github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q= +github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lispad/go-generics-tools v1.0.0/go.mod h1:stn7X24ZIyFvaSyttafq3VlJzGJJJkUtLYdbgi/gopM= +github.com/lispad/go-generics-tools v1.1.0/go.mod h1:2csd1EJljo/gy5qG4khXol7ivCPptNjG5Uv2X8MgK84= github.com/looplab/fsm v0.1.0/go.mod h1:m2VaOfDHxqXBBMgc26m6yUOwkFn8H2AlJDE+jd/uafI= -github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= -github.com/lucas-clemente/quic-go v0.17.3 h1:jMX/MmDNCljfisgMmPGUcBJ+zUh9w3d3ia4YJjYS3TM= -github.com/lucas-clemente/quic-go v0.17.3/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= -github.com/lucas-clemente/quic-go v0.18.0 h1:JhQDdqxdwdmGdKsKgXi1+coHRoGhvU6z0rNzOJqZ/4o= +github.com/lucas-clemente/quic-go v0.7.1-0.20190401152353-907071221cf9/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= github.com/lucas-clemente/quic-go v0.18.0/go.mod h1:yXttHsSNxQi8AWijC/vLP+OJczXqzHSOcJrM5ITUlCg= github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= +github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0= +github.com/lucas-clemente/quic-go v0.29.1/go.mod h1:CTcNfLYJS2UuRNB+zcNlgvkjBhxX6Hm3WUxxAQx2mgE= +github.com/lucas-clemente/quic-go v0.31.0/go.mod h1:0wFbizLgYzqHqtlyxyCaJKlE7bYgE6JQ+54TLd/Dq2g= +github.com/lucas-clemente/quic-go v0.31.1 
h1:O8Od7hfioqq0PMYHDyBkxU2aA7iZ2W9pjbrWuja2YR4= +github.com/lucas-clemente/quic-go v0.31.1/go.mod h1:0wFbizLgYzqHqtlyxyCaJKlE7bYgE6JQ+54TLd/Dq2g= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lukechampine/stm v0.0.0-20191022212748-05486c32d236/go.mod h1:wTLsd5FC9rts7GkMpsPGk64CIuea+03yaLAp19Jmlg8= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailgun/raymond/v2 v2.0.46/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/pkger v0.17.0/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= github.com/marten-seemann/qpack v0.2.0/go.mod 
h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= -github.com/marten-seemann/qtls v0.9.1 h1:O0YKQxNVPaiFgMng0suWEOY2Sb4LT2sRn9Qimq3Z1IQ= -github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk= -github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc= +github.com/marten-seemann/qpack v0.3.0 h1:UiWstOgT8+znlkDPOg2+3rIuYXJ2CnGDkGUXN6ki6hE= +github.com/marten-seemann/qpack v0.3.0/go.mod h1:cGfKPBiP4a9EQdxCwEwI/GEeWAsjSekBvx/X8mh58+g= +github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= github.com/marten-seemann/qtls-go1-15 v0.1.0/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= +github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s= +github.com/marten-seemann/qtls-go1-18 v0.1.2/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= +github.com/marten-seemann/qtls-go1-18 v0.1.3 h1:R4H2Ks8P6pAtUagjFty2p7BVHn3XiwDAl7TTQf5h7TI= +github.com/marten-seemann/qtls-go1-18 v0.1.3/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= +github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= +github.com/marten-seemann/qtls-go1-19 v0.1.0/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= +github.com/marten-seemann/qtls-go1-19 v0.1.1 h1:mnbxeq3oEyQxQXwI4ReCgW9DPoPR94sNlqWoDZnjRIE= +github.com/marten-seemann/qtls-go1-19 v0.1.1/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= +github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= 
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/marten-seemann/webtransport-go v0.1.1/go.mod h1:kBEh5+RSvOA4troP1vyOVBWK4MIMzDICXVrvCPrYcrM= +github.com/marten-seemann/webtransport-go v0.4.3 h1:vkt5o/Ci+luknRteWdYGYH1KcB7ziup+J+1PzZJIvmg= +github.com/marten-seemann/webtransport-go v0.4.3/go.mod h1:4xcfySgZMLP4aG5GBGj1egP7NlpfwgYJ1WJMvPPiVMU= +github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= +github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2/go.mod h1:0KeJpeMD6o+O4hW7qJOT7vyQPKrWmj26uf5wMc/IiIs= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= 
github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0= +github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.7.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod 
h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.13.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.8/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v2.0.2+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= +github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= +github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= +github.com/mediocregopher/radix/v3 v3.8.0/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mholt/archiver/v3 v3.3.0/go.mod h1:YnQtqsp+94Rwd0D/rk5cnLrxusUBUXg+08Ebtr1Mqao= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod 
h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/microcosm-cc/bluemonday v1.0.20/go.mod h1:yfBmMi8mxvaZut3Yytv+jTXRY8mxyjJ0/kQBTElld50= +github.com/microcosm-cc/bluemonday v1.0.21/go.mod h1:ytNkv4RrDrLJ2pqlsSI46O6IVXmZOBBD4SaJyDwwTkM= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.28 h1:gQhy5bsJa8zTlVI8lywCTZp1lguor+xevFoYlzeCTQY= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.29 h1:xHBEhR+t5RzcFJjBLJlax2daXOrTYtr9z4WdKEfWFzg= -github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo= github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= +github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= +github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= +github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU= +github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= 
-github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -1298,41 +2178,54 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mmcloughlin/avo v0.0.0-20200523190732-4439b6b2c061/go.mod h1:wqKykBG2QzQDJEzvRkcS8x6MiSJkF52hXZsXcjaB3ls= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/pointerstructure v1.2.0/go.mod 
h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= +github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/mschoch/smat v0.2.0/go.mod 
h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= -github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= +github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= +github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= +github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.2.1 h1:SgG/cw5vqyB5QQe5FPe2TqggU9WtrA9X4nZw7LlVqOI= github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= -github.com/multiformats/go-multiaddr v0.2.2 h1:XZLDTszBIJe6m0zF6ITBrEcZR73OPUhCBBS9rYAuUzI= github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= -github.com/multiformats/go-multiaddr v0.3.1 h1:1bxa+W7j9wZKTZREySx1vPMs2TqrYWjVZ7zE6/XLG1I= 
github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= +github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= +github.com/multiformats/go-multiaddr v0.4.1/go.mod h1:3afI9HfVW8csiF8UZqtpYRiDyew8pRX7qLIGHu9FLuM= +github.com/multiformats/go-multiaddr v0.6.0/go.mod h1:F4IpaKZuPP360tOMn2Tpyu0At8w23aRyVqeK0DbFeGM= +github.com/multiformats/go-multiaddr v0.7.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= +github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU= +github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= +github.com/multiformats/go-multiaddr-dns v0.3.0/go.mod h1:mNzQ4eTGDg0ll1N9jKPOUogZPoJ30W8a7zk66FQPpdQ= +github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= @@ -1342,42 +2235,49 @@ github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJV github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= github.com/multiformats/go-multiaddr-net v0.1.2/go.mod 
h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multiaddr-net v0.1.4 h1:g6gwydsfADqFvrHoMkS0n9Ok9CG6F7ytOH/bJDkhIOY= github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multiaddr-net v0.1.5 h1:QoRKvu0xHN1FCFJcMQLbG/yQE2z441L5urvG3+qyz7g= github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multiaddr-net v0.2.0 h1:MSXRGN0mFymt6B1yo/6BPnIRpLPEnKgQNvVfCX5VDJk= github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= -github.com/multiformats/go-multibase v0.0.2 h1:2pAgScmS1g9XjH7EtAfNhTuyrWYEWcxy0G5Wo85hWDA= -github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= -github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= +github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= +github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= +github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= +github.com/multiformats/go-multicodec v0.3.1-0.20211210143421-a526f306ed2c/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= +github.com/multiformats/go-multicodec v0.4.1/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= +github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues= 
+github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= +github.com/multiformats/go-multicodec v0.7.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= +github.com/multiformats/go-multicodec v0.8.1 h1:ycepHwavHafh3grIbR1jIXnKCsFm0fqsfEOsJ8NtKE8= +github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= -github.com/multiformats/go-multihash v0.0.7/go.mod h1:XuKXPp8VHcTygube3OWZC+aZrA+H1IhmjoCDtJc7PXM= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= -github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= +github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= +github.com/multiformats/go-multihash v0.2.0/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= +github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= +github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= 
github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= -github.com/multiformats/go-multistream v0.1.1 h1:JlAdpIFhBhGRLxe9W6Om0w++Gd6KMWoFPZL/dEnm9nI= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.1.2 h1:knyamLYMPFPngQjGQ0lhnlys3jtVR/3xV6TREUJr+fE= -github.com/multiformats/go-multistream v0.1.2/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= +github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= +github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o= +github.com/multiformats/go-multistream v0.3.3/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= +github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack 
v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -1386,16 +2286,27 @@ github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= +github.com/nats-io/jwt/v2 v2.3.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats-server/v2 v2.8.4/go.mod h1:8zZa+Al3WsESfmgSs98Fi06dRWLH5Bnq90m5bKD/eT4= +github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nats.go v1.15.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nats.go v1.16.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod 
h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= @@ -1408,28 +2319,53 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= -github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= 
+github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.1/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.2.0/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= +github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +github.com/onsi/ginkgo/v2 v2.5.1 h1:auzK7OI497k6x4OvWq+TKAcpcSAlod0doAH72oIN0Jw= +github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.9.0 
h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQLTUg= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= +github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= +github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= +github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0 
h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= @@ -1441,103 +2377,247 @@ github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 
h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= +github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pion/datachannel v1.4.21/go.mod h1:oiNyP4gHx2DIwRzX/MFyH0Rz/Gz05OgBlayAI2hAWjg= +github.com/pion/datachannel v1.5.2/go.mod h1:FTGQWaHrdCwIJ1rw6xBIfZVkslikjShim5yr05XFuCQ= +github.com/pion/dtls/v2 v2.0.1/go.mod h1:uMQkz2W0cSqY00xav7WByQ4Hb+18xeQh2oH2fRezr5U= +github.com/pion/dtls/v2 v2.0.2/go.mod h1:27PEO3MDdaCfo21heT59/vsdmZc0zMt9wQPcSlLu/1I= +github.com/pion/dtls/v2 v2.0.4/go.mod h1:qAkFscX0ZHoI1E07RfYPoRw3manThveu+mlTDdOxoGI= +github.com/pion/dtls/v2 v2.0.7/go.mod h1:QuDII+8FVvk9Dp5t5vYIMTo7hh7uBkra+8QIm7QGm10= +github.com/pion/dtls/v2 v2.0.9/go.mod h1:O0Wr7si/Zj5/EBFlDzDd6UtVxx25CE1r7XM7BQKYQho= +github.com/pion/dtls/v2 v2.1.1/go.mod h1:qG3gA7ZPZemBqpEFqRKyURYdKEwFZQCGb7gv9T3ON3Y= +github.com/pion/dtls/v2 v2.1.2/go.mod h1:o6+WvyLDAlXF7YiPB/RlskRoeK+/JtuaZa5emwQcWus= +github.com/pion/dtls/v2 v2.1.3/go.mod h1:o6+WvyLDAlXF7YiPB/RlskRoeK+/JtuaZa5emwQcWus= +github.com/pion/dtls/v2 v2.1.5/go.mod 
h1:BqCE7xPZbPSubGasRoDFJeTsyJtdD1FanJYL0JGheqY= +github.com/pion/ice v0.7.18/go.mod h1:+Bvnm3nYC6Nnp7VV6glUkuOfToB/AtMRZpOU8ihuf4c= +github.com/pion/ice/v2 v2.0.15/go.mod h1:ZIiVGevpgAxF/cXiIVmuIUtCb3Xs4gCzCbXB6+nFkSI= +github.com/pion/ice/v2 v2.1.7/go.mod h1:kV4EODVD5ux2z8XncbLHIOtcXKtYXVgLVCeVqnpoeP0= +github.com/pion/ice/v2 v2.1.10/go.mod h1:kV4EODVD5ux2z8XncbLHIOtcXKtYXVgLVCeVqnpoeP0= +github.com/pion/ice/v2 v2.1.12/go.mod h1:ovgYHUmwYLlRvcCLI67PnQ5YGe+upXZbGgllBDG/ktU= +github.com/pion/ice/v2 v2.1.20/go.mod h1:hEAldRzBhTtAfvlU1V/2/nLCMvveQWFKPNCop+63/Iw= +github.com/pion/ice/v2 v2.2.6/go.mod h1:SWuHiOGP17lGromHTFadUe1EuPgFh/oCU6FCMZHooVE= +github.com/pion/interceptor v0.0.9/go.mod h1:dHgEP5dtxOTf21MObuBAjJeAayPxLUAZjerGH8Xr07c= +github.com/pion/interceptor v0.0.12/go.mod h1:qzeuWuD/ZXvPqOnxNcnhWfkCZ2e1kwwslicyyPnhoK4= +github.com/pion/interceptor v0.0.13/go.mod h1:svsW2QoLHLoGLUr4pDoSopGBEWk8FZwlfxId/OKRKzo= +github.com/pion/interceptor v0.0.15/go.mod h1:pg3J253eGi5bqyKzA74+ej5Y19ez2jkWANVnF+Z9Dfk= +github.com/pion/interceptor v0.1.7/go.mod h1:Lh3JSl/cbJ2wP8I3ccrjh1K/deRGRn3UlSPuOTiHb6U= +github.com/pion/interceptor v0.1.11/go.mod h1:tbtKjZY14awXd7Bq0mmWvgtHB5MDaRN7HV3OZ/uy7s8= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/mdns v0.0.4/go.mod h1:R1sL0p50l42S5lJs91oNdUL58nm0QHrhxnSegr++qC0= +github.com/pion/mdns v0.0.5/go.mod h1:UgssrvdD3mxpi8tMxAXbsppL3vJ4Jipw1mTCW+al01g= +github.com/pion/quic v0.1.1/go.mod h1:zEU51v7ru8Mp4AUBJvj6psrSth5eEFNnVQK5K48oV3k= +github.com/pion/quic v0.1.4/go.mod h1:dBhNvkLoQqRwfi6h3Vqj3IcPLgiW7rkZxBbRdp7Vzvk= +github.com/pion/randutil v0.0.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= +github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= +github.com/pion/rtcp v1.2.3/go.mod h1:zGhIv0RPRF0Z1Wiij22pUt5W/c9fevqSzT4jje/oK7I= +github.com/pion/rtcp v1.2.4/go.mod h1:52rMNPWFsjr39z9B9MhnkqhPLoeHTv1aN63o/42bWE0= +github.com/pion/rtcp 
v1.2.6/go.mod h1:52rMNPWFsjr39z9B9MhnkqhPLoeHTv1aN63o/42bWE0= +github.com/pion/rtcp v1.2.9/go.mod h1:qVPhiCzAm4D/rxb6XzKeyZiQK69yJpbUDJSF7TgrqNo= +github.com/pion/rtp v1.6.0/go.mod h1:QgfogHsMBVE/RFNno467U/KBqfUywEH+HK+0rtnwsdI= +github.com/pion/rtp v1.6.1/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko= +github.com/pion/rtp v1.6.2/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko= +github.com/pion/rtp v1.6.5/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko= +github.com/pion/rtp v1.7.0/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko= +github.com/pion/rtp v1.7.2/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko= +github.com/pion/rtp v1.7.4/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko= +github.com/pion/rtp v1.7.13/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko= +github.com/pion/sctp v1.7.10/go.mod h1:EhpTUQu1/lcK3xI+eriS6/96fWetHGCvBi9MSsnaBN0= +github.com/pion/sctp v1.7.11/go.mod h1:EhpTUQu1/lcK3xI+eriS6/96fWetHGCvBi9MSsnaBN0= +github.com/pion/sctp v1.7.12/go.mod h1:xFe9cLMZ5Vj6eOzpyiKjT9SwGM4KpK/8Jbw5//jc+0s= +github.com/pion/sctp v1.8.0/go.mod h1:xFe9cLMZ5Vj6eOzpyiKjT9SwGM4KpK/8Jbw5//jc+0s= +github.com/pion/sctp v1.8.2/go.mod h1:xFe9cLMZ5Vj6eOzpyiKjT9SwGM4KpK/8Jbw5//jc+0s= +github.com/pion/sdp/v2 v2.4.0/go.mod h1:L2LxrOpSTJbAns244vfPChbciR/ReU1KWfG04OpkR7E= +github.com/pion/sdp/v3 v3.0.4/go.mod h1:bNiSknmJE0HYBprTHXKPQ3+JjacTv5uap92ueJZKsRk= +github.com/pion/sdp/v3 v3.0.5/go.mod h1:iiFWFpQO8Fy3S5ldclBkpXqmWy02ns78NOKoLLL0YQw= +github.com/pion/srtp v1.5.1/go.mod h1:B+QgX5xPeQTNc1CJStJPHzOlHK66ViMDWTT0HZTCkcA= +github.com/pion/srtp v1.5.2/go.mod h1:NiBff/MSxUwMUwx/fRNyD/xGE+dVvf8BOCeXhjCXZ9U= +github.com/pion/srtp/v2 v2.0.1/go.mod h1:c8NWHhhkFf/drmHTAblkdu8++lsISEBBdAuiyxgqIsE= +github.com/pion/srtp/v2 v2.0.2/go.mod h1:VEyLv4CuxrwGY8cxM+Ng3bmVy8ckz/1t6A0q/msKOw0= +github.com/pion/srtp/v2 v2.0.5/go.mod h1:8k6AJlal740mrZ6WYxc4Dg6qDqqhxoRG2GSjlUhDF0A= +github.com/pion/srtp/v2 v2.0.9/go.mod 
h1:5TtM9yw6lsH0ppNCehB/EjEUli7VkUgKSPJqWVqbhQ4= +github.com/pion/stun v0.3.5/go.mod h1:gDMim+47EeEtfWogA37n6qXZS88L5V6LqFcf+DZA2UA= +github.com/pion/transport v0.6.0/go.mod h1:iWZ07doqOosSLMhZ+FXUTq+TamDoXSllxpbGcfkCmbE= +github.com/pion/transport v0.8.10/go.mod h1:tBmha/UCjpum5hqTWhfAEs3CO4/tHSg0MYRhSzR+CZ8= +github.com/pion/transport v0.10.0/go.mod h1:BnHnUipd0rZQyTVB2SBGojFHT9CBt5C5TcsJSQGkvSE= +github.com/pion/transport v0.10.1/go.mod h1:PBis1stIILMiis0PewDw91WJeLJkyIMcEk+DwKOzf4A= +github.com/pion/transport v0.12.1/go.mod h1:N3+vZQD9HlDP5GWkZ85LohxNsDcNgofQmyL6ojX5d8Q= +github.com/pion/transport v0.12.2/go.mod h1:N3+vZQD9HlDP5GWkZ85LohxNsDcNgofQmyL6ojX5d8Q= +github.com/pion/transport v0.12.3/go.mod h1:OViWW9SP2peE/HbwBvARicmAVnesphkNkCVZIWJ6q9A= +github.com/pion/transport v0.13.0/go.mod h1:yxm9uXpK9bpBBWkITk13cLo1y5/ur5VQpG22ny6EP7g= +github.com/pion/transport v0.13.1/go.mod h1:EBxbqzyv+ZrmDb82XswEE0BjfQFtuw1Nu6sjnjWCsGg= +github.com/pion/turn/v2 v2.0.4/go.mod h1:1812p4DcGVbYVBTiraUmP50XoKye++AMkbfp+N27mog= +github.com/pion/turn/v2 v2.0.5/go.mod h1:APg43CFyt/14Uy7heYUOGWdkem/Wu4PhCO/bjyrTqMw= +github.com/pion/turn/v2 v2.0.6/go.mod h1:+y7xl719J8bAEVpSXBXvTxStjJv3hbz9YFflvkpcGPw= +github.com/pion/turn/v2 v2.0.8/go.mod h1:+y7xl719J8bAEVpSXBXvTxStjJv3hbz9YFflvkpcGPw= +github.com/pion/udp v0.1.0/go.mod h1:BPELIjbwE9PRbd/zxI/KYBnbo7B6+oA6YuEaNE8lths= +github.com/pion/udp v0.1.1/go.mod h1:6AFo+CMdKQm7UiA0eUPA8/eVCTx8jBIITLZHc9DWX5M= +github.com/pion/webrtc/v2 v2.2.26/go.mod h1:XMZbZRNHyPDe1gzTIHFcQu02283YO45CbiwFgKvXnmc= +github.com/pion/webrtc/v3 v3.0.11/go.mod h1:WEvXneGTeqNmiR59v5jTsxMc4yXQyOQcRsrdAbNwSEU= +github.com/pion/webrtc/v3 v3.0.27/go.mod h1:QpLDmsU5a/a05n230gRtxZRvfHhFzn9ukGUL2x4G5ic= +github.com/pion/webrtc/v3 v3.0.32/go.mod h1:wX3V5dQQUGCifhT1mYftC2kCrDQX6ZJ3B7Yad0R9JK0= +github.com/pion/webrtc/v3 v3.1.24-0.20220208053747-94262c1b2b38/go.mod h1:L5S/oAhL0Fzt/rnftVQRrP80/j5jygY7XRZzWwFx6P4= +github.com/pion/webrtc/v3 v3.1.42/go.mod 
h1:ffD9DulDrPxyWvDPUIPAOSAWx9GUlOExiJPf7cCcMLA= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14 h1:2m16U/rLwVaRdz7ANkHtHTodP3zTP3N451MADg64x5k= github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a h1:hjZfReYVLbqFkAtr2us7vdy04YWz3LVAirzP7reh8+M= -github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= +github.com/polydawn/refmt 
v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2/go.mod 
h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= 
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.14.0 h1:RHRyE8UocrbjU+6UvRzwi6HjiDfxrrBU91TtbKzkGp4= -github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.2/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= +github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= 
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= +github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/dnscache v0.0.0-20190621150935-06bb5526f76b/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA= +github.com/rs/dnscache v0.0.0-20210201191234-295bba877686/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA= +github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA= github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM= github.com/russross/blackfriday v1.5.2/go.mod 
h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/asm v1.1.3/go.mod h1:Ld3L4ZXGNcSLRg4JBsZ3//1+f/TjYl0Mzen/DQy1EJg= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/segmentio/encoding v0.1.10/go.mod h1:RWhr02uzMB9gQC1x+MfYxedtmBibb9cZ6Vv9VxRSSbw= github.com/segmentio/encoding v0.1.11/go.mod h1:RWhr02uzMB9gQC1x+MfYxedtmBibb9cZ6Vv9VxRSSbw= github.com/segmentio/encoding v0.1.15/go.mod h1:RWhr02uzMB9gQC1x+MfYxedtmBibb9cZ6Vv9VxRSSbw= +github.com/segmentio/encoding v0.3.6/go.mod h1:n0JeuIqEQrQoPDGsjo8UNd1iA0U8d8+oHAA4E3G3OxM= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff 
v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shirou/gopsutil v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil v2.20.7+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.20.12/go.mod h1:igHnfak0qnw1biGeI2qKQvu0ZkwvEkUcCLlYhZzdr/4= +github.com/shirou/gopsutil/v3 v3.22.8/go.mod h1:s648gW4IywYzUfE/KjXxUsqrqx/T2xO5VqOXxONeRfI= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= @@ -1547,6 +2627,7 @@ github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9A github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= 
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= @@ -1556,21 +2637,27 @@ github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1l github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod 
h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8= +github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= -github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= -github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN23diwyr69Qs= +github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8= +github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= -github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= +github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= github.com/soheilhy/cmux v0.1.4/go.mod 
h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= @@ -1584,77 +2671,111 @@ github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0b github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod 
h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= -github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= +github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= +github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.1.5-0.20170601210322-f6abca593680/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= 
+github.com/syncthing/syncthing v0.14.48-rc.4/go.mod h1:nw3siZwHPA6M8iSfjDCWQ402eqvEIasMQOE8nFOxy7M= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= -github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= -github.com/syndtr/goleveldb v1.0.1-0.20210305035536-64b5b1c73954/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tdewolff/minify/v2 v2.12.1/go.mod h1:p5pwbvNs1ghbFED/ZW1towGsnnWwzvM8iz8l0eURi9g= +github.com/tdewolff/minify/v2 v2.12.4/go.mod h1:h+SRvSIX3kwgwTFOpSckvSxgax3uy8kZTSF1Ojrr3bk= +github.com/tdewolff/parse/v2 v2.6.3/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs= +github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs= +github.com/tdewolff/test v1.0.7/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE= github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e/go.mod h1:XDKHRm5ThF8YJjx001LtgelzsoaEcvnA7lVWz9EeX3g= -github.com/thedevsaddam/gojsonq/v2 v2.5.2/go.mod h1:bv6Xa7kWy82uT0LnXPE2SzGqTj33TAEeR560MdJkiXs= +github.com/tidwall/btree v1.3.1/go.mod h1:LGm8L/DZjPLmeWGjv5kFrY8dL4uVhMmzmmLYmsObdKE= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tinylib/msgp 
v1.1.1/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= +github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tron-us/go-btfs-common v0.2.11/go.mod h1:9ND33JahGMg52sCC2/gO5DakLsd1Pg2lVe2CihW7lBE= -github.com/tron-us/go-btfs-common v0.3.7/go.mod h1:FbYoo6ZrtnJH3TKdyJTGQrUP2rbwNVATQpxplwaYQ/c= -github.com/tron-us/go-btfs-common v0.7.0/go.mod h1:0T4AmMiAWKkT6qgi/QSONy/KlX2OeKiAcYOFMwVo97I= -github.com/tron-us/go-btfs-common v0.7.10 h1:3w9Ui44rNs6uiofZn0jFc4hkClGbaRqPU1KAlQArm4Q= -github.com/tron-us/go-btfs-common v0.7.10/go.mod h1:xnIFfbMRS5VsF948fBHPcYIeYGZkQgaJ6NIEGIPfYUs= -github.com/tron-us/go-btfs-common v0.7.13 h1:LGppZsnJRsiZZy9wiqwbzjc4igz3HNWdmQVaijCUGO4= -github.com/tron-us/go-btfs-common v0.7.13/go.mod h1:xnIFfbMRS5VsF948fBHPcYIeYGZkQgaJ6NIEGIPfYUs= -github.com/tron-us/go-common/v2 v2.0.5/go.mod h1:GiKX9noBLHotkZAU+7ET4h7N0DYWnm3OcGHOFJg1Q68= -github.com/tron-us/go-common/v2 v2.1.1/go.mod h1:YIEJZF9Ph79g0zZWOvfNDtJhvO5OqSNPAb/TM1i+KvQ= -github.com/tron-us/go-common/v2 v2.1.9/go.mod h1:YIEJZF9Ph79g0zZWOvfNDtJhvO5OqSNPAb/TM1i+KvQ= -github.com/tron-us/go-common/v2 v2.2.1/go.mod h1:YIEJZF9Ph79g0zZWOvfNDtJhvO5OqSNPAb/TM1i+KvQ= -github.com/tron-us/go-common/v2 v2.3.0/go.mod h1:/ktTJfsQWnrtSsoAvT3ybJR1nw7qMSEX+dcDxcv0xro= -github.com/tron-us/protobuf 
v1.3.4/go.mod h1:INMJF54ZV6c8ZMc3imHsMl1kqIpe4VnbCUK4zYcVHqE= -github.com/tron-us/protobuf v1.3.7 h1:nYnRqyiyEHK5YzQT0DScL8W65X6py+F9xDnMZx63qaY= -github.com/tron-us/protobuf v1.3.7/go.mod h1:INMJF54ZV6c8ZMc3imHsMl1kqIpe4VnbCUK4zYcVHqE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= -github.com/tyler-smith/go-bip32 v0.0.0-20170922074101-2c9cfd177564/go.mod h1:0/YuQQF676+d4CMNclTqGUam1EDwz0B8o03K9pQqA3c= +github.com/tyler-smith/go-bip32 v1.0.0/go.mod h1:onot+eHknzV4BVPwrzqY5OoVpyCvnwD7lMawL5aQupE= github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= -github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= +github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= 
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/urfave/cli/v2 v2.10.2/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo= +github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= +github.com/valyala/fasthttp v1.40.0/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/vmihailenco/bufpool v0.1.5/go.mod h1:fL9i/PRTuS7AELqAHwSU1Zf1c70xhkhGe/cD5ud9pJk= @@ -1663,22 +2784,27 @@ github.com/vmihailenco/msgpack/v4 v4.3.5/go.mod h1:DuaveEe48abshDmz5UBKyZ+yDugva github.com/vmihailenco/msgpack/v4 v4.3.7/go.mod h1:Ii+PksJlvFT5ZRcB/4YLAInMIp6a0WOCm0L3BU0aNG4= github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= 
github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= github.com/vmihailenco/tagparser v0.1.0/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= +github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= +github.com/warpfork/go-testmark v0.10.0 h1:E86YlUMYfwIacEsQGlnTvjk1IgYkyTGjPhF0RnwTCmw= +github.com/warpfork/go-testmark v0.10.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0= github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4= github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= -github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158 h1:WXhVOwj2USAXB5oMDwRl3piOux2XMV9TANaYxXHdkoE= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 
h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= +github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105 h1:Sh6UG5dW5xW8Ek2CtRGq4ipdEvvx9hOyBJjEGyTYDl0= -github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d h1:wSxKhvbN7kUoP0sfRS+w2tWr45qlU8409i94hHLOT8w= github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2 h1:bsUlNhdmbtlfdLVXAVfuvKQ01RnWAM09TVrJkI7NZs4= +github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -1686,34 +2812,54 @@ github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= -github.com/whyrusleeping/go-smux-multiplex v3.0.16+incompatible/go.mod h1:34LEDbeKFZInPUrAG+bjuJmUXONGdEFW7XL0SpTY1y4= 
-github.com/whyrusleeping/go-smux-multistream v2.0.2+incompatible/go.mod h1:dRWHHvc4HDQSHh9gbKEBbUZ+f2Q8iZTPG3UOGYODxSQ= -github.com/whyrusleeping/go-smux-yamux v2.0.8+incompatible/go.mod h1:6qHUzBXUbB9MXmw3AUdB52L8sEb/hScCqOdW2kj/wuI= -github.com/whyrusleeping/go-smux-yamux v2.0.9+incompatible/go.mod h1:6qHUzBXUbB9MXmw3AUdB52L8sEb/hScCqOdW2kj/wuI= github.com/whyrusleeping/go-sysinfo v0.0.0-20190219211824-4a357d4b90b1/go.mod h1:tKH72zYNt/exx6/5IQO6L9LoQ0rEjd5SbbWaDTs9Zso= -github.com/whyrusleeping/mafmt v1.2.8 h1:TCghSl5kkwEE0j+sU/gudyhVMRlpBin8fMBBHg59EbA= github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= -github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9 h1:Y1/FEOpaCpD21WxrmfeIYCFPuVPRCY2XZTWzTNHGw30= github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= -github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c/go.mod h1:xxcJeBb7SIUl/Wzkz1eVKJE/CB34YNrqX2TQI6jY9zs= +github.com/whyrusleeping/tar-utils v0.0.0-20201201191210-20a61371de5b/go.mod h1:xT1Y5p2JR2PfSZihE0s4mjdJaRGp1waCTf5JzhQLBck= github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= -github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.9/go.mod 
h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/willf/bloom v0.0.0-20170505221640-54e3b963ee16/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8= +github.com/willf/bloom v2.0.3+incompatible/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8= github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xhit/go-str2duration v1.2.0/go.mod h1:3cPSlfZlUHVlneIVfePFWcJZsuwf+P1v2SRTV4cUmp4= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yosssi/ace v0.0.5/go.mod h1:ALfIzm2vT7t5ZE7uoIZqF3TQ7SAOyupFZnkrF5id+K0= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs 
v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -1721,51 +2867,80 @@ go.opencensus.io v0.21.0/go.mod 
h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.41.1/go.mod h1:2FmkXne0k9nkp27LD/m+uoh8dNlstsiCJ7PLc/S72aI= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I= +go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= +go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM= +go.opentelemetry.io/otel v1.15.1 h1:3Iwq3lfRByPaws0f6bU3naAqOR1n5IeDWd9390kWHa8= +go.opentelemetry.io/otel v1.15.1/go.mod h1:mHHGEHVDLal6YrKMmk9LqC4a3sF5g+fHfrttQIB1NTc= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0/go.mod h1:w8aZL87GMOvOBa2lU/JlVXE1q4chk/0FX+8ai4513bw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.8.0/go.mod h1:twhIvtDQW2sWP1O2cT1N8nkSBgKCRZv2z6COTTBrf8Q= +go.opentelemetry.io/otel/metric v0.20.0/go.mod 
h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/metric v0.38.1/go.mod h1:FwqNHD3I/5iX9pfrRGZIlYICrJv0rHEUl2Ln5vdIVnQ= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U= +go.opentelemetry.io/otel/sdk v1.8.0 h1:xwu69/fNuwbSHWe/0PGS888RmjWY181OmcXDQKu7ZQk= +go.opentelemetry.io/otel/sdk v1.8.0/go.mod h1:uPSfc+yfDH2StDM/Rm35WE8gXSNdvCg023J6HeGNO0c= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= +go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4= +go.opentelemetry.io/otel/trace v1.15.1 h1:uXLo6iHJEzDfrNC0L0mNjItIp06SyaBQxu5t3xMlngY= +go.opentelemetry.io/otel/trace v1.15.1/go.mod h1:IWdQG/5N1x7f6YUlmdLeJvH9yxtuJAfc4VW5Agv9r/8= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.18.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/dig v1.9.0 h1:pJTDXKEhRqBI8W7rU7kwT5EgyRZuSMVSFcZolOvKK9U= 
-go.uber.org/dig v1.9.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= -go.uber.org/dig v1.10.0 h1:yLmDDj9/zuDjv3gz8GQGviXMs9TfysIUMUilCpgzUJY= -go.uber.org/dig v1.10.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= -go.uber.org/fx v1.12.0 h1:+1+3Cz9M0dFMPy9SW9XUIUHye8bnPUm7q7DroNGWYG4= -go.uber.org/fx v1.12.0/go.mod h1:egT3Kyg1JFYQkvKLZ3EsykxkNrZxgXS+gKoKo7abERY= -go.uber.org/fx v1.13.1 h1:CFNTr1oin5OJ0VCZ8EycL3wzF29Jz2g0xe55RFsf2a4= -go.uber.org/fx v1.13.1/go.mod h1:bREWhavnedxpJeTq9pQT53BbvwhUv7TcpsOqcH4a+3w= -go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI= -go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.15.0 h1:vq3YWr8zRj1eFGC7Gvf907hE0eRjPTZ1d3xHadD6liE= +go.uber.org/dig v1.15.0/go.mod h1:pKHs0wMynzL6brANhB2hLMro+zalv1osARTviTcqHLM= +go.uber.org/fx v1.18.2 h1:bUNI6oShr+OVFQeU8cDNbnN7VFsu+SsjHzUF51V/GAU= +go.uber.org/fx v1.18.2/go.mod h1:g0V1KMQ66zIRk8bLu3Ea5Jt2w/cHlOIp4wdRsgh0JaY= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0 
h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= +go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= 
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= -golang.org/x/arch v0.0.0-20190909030613-46d78d1859ac/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20170613210332-850760c427c5/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1777,6 +2952,7 @@ golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1788,25 +2964,48 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= -golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191128160524-b544559bb6d1/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200427165652-729f1e841bcc/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod 
h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220516162934-403b01795ae8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1822,6 +3021,18 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod 
h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= +golang.org/x/exp v0.0.0-20220328175248-053ad81199eb/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= +golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= +golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= +golang.org/x/exp v0.0.0-20220609121020-a51bd0440498/go.mod h1:yh0Ynu2b5ZUe3MQfp2nM0ecK7wsgouWTDN0FNeJuIys= +golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20220916125017-b168a2c6b86b/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20221205204356-47842c84f3db/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg= +golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1833,24 +3044,35 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl 
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mobile v0.0.0-20200801112145-973feb4309de/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= +golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1867,9 +3089,10 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod 
h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190318221613-d196dffd7c2b/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190420063019-afa5a82059c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1879,12 +3102,13 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191125084936-ffdde1057850/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1895,21 +3119,75 @@ golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 h1:eDrdRpKgkcCqKZQwyZRyeFZgfqt37SL7Kv3tok06cKE= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201201195509-5d6afe98e0b7/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net 
v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210420210106-798c2154c571/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210427231257-85d9c07bbe3a/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220531201128-c960675eff93/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220920183852-bf014ff85ad5/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220923203811-8be639271d50/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221002022538-bcab6841153b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= 
+golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1917,21 +3195,48 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 
v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1942,7 +3247,7 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1963,16 +3268,23 @@ golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190910064555-bbd175535a8b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191105231009-c1f44814a5cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191126131656-8a8471f7e56d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1980,72 +3292,176 @@ golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201024232916-9f70ab9862d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201207223542-d4d67f95c62d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210902050250-f475640dd07b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211023085530-d6a326fbbf70/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.1-0.20221102194838-fc697a31fa06/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term 
v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -2059,17 +3475,15 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191030062658-86caa796c7ab/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191114200427-caa0b0f7d508/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191126055441-b0650ceb63d9/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 h1:VvQyQJN0tSuecqgcIxMWnnfG5kSmgy9KZR9sW3W5QeA= golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361 h1:RIIXAeV6GvDBuADKumTODatUqANFZ+5BPMnzsy4hulY= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -2082,23 +3496,56 @@ golang.org/x/tools 
v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200425043458-8463f397d07c/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375 h1:SjQ2+AKWgZLc1xej6WSzL+Dfs5Uyd5xcZH1mGC411IA= -golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200928182047-19e03678916f/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools 
v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.3.0 h1:SrNbZl6ECOS1qFzgTdQfWXZM9XBkiA6tkFrH9YSTPHM= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod 
h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= @@ -2115,7 +3562,51 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod 
h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod 
h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2124,6 +3615,8 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -2148,10 +3641,108 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto 
v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= 
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod 
h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto 
v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= 
+google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230221151758-ace64dc21148 h1:muK+gVBJBfFb4SejshDBlN2/UgxCCOKH9Y34ljqEGOc= +google.golang.org/genproto v0.0.0-20230221151758-ace64dc21148/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2167,38 +3758,76 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.34.0 
h1:raiipEjMOIC/TO2AvyTxP25XFdLxNIBwzDh3FM3XztI= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod 
h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= 
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= @@ -2214,15 +3843,18 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 
v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2230,40 +3862,143 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= -k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA= k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg= -k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= 
-k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k= k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200318093247-d1ab8797c558/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= +lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= +lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= mellium.im/sasl v0.2.1/go.mod h1:ROaEDLQNuf9vjKqE1SrAfnsobm2YKXT1gnN1uDp1PjQ= +mellium.im/sasl v0.3.1/go.mod h1:xm59PUYpZHhgQ9ZqoJ5QaCqzWMi8IeS49dhp6plPCzw= +modernc.org/cc/v3 v3.31.5-0.20210308123301-7a3e9dab9009/go.mod 
h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= +modernc.org/cc/v3 v3.33.6/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.33.9/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.33.11/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.34.0/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.0/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.4/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.5/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.7/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.8/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.10/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.15/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.16/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.17/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.18/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/ccgo/v3 v3.9.0/go.mod h1:nQbgkn8mwzPdp4mm6BT6+p85ugQ7FrGgIcYaE7nSrpY= +modernc.org/ccgo/v3 v3.9.5/go.mod h1:umuo2EP2oDSBnD3ckjaVUXMrmeAw8C8OSICVa0iFf60= +modernc.org/ccgo/v3 v3.10.0/go.mod h1:c0yBmkRFi7uW4J7fwx/JiijwOjeAeR2NoSaRVFPmjMw= +modernc.org/ccgo/v3 v3.11.0/go.mod h1:dGNposbDp9TOZ/1KBxghxtUp/bzErD0/0QW4hhSaBMI= +modernc.org/ccgo/v3 v3.11.1/go.mod h1:lWHxfsn13L3f7hgGsGlU28D9eUOf6y3ZYHKoPaKU0ag= +modernc.org/ccgo/v3 v3.11.2/go.mod h1:6kii3AptTDI+nUrM9RFBoIEUEisSWCbdczD9ZwQH2FE= +modernc.org/ccgo/v3 v3.11.3/go.mod h1:0oHunRBMBiXOKdaglfMlRPBALQqsfrCKXgw9okQ3GEw= +modernc.org/ccgo/v3 v3.12.4/go.mod h1:Bk+m6m2tsooJchP/Yk5ji56cClmN6R1cqc9o/YtbgBQ= +modernc.org/ccgo/v3 v3.12.6/go.mod h1:0Ji3ruvpFPpz+yu+1m0wk68pdr/LENABhTrDkMDWH6c= +modernc.org/ccgo/v3 v3.12.8/go.mod 
h1:Hq9keM4ZfjCDuDXxaHptpv9N24JhgBZmUG5q60iLgUo= +modernc.org/ccgo/v3 v3.12.11/go.mod h1:0jVcmyDwDKDGWbcrzQ+xwJjbhZruHtouiBEvDfoIsdg= +modernc.org/ccgo/v3 v3.12.14/go.mod h1:GhTu1k0YCpJSuWwtRAEHAol5W7g1/RRfS4/9hc9vF5I= +modernc.org/ccgo/v3 v3.12.18/go.mod h1:jvg/xVdWWmZACSgOiAhpWpwHWylbJaSzayCqNOJKIhs= +modernc.org/ccgo/v3 v3.12.20/go.mod h1:aKEdssiu7gVgSy/jjMastnv/q6wWGRbszbheXgWRHc8= +modernc.org/ccgo/v3 v3.12.21/go.mod h1:ydgg2tEprnyMn159ZO/N4pLBqpL7NOkJ88GT5zNU2dE= +modernc.org/ccgo/v3 v3.12.22/go.mod h1:nyDVFMmMWhMsgQw+5JH6B6o4MnZ+UQNw1pp52XYFPRk= +modernc.org/ccgo/v3 v3.12.25/go.mod h1:UaLyWI26TwyIT4+ZFNjkyTbsPsY3plAEB6E7L/vZV3w= +modernc.org/ccgo/v3 v3.12.29/go.mod h1:FXVjG7YLf9FetsS2OOYcwNhcdOLGt8S9bQ48+OP75cE= +modernc.org/ccgo/v3 v3.12.36/go.mod h1:uP3/Fiezp/Ga8onfvMLpREq+KUjUmYMxXPO8tETHtA8= +modernc.org/ccgo/v3 v3.12.38/go.mod h1:93O0G7baRST1vNj4wnZ49b1kLxt0xCW5Hsa2qRaZPqc= +modernc.org/ccgo/v3 v3.12.43/go.mod h1:k+DqGXd3o7W+inNujK15S5ZYuPoWYLpF5PYougCmthU= +modernc.org/ccgo/v3 v3.12.46/go.mod h1:UZe6EvMSqOxaJ4sznY7b23/k13R8XNlyWsO5bAmSgOE= +modernc.org/ccgo/v3 v3.12.47/go.mod h1:m8d6p0zNps187fhBwzY/ii6gxfjob1VxWb919Nk1HUk= +modernc.org/ccgo/v3 v3.12.50/go.mod h1:bu9YIwtg+HXQxBhsRDE+cJjQRuINuT9PUK4orOco/JI= +modernc.org/ccgo/v3 v3.12.51/go.mod h1:gaIIlx4YpmGO2bLye04/yeblmvWEmE4BBBls4aJXFiE= +modernc.org/ccgo/v3 v3.12.53/go.mod h1:8xWGGTFkdFEWBEsUmi+DBjwu/WLy3SSOrqEmKUjMeEg= +modernc.org/ccgo/v3 v3.12.54/go.mod h1:yANKFTm9llTFVX1FqNKHE0aMcQb1fuPJx6p8AcUx+74= +modernc.org/ccgo/v3 v3.12.55/go.mod h1:rsXiIyJi9psOwiBkplOaHye5L4MOOaCjHg1Fxkj7IeU= +modernc.org/ccgo/v3 v3.12.56/go.mod h1:ljeFks3faDseCkr60JMpeDb2GSO3TKAmrzm7q9YOcMU= +modernc.org/ccgo/v3 v3.12.57/go.mod h1:hNSF4DNVgBl8wYHpMvPqQWDQx8luqxDnNGCMM4NFNMc= +modernc.org/ccgo/v3 v3.12.60/go.mod h1:k/Nn0zdO1xHVWjPYVshDeWKqbRWIfif5dtsIOCUVMqM= +modernc.org/ccgo/v3 v3.12.65/go.mod h1:D6hQtKxPNZiY6wDBtehSGKFKmyXn53F8nGTpH+POmS4= +modernc.org/ccgo/v3 v3.12.66/go.mod 
h1:jUuxlCFZTUZLMV08s7B1ekHX5+LIAurKTTaugUr/EhQ= +modernc.org/ccgo/v3 v3.12.67/go.mod h1:Bll3KwKvGROizP2Xj17GEGOTrlvB1XcVaBrC90ORO84= +modernc.org/ccgo/v3 v3.12.73/go.mod h1:hngkB+nUUqzOf3iqsM48Gf1FZhY599qzVg1iX+BT3cQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= +modernc.org/libc v1.8.0/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= +modernc.org/libc v1.9.8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= +modernc.org/libc v1.9.11/go.mod h1:NyF3tsA5ArIjJ83XB0JlqhjTabTCHm9aX4XMPHyQn0Q= +modernc.org/libc v1.11.0/go.mod h1:2lOfPmj7cz+g1MrPNmX65QCzVxgNq2C5o0jdLY2gAYg= +modernc.org/libc v1.11.2/go.mod h1:ioIyrl3ETkugDO3SGZ+6EOKvlP3zSOycUETe4XM4n8M= +modernc.org/libc v1.11.3/go.mod h1:k3HDCP95A6U111Q5TmG3nAyUcp3kR5YFZTeDS9v8vSU= +modernc.org/libc v1.11.5/go.mod h1:k3HDCP95A6U111Q5TmG3nAyUcp3kR5YFZTeDS9v8vSU= +modernc.org/libc v1.11.6/go.mod h1:ddqmzR6p5i4jIGK1d/EiSw97LBcE3dK24QEwCFvgNgE= +modernc.org/libc v1.11.11/go.mod h1:lXEp9QOOk4qAYOtL3BmMve99S5Owz7Qyowzvg6LiZso= +modernc.org/libc v1.11.13/go.mod h1:ZYawJWlXIzXy2Pzghaf7YfM8OKacP3eZQI81PDLFdY8= +modernc.org/libc v1.11.16/go.mod h1:+DJquzYi+DMRUtWI1YNxrlQO6TcA5+dRRiq8HWBWRC8= +modernc.org/libc v1.11.19/go.mod h1:e0dgEame6mkydy19KKaVPBeEnyJB4LGNb0bBH1EtQ3I= +modernc.org/libc v1.11.24/go.mod h1:FOSzE0UwookyT1TtCJrRkvsOrX2k38HoInhw+cSCUGk= +modernc.org/libc v1.11.26/go.mod h1:SFjnYi9OSd2W7f4ct622o/PAYqk7KHv6GS8NZULIjKY= +modernc.org/libc v1.11.27/go.mod h1:zmWm6kcFXt/jpzeCgfvUNswM0qke8qVwxqZrnddlDiE= +modernc.org/libc v1.11.28/go.mod h1:Ii4V0fTFcbq3qrv3CNn+OGHAvzqMBvC7dBNyC4vHZlg= +modernc.org/libc v1.11.31/go.mod h1:FpBncUkEAtopRNJj8aRo29qUiyx5AvAlAxzlx9GNaVM= +modernc.org/libc v1.11.34/go.mod h1:+Tzc4hnb1iaX/SKAutJmfzES6awxfU1BPvrrJO0pYLg= +modernc.org/libc v1.11.37/go.mod h1:dCQebOwoO1046yTrfUE5nX1f3YpGZQKNcITUYWlrAWo= +modernc.org/libc v1.11.39/go.mod 
h1:mV8lJMo2S5A31uD0k1cMu7vrJbSA3J3waQJxpV4iqx8= +modernc.org/libc v1.11.42/go.mod h1:yzrLDU+sSjLE+D4bIhS7q1L5UwXDOw99PLSX0BlZvSQ= +modernc.org/libc v1.11.44/go.mod h1:KFq33jsma7F5WXiYelU8quMJasCCTnHK0mkri4yPHgA= +modernc.org/libc v1.11.45/go.mod h1:Y192orvfVQQYFzCNsn+Xt0Hxt4DiO4USpLNXBlXg/tM= +modernc.org/libc v1.11.47/go.mod h1:tPkE4PzCTW27E6AIKIR5IwHAQKCAtudEIeAV1/SiyBg= +modernc.org/libc v1.11.49/go.mod h1:9JrJuK5WTtoTWIFQ7QjX2Mb/bagYdZdscI3xrvHbXjE= +modernc.org/libc v1.11.51/go.mod h1:R9I8u9TS+meaWLdbfQhq2kFknTW0O3aw3kEMqDDxMaM= +modernc.org/libc v1.11.53/go.mod h1:5ip5vWYPAoMulkQ5XlSJTy12Sz5U6blOQiYasilVPsU= +modernc.org/libc v1.11.54/go.mod h1:S/FVnskbzVUrjfBqlGFIPA5m7UwB3n9fojHhCNfSsnw= +modernc.org/libc v1.11.55/go.mod h1:j2A5YBRm6HjNkoSs/fzZrSxCuwWqcMYTDPLNx0URn3M= +modernc.org/libc v1.11.56/go.mod h1:pakHkg5JdMLt2OgRadpPOTnyRXm/uzu+Yyg/LSLdi18= +modernc.org/libc v1.11.58/go.mod h1:ns94Rxv0OWyoQrDqMFfWwka2BcaF6/61CqJRK9LP7S8= +modernc.org/libc v1.11.70/go.mod h1:DUOmMYe+IvKi9n6Mycyx3DbjfzSKrdr/0Vgt3j7P5gw= +modernc.org/libc v1.11.71/go.mod h1:DUOmMYe+IvKi9n6Mycyx3DbjfzSKrdr/0Vgt3j7P5gw= +modernc.org/libc v1.11.75/go.mod h1:dGRVugT6edz361wmD9gk6ax1AbDSe0x5vji0dGJiPT0= +modernc.org/libc v1.11.82/go.mod h1:NF+Ek1BOl2jeC7lw3a7Jj5PWyHPwWD4aq3wVKxqV1fI= +modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc= +modernc.org/memory v1.0.5/go.mod h1:B7OYswTRnfGg+4tDH1t1OeUNnsy2viGTdME4tzd+IjM= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.10.0/go.mod h1:PGzq6qlhyYjL6uVbSgS6WoF7ZopTW/sI7+7p+mb4ZVU= +modernc.org/sqlite v1.13.0/go.mod 
h1:2qO/6jZJrcQaxFUHxOwa6Q6WfiGSsiVj6GXX0Ker+Jg= +modernc.org/sqlite v1.14.2-0.20211125151325-d4ed92c0a70f/go.mod h1:YT5XFRKOueohjppHO4cHb54eQlnaUGsZMHoryaCpNo4= +modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/tcl v1.5.0/go.mod h1:gb57hj4pO8fRrK54zveIfFXBaMHK3SKJNWcmRw1cRzc= +modernc.org/tcl v1.5.9/go.mod h1:bcwjvBJ2u0exY6K35eAmxXBBij5kXb1dHlAWmfhqThE= +modernc.org/tcl v1.8.13/go.mod h1:V+q/Ef0IJaNUSECieLU4o+8IScapxnMyFV6i/7uQlAY= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= +modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= +modernc.org/z v1.1.2/go.mod h1:sj9T1AGBG0dm6SCVzldPOHWrif6XBpooJtbttMn1+Js= +modernc.org/z v1.2.19/go.mod h1:+ZpP0pc4zz97eukOzW3xagV/lS82IpPN9NGG5pNF9vY= +moul.io/http2curl v1.0.0/go.mod h1:f6cULg+e4Md/oW1cYmwW4IWQOVl2lGbmCNGOHvzX2kE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/go-diff 
v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= +zombiezen.com/go/sqlite v0.2.0/go.mod h1:VyBqNtpcF4vdvYgdwTSHJlwxyvTYCDQAZM9/qmGPyLg= +zombiezen.com/go/sqlite v0.8.0/go.mod h1:EMNzBZwTS5Yg6nwujgJdEo0brNm2a6f8Y4zoGiWZ5RU= From 860a7fca34b88d66072181e16965b48d2cf2278b Mon Sep 17 00:00:00 2001 From: Steve Date: Sat, 26 Aug 2023 22:37:33 +0800 Subject: [PATCH 069/139] chore: change default s3 server address to local --- s3/server/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/s3/server/server.go b/s3/server/server.go index 5973e2c4f..733c96dc4 100644 --- a/s3/server/server.go +++ b/s3/server/server.go @@ -9,7 +9,7 @@ import ( "sync" ) -const defaultServerAddress = ":15001" +const defaultServerAddress = "127.0.0.1:15001" var ( ErrServerStarted = errors.New("server started") From d38caf15273d509255fe91ead217ba259b455688 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Mon, 28 Aug 2023 17:50:15 +0800 Subject: [PATCH 070/139] mod: add object api --- s3/handlers/handlers_object.go | 557 ++++++++++++++++++++++++++++++++- s3/handlers/proto.go | 7 + s3/responses/object_header.go | 64 ++++ s3/responses/types.go | 98 ++++++ s3/routers/routers.go | 19 ++ s3/services/object/proto.go | 9 + s3/services/object/service.go | 243 +++++++++++++- 7 files changed, 989 insertions(+), 8 deletions(-) create mode 100644 s3/responses/object_header.go diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index 9af768abd..026a15877 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -1,7 +1,17 @@ package handlers import ( + "encoding/base64" "errors" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/consts" @@ -9,9 +19,8 @@ 
import ( "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/s3utils" "github.com/bittorrent/go-btfs/s3/services/bucket" + "github.com/bittorrent/go-btfs/s3/services/object" "github.com/bittorrent/go-btfs/s3/utils/hash" - "net/http" - "time" ) const lockWaitTimeout = 5 * time.Minute @@ -91,8 +100,8 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { return } + //objsvc obj, err := h.objsvc.PutObject(ctx, bucname, objname, hrdr, r.ContentLength, meta) - if err != nil { responses.WriteErrorResponse(w, r, err) return @@ -102,3 +111,545 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { return } + +// HeadObjectHandler - HEAD Object +func (h *Handlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + bucname, objname, err := requests.ParseBucketAndObject(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) + return + } + + if err := s3utils.CheckGetObjArgs(ctx, bucname, objname); err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) + return + } + + err = h.bucsvc.CheckACL(ack, bucname, action.HeadObjectAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + return + } + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + //objsvc + obj, err := h.objsvc.GetObjectInfo(ctx, bucname, objname) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + w.Header().Set(consts.AmzServerSideEncryption, consts.AmzEncryptionAES) + + // Set standard object headers. + responses.SetObjectHeaders(w, r, obj) + // Set any additional requested response headers. + responses.SetHeadGetRespHeaders(w, r.Form) + + // Successful response. 
+ w.WriteHeader(http.StatusOK) +} + +// CopyObjectHandler - Copy Object +// ---------- +// This implementation of the PUT operation adds an object to a bucket +// while reading the object from another source. +// Notice: The S3 client can send secret keys in headers for encryption related jobs, +// the handler should ensure to remove these keys before sending them to the object layer. +// Currently these keys are: +// - X-Amz-Server-Side-Encryption-Customer-Key +// - X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key +func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + dstBucket, dstObject, err := requests.ParseBucketAndObject(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) + return + } + if err := s3utils.CheckPutObjectArgs(ctx, dstBucket, dstObject); err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + err = h.bucsvc.CheckACL(ack, dstBucket, action.CopyObjectAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + return + } + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get(consts.AmzCopySource)) + if err != nil { + // Save unescaped string as is. + cpSrcPath = r.Header.Get(consts.AmzCopySource) + } + srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + // If source object is empty or bucket is empty, reply back invalid copy source. 
+ if srcObject == "" || srcBucket == "" { + responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) + return + } + if err = s3utils.CheckGetObjArgs(ctx, srcBucket, srcObject); err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + if srcBucket == dstBucket && srcObject == dstObject { + responses.WriteErrorResponse(w, r, responses.ErrInvalidCopyDest) + return + } + err = h.bucsvc.CheckACL(ack, srcBucket, action.CopyObjectAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + return + } + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + fmt.Printf("CopyObjectHandler %s %s => %s %s \n", srcBucket, srcObject, dstBucket, dstObject) + + //objsvc + srcObjInfo, err := h.objsvc.GetObjectInfo(ctx, srcBucket, srcObject) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + metadata := make(map[string]string) + metadata[strings.ToLower(consts.ContentType)] = srcObjInfo.ContentType + metadata[strings.ToLower(consts.ContentEncoding)] = srcObjInfo.ContentEncoding + if isReplace(r) { + inputMeta, err := extractMetadata(ctx, r) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + for key, val := range inputMeta { + metadata[key] = val + } + } + + //objsvc + obj, err := h.objsvc.CopyObject(ctx, dstBucket, dstObject, srcObjInfo, srcObjInfo.Size, metadata) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + resp := responses.CopyObjectResult{ + ETag: "\"" + obj.ETag + "\"", + LastModified: obj.ModTime.UTC().Format(consts.Iso8601TimeFormat), + } + + setPutObjHeaders(w, obj, false) + + responses.WriteSuccessResponseXML(w, r, resp) +} + +// DeleteObjectHandler - delete an object +// Delete objectAPIHandlers +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := 
cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + bucname, objname, err := requests.ParseBucketAndObject(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) + return + } + if err := s3utils.CheckDelObjArgs(ctx, bucname, objname); err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + err = h.bucsvc.CheckACL(ack, bucname, action.DeleteObjectAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + return + } + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + //objsvc + obj, err := h.objsvc.GetObjectInfo(ctx, bucname, objname) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + //objsvc + err = h.objsvc.DeleteObject(ctx, bucname, objname) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + setPutObjHeaders(w, obj, true) + responses.WriteSuccessNoContent(w) +} + +// GetObjectHandler - GET Object +// ---------- +// This implementation of the GET operation retrieves object. To use GET, +// you must have READ access to the object. 
+// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + bucname, objname, err := requests.ParseBucketAndObject(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) + return + } + if err = s3utils.CheckGetObjArgs(ctx, bucname, objname); err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + err = h.bucsvc.CheckACL(ack, bucname, action.GetObjectAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + return + } + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + //objsvc + obj, reader, err := h.objsvc.GetObject(ctx, bucname, objname) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + //w.Header().Set(consts.AmzServerSideEncryption, consts.AmzEncryptionAES) + + responses.SetObjectHeaders(w, r, obj) + w.Header().Set(consts.ContentLength, strconv.FormatInt(obj.Size, 10)) + responses.SetHeadGetRespHeaders(w, r.Form) + _, err = io.Copy(w, reader) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInternalError) + return + } +} + +// GetObjectACLHandler - GET Object ACL +// ----------------- +// This operation uses the ACL +// subresource to return the ACL of a specified object. 
+func (h *Handlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + bucname, _, err := requests.ParseBucketAndObject(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) + return + } + + err = h.bucsvc.CheckACL(ack, bucname, action.GetBucketAclAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + return + } + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + acl, err := h.bucsvc.GetBucketAcl(ctx, bucname) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + responses.WriteGetBucketAclResponse(w, r, ack, acl) +} + +func (h *Handlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + bucname, _, err := requests.ParseBucketAndObject(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) + return + } + + err = h.bucsvc.CheckACL(ack, bucname, action.ListObjectsAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + return + } + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + // Extract all the litsObjectsV1 query params to their native values. 
+ prefix, marker, delimiter, maxKeys, encodingType, s3Error := getListObjectsV1Args(r.Form) + if s3Error != nil { + responses.WriteErrorResponse(w, r, s3Error) + return + } + + if err := s3utils.CheckListObjsArgs(ctx, bucname, prefix, marker); err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + //objsvc + objs, err := h.objsvc.ListObjects(ctx, bucname, prefix, marker, delimiter, maxKeys) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + resp := responses.GenerateListObjectsV1Response(bucname, prefix, marker, delimiter, encodingType, maxKeys, objs) + // Write success response. + responses.WriteSuccessResponseXML(w, r, resp) +} + +func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + bucname, _, err := requests.ParseBucketAndObject(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) + return + } + + err = h.bucsvc.CheckACL(ack, bucname, action.ListObjectsAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + return + } + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + urlValues := r.Form + // Extract all the listObjectsV2 query params to their native values. + prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(urlValues) + if errCode != nil { + responses.WriteErrorResponse(w, r, errCode) + return + } + + marker := token + if marker == "" { + marker = startAfter + } + if err := s3utils.CheckListObjsArgs(ctx, bucname, prefix, marker); err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + // Validate the query params before beginning to serve the request. 
+ // fetch-owner is not validated since it is a boolean + s3Error := validateListObjectsArgs(token, delimiter, encodingType, maxKeys) + if s3Error != nil { + responses.WriteErrorResponse(w, r, s3Error) + return + } + + // Initiate a list objects operation based on the input params. + // On success would return back ListObjectsInfo object to be + // marshaled into S3 compatible XML header. + //objsvc + listObjectsV2Info, err := h.objsvc.ListObjectsV2(ctx, bucname, prefix, token, delimiter, + maxKeys, fetchOwner, startAfter) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + resp := responses.GenerateListObjectsV2Response( + bucname, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter, + delimiter, encodingType, listObjectsV2Info.IsTruncated, + maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes) + + // Write success response. + responses.WriteSuccessResponseXML(w, r, resp) +} + +// setPutObjHeaders sets all the necessary headers returned back +// upon a success Put/Copy/CompleteMultipart/Delete requests +// to activate delete only headers set delete as true +func setPutObjHeaders(w http.ResponseWriter, obj object.Object, delete bool) { + // We must not use the http.Header().Set method here because some (broken) + // clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive). + // Therefore, we have to set the ETag directly as map entry. + if obj.ETag != "" && !delete { + w.Header()[consts.ETag] = []string{`"` + obj.ETag + `"`} + } + + // Set the relevant version ID as part of the response header. 
+ if obj.VersionID != "" { + w.Header()[consts.AmzVersionID] = []string{obj.VersionID} + // If version is a deleted marker, set this header as well + if obj.DeleteMarker && delete { // only returned during delete object + w.Header()[consts.AmzDeleteMarker] = []string{strconv.FormatBool(obj.DeleteMarker)} + } + } + + if obj.Bucket != "" && obj.Name != "" { + // do something + } +} + +func pathToBucketAndObject(path string) (bucket, object string) { + path = strings.TrimPrefix(path, consts.SlashSeparator) + idx := strings.Index(path, consts.SlashSeparator) + if idx < 0 { + return path, "" + } + return path[:idx], path[idx+len(consts.SlashSeparator):] +} + +func isReplace(r *http.Request) bool { + return r.Header.Get("X-Amz-Metadata-Directive") == "REPLACE" +} + +// Parse bucket url queries +func getListObjectsV1Args(values url.Values) ( + prefix, marker, delimiter string, maxkeys int, encodingType string, errCode error) { + + if values.Get("max-keys") != "" { + var err error + if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil { + errCode = responses.ErrInvalidMaxKeys + return + } + } else { + maxkeys = consts.MaxObjectList + } + + prefix = trimLeadingSlash(values.Get("prefix")) + marker = trimLeadingSlash(values.Get("marker")) + delimiter = values.Get("delimiter") + encodingType = values.Get("encoding-type") + return +} + +// Parse bucket url queries for ListObjects V2. +func getListObjectsV2Args(values url.Values) ( + prefix, token, startAfter, delimiter string, + fetchOwner bool, maxkeys int, encodingType string, errCode error) { + + // The continuation-token cannot be empty. + if val, ok := values["continuation-token"]; ok { + if len(val[0]) == 0 { + errCode = responses.ErrInvalidToken + return + } + } + + if values.Get("max-keys") != "" { + var err error + if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil { + errCode = responses.ErrInvalidMaxKeys + return + } + // Over flowing count - reset to maxObjectList. 
+ if maxkeys > consts.MaxObjectList { + maxkeys = consts.MaxObjectList + } + } else { + maxkeys = consts.MaxObjectList + } + + prefix = trimLeadingSlash(values.Get("prefix")) + startAfter = trimLeadingSlash(values.Get("start-after")) + delimiter = values.Get("delimiter") + fetchOwner = values.Get("fetch-owner") == "true" + encodingType = values.Get("encoding-type") + + if token = values.Get("continuation-token"); token != "" { + decodedToken, err := base64.StdEncoding.DecodeString(token) + if err != nil { + errCode = responses.ErrIncorrectContinuationToken + return + } + token = string(decodedToken) + } + return +} + +func trimLeadingSlash(ep string) string { + if len(ep) > 0 && ep[0] == '/' { + // Path ends with '/' preserve it + if ep[len(ep)-1] == '/' && len(ep) > 1 { + ep = path.Clean(ep) + ep += "/" + } else { + ep = path.Clean(ep) + } + ep = ep[1:] + } + return ep +} + +// Validate all the ListObjects query arguments, returns an APIErrorCode +// if one of the args do not meet the required conditions. +// - delimiter if set should be equal to '/', otherwise the request is rejected. +// - marker if set should have a common prefix with 'prefix' param, otherwise +// the request is rejected. +func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int) error { + // Max keys cannot be negative. 
+ if maxKeys < 0 { + return responses.ErrInvalidMaxKeys + } + + if encodingType != "" { + // AWS S3 spec only supports 'url' encoding type + if !strings.EqualFold(encodingType, "url") { + return responses.ErrInvalidEncodingMethod + } + } + + return nil +} diff --git a/s3/handlers/proto.go b/s3/handlers/proto.go index 32c956fc9..f65231865 100644 --- a/s3/handlers/proto.go +++ b/s3/handlers/proto.go @@ -20,6 +20,13 @@ type Handlerser interface { // object PutObjectHandler(w http.ResponseWriter, r *http.Request) + HeadObjectHandler(w http.ResponseWriter, r *http.Request) + CopyObjectHandler(w http.ResponseWriter, r *http.Request) + DeleteObjectHandler(w http.ResponseWriter, r *http.Request) + GetObjectHandler(w http.ResponseWriter, r *http.Request) + GetObjectACLHandler(w http.ResponseWriter, r *http.Request) + ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) + ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) // multipart CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) diff --git a/s3/responses/object_header.go b/s3/responses/object_header.go new file mode 100644 index 000000000..2f66c84fe --- /dev/null +++ b/s3/responses/object_header.go @@ -0,0 +1,64 @@ +package responses + +import ( + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/services/object" + "net/http" + "net/url" + "strconv" + "strings" +) + +// SetObjectHeaders Write object header +func SetObjectHeaders(w http.ResponseWriter, r *http.Request, objInfo object.Object) { + // set common headers + setCommonHeaders(w, r) + + // Set last modified time. + lastModified := objInfo.ModTime.UTC().Format(http.TimeFormat) + w.Header().Set(consts.LastModified, lastModified) + + // Set Etag if available. 
+ if objInfo.ETag != "" { + w.Header()[consts.ETag] = []string{"\"" + objInfo.ETag + "\""} + } + + if objInfo.ContentType != "" { + w.Header().Set(consts.ContentType, objInfo.ContentType) + } + + if objInfo.ContentEncoding != "" { + w.Header().Set(consts.ContentEncoding, objInfo.ContentEncoding) + } + + if !objInfo.Expires.IsZero() { + w.Header().Set(consts.Expires, objInfo.Expires.UTC().Format(http.TimeFormat)) + } + + // Set content length + w.Header().Set(consts.ContentLength, strconv.FormatInt(objInfo.Size, 10)) + + // Set the relevant version ID as part of the response header. + if objInfo.VersionID != "" { + w.Header()[consts.AmzVersionID] = []string{objInfo.VersionID} + } + +} + +// SetHeadGetRespHeaders - set any requested parameters as response headers. +func SetHeadGetRespHeaders(w http.ResponseWriter, reqParams url.Values) { + for k, v := range reqParams { + if header, ok := supportedHeadGetReqParams[strings.ToLower(k)]; ok { + w.Header()[header] = v + } + } +} + +// supportedHeadGetReqParams - supported request parameters for GET and HEAD presigned request. 
+var supportedHeadGetReqParams = map[string]string{ + "response-expires": consts.Expires, + "response-content-type": consts.ContentType, + "response-content-encoding": consts.ContentEncoding, + "response-content-language": consts.ContentLanguage, + "response-content-disposition": consts.ContentDisposition, +} diff --git a/s3/responses/types.go b/s3/responses/types.go index 3f63653e0..dd6b64e7e 100644 --- a/s3/responses/types.go +++ b/s3/responses/types.go @@ -1,9 +1,12 @@ package responses import ( + "encoding/base64" "encoding/xml" "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/services/object" + "github.com/bittorrent/go-btfs/s3/utils" ) type GetBucketAclResponse AccessControlPolicy @@ -233,3 +236,98 @@ func GenerateCompleteMultipartUploadResponse(bucname, objname, location string, } return c } + +// GenerateListObjectsV2Response Generates an ListObjectsV2 response for the said bucket with other enumerated options. +func GenerateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, isTruncated bool, maxKeys int, objects []object.Object, prefixes []string) ListObjectsV2Response { + contents := make([]Object, 0, len(objects)) + id := consts.DefaultOwnerID + name := consts.DisplayName + owner := s3.Owner{ + ID: &id, + DisplayName: &name, + } + data := ListObjectsV2Response{} + + for _, object := range objects { + content := Object{} + if object.Name == "" { + continue + } + content.Key = utils.S3EncodeName(object.Name, encodingType) + content.LastModified = object.ModTime.UTC().Format(consts.Iso8601TimeFormat) + if object.ETag != "" { + content.ETag = "\"" + object.ETag + "\"" + } + content.Size = object.Size + content.Owner = owner + contents = append(contents, content) + } + data.Name = bucket + data.Contents = contents + + data.EncodingType = encodingType + data.StartAfter = utils.S3EncodeName(startAfter, encodingType) + data.Delimiter = 
utils.S3EncodeName(delimiter, encodingType) + data.Prefix = utils.S3EncodeName(prefix, encodingType) + data.MaxKeys = maxKeys + data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token)) + data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken)) + data.IsTruncated = isTruncated + + commonPrefixes := make([]CommonPrefix, 0, len(prefixes)) + for _, prefix := range prefixes { + prefixItem := CommonPrefix{} + prefixItem.Prefix = utils.S3EncodeName(prefix, encodingType) + commonPrefixes = append(commonPrefixes, prefixItem) + } + data.CommonPrefixes = commonPrefixes + data.KeyCount = len(data.Contents) + len(data.CommonPrefixes) + return data +} + +// generates an ListObjectsV1 response for the said bucket with other enumerated options. +func GenerateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp object.ListObjectsInfo) ListObjectsResponse { + contents := make([]Object, 0, len(resp.Objects)) + id := consts.DefaultOwnerID + name := consts.DisplayName + owner := s3.Owner{ + ID: &id, + DisplayName: &name, + } + data := ListObjectsResponse{} + + for _, object := range resp.Objects { + content := Object{} + if object.Name == "" { + continue + } + content.Key = utils.S3EncodeName(object.Name, encodingType) + content.LastModified = object.ModTime.UTC().Format(consts.Iso8601TimeFormat) + if object.ETag != "" { + content.ETag = "\"" + object.ETag + "\"" + } + content.Size = object.Size + content.StorageClass = "" + content.Owner = owner + contents = append(contents, content) + } + data.Name = bucket + data.Contents = contents + + data.EncodingType = encodingType + data.Prefix = utils.S3EncodeName(prefix, encodingType) + data.Marker = utils.S3EncodeName(marker, encodingType) + data.Delimiter = utils.S3EncodeName(delimiter, encodingType) + data.MaxKeys = maxKeys + data.NextMarker = utils.S3EncodeName(resp.NextMarker, encodingType) + data.IsTruncated = resp.IsTruncated + + prefixes := 
make([]CommonPrefix, 0, len(resp.Prefixes)) + for _, prefix := range resp.Prefixes { + prefixItem := CommonPrefix{} + prefixItem.Prefix = utils.S3EncodeName(prefix, encodingType) + prefixes = append(prefixes, prefixItem) + } + data.CommonPrefixes = prefixes + return data +} diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 466a3764f..ca1c9c910 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -32,6 +32,7 @@ func (routers *Routers) Register() http.Handler { bucket := root.PathPrefix("/{bucket}").Subrouter() + // multipart object... // CreateMultipart bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(hs.CreateMultipartUploadHandler).Queries("uploads", "") // UploadPart @@ -41,9 +42,27 @@ func (routers *Routers) Register() http.Handler { // AbortMultipart bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + //object... + // ListObjectsV2 + bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsV2Handler).Queries("list-type", "2") + // ListObjectsV1 + bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsV1Handler) + // HeadObject + bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(hs.HeadObjectHandler) // PutObject bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(hs.PutObjectHandler) + // CopyObject + bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(hs.CopyObjectHandler) + // DeleteObject + bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.DeleteObjectHandler) + //todo DeleteObjects new ? + bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.DeleteObjectHandler) + // GetObject + bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectHandler) + // GetObjectACL + bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectACLHandler).Queries("acl", "") + //bucket... 
// GetBucketAcl bucket.Methods(http.MethodGet).HandlerFunc(hs.GetBucketAclHandler).Queries("acl", "") // PutBucketAcl diff --git a/s3/services/object/proto.go b/s3/services/object/proto.go index b7e714cc7..fcbad57e6 100644 --- a/s3/services/object/proto.go +++ b/s3/services/object/proto.go @@ -4,6 +4,7 @@ import ( "context" "errors" "github.com/bittorrent/go-btfs/s3/utils/hash" + "io" "time" ) @@ -13,7 +14,15 @@ var ( ) type Service interface { + // object PutObject(ctx context.Context, bucname, objname string, reader *hash.Reader, size int64, meta map[string]string) (obj Object, err error) + CopyObject(ctx context.Context, bucket, object string, info Object, size int64, meta map[string]string) (Object, error) + GetObject(ctx context.Context, bucket, object string) (Object, io.ReadCloser, error) + GetObjectInfo(ctx context.Context, bucket, object string) (Object, error) + DeleteObject(ctx context.Context, bucket, object string) error + ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) + EmptyBucket(ctx context.Context, bucket string) (bool, error) + ListObjectsV2(ctx context.Context, bucket string, prefix string, continuationToken string, delimiter string, maxKeys int, owner bool, startAfter string) (ListObjectsV2Info, error) // martipart CreateMultipartUpload(ctx context.Context, bucname string, objname string, meta map[string]string) (mtp Multipart, err error) diff --git a/s3/services/object/service.go b/s3/services/object/service.go index 33a88737e..a91dcad39 100644 --- a/s3/services/object/service.go +++ b/s3/services/object/service.go @@ -5,6 +5,12 @@ import ( "encoding/hex" "errors" "fmt" + "io" + "net/http" + "regexp" + "strings" + "time" + "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/etag" "github.com/bittorrent/go-btfs/s3/providers" @@ -12,11 +18,6 @@ import ( "github.com/bittorrent/go-btfs/s3/utils/hash" 
"github.com/dustin/go-humanize" "github.com/google/uuid" - "io" - "net/http" - "regexp" - "strings" - "time" ) const ( @@ -109,6 +110,238 @@ func (s *service) PutObject(ctx context.Context, bucname, objname string, reader return } +// CopyObject store object +func (s *service) CopyObject(ctx context.Context, bucket, object string, info Object, size int64, meta map[string]string) (Object, error) { + obj := Object{ + Bucket: bucket, + Name: object, + ModTime: time.Now().UTC(), + Size: size, + IsDir: false, + ETag: info.ETag, + Cid: info.Cid, + VersionID: "", + IsLatest: true, + DeleteMarker: false, + ContentType: meta[strings.ToLower(consts.ContentType)], + ContentEncoding: meta[strings.ToLower(consts.ContentEncoding)], + SuccessorModTime: time.Now().UTC(), + } + // Update expires + if exp, ok := meta[strings.ToLower(consts.Expires)]; ok { + if t, e := time.Parse(http.TimeFormat, exp); e == nil { + obj.Expires = t.UTC() + } + } + + err := s.providers.GetStateStore().Put(getObjectKey(bucket, object), obj) + if err != nil { + return Object{}, err + } + return obj, nil +} + +// GetObject Get object +func (s *service) GetObject(ctx context.Context, bucket, object string) (Object, io.ReadCloser, error) { + var obj Object + err := s.providers.GetStateStore().Get(getObjectKey(bucket, object), &obj) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = ErrObjectNotFound + return Object{}, nil, err + } + + reader, err := s.providers.GetFileStore().Cat(obj.Cid) + if err != nil { + return Object{}, nil, err + } + + return obj, reader, nil +} + +// GetObjectInfo Get object info +func (s *service) GetObjectInfo(ctx context.Context, bucket, object string) (Object, error) { + var obj Object + err := s.providers.GetStateStore().Get(getObjectKey(bucket, object), &obj) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = ErrObjectNotFound + return Object{}, err + } + + return obj, nil +} + +// DeleteObject delete object +func (s *service) DeleteObject(ctx 
context.Context, bucket, object string) error { + var obj Object + err := s.providers.GetStateStore().Get(getObjectKey(bucket, object), &obj) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = ErrObjectNotFound + return err + } + + if err = s.providers.GetStateStore().Delete(getObjectKey(bucket, object)); err != nil { + return err + } + + //todo 是否先进性unpin,然后remove? + if bl := s.providers.GetFileStore().Remove(obj.Cid); !bl { + errMsg := fmt.Sprintf("mark Objet to delete error, bucket:%s, object:%s, cid:%s, error:%v \n", bucket, object, obj.Cid, err) + return errors.New(errMsg) + } + return nil +} + +func (s *service) CleanObjectsInBucket(ctx context.Context, bucket string) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + prefixKey := fmt.Sprintf(allObjectPrefixFormat, bucket, "") + err := s.providers.GetStateStore().Iterate(prefixKey, func(key, _ []byte) (stop bool, er error) { + record := &Object{} + er = s.providers.GetStateStore().Get(string(key), record) + if er != nil { + return + } + + if err := s.DeleteObject(ctx, bucket, record.Name); err != nil { + return + } + return + }) + + return err +} + +// ListObjectsInfo - container for list objects. +type ListObjectsInfo struct { + // Indicates whether the returned list objects response is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of objects exceeds the limit allowed or specified + // by max keys. + IsTruncated bool + + // When response is truncated (the IsTruncated element value in the response is true), + // you can use the key name in this field as marker in the subsequent + // request to get next set of objects. + // + // NOTE: AWS S3 returns NextMarker only if you have delimiter request parameter specified, + NextMarker string + + // List of objects info for this request. + Objects []Object + + // List of prefixes for this request. 
+ Prefixes []string +} + +// ListObjects list user object +// TODO use more params +func (s *service) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { + if maxKeys == 0 { + return loi, nil + } + + if len(prefix) > 0 && maxKeys == 1 && delimiter == "" && marker == "" { + // Optimization for certain applications like + // - Cohesity + // - Actifio, Splunk etc. + // which send ListObjects requests where the actual object + // itself is the prefix and max-keys=1 in such scenarios + // we can simply verify locally if such an object exists + // to avoid the need for ListObjects(). + var obj Object + err = s.providers.GetStateStore().Get(getObjectKey(bucket, prefix), &obj) + if err == nil { + loi.Objects = append(loi.Objects, obj) + return loi, nil + } + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + seekKey := "" + if marker != "" { + seekKey = fmt.Sprintf(allObjectSeekKeyFormat, bucket, marker) + } + prefixKey := fmt.Sprintf(allObjectPrefixFormat, bucket, prefix) + all, err := s.providers.GetStateStore().ReadAllChan(ctx, prefixKey, seekKey) + if err != nil { + return loi, err + } + index := 0 + for entry := range all { + if index == maxKeys { + loi.IsTruncated = true + break + } + var o Object + if err = entry.UnmarshalValue(&o); err != nil { + return loi, err + } + index++ + loi.Objects = append(loi.Objects, o) + } + if loi.IsTruncated { + loi.NextMarker = loi.Objects[len(loi.Objects)-1].Name + } + + return loi, nil +} + +func (s *service) EmptyBucket(ctx context.Context, bucket string) (bool, error) { + loi, err := s.ListObjects(ctx, bucket, "", "", "", 1) + if err != nil { + return false, err + } + return len(loi.Objects) == 0, nil +} + +// ListObjectsV2Info - container for list objects version 2. +type ListObjectsV2Info struct { + // Indicates whether the returned list objects response is truncated. 
A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of objects exceeds the limit allowed or specified + // by max keys. + IsTruncated bool + + // When response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as marker in the subsequent + // request to get next set of objects. + // + // NOTE: This element is returned only if you have delimiter request parameter + // specified. + ContinuationToken string + NextContinuationToken string + + // List of objects info for this request. + Objects []Object + + // List of prefixes for this request. + Prefixes []string +} + +// ListObjectsV2 list objects +func (s *service) ListObjectsV2(ctx context.Context, bucket string, prefix string, continuationToken string, delimiter string, maxKeys int, owner bool, startAfter string) (ListObjectsV2Info, error) { + marker := continuationToken + if marker == "" { + marker = startAfter + } + loi, err := s.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys) + if err != nil { + return ListObjectsV2Info{}, err + } + listV2Info := ListObjectsV2Info{ + IsTruncated: loi.IsTruncated, + ContinuationToken: continuationToken, + NextContinuationToken: loi.NextMarker, + Objects: loi.Objects, + Prefixes: loi.Prefixes, + } + return listV2Info, nil +} + +/*---------------------------------------------------*/ + func (s *service) CreateMultipartUpload(ctx context.Context, bucname string, objname string, meta map[string]string) (mtp Multipart, err error) { uploadId := uuid.NewString() mtp = Multipart{ From 2a611679ce3d2a7b68c627a7e0791073b1a67087 Mon Sep 17 00:00:00 2001 From: Steve Date: Wed, 30 Aug 2023 00:40:02 +0800 Subject: [PATCH 071/139] feat: s3-compatible-api - 1. add start option and configure; 2. optmize providers interfaces and implements; 3. 
rewrite the server construct function --- cmd/btfs/daemon.go | 15 +++-- go.mod | 2 +- go.sum | 4 +- s3/consts/consts.go | 11 ++++ s3/handlers/handlers.go | 19 +++--- s3/handlers/handlers_middlewares.go | 26 +++++--- s3/handlers/options.go | 60 ++++++++++++++++++ s3/providers/btfs_api.go | 41 +++++++++++++ s3/providers/file_store.go | 17 ----- s3/providers/proto.go | 16 ++--- s3/providers/state_store.go | 42 ------------- s3/providers/storage_state_store_proxy.go | 42 +++++++++++++ s3/routers/routers.go | 6 +- s3/s3.go | 34 ++++++---- s3/server/server.go | 11 +--- s3/server/server_options.go | 6 +- s3/services/cors/proto.go | 7 --- s3/services/cors/service.go | 75 ----------------------- s3/services/cors/service_options.go | 21 ------- s3/services/object/service.go | 11 ++-- 20 files changed, 243 insertions(+), 223 deletions(-) create mode 100644 s3/providers/btfs_api.go delete mode 100644 s3/providers/file_store.go delete mode 100644 s3/providers/state_store.go create mode 100644 s3/providers/storage_state_store_proxy.go delete mode 100644 s3/services/cors/proto.go delete mode 100644 s3/services/cors/service.go delete mode 100644 s3/services/cors/service_options.go diff --git a/cmd/btfs/daemon.go b/cmd/btfs/daemon.go index e921485f4..d5dc0790e 100644 --- a/cmd/btfs/daemon.go +++ b/cmd/btfs/daemon.go @@ -102,6 +102,7 @@ const ( chainID = "chain-id" // apiAddrKwd = "address-api" // swarmAddrKwd = "address-swarm" + enableS3CompatibleAPIKwd = "s3-compatible-api" ) // BTFS daemon test exit error code @@ -229,6 +230,7 @@ Headers. // TODO: add way to override addresses. tricky part: updating the config if also --init. 
// cmds.StringOption(apiAddrKwd, "Address for the daemon rpc API (overrides config)"), // cmds.StringOption(swarmAddrKwd, "Address for the swarm socket (overrides config)"), + cmds.BoolOption(enableS3CompatibleAPIKwd, "Enable s3-compatible-api server"), }, Subcommands: map[string]*cmds.Command{}, NoRemote: true, @@ -716,10 +718,15 @@ If the user need to start multiple nodes on the same machine, the configuration } // access-key init - accesskey.InitService(s3.GetProviders(statestore)) - s3Server := s3.NewServer(statestore) - _ = s3Server.Start() - defer s3Server.Stop() + accesskey.InitService(s3.GetProviders()) + + // start s3-compatible-api server + s3OptEnable, s3Opt := req.Options[enableS3CompatibleAPIKwd].(bool) + if s3OptEnable || (!s3Opt && cfg.S3CompatibleAPI.Enable) { + s3Server := s3.NewServer(cfg.S3CompatibleAPI) + _ = s3Server.Start() + defer s3Server.Stop() + } if SimpleMode == false { // set Analytics flag if specified diff --git a/go.mod b/go.mod index c7aed1319..e93c84df0 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/bittorrent/go-btfs-chunker v0.4.0 github.com/bittorrent/go-btfs-cmds v0.3.0 github.com/bittorrent/go-btfs-common v0.9.0 - github.com/bittorrent/go-btfs-config v0.12.3 + github.com/bittorrent/go-btfs-config v0.13.0-pre2 github.com/bittorrent/go-btfs-files v0.3.1 github.com/bittorrent/go-btns v0.2.0 github.com/bittorrent/go-common/v2 v2.4.0 diff --git a/go.sum b/go.sum index ab57d764b..25113f808 100644 --- a/go.sum +++ b/go.sum @@ -207,8 +207,8 @@ github.com/bittorrent/go-btfs-cmds v0.3.0 h1:xpCBgk3zIm84Ne6EjeJgi8WLB5YJJUIFMjK github.com/bittorrent/go-btfs-cmds v0.3.0/go.mod h1:Fbac/Rou32G0jpoa6wLrNNDxcGOZbGfk+GiG0r3uEIU= github.com/bittorrent/go-btfs-common v0.9.0 h1:jHcFvYQmvmA4IdvVtkI5d/S/HW65Qz21C6oxeyK812w= github.com/bittorrent/go-btfs-common v0.9.0/go.mod h1:OG1n3DfcTxQYfLd5zco54LfL3IiDDaw3s7Igahu0Rj0= -github.com/bittorrent/go-btfs-config v0.12.3 h1:Zi/GTwHo/PJV+90+w45P7axkWsUpOB/XFhgvNk+TwRs= 
-github.com/bittorrent/go-btfs-config v0.12.3/go.mod h1:DNaHVC9wU84KLKoC4HkvdoFJKVZ7TF530qzfYu30fCI= +github.com/bittorrent/go-btfs-config v0.13.0-pre2 h1:sneJ4a5bA15ST9WRUR4G+1FuGUVmszSEuihCeKzeyNk= +github.com/bittorrent/go-btfs-config v0.13.0-pre2/go.mod h1:DNaHVC9wU84KLKoC4HkvdoFJKVZ7TF530qzfYu30fCI= github.com/bittorrent/go-btfs-files v0.3.0/go.mod h1:ylMf73m6oK94hL7VPblY1ZZpePsr6XbPV4BaNUwGZR0= github.com/bittorrent/go-btfs-files v0.3.1 h1:esq3j+6FtZ+SlaxKjVtiYgvXk/SWUiTcv0Q1MeJoPnQ= github.com/bittorrent/go-btfs-files v0.3.1/go.mod h1:ylMf73m6oK94hL7VPblY1ZZpePsr6XbPV4BaNUwGZR0= diff --git a/s3/consts/consts.go b/s3/consts/consts.go index 7429b39a1..e6ffc4116 100644 --- a/s3/consts/consts.go +++ b/s3/consts/consts.go @@ -143,7 +143,18 @@ const ( ContentDisposition = "Content-Disposition" Authorization = "Authorization" Action = "Action" + XRequestWith = "X-Requested-With" Range = "Range" + UserAgent = "User-Agent" +) + +// Standard HTTP cors headers +const ( + AccessControlAllowOrigin = "Access-Control-Allow-Origin" + AccessControlAllowMethods = "Access-Control-Allow-Methods" + AccessControlAllowHeaders = "Access-Control-Allow-Headers" + AccessControlExposeHeaders = "Access-Control-Expose-Headers" + AccessControlAllowCredentials = "Access-Control-Allow-Credentials" ) // Standard BTFS HTTP response constants diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 2735a61e1..ac5364b28 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -8,7 +8,6 @@ import ( "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/services/accesskey" "github.com/bittorrent/go-btfs/s3/services/bucket" - "github.com/bittorrent/go-btfs/s3/services/cors" "github.com/bittorrent/go-btfs/s3/services/object" "github.com/bittorrent/go-btfs/s3/services/sign" "net/http" @@ -22,16 +21,16 @@ const lockPrefix = "s3:lock/" var _ Handlerser = (*Handlers)(nil) type Handlers struct { - corsvc cors.Service + headers map[string][]string + nslock 
ctxmu.MultiCtxRWLocker + acksvc accesskey.Service sigsvc sign.Service bucsvc bucket.Service objsvc object.Service - nslock ctxmu.MultiCtxRWLocker } func NewHandlers( - corsvc cors.Service, acksvc accesskey.Service, sigsvc sign.Service, bucsvc bucket.Service, @@ -39,12 +38,12 @@ func NewHandlers( options ...Option, ) (handlers *Handlers) { handlers = &Handlers{ - corsvc: corsvc, - acksvc: acksvc, - sigsvc: sigsvc, - bucsvc: bucsvc, - objsvc: objsvc, - nslock: ctxmu.NewDefaultMultiCtxRWMutex(), + headers: defaultHeaders, + nslock: ctxmu.NewDefaultMultiCtxRWMutex(), + acksvc: acksvc, + sigsvc: sigsvc, + bucsvc: bucsvc, + objsvc: objsvc, } for _, option := range options { option(handlers) diff --git a/s3/handlers/handlers_middlewares.go b/s3/handlers/handlers_middlewares.go index 522b40d7a..f5c8da14f 100644 --- a/s3/handlers/handlers_middlewares.go +++ b/s3/handlers/handlers_middlewares.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "github.com/bittorrent/go-btfs/s3/cctx" + "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/services/accesskey" rscors "github.com/rs/cors" @@ -11,13 +12,24 @@ import ( ) func (h *Handlers) Cors(handler http.Handler) http.Handler { - return rscors.New(rscors.Options{ - AllowedOrigins: h.corsvc.GetAllowOrigins(), - AllowedMethods: h.corsvc.GetAllowMethods(), - AllowedHeaders: h.corsvc.GetAllowHeaders(), - ExposedHeaders: h.corsvc.GetAllowHeaders(), - AllowCredentials: true, - }).Handler(handler) + headers := h.headers + cred := len(headers[consts.AccessControlAllowCredentials]) > 0 && + headers[consts.AccessControlAllowCredentials][0] == "true" + ch := rscors.New(rscors.Options{ + AllowedOrigins: headers[consts.AccessControlAllowOrigin], + AllowedMethods: headers[consts.AccessControlAllowMethods], + AllowedHeaders: headers[consts.AccessControlExposeHeaders], + ExposedHeaders: headers[consts.AccessControlAllowHeaders], + AllowCredentials: cred, + }) + return 
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // add all user headers + for k, v := range h.headers { + w.Header()[k] = v + } + // next + ch.Handler(handler).ServeHTTP(w, r) + }) } func (h *Handlers) Log(handler http.Handler) http.Handler { diff --git a/s3/handlers/options.go b/s3/handlers/options.go index ae5643114..ccad07eb4 100644 --- a/s3/handlers/options.go +++ b/s3/handlers/options.go @@ -1,3 +1,63 @@ package handlers +import ( + "github.com/bittorrent/go-btfs/s3/consts" + "net/http" +) + +var defaultCorsMethods = []string{ + http.MethodGet, + http.MethodPut, + http.MethodHead, + http.MethodPost, + http.MethodDelete, + http.MethodOptions, + http.MethodPatch, +} + +var defaultCorsHeaders = []string{ + consts.BTFSHash, + consts.Date, + consts.ETag, + consts.ServerInfo, + consts.Connection, + consts.AcceptRanges, + consts.ContentRange, + consts.ContentEncoding, + consts.ContentLength, + consts.ContentType, + consts.ContentMD5, + consts.ContentDisposition, + consts.LastModified, + consts.ContentLanguage, + consts.CacheControl, + consts.RetryAfter, + consts.AmzBucketRegion, + consts.Expires, + consts.Authorization, + consts.Action, + consts.XRequestWith, + consts.Range, + consts.UserAgent, + "X-Amz*", + "x-amz*", + "*", +} + +var defaultHeaders = map[string][]string{ + consts.AccessControlAllowOrigin: []string{"*"}, + consts.AccessControlAllowMethods: defaultCorsMethods, + consts.AccessControlAllowHeaders: defaultCorsHeaders, + consts.AccessControlExposeHeaders: defaultCorsHeaders, + consts.AccessControlAllowCredentials: []string{"true"}, +} + type Option func(handlers *Handlers) + +func WithHeaders(headers map[string][]string) Option { + return func(handlers *Handlers) { + if headers != nil { + handlers.headers = headers + } + } +} diff --git a/s3/providers/btfs_api.go b/s3/providers/btfs_api.go new file mode 100644 index 000000000..c29d9217c --- /dev/null +++ b/s3/providers/btfs_api.go @@ -0,0 +1,41 @@ +package providers + +import ( + 
"errors" + shell "github.com/bittorrent/go-btfs-api" + "io" +) + +var _ FileStorer = (*BtfsAPI)(nil) + +type BtfsAPI struct { + shell *shell.Shell +} + +func NewBtfsAPI(endpointUrl string) (api *BtfsAPI) { + api = &BtfsAPI{} + if endpointUrl == "" { + api.shell = shell.NewLocalShell() + } else { + api.shell = shell.NewShell(endpointUrl) + } + return +} + +func (api *BtfsAPI) Store(r io.Reader) (id string, err error) { + id, err = api.shell.Add(r, shell.Pin(true)) + return +} + +func (api *BtfsAPI) Remove(id string) (err error) { + ok := api.shell.Remove(id) + if !ok { + err = errors.New("not removed") + } + return +} + +func (api *BtfsAPI) Cat(id string) (rc io.ReadCloser, err error) { + rc, err = api.shell.Cat(id) + return +} diff --git a/s3/providers/file_store.go b/s3/providers/file_store.go deleted file mode 100644 index e5b72933e..000000000 --- a/s3/providers/file_store.go +++ /dev/null @@ -1,17 +0,0 @@ -package providers - -import ( - shell "github.com/bittorrent/go-btfs-api" -) - -var _ FileStorer = (*FileStore)(nil) - -type FileStore struct { - *shell.Shell -} - -func NewFileStore() *FileStore { - return &FileStore{ - Shell: shell.NewLocalShell(), - } -} diff --git a/s3/providers/proto.go b/s3/providers/proto.go index 23a741746..bd3724fac 100644 --- a/s3/providers/proto.go +++ b/s3/providers/proto.go @@ -5,16 +5,20 @@ import ( "io" ) +var ( + ErrStateStoreNotFound = errors.New("not found in state store") + ErrFileStoreNotFound = errors.New("not found in file store") +) + type Providerser interface { GetFileStore() FileStorer GetStateStore() StateStorer } type FileStorer interface { - AddWithOpts(r io.Reader, pin bool, rawLeaves bool) (hash string, err error) - Remove(hash string) (removed bool) - Cat(path string) (readCloser io.ReadCloser, err error) - Unpin(path string) (err error) + Store(r io.Reader) (id string, err error) + Remove(id string) (err error) + Cat(id string) (readCloser io.ReadCloser, err error) } type StateStorer interface { @@ -25,7 +29,3 
@@ type StateStorer interface { } type StateStoreIterFunc func(key, value []byte) (stop bool, err error) - -var ( - ErrStateStoreNotFound = errors.New("not found") -) diff --git a/s3/providers/state_store.go b/s3/providers/state_store.go deleted file mode 100644 index 836d358a8..000000000 --- a/s3/providers/state_store.go +++ /dev/null @@ -1,42 +0,0 @@ -package providers - -import ( - "errors" - "github.com/bittorrent/go-btfs/transaction/storage" -) - -var _ StateStorer = (*StateStore)(nil) - -type StateStore struct { - proxy storage.StateStorer -} - -func NewStorageStateStoreProxy(proxy storage.StateStorer) *StateStore { - return &StateStore{ - proxy: proxy, - } -} - -func (s *StateStore) Put(key string, val interface{}) (err error) { - return s.proxy.Put(key, val) -} - -func (s *StateStore) Get(key string, i interface{}) (err error) { - err = s.proxy.Get(key, i) - if errors.Is(err, storage.ErrNotFound) { - err = ErrStateStoreNotFound - } - return -} - -func (s *StateStore) Delete(key string) (err error) { - err = s.proxy.Delete(key) - if errors.Is(err, storage.ErrNotFound) { - err = ErrStateStoreNotFound - } - return -} - -func (s *StateStore) Iterate(prefix string, iterFunc StateStoreIterFunc) (err error) { - return s.proxy.Iterate(prefix, storage.StateIterFunc(iterFunc)) -} diff --git a/s3/providers/storage_state_store_proxy.go b/s3/providers/storage_state_store_proxy.go new file mode 100644 index 000000000..58dacd35f --- /dev/null +++ b/s3/providers/storage_state_store_proxy.go @@ -0,0 +1,42 @@ +package providers + +import ( + "errors" + "github.com/bittorrent/go-btfs/transaction/storage" +) + +var _ StateStorer = (*StorageStateStoreProxy)(nil) + +type StorageStateStoreProxy struct { + to storage.StateStorer +} + +func NewStorageStateStoreProxy(to storage.StateStorer) *StorageStateStoreProxy { + return &StorageStateStoreProxy{ + to: to, + } +} + +func (s *StorageStateStoreProxy) Put(key string, val interface{}) (err error) { + return s.to.Put(key, val) +} + 
+func (s *StorageStateStoreProxy) Get(key string, i interface{}) (err error) { + err = s.to.Get(key, i) + if errors.Is(err, storage.ErrNotFound) { + err = ErrStateStoreNotFound + } + return +} + +func (s *StorageStateStoreProxy) Delete(key string) (err error) { + err = s.to.Delete(key) + if errors.Is(err, storage.ErrNotFound) { + err = ErrStateStoreNotFound + } + return +} + +func (s *StorageStateStoreProxy) Iterate(prefix string, iterFunc StateStoreIterFunc) (err error) { + return s.to.Iterate(prefix, storage.StateIterFunc(iterFunc)) +} diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 466a3764f..c844d422e 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -25,9 +25,9 @@ func (routers *Routers) Register() http.Handler { root := mux.NewRouter() root.Use( - routers.handlers.Cors, - routers.handlers.Log, - routers.handlers.Sign, + hs.Cors, + hs.Log, + hs.Sign, ) bucket := root.PathPrefix("/{bucket}").Subrouter() diff --git a/s3/s3.go b/s3/s3.go index f683c5432..f521bab59 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -1,16 +1,16 @@ package s3 import ( + config "github.com/bittorrent/go-btfs-config" + "github.com/bittorrent/go-btfs/chain" "github.com/bittorrent/go-btfs/s3/handlers" "github.com/bittorrent/go-btfs/s3/providers" "github.com/bittorrent/go-btfs/s3/routers" "github.com/bittorrent/go-btfs/s3/server" "github.com/bittorrent/go-btfs/s3/services/accesskey" "github.com/bittorrent/go-btfs/s3/services/bucket" - "github.com/bittorrent/go-btfs/s3/services/cors" "github.com/bittorrent/go-btfs/s3/services/object" "github.com/bittorrent/go-btfs/s3/services/sign" - "github.com/bittorrent/go-btfs/transaction/storage" "sync" ) @@ -19,34 +19,46 @@ var ( once sync.Once ) -func GetProviders(storageStore storage.StateStorer) *providers.Providers { +func initProviders() { once.Do(func() { - sstore := providers.NewStorageStateStoreProxy(storageStore) - fstore := providers.NewFileStore() + sstore := providers.NewStorageStateStoreProxy(chain.StateStore) + 
fstore := providers.NewBtfsAPI("") ps = providers.NewProviders(sstore, fstore) }) +} + +func GetProviders() *providers.Providers { + initProviders() return ps } -func NewServer(storageStore storage.StateStorer) *server.Server { - _ = GetProviders(storageStore) +func NewServer(cfg config.S3CompatibleAPI) *server.Server { + // providers + initProviders() // services - corsvc := cors.NewService() acksvc := accesskey.NewService(ps) sigsvc := sign.NewService() bucsvc := bucket.NewService(ps) - bucsvc.SetEmptyBucket(bucsvc.EmptyBucket) //todo EmptyBucket参数后续更新为object对象 objsvc := object.NewService(ps) // handlers - hs := handlers.NewHandlers(corsvc, acksvc, sigsvc, bucsvc, objsvc) + hs := handlers.NewHandlers( + acksvc, + sigsvc, + bucsvc, + objsvc, + handlers.WithHeaders(cfg.HTTPHeaders), + ) // routers rs := routers.NewRouters(hs) // server - svr := server.NewServer(rs) + svr := server.NewServer( + rs, + server.WithAddress(cfg.Address), + ) return svr } diff --git a/s3/server/server.go b/s3/server/server.go index 733c96dc4..876a882a7 100644 --- a/s3/server/server.go +++ b/s3/server/server.go @@ -9,8 +9,6 @@ import ( "sync" ) -const defaultServerAddress = "127.0.0.1:15001" - var ( ErrServerStarted = errors.New("server started") ErrServerNotStarted = errors.New("server not started") @@ -56,11 +54,9 @@ func (s *Server) Start() (err error) { } go func() { - fmt.Printf("start s3-compatible-api server\n") + fmt.Printf("Start s3-compatible-api server, endpoint-url: http://%s\n", httpSvr.Addr) lErr := httpSvr.ListenAndServe() - if lErr != nil && !errors.Is(lErr, http.ErrServerClosed) { - fmt.Printf("start s3-compatible-api server: %v\n", lErr) - } + fmt.Printf("Stop s3-compatible-api server: %v\n", lErr) }() return @@ -70,11 +66,10 @@ func (s *Server) Stop() (err error) { s.mutex.Lock() defer s.mutex.Unlock() if s.shutdown == nil { - err = ErrServerNotStarted + err = ErrServerStarted return } err = s.shutdown() s.shutdown = nil - fmt.Printf("stoped s3-compatible-api server: 
%v\n", err) return } diff --git a/s3/server/server_options.go b/s3/server/server_options.go index 7b71e719b..2c8ec3f81 100644 --- a/s3/server/server_options.go +++ b/s3/server/server_options.go @@ -1,9 +1,13 @@ package server +const defaultServerAddress = "127.0.0.1:15001" + type Option func(*Server) func WithAddress(address string) Option { return func(s *Server) { - s.address = address + if address != "" { + s.address = address + } } } diff --git a/s3/services/cors/proto.go b/s3/services/cors/proto.go deleted file mode 100644 index 55115b9f6..000000000 --- a/s3/services/cors/proto.go +++ /dev/null @@ -1,7 +0,0 @@ -package cors - -type Service interface { - GetAllowOrigins() []string - GetAllowMethods() []string - GetAllowHeaders() []string -} diff --git a/s3/services/cors/service.go b/s3/services/cors/service.go deleted file mode 100644 index 97ade9c68..000000000 --- a/s3/services/cors/service.go +++ /dev/null @@ -1,75 +0,0 @@ -package cors - -import ( - "github.com/bittorrent/go-btfs/s3/consts" - "net/http" -) - -var ( - defaultAllowOrigins = []string{"*"} - defaultAllowMethods = []string{ - http.MethodGet, - http.MethodPut, - http.MethodHead, - http.MethodPost, - http.MethodDelete, - http.MethodOptions, - http.MethodPatch, - } - defaultAllowHeaders = []string{ - consts.Date, - consts.ETag, - consts.ServerInfo, - consts.Connection, - consts.AcceptRanges, - consts.ContentRange, - consts.ContentEncoding, - consts.ContentLength, - consts.ContentType, - consts.ContentDisposition, - consts.LastModified, - consts.ContentLanguage, - consts.CacheControl, - consts.RetryAfter, - consts.AmzBucketRegion, - consts.Expires, - consts.Authorization, - consts.Action, - consts.Range, - "X-Amz*", - "x-amz*", - "*", - } -) - -var _ Service = (*service)(nil) - -type service struct { - allowOrigins []string - allowMethods []string - allowHeaders []string -} - -func NewService(options ...Option) Service { - svc := &service{ - allowOrigins: defaultAllowOrigins, - allowMethods: 
defaultAllowMethods, - allowHeaders: defaultAllowHeaders, - } - for _, option := range options { - option(svc) - } - return svc -} - -func (svc *service) GetAllowOrigins() []string { - return svc.allowOrigins -} - -func (svc *service) GetAllowMethods() []string { - return svc.allowMethods -} - -func (svc *service) GetAllowHeaders() []string { - return svc.allowHeaders -} diff --git a/s3/services/cors/service_options.go b/s3/services/cors/service_options.go deleted file mode 100644 index c25cbfc89..000000000 --- a/s3/services/cors/service_options.go +++ /dev/null @@ -1,21 +0,0 @@ -package cors - -type Option func(svc *service) - -func WithAllowOrigins(origins []string) Option { - return func(svc *service) { - svc.allowOrigins = origins - } -} - -func WithAllowMethods(methods []string) Option { - return func(svc *service) { - svc.allowMethods = methods - } -} - -func WithAllowHeaders(headers []string) Option { - return func(svc *service) { - svc.allowHeaders = headers - } -} diff --git a/s3/services/object/service.go b/s3/services/object/service.go index 33a88737e..2d52fee1c 100644 --- a/s3/services/object/service.go +++ b/s3/services/object/service.go @@ -72,7 +72,7 @@ func getUploadKey(bucname, objname, uploadID string) string { } func (s *service) PutObject(ctx context.Context, bucname, objname string, reader *hash.Reader, size int64, meta map[string]string) (obj Object, err error) { - cid, err := s.providers.GetFileStore().AddWithOpts(reader, true, true) + cid, err := s.providers.GetFileStore().Store(reader) if err != nil { return } @@ -128,7 +128,7 @@ func (s *service) CreateMultipartUpload(ctx context.Context, bucname string, obj } func (s *service) UploadPart(ctx context.Context, bucname string, objname string, uploadID string, partID int, reader *hash.Reader, size int64, meta map[string]string) (part ObjectPart, err error) { - cid, err := s.providers.GetFileStore().AddWithOpts(reader, true, true) + cid, err := s.providers.GetFileStore().Store(reader) if err 
!= nil { return } @@ -162,9 +162,8 @@ func (s *service) AbortMultipartUpload(ctx context.Context, bucname string, objn } for _, part := range mtp.Parts { - ok := s.providers.GetFileStore().Remove(part.Cid) - if !ok { - err = errors.New("remove file failed") + err = s.providers.GetFileStore().Remove(part.Cid) + if err != nil { return } } @@ -239,7 +238,7 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, bucname string, o readers = append(readers, rdr) } - cid, err := s.providers.GetFileStore().AddWithOpts(io.MultiReader(readers...), true, true) + cid, err := s.providers.GetFileStore().Store(io.MultiReader(readers...)) if err != nil { return } From 71ee7b31bdfb27d20f61e563a62e2b722c111113 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Wed, 30 Aug 2023 12:02:05 +0800 Subject: [PATCH 072/139] merge: object --- s3/s3.go | 3 ++- s3/services/bucket/proto.go | 2 +- s3/services/bucket/service.go | 9 ++------- s3/services/object/service.go | 38 ++++++++++++++++++++++------------- 4 files changed, 29 insertions(+), 23 deletions(-) diff --git a/s3/s3.go b/s3/s3.go index f521bab59..5ab6b184a 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -39,8 +39,9 @@ func NewServer(cfg config.S3CompatibleAPI) *server.Server { // services acksvc := accesskey.NewService(ps) sigsvc := sign.NewService() - bucsvc := bucket.NewService(ps) objsvc := object.NewService(ps) + bucsvc := bucket.NewService(ps) + bucsvc.EmptyBucket(objsvc.EmptyBucket) // handlers hs := handlers.NewHandlers( diff --git a/s3/services/bucket/proto.go b/s3/services/bucket/proto.go index 4d2907a4e..5a8c4d5b6 100644 --- a/s3/services/bucket/proto.go +++ b/s3/services/bucket/proto.go @@ -19,7 +19,7 @@ type Service interface { GetAllBucketsOfUser(username string) (list []*Bucket, err error) UpdateBucketAcl(ctx context.Context, bucket, acl string) error GetBucketAcl(ctx context.Context, bucket string) (string, error) - EmptyBucket(ctx context.Context, bucket string) (bool, error) + EmptyBucket(emptyBucket 
func(ctx context.Context, bucket string) (bool, error)) } // Bucket contains bucket metadata. diff --git a/s3/services/bucket/service.go b/s3/services/bucket/service.go index b9c17d2bb..92698a868 100644 --- a/s3/services/bucket/service.go +++ b/s3/services/bucket/service.go @@ -194,11 +194,6 @@ func (s *service) GetBucketAcl(ctx context.Context, bucket string) (string, erro } // EmptyBucket object中后续添加 -func (s *service) EmptyBucket(ctx context.Context, bucket string) (bool, error) { - //loi, err := s.ListObjects(ctx, bucket, "", "", "", 1) - //if err != nil { - // return false, err - //} - //return len(loi.Objects) == 0, nil - return true, nil +func (s *service) EmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) { + s.emptyBucket = emptyBucket } diff --git a/s3/services/object/service.go b/s3/services/object/service.go index fab739ec7..7323ca790 100644 --- a/s3/services/object/service.go +++ b/s3/services/object/service.go @@ -184,7 +184,7 @@ func (s *service) DeleteObject(ctx context.Context, bucket, object string) error } //todo 是否先进性unpin,然后remove? 
- if bl := s.providers.GetFileStore().Remove(obj.Cid); !bl { + if err := s.providers.GetFileStore().Remove(obj.Cid); err != nil { errMsg := fmt.Sprintf("mark Objet to delete error, bucket:%s, object:%s, cid:%s, error:%v \n", bucket, object, obj.Cid, err) return errors.New(errMsg) } @@ -264,23 +264,33 @@ func (s *service) ListObjects(ctx context.Context, bucket string, prefix string, seekKey = fmt.Sprintf(allObjectSeekKeyFormat, bucket, marker) } prefixKey := fmt.Sprintf(allObjectPrefixFormat, bucket, prefix) - all, err := s.providers.GetStateStore().ReadAllChan(ctx, prefixKey, seekKey) - if err != nil { - return loi, err - } + + begin := false index := 0 - for entry := range all { + err = s.providers.GetStateStore().Iterate(prefixKey, func(key, _ []byte) (stop bool, er error) { + record := &Object{} + er = s.providers.GetStateStore().Get(string(key), record) + if er != nil { + return + } + if seekKey == string(key) { + begin = true + } + + if begin { + loi.Objects = append(loi.Objects, *record) + index++ + } + if index == maxKeys { loi.IsTruncated = true - break - } - var o Object - if err = entry.UnmarshalValue(&o); err != nil { - return loi, err + begin = false + return } - index++ - loi.Objects = append(loi.Objects, o) - } + + return + }) + if loi.IsTruncated { loi.NextMarker = loi.Objects[len(loi.Objects)-1].Name } From 83889ee070f26f0842c3cf44157a8a82e59d9204 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Wed, 30 Aug 2023 12:27:56 +0800 Subject: [PATCH 073/139] chore: add object lock --- s3/handlers/handlers_object.go | 91 ++++++++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index 026a15877..ab9b96c2f 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -142,6 +142,20 @@ func (h *Handlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { return } + // rlock bucket + runlock, err := h.rlock(ctx, bucname, w, r) + if 
err != nil { + return + } + defer runlock() + + // rlock object + runlockObj, err := h.rlock(ctx, bucname+"/"+objname, w, r) + if err != nil { + return + } + defer runlockObj() + //objsvc obj, err := h.objsvc.GetObjectInfo(ctx, bucname, objname) if err != nil { @@ -227,6 +241,34 @@ func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { fmt.Printf("CopyObjectHandler %s %s => %s %s \n", srcBucket, srcObject, dstBucket, dstObject) + // rlock bucket 1 + runlock1, err := h.rlock(ctx, srcBucket, w, r) + if err != nil { + return + } + defer runlock1() + + // rlock object 1 + runlockObj1, err := h.rlock(ctx, srcBucket+"/"+srcObject, w, r) + if err != nil { + return + } + defer runlockObj1() + + // rlock bucket 2 + runlock2, err := h.rlock(ctx, dstBucket, w, r) + if err != nil { + return + } + defer runlock2() + + // lock object 2 + unlockObj2, err := h.lock(ctx, dstBucket+"/"+dstObject, w, r) + if err != nil { + return + } + defer unlockObj2() + //objsvc srcObjInfo, err := h.objsvc.GetObjectInfo(ctx, srcBucket, srcObject) if err != nil { @@ -296,6 +338,20 @@ func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { return } + // rlock bucket + runlock, err := h.rlock(ctx, bucname, w, r) + if err != nil { + return + } + defer runlock() + + // lock object + unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) + if err != nil { + return + } + defer unlock() + //objsvc obj, err := h.objsvc.GetObjectInfo(ctx, bucname, objname) if err != nil { @@ -345,6 +401,20 @@ func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { return } + // rlock bucket + runlock, err := h.rlock(ctx, bucname, w, r) + if err != nil { + return + } + defer runlock() + + // rlock object + runlockObj, err := h.rlock(ctx, bucname+"/"+objname, w, r) + if err != nil { + return + } + defer runlockObj() + //objsvc obj, reader, err := h.objsvc.GetObject(ctx, bucname, objname) if err != nil { @@ -391,6 +461,13 @@ func (h *Handlers) 
GetObjectACLHandler(w http.ResponseWriter, r *http.Request) { return } + // rlock bucket + runlock, err := h.rlock(ctx, bucname, w, r) + if err != nil { + return + } + defer runlock() + acl, err := h.bucsvc.GetBucketAcl(ctx, bucname) if err != nil { responses.WriteErrorResponse(w, r, err) @@ -436,6 +513,13 @@ func (h *Handlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) return } + // rlock bucket + runlock, err := h.rlock(ctx, bucname, w, r) + if err != nil { + return + } + defer runlock() + //objsvc objs, err := h.objsvc.ListObjects(ctx, bucname, prefix, marker, delimiter, maxKeys) if err != nil { @@ -496,6 +580,13 @@ func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) return } + // rlock bucket + runlock, err := h.rlock(ctx, bucname, w, r) + if err != nil { + return + } + defer runlock() + // Initiate a list objects operation based on the input params. // On success would return back ListObjectsInfo object to be // marshaled into S3 compatible XML header. 
From 278fe647c3cd0c95efd97e6ced27e4cebfb83634 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Wed, 30 Aug 2023 12:48:07 +0800 Subject: [PATCH 074/139] chore: of delete objs --- s3/handlers/handlers_object.go | 61 ++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index ab9b96c2f..727d28e6f 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -368,6 +368,67 @@ func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { responses.WriteSuccessNoContent(w) } +// DeleteObjectsHandler - delete objects +// Delete objectsAPIHandlers +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html +func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + bucname, objname, err := requests.ParseBucketAndObject(r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) + return + } + if err := s3utils.CheckDelObjArgs(ctx, bucname, objname); err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + err = h.bucsvc.CheckACL(ack, bucname, action.DeleteObjectAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + return + } + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + + // rlock bucket + runlock, err := h.rlock(ctx, bucname, w, r) + if err != nil { + return + } + defer runlock() + + // lock object + unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) + if err != nil { + return + } + defer unlock() + + //objsvc + obj, err := h.objsvc.GetObjectInfo(ctx, bucname, objname) + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + //objsvc + err = h.objsvc.DeleteObject(ctx, bucname, objname) + if err != nil { 
+ responses.WriteErrorResponse(w, r, err) + return + } + setPutObjHeaders(w, obj, true) + responses.WriteSuccessNoContent(w) +} + // GetObjectHandler - GET Object // ---------- // This implementation of the GET operation retrieves object. To use GET, From 73ca8c8589fb8ba24e142af99c39625f4181e00d Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Wed, 30 Aug 2023 12:50:58 +0800 Subject: [PATCH 075/139] chore: --- s3/handlers/handlers_object.go | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index 727d28e6f..bdd0b6b28 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -174,14 +174,6 @@ func (h *Handlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { } // CopyObjectHandler - Copy Object -// ---------- -// This implementation of the PUT operation adds an object to a bucket -// while reading the object from another source. -// Notice: The S3 client can send secret keys in headers for encryption related jobs, -// the handler should ensure to remove these keys before sending them to the object layer. 
-// Currently these keys are: -// - X-Amz-Server-Side-Encryption-Customer-Key -// - X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ack := cctx.GetAccessKey(r) @@ -308,7 +300,6 @@ func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { } // DeleteObjectHandler - delete an object -// Delete objectAPIHandlers // https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -369,7 +360,6 @@ func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { } // DeleteObjectsHandler - delete objects -// Delete objectsAPIHandlers // https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -430,9 +420,6 @@ func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) } // GetObjectHandler - GET Object -// ---------- -// This implementation of the GET operation retrieves object. To use GET, -// you must have READ access to the object. // https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -495,9 +482,6 @@ func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { } // GetObjectACLHandler - GET Object ACL -// ----------------- -// This operation uses the ACL -// subresource to return the ACL of a specified object. 
func (h *Handlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ack := cctx.GetAccessKey(r) From bbfcb978fe1e7a7603e0411f10a18a45a02fcd7e Mon Sep 17 00:00:00 2001 From: Steve Date: Wed, 30 Aug 2023 18:57:55 +0800 Subject: [PATCH 076/139] fix: list objects bug --- s3/handlers/handlers_object.go | 20 ++++++------- s3/responses/types.go | 2 ++ s3/services/object/service.go | 53 ++++++++++++++++++++++++---------- 3 files changed, 49 insertions(+), 26 deletions(-) diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index bdd0b6b28..0edf448d5 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -536,16 +536,6 @@ func (h *Handlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) return } - err = h.bucsvc.CheckACL(ack, bucname, action.ListObjectsAction) - if errors.Is(err, bucket.ErrNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - // Extract all the litsObjectsV1 query params to their native values. 
prefix, marker, delimiter, maxKeys, encodingType, s3Error := getListObjectsV1Args(r.Form) if s3Error != nil { @@ -565,6 +555,16 @@ func (h *Handlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) } defer runlock() + err = h.bucsvc.CheckACL(ack, bucname, action.ListObjectsAction) + if errors.Is(err, bucket.ErrNotFound) { + responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) + return + } + if err != nil { + responses.WriteErrorResponse(w, r, err) + return + } + //objsvc objs, err := h.objsvc.ListObjects(ctx, bucname, prefix, marker, delimiter, maxKeys) if err != nil { diff --git a/s3/responses/types.go b/s3/responses/types.go index dd6b64e7e..8008f69ad 100644 --- a/s3/responses/types.go +++ b/s3/responses/types.go @@ -149,6 +149,7 @@ type Object struct { Key string LastModified string // time string of format "2006-01-02T15:04:05.000Z" ETag string + BTFSHash string // BTFS Cid Size int64 // Owner of the object. @@ -306,6 +307,7 @@ func GenerateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy if object.ETag != "" { content.ETag = "\"" + object.ETag + "\"" } + content.BTFSHash = object.Cid content.Size = object.Size content.StorageClass = "" content.Owner = owner diff --git a/s3/services/object/service.go b/s3/services/object/service.go index 7323ca790..1099f01cc 100644 --- a/s3/services/object/service.go +++ b/s3/services/object/service.go @@ -27,6 +27,7 @@ const ( chunkSize int = 1 << 20 objectKeyFormat = "obj/%s/%s" + objectPrefix = "obj/%s/" allObjectPrefixFormat = "obj/%s/%s" allObjectSeekKeyFormat = "obj/%s/%s" @@ -259,42 +260,62 @@ func (s *service) ListObjects(ctx context.Context, bucket string, prefix string, ctx, cancel := context.WithCancel(ctx) defer cancel() + seekKey := "" if marker != "" { seekKey = fmt.Sprintf(allObjectSeekKeyFormat, bucket, marker) } + prefixKey := fmt.Sprintf(allObjectPrefixFormat, bucket, prefix) + objPrefix := fmt.Sprintf(objectPrefix, bucket) - begin := false - index := 0 + begin := 
seekKey == "" + nkeys := 0 + seen := make(map[string]bool) err = s.providers.GetStateStore().Iterate(prefixKey, func(key, _ []byte) (stop bool, er error) { - record := &Object{} - er = s.providers.GetStateStore().Get(string(key), record) - if er != nil { - return + objKey := (string(key))[len(objPrefix):] + commonPrefix := prefix + + if delimiter != "" { + idx := strings.Index(objKey[len(prefix):], delimiter) + if idx >= 0 { + commonPrefix = objKey[:idx] + delimiter + } } - if seekKey == string(key) { + + if !begin && (seekKey == objKey || seekKey == commonPrefix) { begin = true + return + } + + if !begin { + return } - if begin { + if commonPrefix == prefix { + record := &Object{} + er = s.providers.GetStateStore().Get(string(key), record) + if er != nil { + return + } loi.Objects = append(loi.Objects, *record) - index++ + loi.NextMarker = record.Name + nkeys++ + } else if !seen[commonPrefix] { + loi.Prefixes = append(loi.Prefixes, commonPrefix) + seen[commonPrefix] = true + loi.NextMarker = commonPrefix + nkeys++ } - if index == maxKeys { + if nkeys == maxKeys { loi.IsTruncated = true - begin = false - return + stop = true } return }) - if loi.IsTruncated { - loi.NextMarker = loi.Objects[len(loi.Objects)-1].Name - } - return loi, nil } From bbed9cdeab50deed4778fe5ce329f65c7d6af016 Mon Sep 17 00:00:00 2001 From: Steve Date: Wed, 30 Aug 2023 19:10:20 +0800 Subject: [PATCH 077/139] chore: rename ListObjetV1Handler to ListObjectHandler, rename BTFS-Hash to CID --- s3/consts/consts.go | 6 +----- s3/handlers/handlers_object.go | 2 +- s3/handlers/options.go | 2 +- s3/handlers/proto.go | 2 +- s3/responses/types.go | 5 +++-- s3/responses/writers_common.go | 2 +- s3/routers/routers.go | 4 ++-- 7 files changed, 10 insertions(+), 13 deletions(-) diff --git a/s3/consts/consts.go b/s3/consts/consts.go index e6ffc4116..4f0449b05 100644 --- a/s3/consts/consts.go +++ b/s3/consts/consts.go @@ -146,6 +146,7 @@ const ( XRequestWith = "X-Requested-With" Range = "Range" UserAgent = 
"User-Agent" + CID = "CID" ) // Standard HTTP cors headers @@ -157,11 +158,6 @@ const ( AccessControlAllowCredentials = "Access-Control-Allow-Credentials" ) -// Standard BTFS HTTP response constants -const ( - BTFSHash = "BTFS-Hash" -) - // object const const ( MaxObjectSize = 5 * humanize.TiByte diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index 0edf448d5..ed58bf333 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -522,7 +522,7 @@ func (h *Handlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) { responses.WriteGetBucketAclResponse(w, r, ack, acl) } -func (h *Handlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) { +func (h *Handlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ack := cctx.GetAccessKey(r) var err error diff --git a/s3/handlers/options.go b/s3/handlers/options.go index ccad07eb4..1e9bdcbb4 100644 --- a/s3/handlers/options.go +++ b/s3/handlers/options.go @@ -16,7 +16,7 @@ var defaultCorsMethods = []string{ } var defaultCorsHeaders = []string{ - consts.BTFSHash, + consts.CID, consts.Date, consts.ETag, consts.ServerInfo, diff --git a/s3/handlers/proto.go b/s3/handlers/proto.go index f65231865..1eb28fdcf 100644 --- a/s3/handlers/proto.go +++ b/s3/handlers/proto.go @@ -25,7 +25,7 @@ type Handlerser interface { DeleteObjectHandler(w http.ResponseWriter, r *http.Request) GetObjectHandler(w http.ResponseWriter, r *http.Request) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) - ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) + ListObjectsHandler(w http.ResponseWriter, r *http.Request) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) // multipart diff --git a/s3/responses/types.go b/s3/responses/types.go index 8008f69ad..bdc7eccd6 100644 --- a/s3/responses/types.go +++ b/s3/responses/types.go @@ -149,7 +149,7 @@ type Object struct { Key string LastModified string // time string of format 
"2006-01-02T15:04:05.000Z" ETag string - BTFSHash string // BTFS Cid + CID string // CID Size int64 // Owner of the object. @@ -261,6 +261,7 @@ func GenerateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, } content.Size = object.Size content.Owner = owner + content.CID = object.Cid contents = append(contents, content) } data.Name = bucket @@ -307,7 +308,7 @@ func GenerateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy if object.ETag != "" { content.ETag = "\"" + object.ETag + "\"" } - content.BTFSHash = object.Cid + content.CID = object.Cid content.Size = object.Size content.StorageClass = "" content.Owner = owner diff --git a/s3/responses/writers_common.go b/s3/responses/writers_common.go index bdb0ca27f..8b5d4347d 100644 --- a/s3/responses/writers_common.go +++ b/s3/responses/writers_common.go @@ -203,6 +203,6 @@ func setPutObjHeaders(w http.ResponseWriter, etag, cid string, delete bool) { w.Header()[consts.ETag] = []string{`"` + etag + `"`} } if cid != "" { - w.Header()[consts.BTFSHash] = []string{cid} + w.Header()[consts.CID] = []string{cid} } } diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 0f4b745ec..189d80050 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -45,8 +45,8 @@ func (routers *Routers) Register() http.Handler { //object... 
// ListObjectsV2 bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsV2Handler).Queries("list-type", "2") - // ListObjectsV1 - bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsV1Handler) + // ListObjects + bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsHandler) // HeadObject bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(hs.HeadObjectHandler) // PutObject From d34b4c63e58910b0843b1c09df79e5dfe3cca2a9 Mon Sep 17 00:00:00 2001 From: Steve Date: Thu, 31 Aug 2023 18:39:15 +0800 Subject: [PATCH 078/139] refractor: bucket service --- s3/handlers/handlers.go | 5 +- s3/handlers/handlers_bucket.go | 4 +- s3/handlers/handlers_multipart.go | 9 +- s3/handlers/handlers_object.go | 21 +- s3/providers/proto.go | 4 +- s3/providers/providers.go | 4 +- s3/responses/wirters.go | 3 +- s3/services/accesskey/service.go | 12 +- s3/services/bucket/proto.go | 32 - s3/services/bucket/service.go | 199 ------ .../object/{service_option.go => options.go} | 0 s3/services/object/proto.go | 110 ++-- s3/services/object/service.go | 570 ++---------------- s3/services/object/service_bucket.go | 246 ++++++++ s3/services/object/service_object.go | 544 +++++++++++++++++ 15 files changed, 903 insertions(+), 860 deletions(-) delete mode 100644 s3/services/bucket/proto.go delete mode 100644 s3/services/bucket/service.go rename s3/services/object/{service_option.go => options.go} (100%) create mode 100644 s3/services/object/service_bucket.go create mode 100644 s3/services/object/service_object.go diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index ac5364b28..ea1c72329 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -7,7 +7,6 @@ import ( "github.com/bittorrent/go-btfs/s3/ctxmu" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/services/accesskey" - "github.com/bittorrent/go-btfs/s3/services/bucket" "github.com/bittorrent/go-btfs/s3/services/object" "github.com/bittorrent/go-btfs/s3/services/sign" 
"net/http" @@ -26,14 +25,14 @@ type Handlers struct { acksvc accesskey.Service sigsvc sign.Service - bucsvc bucket.Service + bucsvc object.Service objsvc object.Service } func NewHandlers( acksvc accesskey.Service, sigsvc sign.Service, - bucsvc bucket.Service, + bucsvc object.Service, objsvc object.Service, options ...Option, ) (handlers *Handlers) { diff --git a/s3/handlers/handlers_bucket.go b/s3/handlers/handlers_bucket.go index 39bab2981..2299c3d7d 100644 --- a/s3/handlers/handlers_bucket.go +++ b/s3/handlers/handlers_bucket.go @@ -8,7 +8,7 @@ import ( "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/s3utils" - "github.com/bittorrent/go-btfs/s3/services/bucket" + "github.com/bittorrent/go-btfs/s3/services/object" "net/http" ) @@ -202,7 +202,7 @@ func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { ack := cctx.GetAccessKey(r) err = h.bucsvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) - if errors.Is(err, bucket.ErrNotFound) { + if errors.Is(err, object.ErrBucketNotFound) { responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } diff --git a/s3/handlers/handlers_multipart.go b/s3/handlers/handlers_multipart.go index 99566cfaa..e4fc77015 100644 --- a/s3/handlers/handlers_multipart.go +++ b/s3/handlers/handlers_multipart.go @@ -8,7 +8,6 @@ import ( "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/s3utils" - "github.com/bittorrent/go-btfs/s3/services/bucket" "github.com/bittorrent/go-btfs/s3/services/object" "github.com/bittorrent/go-btfs/s3/utils" "github.com/bittorrent/go-btfs/s3/utils/hash" @@ -58,7 +57,7 @@ func (h *Handlers) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.R defer unlock() err = h.bucsvc.CheckACL(ack, bucname, action.CreateMultipartUploadAction) - if errors.Is(err, bucket.ErrNotFound) { + if errors.Is(err, object.ErrBucketNotFound) { 
responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } @@ -158,7 +157,7 @@ func (h *Handlers) UploadPartHandler(w http.ResponseWriter, r *http.Request) { defer unlock() err = h.bucsvc.CheckACL(ack, bucname, action.PutObjectAction) - if errors.Is(err, bucket.ErrNotFound) { + if errors.Is(err, object.ErrBucketNotFound) { responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } @@ -220,7 +219,7 @@ func (h *Handlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Re defer unlock() err = h.bucsvc.CheckACL(ack, bucname, action.AbortMultipartUploadAction) - if errors.Is(err, bucket.ErrNotFound) { + if errors.Is(err, object.ErrBucketNotFound) { responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } @@ -303,7 +302,7 @@ func (h *Handlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http defer unlock() err = h.bucsvc.CheckACL(ack, bucname, action.CompleteMultipartUploadAction) - if errors.Is(err, bucket.ErrNotFound) { + if errors.Is(err, object.ErrBucketNotFound) { responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index ed58bf333..559e37b2d 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -18,7 +18,6 @@ import ( "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/s3utils" - "github.com/bittorrent/go-btfs/s3/services/bucket" "github.com/bittorrent/go-btfs/s3/services/object" "github.com/bittorrent/go-btfs/s3/utils/hash" ) @@ -91,7 +90,7 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { defer unlock() err = h.bucsvc.CheckACL(ack, bucname, action.PutObjectAction) - if errors.Is(err, bucket.ErrNotFound) { + if errors.Is(err, object.ErrBucketNotFound) { responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } @@ -133,7 +132,7 @@ func (h *Handlers) HeadObjectHandler(w 
http.ResponseWriter, r *http.Request) { } err = h.bucsvc.CheckACL(ack, bucname, action.HeadObjectAction) - if errors.Is(err, bucket.ErrNotFound) { + if errors.Is(err, object.ErrBucketNotFound) { responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } @@ -192,7 +191,7 @@ func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { return } err = h.bucsvc.CheckACL(ack, dstBucket, action.CopyObjectAction) - if errors.Is(err, bucket.ErrNotFound) { + if errors.Is(err, object.ErrBucketNotFound) { responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } @@ -222,7 +221,7 @@ func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { return } err = h.bucsvc.CheckACL(ack, srcBucket, action.CopyObjectAction) - if errors.Is(err, bucket.ErrNotFound) { + if errors.Is(err, object.ErrBucketNotFound) { responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } @@ -320,7 +319,7 @@ func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { } err = h.bucsvc.CheckACL(ack, bucname, action.DeleteObjectAction) - if errors.Is(err, bucket.ErrNotFound) { + if errors.Is(err, object.ErrBucketNotFound) { responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } @@ -380,7 +379,7 @@ func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) } err = h.bucsvc.CheckACL(ack, bucname, action.DeleteObjectAction) - if errors.Is(err, bucket.ErrNotFound) { + if errors.Is(err, object.ErrBucketNotFound) { responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } @@ -440,7 +439,7 @@ func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { } err = h.bucsvc.CheckACL(ack, bucname, action.GetObjectAction) - if errors.Is(err, bucket.ErrNotFound) { + if errors.Is(err, object.ErrBucketNotFound) { responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } @@ -497,7 +496,7 @@ func (h *Handlers) GetObjectACLHandler(w http.ResponseWriter, r 
*http.Request) { } err = h.bucsvc.CheckACL(ack, bucname, action.GetBucketAclAction) - if errors.Is(err, bucket.ErrNotFound) { + if errors.Is(err, object.ErrBucketNotFound) { responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } @@ -556,7 +555,7 @@ func (h *Handlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) { defer runlock() err = h.bucsvc.CheckACL(ack, bucname, action.ListObjectsAction) - if errors.Is(err, bucket.ErrNotFound) { + if errors.Is(err, object.ErrBucketNotFound) { responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } @@ -591,7 +590,7 @@ func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) } err = h.bucsvc.CheckACL(ack, bucname, action.ListObjectsAction) - if errors.Is(err, bucket.ErrNotFound) { + if errors.Is(err, object.ErrBucketNotFound) { responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) return } diff --git a/s3/providers/proto.go b/s3/providers/proto.go index bd3724fac..661d36d5e 100644 --- a/s3/providers/proto.go +++ b/s3/providers/proto.go @@ -11,8 +11,8 @@ var ( ) type Providerser interface { - GetFileStore() FileStorer - GetStateStore() StateStorer + FileStore() FileStorer + StateStore() StateStorer } type FileStorer interface { diff --git a/s3/providers/providers.go b/s3/providers/providers.go index 9e67f9de3..f5d84b3ca 100644 --- a/s3/providers/providers.go +++ b/s3/providers/providers.go @@ -18,10 +18,10 @@ func NewProviders(stateStore StateStorer, fileStore FileStorer, options ...Optio return } -func (p *Providers) GetStateStore() StateStorer { +func (p *Providers) StateStore() StateStorer { return p.stateStore } -func (p *Providers) GetFileStore() FileStorer { +func (p *Providers) FileStore() FileStorer { return p.fileStore } diff --git a/s3/responses/wirters.go b/s3/responses/wirters.go index 81e130be7..f47b8dd79 100644 --- a/s3/responses/wirters.go +++ b/s3/responses/wirters.go @@ -5,7 +5,6 @@ import ( "github.com/aws/aws-sdk-go/aws" 
"github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/services/bucket" "github.com/bittorrent/go-btfs/s3/services/object" "net/http" ) @@ -25,7 +24,7 @@ func WriteDeleteBucketResponse(w http.ResponseWriter) { return } -func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, bucketMetas []*bucket.Bucket) { +func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, bucketMetas []*object.Bucket) { var buckets []*s3.Bucket for _, b := range bucketMetas { buckets = append(buckets, &s3.Bucket{ diff --git a/s3/services/accesskey/service.go b/s3/services/accesskey/service.go index 219a6d962..6ba1f91e2 100644 --- a/s3/services/accesskey/service.go +++ b/s3/services/accesskey/service.go @@ -51,7 +51,7 @@ func (svc *service) Generate() (record *AccessKey, err error) { CreatedAt: now, UpdatedAt: now, } - err = svc.providers.GetStateStore().Put(svc.getStoreKey(record.Key), record) + err = svc.providers.StateStore().Put(svc.getStoreKey(record.Key), record) return } @@ -89,7 +89,7 @@ func (svc *service) Delete(key string) (err error) { func (svc *service) Get(key string) (ack *AccessKey, err error) { ack = &AccessKey{} - err = svc.providers.GetStateStore().Get(svc.getStoreKey(key), ack) + err = svc.providers.StateStore().Get(svc.getStoreKey(key), ack) if err != nil && !errors.Is(err, providers.ErrStateStoreNotFound) { return } @@ -100,9 +100,9 @@ func (svc *service) Get(key string) (ack *AccessKey, err error) { } func (svc *service) List() (list []*AccessKey, err error) { - err = svc.providers.GetStateStore().Iterate(svc.storeKeyPrefix, func(key, _ []byte) (stop bool, er error) { + err = svc.providers.StateStore().Iterate(svc.storeKeyPrefix, func(key, _ []byte) (stop bool, er error) { record := &AccessKey{} - er = svc.providers.GetStateStore().Get(string(key), record) + er = svc.providers.StateStore().Get(string(key), record) if er != nil { return } @@ -149,7 +149,7 @@ func (svc *service) 
update(key string, args *updateArgs) (err error) { record := &AccessKey{} stk := svc.getStoreKey(key) - err = svc.providers.GetStateStore().Get(stk, record) + err = svc.providers.StateStore().Get(stk, record) if err != nil && !errors.Is(err, storage.ErrNotFound) { return } @@ -170,7 +170,7 @@ func (svc *service) update(key string, args *updateArgs) (err error) { record.UpdatedAt = time.Now() - err = svc.providers.GetStateStore().Put(stk, record) + err = svc.providers.StateStore().Put(stk, record) return } diff --git a/s3/services/bucket/proto.go b/s3/services/bucket/proto.go deleted file mode 100644 index 5a8c4d5b6..000000000 --- a/s3/services/bucket/proto.go +++ /dev/null @@ -1,32 +0,0 @@ -package bucket - -import ( - "context" - "errors" - "github.com/bittorrent/go-btfs/s3/action" - "time" -) - -var ErrNotFound = errors.New("bucket not found") - -type Service interface { - CheckACL(accessKey string, bucketName string, action action.Action) (err error) - CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error - GetBucketMeta(ctx context.Context, bucket string) (meta Bucket, err error) - HasBucket(ctx context.Context, bucket string) bool - SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) - DeleteBucket(ctx context.Context, bucket string) error - GetAllBucketsOfUser(username string) (list []*Bucket, err error) - UpdateBucketAcl(ctx context.Context, bucket, acl string) error - GetBucketAcl(ctx context.Context, bucket string) (string, error) - EmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) -} - -// Bucket contains bucket metadata. 
-type Bucket struct { - Name string - Region string - Owner string - Acl string - Created time.Time -} diff --git a/s3/services/bucket/service.go b/s3/services/bucket/service.go deleted file mode 100644 index 92698a868..000000000 --- a/s3/services/bucket/service.go +++ /dev/null @@ -1,199 +0,0 @@ -package bucket - -import ( - "context" - "errors" - "github.com/bittorrent/go-btfs/s3/providers" - "time" - - "github.com/bittorrent/go-btfs/s3/action" - "github.com/bittorrent/go-btfs/s3/ctxmu" - "github.com/bittorrent/go-btfs/s3/policy" -) - -const ( - bucketPrefix = "bkt/" - defaultUpdateTimeoutMS = 200 -) - -var _ Service = (*service)(nil) - -// service captures all bucket metadata for a given cluster. -type service struct { - providers providers.Providerser - emptyBucket func(ctx context.Context, bucket string) (bool, error) - locks *ctxmu.MultiCtxRWMutex - updateTimeout time.Duration -} - -// NewService - creates new policy system. -func NewService(providers providers.Providerser, options ...Option) Service { - s := &service{ - providers: providers, - locks: ctxmu.NewDefaultMultiCtxRWMutex(), - updateTimeout: time.Duration(defaultUpdateTimeoutMS) * time.Millisecond, - } - for _, option := range options { - option(s) - } - return s -} - -func (s *service) CheckACL(ack string, bucketName string, act action.Action) (err error) { - var bucketMeta Bucket - if act != action.CreateBucketAction && act != action.ListBucketAction { - if bucketName == "" { - return ErrNotFound - } - bucketMeta, err = s.GetBucketMeta(context.Background(), bucketName) - if err != nil { - return err - } - } - - if policy.IsAllowed(bucketMeta.Owner == ack, bucketMeta.Acl, act) == false { - return errors.New("not allowed") - } - return -} - -// NewBucketMetadata creates handlers.Bucket with the supplied name and Created to Now. 
-func (s *service) NewBucketMetadata(name, region, accessKey, acl string) *Bucket { - return &Bucket{ - Name: name, - Region: region, - Owner: accessKey, - Acl: acl, - Created: time.Now().UTC(), - } -} - -// lockSetBucketMeta - sets a new metadata in-db -func (s *service) lockSetBucketMeta(bucket string, meta *Bucket) error { - return s.providers.GetStateStore().Put(bucketPrefix+bucket, meta) -} - -// CreateBucket - create a new Bucket -func (s *service) CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error { - ctx, cancel := context.WithTimeout(context.Background(), s.updateTimeout) - defer cancel() - - err := s.locks.Lock(ctx, bucket) - if err != nil { - return err - } - defer s.locks.Unlock(bucket) - - return s.lockSetBucketMeta(bucket, s.NewBucketMetadata(bucket, region, accessKey, acl)) -} - -func (s *service) lockGetBucketMeta(bucket string) (meta Bucket, err error) { - err = s.providers.GetStateStore().Get(bucketPrefix+bucket, &meta) - if errors.Is(err, providers.ErrStateStoreNotFound) { - err = ErrNotFound - } - return -} - -// GetBucketMeta metadata for a bucket. -func (s *service) GetBucketMeta(ctx context.Context, bucket string) (meta Bucket, err error) { - ctx, cancel := context.WithTimeout(context.Background(), s.updateTimeout) - defer cancel() - - err = s.locks.RLock(ctx, bucket) - if err != nil { - return Bucket{Name: bucket}, err - } - defer s.locks.RUnlock(bucket) - - return s.lockGetBucketMeta(bucket) -} - -// HasBucket metadata for a bucket. -func (s *service) HasBucket(ctx context.Context, bucket string) bool { - _, err := s.GetBucketMeta(ctx, bucket) - return err == nil -} - -// DeleteBucket bucket. 
-func (s *service) DeleteBucket(ctx context.Context, bucket string) error { - ctx, cancel := context.WithTimeout(context.Background(), s.updateTimeout) - defer cancel() - - err := s.locks.Lock(ctx, bucket) - if err != nil { - return err - } - defer s.locks.Unlock(bucket) - - if _, err = s.lockGetBucketMeta(bucket); err != nil { - return err - } - - empty, err := s.emptyBucket(ctx, bucket) - if err != nil { - return err - } - - if !empty { - return errors.New("bucket not empty") - } - - return s.providers.GetStateStore().Delete(bucketPrefix + bucket) -} - -func (s *service) SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) { - s.emptyBucket = emptyBucket -} - -// GetAllBucketsOfUser metadata for all bucket. -func (s *service) GetAllBucketsOfUser(username string) (list []*Bucket, err error) { - err = s.providers.GetStateStore().Iterate(bucketPrefix, func(key, _ []byte) (stop bool, er error) { - record := &Bucket{} - er = s.providers.GetStateStore().Get(string(key), record) - if er != nil { - return - } - if record.Owner == username { - list = append(list, record) - } - - return - }) - - return -} - -// UpdateBucketAcl . -func (s *service) UpdateBucketAcl(ctx context.Context, bucket, acl string) error { - ctx, cancel := context.WithTimeout(context.Background(), s.updateTimeout) - defer cancel() - - err := s.locks.Lock(ctx, bucket) - if err != nil { - return err - } - defer s.locks.Unlock(bucket) - - meta, err := s.lockGetBucketMeta(bucket) - if err != nil { - return err - } - - meta.Acl = acl - return s.lockSetBucketMeta(bucket, &meta) -} - -// GetBucketAcl . 
-func (s *service) GetBucketAcl(ctx context.Context, bucket string) (string, error) { - meta, err := s.GetBucketMeta(ctx, bucket) - if err != nil { - return "", err - } - return meta.Acl, nil -} - -// EmptyBucket object中后续添加 -func (s *service) EmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) { - s.emptyBucket = emptyBucket -} diff --git a/s3/services/object/service_option.go b/s3/services/object/options.go similarity index 100% rename from s3/services/object/service_option.go rename to s3/services/object/options.go diff --git a/s3/services/object/proto.go b/s3/services/object/proto.go index fcbad57e6..c9391e78f 100644 --- a/s3/services/object/proto.go +++ b/s3/services/object/proto.go @@ -9,11 +9,25 @@ import ( ) var ( - ErrObjectNotFound = errors.New("object not found") - ErrUploadNotFound = errors.New("upload not found") + ErrBucketNotFound = errors.New("bucket not found") + ErrObjectNotFound = errors.New("object not found") + ErrUploadNotFound = errors.New("upload not found") + ErrNotAllowed = errors.New("not allowed") + ErrBucketAlreadyExists = errors.New("bucket already exists") ) type Service interface { + // bucket + CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error + GetBucketMeta(ctx context.Context, bucket string) (meta Bucket, err error) + HasBucket(ctx context.Context, bucket string) bool + SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) + DeleteBucket(ctx context.Context, bucket string) error + GetAllBucketsOfUser(username string) (list []*Bucket, err error) + UpdateBucketAcl(ctx context.Context, bucket, acl string) error + GetBucketAcl(ctx context.Context, bucket string) (string, error) + EmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) + // object PutObject(ctx context.Context, bucname, objname string, reader *hash.Reader, size int64, meta map[string]string) (obj Object, err error) CopyObject(ctx context.Context, bucket, 
object string, info Object, size int64, meta map[string]string) (Object, error) @@ -21,7 +35,6 @@ type Service interface { GetObjectInfo(ctx context.Context, bucket, object string) (Object, error) DeleteObject(ctx context.Context, bucket, object string) error ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) - EmptyBucket(ctx context.Context, bucket string) (bool, error) ListObjectsV2(ctx context.Context, bucket string, prefix string, continuationToken string, delimiter string, maxKeys int, owner bool, startAfter string) (ListObjectsV2Info, error) // martipart @@ -32,54 +45,31 @@ type Service interface { GetMultipart(ctx context.Context, bucname string, objname string, uploadID string) (mtp Multipart, err error) } -type Object struct { - // Name of the bucket. - Bucket string - - // Name of the object. - Name string - - // Date and time when the object was last modified. - ModTime time.Time - - // Total object size. - Size int64 - - // IsDir indicates if the object is prefix. - IsDir bool - - // Hex encoded unique entity tag of the object. - ETag string - - // ipfs key - Cid string - Acl string - // Version ID of this object. - VersionID string - - // IsLatest indicates if this is the latest current version - // latest can be true for delete marker or a version. - IsLatest bool - - // DeleteMarker indicates if the versionId corresponds - // to a delete marker on an object. - DeleteMarker bool - - // A standard MIME type describing the format of the object. - ContentType string - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the object referenced - // by the Content-Type header field. - ContentEncoding string - - // Date and time at which the object is no longer able to be cached - Expires time.Time - - // Date and time when the object was last accessed. 
- AccTime time.Time +// Bucket contains bucket metadata. +type Bucket struct { + Name string + Region string + Owner string + Acl string + Created time.Time +} - // The mod time of the successor object version if any +type Object struct { + Bucket string + Name string + ModTime time.Time + Size int64 + IsDir bool + ETag string + Cid string + Acl string + VersionID string + IsLatest bool + DeleteMarker bool + ContentType string + ContentEncoding string + Expires time.Time + AccTime time.Time SuccessorModTime time.Time } @@ -89,12 +79,9 @@ type Multipart struct { UploadID string Initiated time.Time MetaData map[string]string - // List of individual parts, maximum size of upto 10,000 - Parts []ObjectPart + Parts []ObjectPart } -// objectPartInfo Info of each part kept in the multipart metadata -// file after CompleteMultipartUpload() is called. type ObjectPart struct { ETag string `json:"etag,omitempty"` Cid string `json:"cid,omitempty"` @@ -103,32 +90,21 @@ type ObjectPart struct { ModTime time.Time `json:"mod_time"` } -// CompletePart - represents the part that was completed, this is sent by the client -// during CompleteMultipartUpload request. type CompletePart struct { - // Part number identifying the part. This is a positive integer between 1 and - // 10,000 - PartNumber int - - // Entity tag returned when the part was uploaded. - ETag string - - // Checksum values. Optional. + PartNumber int + ETag string ChecksumCRC32 string ChecksumCRC32C string ChecksumSHA1 string ChecksumSHA256 string } -// CompletedParts - is a collection satisfying sort.Interface. type CompletedParts []CompletePart func (a CompletedParts) Len() int { return len(a) } func (a CompletedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a CompletedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } -// CompleteMultipartUpload - represents list of parts which are completed, this is sent by the -// client during CompleteMultipartUpload request. 
type CompleteMultipartUpload struct { Parts []CompletePart `xml:"Part"` } diff --git a/s3/services/object/service.go b/s3/services/object/service.go index 1099f01cc..d0e9aec66 100644 --- a/s3/services/object/service.go +++ b/s3/services/object/service.go @@ -2,30 +2,24 @@ package object import ( "context" - "encoding/hex" - "errors" - "fmt" - "io" - "net/http" + "github.com/bittorrent/go-btfs/s3/action" + "github.com/bittorrent/go-btfs/s3/ctxmu" + "github.com/bittorrent/go-btfs/s3/policy" "regexp" "strings" "time" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/etag" "github.com/bittorrent/go-btfs/s3/providers" - "github.com/bittorrent/go-btfs/s3/s3utils" - "github.com/bittorrent/go-btfs/s3/utils/hash" - "github.com/dustin/go-humanize" - "github.com/google/uuid" ) const ( - // bigFileThreshold is the point where we add readahead to put operations. - bigFileThreshold = 64 * humanize.MiByte - // equals unixfsChunkSize - chunkSize int = 1 << 20 + defaultKeySeparator = "/" + defaultBucketSpace = "bkt" + defaultObjectSpace = "obj" + defaultUploadSpace = "upl" + defaultOperationTimeout = 5 * time.Minute + bucketPrefix = "bkt/" objectKeyFormat = "obj/%s/%s" objectPrefix = "obj/%s/" allObjectPrefixFormat = "obj/%s/%s" @@ -51,13 +45,24 @@ var _ Service = (*service)(nil) // service captures all bucket metadata for a given cluster. type service struct { - providers providers.Providerser + providers providers.Providerser + lock ctxmu.MultiCtxRWLocker + keySeparator string + bucketSpace string + objectSpace string + uploadSpace string + operationTimeout time.Duration } -// NewService - creates new policy system. 
func NewService(providers providers.Providerser, options ...Option) Service { s := &service{ - providers: providers, + providers: providers, + lock: ctxmu.NewDefaultMultiCtxRWMutex(), + keySeparator: defaultKeySeparator, + bucketSpace: defaultBucketSpace, + objectSpace: defaultObjectSpace, + uploadSpace: defaultUploadSpace, + operationTimeout: defaultOperationTimeout, } for _, option := range options { option(s) @@ -65,538 +70,45 @@ func NewService(providers providers.Providerser, options ...Option) Service { return s } -func getObjectKey(bucname, objname string) string { - return fmt.Sprintf(objectKeyFormat, bucname, objname) -} - -func getUploadKey(bucname, objname, uploadID string) string { - return fmt.Sprintf(uploadKeyFormat, bucname, objname, uploadID) -} - -func (s *service) PutObject(ctx context.Context, bucname, objname string, reader *hash.Reader, size int64, meta map[string]string) (obj Object, err error) { - cid, err := s.providers.GetFileStore().Store(reader) - if err != nil { - return - } - - obj = Object{ - Bucket: bucname, - Name: objname, - ModTime: time.Now().UTC(), - Size: size, - IsDir: false, - ETag: reader.ETag().String(), - Cid: cid, - VersionID: "", - IsLatest: true, - DeleteMarker: false, - Acl: meta[consts.AmzACL], - ContentType: meta[strings.ToLower(consts.ContentType)], - ContentEncoding: meta[strings.ToLower(consts.ContentEncoding)], - SuccessorModTime: time.Now().UTC(), - } - - // Update expires - if exp, ok := meta[strings.ToLower(consts.Expires)]; ok { - if t, e := time.Parse(http.TimeFormat, exp); e == nil { - obj.Expires = t.UTC() - } - } - - err = s.providers.GetStateStore().Put(getObjectKey(bucname, objname), obj) - if err != nil { - return - } +// common helper methods +func (s *service) getBucketKeyPrefix() (prefix string) { + prefix = strings.Join([]string{s.bucketSpace}, s.keySeparator) return } -// CopyObject store object -func (s *service) CopyObject(ctx context.Context, bucket, object string, info Object, size int64, meta 
map[string]string) (Object, error) { - obj := Object{ - Bucket: bucket, - Name: object, - ModTime: time.Now().UTC(), - Size: size, - IsDir: false, - ETag: info.ETag, - Cid: info.Cid, - VersionID: "", - IsLatest: true, - DeleteMarker: false, - ContentType: meta[strings.ToLower(consts.ContentType)], - ContentEncoding: meta[strings.ToLower(consts.ContentEncoding)], - SuccessorModTime: time.Now().UTC(), - } - // Update expires - if exp, ok := meta[strings.ToLower(consts.Expires)]; ok { - if t, e := time.Parse(http.TimeFormat, exp); e == nil { - obj.Expires = t.UTC() - } - } - - err := s.providers.GetStateStore().Put(getObjectKey(bucket, object), obj) - if err != nil { - return Object{}, err - } - return obj, nil -} - -// GetObject Get object -func (s *service) GetObject(ctx context.Context, bucket, object string) (Object, io.ReadCloser, error) { - var obj Object - err := s.providers.GetStateStore().Get(getObjectKey(bucket, object), &obj) - if errors.Is(err, providers.ErrStateStoreNotFound) { - err = ErrObjectNotFound - return Object{}, nil, err - } - - reader, err := s.providers.GetFileStore().Cat(obj.Cid) - if err != nil { - return Object{}, nil, err - } - - return obj, reader, nil -} - -// GetObjectInfo Get object info -func (s *service) GetObjectInfo(ctx context.Context, bucket, object string) (Object, error) { - var obj Object - err := s.providers.GetStateStore().Get(getObjectKey(bucket, object), &obj) - if errors.Is(err, providers.ErrStateStoreNotFound) { - err = ErrObjectNotFound - return Object{}, err - } - - return obj, nil -} - -// DeleteObject delete object -func (s *service) DeleteObject(ctx context.Context, bucket, object string) error { - var obj Object - err := s.providers.GetStateStore().Get(getObjectKey(bucket, object), &obj) - if errors.Is(err, providers.ErrStateStoreNotFound) { - err = ErrObjectNotFound - return err - } - - if err = s.providers.GetStateStore().Delete(getObjectKey(bucket, object)); err != nil { - return err - } - - //todo 
是否先进性unpin,然后remove? - if err := s.providers.GetFileStore().Remove(obj.Cid); err != nil { - errMsg := fmt.Sprintf("mark Objet to delete error, bucket:%s, object:%s, cid:%s, error:%v \n", bucket, object, obj.Cid, err) - return errors.New(errMsg) - } - return nil -} - -func (s *service) CleanObjectsInBucket(ctx context.Context, bucket string) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - prefixKey := fmt.Sprintf(allObjectPrefixFormat, bucket, "") - err := s.providers.GetStateStore().Iterate(prefixKey, func(key, _ []byte) (stop bool, er error) { - record := &Object{} - er = s.providers.GetStateStore().Get(string(key), record) - if er != nil { - return - } - - if err := s.DeleteObject(ctx, bucket, record.Name); err != nil { - return - } - return - }) - - return err -} - -// ListObjectsInfo - container for list objects. -type ListObjectsInfo struct { - // Indicates whether the returned list objects response is truncated. A - // value of true indicates that the list was truncated. The list can be truncated - // if the number of objects exceeds the limit allowed or specified - // by max keys. - IsTruncated bool - - // When response is truncated (the IsTruncated element value in the response is true), - // you can use the key name in this field as marker in the subsequent - // request to get next set of objects. - // - // NOTE: AWS S3 returns NextMarker only if you have delimiter request parameter specified, - NextMarker string - - // List of objects info for this request. - Objects []Object - - // List of prefixes for this request. 
- Prefixes []string -} - -// ListObjects list user object -// TODO use more params -func (s *service) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { - if maxKeys == 0 { - return loi, nil - } - - if len(prefix) > 0 && maxKeys == 1 && delimiter == "" && marker == "" { - // Optimization for certain applications like - // - Cohesity - // - Actifio, Splunk etc. - // which send ListObjects requests where the actual object - // itself is the prefix and max-keys=1 in such scenarios - // we can simply verify locally if such an object exists - // to avoid the need for ListObjects(). - var obj Object - err = s.providers.GetStateStore().Get(getObjectKey(bucket, prefix), &obj) - if err == nil { - loi.Objects = append(loi.Objects, obj) - return loi, nil - } - } - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - seekKey := "" - if marker != "" { - seekKey = fmt.Sprintf(allObjectSeekKeyFormat, bucket, marker) - } - - prefixKey := fmt.Sprintf(allObjectPrefixFormat, bucket, prefix) - objPrefix := fmt.Sprintf(objectPrefix, bucket) - - begin := seekKey == "" - nkeys := 0 - seen := make(map[string]bool) - err = s.providers.GetStateStore().Iterate(prefixKey, func(key, _ []byte) (stop bool, er error) { - objKey := (string(key))[len(objPrefix):] - commonPrefix := prefix - - if delimiter != "" { - idx := strings.Index(objKey[len(prefix):], delimiter) - if idx >= 0 { - commonPrefix = objKey[:idx] + delimiter - } - } - - if !begin && (seekKey == objKey || seekKey == commonPrefix) { - begin = true - return - } - - if !begin { - return - } - - if commonPrefix == prefix { - record := &Object{} - er = s.providers.GetStateStore().Get(string(key), record) - if er != nil { - return - } - loi.Objects = append(loi.Objects, *record) - loi.NextMarker = record.Name - nkeys++ - } else if !seen[commonPrefix] { - loi.Prefixes = append(loi.Prefixes, commonPrefix) - seen[commonPrefix] = true - 
loi.NextMarker = commonPrefix - nkeys++ - } - - if nkeys == maxKeys { - loi.IsTruncated = true - stop = true - } - - return - }) - - return loi, nil -} - -func (s *service) EmptyBucket(ctx context.Context, bucket string) (bool, error) { - loi, err := s.ListObjects(ctx, bucket, "", "", "", 1) - if err != nil { - return false, err - } - return len(loi.Objects) == 0, nil -} - -// ListObjectsV2Info - container for list objects version 2. -type ListObjectsV2Info struct { - // Indicates whether the returned list objects response is truncated. A - // value of true indicates that the list was truncated. The list can be truncated - // if the number of objects exceeds the limit allowed or specified - // by max keys. - IsTruncated bool - - // When response is truncated (the IsTruncated element value in the response - // is true), you can use the key name in this field as marker in the subsequent - // request to get next set of objects. - // - // NOTE: This element is returned only if you have delimiter request parameter - // specified. - ContinuationToken string - NextContinuationToken string - - // List of objects info for this request. - Objects []Object - - // List of prefixes for this request. 
- Prefixes []string -} - -// ListObjectsV2 list objects -func (s *service) ListObjectsV2(ctx context.Context, bucket string, prefix string, continuationToken string, delimiter string, maxKeys int, owner bool, startAfter string) (ListObjectsV2Info, error) { - marker := continuationToken - if marker == "" { - marker = startAfter - } - loi, err := s.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys) - if err != nil { - return ListObjectsV2Info{}, err - } - listV2Info := ListObjectsV2Info{ - IsTruncated: loi.IsTruncated, - ContinuationToken: continuationToken, - NextContinuationToken: loi.NextMarker, - Objects: loi.Objects, - Prefixes: loi.Prefixes, - } - return listV2Info, nil -} - -/*---------------------------------------------------*/ - -func (s *service) CreateMultipartUpload(ctx context.Context, bucname string, objname string, meta map[string]string) (mtp Multipart, err error) { - uploadId := uuid.NewString() - mtp = Multipart{ - Bucket: bucname, - Object: objname, - UploadID: uploadId, - MetaData: meta, - Initiated: time.Now().UTC(), - } - - err = s.providers.GetStateStore().Put(getUploadKey(bucname, objname, uploadId), mtp) - if err != nil { - return - } - +func (s *service) getObjectKeyPrefix(bucname string) (prefix string) { + prefix = strings.Join([]string{s.objectSpace, bucname}, s.keySeparator) return } -func (s *service) UploadPart(ctx context.Context, bucname string, objname string, uploadID string, partID int, reader *hash.Reader, size int64, meta map[string]string) (part ObjectPart, err error) { - cid, err := s.providers.GetFileStore().Store(reader) - if err != nil { - return - } - - part = ObjectPart{ - Number: partID, - ETag: reader.ETag().String(), - Cid: cid, - Size: size, - ModTime: time.Now().UTC(), - } - - mtp, err := s.getMultipart(ctx, bucname, objname, uploadID) - if err != nil { - return - } - - mtp.Parts = append(mtp.Parts, part) - err = s.providers.GetStateStore().Put(getUploadKey(bucname, objname, uploadID), mtp) - if err != nil 
{ - return part, err - } - +func (s *service) getUploadKeyPrefix(bucname, objname string) (prefix string) { + prefix = strings.Join([]string{s.uploadSpace, bucname, objname}, s.keySeparator) return } -func (s *service) AbortMultipartUpload(ctx context.Context, bucname string, objname string, uploadID string) (err error) { - mtp, err := s.getMultipart(ctx, bucname, objname, uploadID) - if err != nil { - return - } - - for _, part := range mtp.Parts { - err = s.providers.GetFileStore().Remove(part.Cid) - if err != nil { - return - } - } - - err = s.removeMultipart(ctx, bucname, objname, uploadID) - if err != nil { - return - } - +func (s *service) getBucketKey(bucname string) (key string) { + key = s.getBucketKeyPrefix() + bucname return } -func (s *service) CompleteMultiPartUpload(ctx context.Context, bucname string, objname string, uploadID string, parts []CompletePart) (obj Object, err error) { - mi, err := s.getMultipart(ctx, bucname, objname, uploadID) - if err != nil { - return - } - - var ( - readers []io.Reader - objectSize int64 - ) - - defer func() { - for _, rdr := range readers { - _ = rdr.(io.ReadCloser).Close() - } - }() - - idxMap := objectPartIndexMap(mi.Parts) - for i, part := range parts { - partIndex, ok := idxMap[part.PartNumber] - if !ok { - err = s3utils.InvalidPart{ - PartNumber: part.PartNumber, - GotETag: part.ETag, - } - return - } - - gotPart := mi.Parts[partIndex] - - part.ETag = canonicalizeETag(part.ETag) - if gotPart.ETag != part.ETag { - err = s3utils.InvalidPart{ - PartNumber: part.PartNumber, - ExpETag: gotPart.ETag, - GotETag: part.ETag, - } - return - } - - // All parts except the last part has to be at least 5MB. - if (i < len(parts)-1) && !(gotPart.Size >= consts.MinPartSize) { - err = s3utils.PartTooSmall{ - PartNumber: part.PartNumber, - PartSize: gotPart.Size, - PartETag: part.ETag, - } - return - } - - // Save for total objname size. 
- objectSize += gotPart.Size - - var rdr io.ReadCloser - rdr, err = s.providers.GetFileStore().Cat(gotPart.Cid) - if err != nil { - return - } - - readers = append(readers, rdr) - } - - cid, err := s.providers.GetFileStore().Store(io.MultiReader(readers...)) - if err != nil { - return - } - - obj = Object{ - Bucket: bucname, - Name: objname, - ModTime: time.Now().UTC(), - Size: objectSize, - IsDir: false, - ETag: computeCompleteMultipartMD5(parts), - Cid: cid, - VersionID: "", - IsLatest: true, - DeleteMarker: false, - ContentType: mi.MetaData[strings.ToLower(consts.ContentType)], - ContentEncoding: mi.MetaData[strings.ToLower(consts.ContentEncoding)], - SuccessorModTime: time.Now().UTC(), - } - - if exp, ok := mi.MetaData[strings.ToLower(consts.Expires)]; ok { - if t, e := time.Parse(http.TimeFormat, exp); e == nil { - obj.Expires = t.UTC() - } - } - - err = s.providers.GetStateStore().Put(getObjectKey(bucname, objname), obj) - if err != nil { - return - } - - err = s.removeMultipartInfo(ctx, bucname, objname, uploadID) - if err != nil { - return - } - +func (s *service) getObjectKey(bucname, objname string) (key string) { + key = s.getObjectKeyPrefix(bucname) + objname return } -func (s *service) GetMultipart(ctx context.Context, bucname string, objname string, uploadID string) (mtp Multipart, err error) { - return s.getMultipart(ctx, bucname, objname, uploadID) -} - -func (s *service) getMultipart(ctx context.Context, bucname string, objname string, uploadID string) (mtp Multipart, err error) { - err = s.providers.GetStateStore().Get(getUploadKey(bucname, objname, uploadID), &mtp) - if errors.Is(err, providers.ErrStateStoreNotFound) { - err = ErrUploadNotFound - return - } +func (s *service) getUploadKey(bucname, objname, uploadid string) (key string) { + key = s.getUploadKeyPrefix(bucname, objname) + uploadid return } -func (s *service) removeMultipart(ctx context.Context, bucname string, objname string, uploadID string) (err error) { - err = 
s.providers.GetStateStore().Delete(getUploadKey(bucname, objname, uploadID)) - if errors.Is(err, providers.ErrStateStoreNotFound) { - err = ErrUploadNotFound - return - } +func (s *service) checkAcl(owner, acl, user string, act action.Action) (allow bool) { + own := user != "" && user == owner + allow = policy.IsAllowed(own, acl, act) return } -func (s *service) removeMultipartInfo(ctx context.Context, bucname string, objname string, uploadID string) (err error) { - err = s.providers.GetStateStore().Delete(getUploadKey(bucname, objname, uploadID)) - if errors.Is(err, providers.ErrStateStoreNotFound) { - err = ErrUploadNotFound - return - } +func (s *service) opctx(parent context.Context) (ctx context.Context, cancel context.CancelFunc) { + ctx, cancel = context.WithTimeout(parent, s.operationTimeout) return } - -func objectPartIndexMap(parts []ObjectPart) map[int]int { - mp := make(map[int]int) - for i, part := range parts { - mp[part.Number] = i - } - return mp -} - -// canonicalizeETag returns ETag with leading and trailing double-quotes removed, -// if any present -func canonicalizeETag(etag string) string { - return etagRegex.ReplaceAllString(etag, "$1") -} - -func computeCompleteMultipartMD5(parts []CompletePart) string { - var finalMD5Bytes []byte - for _, part := range parts { - md5Bytes, err := hex.DecodeString(canonicalizeETag(part.ETag)) - if err != nil { - finalMD5Bytes = append(finalMD5Bytes, []byte(part.ETag)...) - } else { - finalMD5Bytes = append(finalMD5Bytes, md5Bytes...) 
- } - } - s3MD5 := fmt.Sprintf("%s-%d", etag.Multipart(finalMD5Bytes), len(parts)) - return s3MD5 -} diff --git a/s3/services/object/service_bucket.go b/s3/services/object/service_bucket.go new file mode 100644 index 000000000..de0e24fef --- /dev/null +++ b/s3/services/object/service_bucket.go @@ -0,0 +1,246 @@ +package object + +import ( + "context" + "errors" + "github.com/bittorrent/go-btfs/s3/policy" + "github.com/bittorrent/go-btfs/s3/providers" + "time" + + "github.com/bittorrent/go-btfs/s3/action" +) + +// CreateBucket create a new bucket for the specified user +func (s *service) CreateBucket(ctx context.Context, user, bucname, region, acl string) (err error) { + buckey := s.getBucketKey(bucname) + + ctx, cancel := s.opctx(ctx) + defer cancel() + + err = s.lock.Lock(ctx, buckey) + if err != nil { + return + } + + defer s.lock.Unlock(buckey) + + allow := s.checkAcl(user, acl, user, action.CreateBucketAction) + if !allow { + err = ErrNotAllowed + return + } + + bucket, err := s.getBucket(buckey) + if err == nil { + return + } + + if bucket != nil { + err = ErrBucketAlreadyExists + return + } + + err = s.providers.StateStore().Put( + buckey, + &Bucket{ + Name: bucname, + Region: region, + Owner: user, + Acl: acl, + Created: time.Now().UTC(), + }, + ) + + return +} + +// GetBucket get a bucket for the specified user +func (s *service) GetBucket(ctx context.Context, user, bucname string) (bucket *Bucket, err error) { + buckey := s.getBucketKey(bucname) + + ctx, cancel := s.opctx(ctx) + defer cancel() + + err = s.lock.RLock(ctx, buckey) + if err != nil { + return + } + + defer s.lock.RUnlock(buckey) + + bucket, err = s.getBucket(buckey) + if err != nil { + return + } + + if bucket == nil { + err = ErrBucketNotFound + return + } + + allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.HeadBucketAction) + if !allow { + err = ErrNotAllowed + } + + return +} + +// DeleteBucket delete the specified user bucket and all the bucket's objects +func (s *service) 
DeleteBucket(ctx context.Context, user, bucname string) (err error) { + buckey := s.getBucketKey(bucname) + + ctx, cancel := s.opctx(ctx) + defer cancel() + + err = s.lock.Lock(ctx, buckey) + if err != nil { + return + } + + defer s.lock.Unlock(buckey) + + bucket, err := s.getBucket(buckey) + if err != nil { + return + } + + if bucket == nil { + err = ErrBucketNotFound + return + } + + allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.DeleteBucketAction) + if !allow { + err = ErrNotAllowed + return + } + + err = s.providers.StateStore().Delete(buckey) + + // todo: delete all objects below to this bucket + + return +} + +// GetAllBuckets get all buckets of the specified user +func (s *service) GetAllBuckets(ctx context.Context, user string) (list []*Bucket, err error) { + bucprefix := s.getBucketKeyPrefix() + + ctx, cancel := s.opctx(ctx) + defer cancel() + + allow := s.checkAcl(user, policy.Private, user, action.ListBucketAction) + if !allow { + err = ErrNotAllowed + return + } + + err = s.providers.StateStore().Iterate(bucprefix, func(key, _ []byte) (stop bool, er error) { + defer func() { + if er != nil { + stop = true + } + }() + + er = ctx.Err() + if er != nil { + return + } + + var bucket *Bucket + + er = s.providers.StateStore().Get(string(key), bucket) + if er != nil { + return + } + + if bucket.Owner == user { + list = append(list, bucket) + } + + return + }) + + return +} + +// PutBucketAcl update the acl field value of the specified user's bucket +func (s *service) PutBucketAcl(ctx context.Context, user, bucname, acl string) (err error) { + buckey := s.getBucketKey(bucname) + + ctx, cancel := s.opctx(ctx) + defer cancel() + + err = s.lock.Lock(ctx, buckey) + if err != nil { + return + } + + defer s.lock.Unlock(buckey) + + bucket, err := s.getBucket(buckey) + if err != nil { + return + } + + if bucket == nil { + err = ErrBucketNotFound + return + } + + allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.PutBucketAclAction) + if !allow { + 
err = ErrNotAllowed + return + } + + bucket.Acl = acl + + err = s.providers.StateStore().Put(buckey, bucket) + + return +} + +// GetBucketAcl get the acl field value of the specified user's bucket +func (s *service) GetBucketAcl(ctx context.Context, user, bucname string) (acl string, err error) { + buckey := s.getBucketKey(bucname) + + ctx, cancel := s.opctx(ctx) + defer cancel() + + err = s.lock.RLock(ctx, buckey) + if err != nil { + return + } + + defer s.lock.RUnlock(buckey) + + bucket, err := s.getBucket(buckey) + if err != nil { + return + } + + if bucket == nil { + err = ErrBucketNotFound + return + } + + allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.GetBucketAclAction) + if !allow { + err = ErrNotAllowed + return + } + + acl = bucket.Acl + + return +} + +func (s *service) getBucket(buckey string) (bucket *Bucket, err error) { + err = s.providers.StateStore().Get(buckey, bucket) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = nil + } + return +} diff --git a/s3/services/object/service_object.go b/s3/services/object/service_object.go new file mode 100644 index 000000000..9ba36ec20 --- /dev/null +++ b/s3/services/object/service_object.go @@ -0,0 +1,544 @@ +package object + +import ( + "context" + "encoding/hex" + "fmt" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/etag" + "github.com/bittorrent/go-btfs/s3/s3utils" + "github.com/bittorrent/go-btfs/s3/utils/hash" + "github.com/google/uuid" + "io" + "net/http" + "strings" + "time" +) + +func (s *service) PutObject(ctx context.Context, bucname, objname string, reader *hash.Reader, size int64, meta map[string]string) (obj Object, err error) { + cid, err := s.providers.FileStore().Store(reader) + if err != nil { + return + } + + obj = Object{ + Bucket: bucname, + Name: objname, + ModTime: time.Now().UTC(), + Size: size, + IsDir: false, + ETag: reader.ETag().String(), + Cid: cid, + VersionID: "", + IsLatest: true, + DeleteMarker: false, + Acl: 
meta[consts.AmzACL], + ContentType: meta[strings.ToLower(consts.ContentType)], + ContentEncoding: meta[strings.ToLower(consts.ContentEncoding)], + SuccessorModTime: time.Now().UTC(), + } + + // Update expires + if exp, ok := meta[strings.ToLower(consts.Expires)]; ok { + if t, e := time.Parse(http.TimeFormat, exp); e == nil { + obj.Expires = t.UTC() + } + } + + err = s.providers.StateStore().Put(getObjectKey(bucname, objname), obj) + if err != nil { + return + } + + return +} + +// CopyObject store object +func (s *service) CopyObject(ctx context.Context, bucket, object string, info Object, size int64, meta map[string]string) (Object, error) { + obj := Object{ + Bucket: bucket, + Name: object, + ModTime: time.Now().UTC(), + Size: size, + IsDir: false, + ETag: info.ETag, + Cid: info.Cid, + VersionID: "", + IsLatest: true, + DeleteMarker: false, + ContentType: meta[strings.ToLower(consts.ContentType)], + ContentEncoding: meta[strings.ToLower(consts.ContentEncoding)], + SuccessorModTime: time.Now().UTC(), + } + // Update expires + if exp, ok := meta[strings.ToLower(consts.Expires)]; ok { + if t, e := time.Parse(http.TimeFormat, exp); e == nil { + obj.Expires = t.UTC() + } + } + + err := s.providers.StateStore().Put(getObjectKey(bucket, object), obj) + if err != nil { + return Object{}, err + } + return obj, nil +} + +// GetObject Get object +func (s *service) GetObject(ctx context.Context, bucket, object string) (Object, io.ReadCloser, error) { + var obj Object + err := s.providers.StateStore().Get(getObjectKey(bucket, object), &obj) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = ErrObjectNotFound + return Object{}, nil, err + } + + reader, err := s.providers.FileStore().Cat(obj.Cid) + if err != nil { + return Object{}, nil, err + } + + return obj, reader, nil +} + +// GetObjectInfo Get object info +func (s *service) GetObjectInfo(ctx context.Context, bucket, object string) (Object, error) { + var obj Object + err := 
s.providers.StateStore().Get(getObjectKey(bucket, object), &obj) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = ErrObjectNotFound + return Object{}, err + } + + return obj, nil +} + +// DeleteObject delete object +func (s *service) DeleteObject(ctx context.Context, bucket, object string) error { + var obj Object + err := s.providers.StateStore().Get(getObjectKey(bucket, object), &obj) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = ErrObjectNotFound + return err + } + + if err = s.providers.StateStore().Delete(getObjectKey(bucket, object)); err != nil { + return err + } + + //todo 是否先进性unpin,然后remove? + if err := s.providers.FileStore().Remove(obj.Cid); err != nil { + errMsg := fmt.Sprintf("mark Objet to delete error, bucket:%s, object:%s, cid:%s, error:%v \n", bucket, object, obj.Cid, err) + return errors.New(errMsg) + } + return nil +} + +func (s *service) CleanObjectsInBucket(ctx context.Context, bucket string) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + prefixKey := fmt.Sprintf(allObjectPrefixFormat, bucket, "") + err := s.providers.StateStore().Iterate(prefixKey, func(key, _ []byte) (stop bool, er error) { + record := &Object{} + er = s.providers.StateStore().Get(string(key), record) + if er != nil { + return + } + + if err := s.DeleteObject(ctx, bucket, record.Name); err != nil { + return + } + return + }) + + return err +} + +// ListObjectsInfo - container for list objects. +type ListObjectsInfo struct { + // Indicates whether the returned list objects response is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of objects exceeds the limit allowed or specified + // by max keys. + IsTruncated bool + + // When response is truncated (the IsTruncated element value in the response is true), + // you can use the key name in this field as marker in the subsequent + // request to get next set of objects. 
+ // + // NOTE: AWS S3 returns NextMarker only if you have delimiter request parameter specified, + NextMarker string + + // List of objects info for this request. + Objects []Object + + // List of prefixes for this request. + Prefixes []string +} + +// ListObjects list user object +// TODO use more params +func (s *service) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { + if maxKeys == 0 { + return loi, nil + } + + if len(prefix) > 0 && maxKeys == 1 && delimiter == "" && marker == "" { + // Optimization for certain applications like + // - Cohesity + // - Actifio, Splunk etc. + // which send ListObjects requests where the actual object + // itself is the prefix and max-keys=1 in such scenarios + // we can simply verify locally if such an object exists + // to avoid the need for ListObjects(). + var obj Object + err = s.providers.StateStore().Get(getObjectKey(bucket, prefix), &obj) + if err == nil { + loi.Objects = append(loi.Objects, obj) + return loi, nil + } + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + seekKey := "" + if marker != "" { + seekKey = fmt.Sprintf(allObjectSeekKeyFormat, bucket, marker) + } + + prefixKey := fmt.Sprintf(allObjectPrefixFormat, bucket, prefix) + objPrefix := fmt.Sprintf(objectPrefix, bucket) + + begin := seekKey == "" + nkeys := 0 + seen := make(map[string]bool) + err = s.providers.StateStore().Iterate(prefixKey, func(key, _ []byte) (stop bool, er error) { + objKey := (string(key))[len(objPrefix):] + commonPrefix := prefix + + if delimiter != "" { + idx := strings.Index(objKey[len(prefix):], delimiter) + if idx >= 0 { + commonPrefix = objKey[:idx] + delimiter + } + } + + if !begin && (seekKey == objKey || seekKey == commonPrefix) { + begin = true + return + } + + if !begin { + return + } + + if commonPrefix == prefix { + record := &Object{} + er = s.providers.StateStore().Get(string(key), record) + if er != nil { + 
return + } + loi.Objects = append(loi.Objects, *record) + loi.NextMarker = record.Name + nkeys++ + } else if !seen[commonPrefix] { + loi.Prefixes = append(loi.Prefixes, commonPrefix) + seen[commonPrefix] = true + loi.NextMarker = commonPrefix + nkeys++ + } + + if nkeys == maxKeys { + loi.IsTruncated = true + stop = true + } + + return + }) + + return loi, nil +} + +func (s *service) EmptyBucket(ctx context.Context, bucket string) (bool, error) { + loi, err := s.ListObjects(ctx, bucket, "", "", "", 1) + if err != nil { + return false, err + } + return len(loi.Objects) == 0, nil +} + +// ListObjectsV2Info - container for list objects version 2. +type ListObjectsV2Info struct { + // Indicates whether the returned list objects response is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of objects exceeds the limit allowed or specified + // by max keys. + IsTruncated bool + + // When response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as marker in the subsequent + // request to get next set of objects. + // + // NOTE: This element is returned only if you have delimiter request parameter + // specified. + ContinuationToken string + NextContinuationToken string + + // List of objects info for this request. + Objects []Object + + // List of prefixes for this request. 
+ Prefixes []string +} + +// ListObjectsV2 list objects +func (s *service) ListObjectsV2(ctx context.Context, bucket string, prefix string, continuationToken string, delimiter string, maxKeys int, owner bool, startAfter string) (ListObjectsV2Info, error) { + marker := continuationToken + if marker == "" { + marker = startAfter + } + loi, err := s.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys) + if err != nil { + return ListObjectsV2Info{}, err + } + listV2Info := ListObjectsV2Info{ + IsTruncated: loi.IsTruncated, + ContinuationToken: continuationToken, + NextContinuationToken: loi.NextMarker, + Objects: loi.Objects, + Prefixes: loi.Prefixes, + } + return listV2Info, nil +} + +/*---------------------------------------------------*/ + +func (s *service) CreateMultipartUpload(ctx context.Context, bucname string, objname string, meta map[string]string) (mtp Multipart, err error) { + uploadId := uuid.NewString() + mtp = Multipart{ + Bucket: bucname, + Object: objname, + UploadID: uploadId, + MetaData: meta, + Initiated: time.Now().UTC(), + } + + err = s.providers.StateStore().Put(getUploadKey(bucname, objname, uploadId), mtp) + if err != nil { + return + } + + return +} + +func (s *service) UploadPart(ctx context.Context, bucname string, objname string, uploadID string, partID int, reader *hash.Reader, size int64, meta map[string]string) (part ObjectPart, err error) { + cid, err := s.providers.FileStore().Store(reader) + if err != nil { + return + } + + part = ObjectPart{ + Number: partID, + ETag: reader.ETag().String(), + Cid: cid, + Size: size, + ModTime: time.Now().UTC(), + } + + mtp, err := s.getMultipart(ctx, bucname, objname, uploadID) + if err != nil { + return + } + + mtp.Parts = append(mtp.Parts, part) + err = s.providers.StateStore().Put(getUploadKey(bucname, objname, uploadID), mtp) + if err != nil { + return part, err + } + + return +} + +func (s *service) AbortMultipartUpload(ctx context.Context, bucname string, objname string, uploadID 
string) (err error) { + mtp, err := s.getMultipart(ctx, bucname, objname, uploadID) + if err != nil { + return + } + + for _, part := range mtp.Parts { + err = s.providers.FileStore().Remove(part.Cid) + if err != nil { + return + } + } + + err = s.removeMultipart(ctx, bucname, objname, uploadID) + if err != nil { + return + } + + return +} + +func (s *service) CompleteMultiPartUpload(ctx context.Context, bucname string, objname string, uploadID string, parts []CompletePart) (obj Object, err error) { + mi, err := s.getMultipart(ctx, bucname, objname, uploadID) + if err != nil { + return + } + + var ( + readers []io.Reader + objectSize int64 + ) + + defer func() { + for _, rdr := range readers { + _ = rdr.(io.ReadCloser).Close() + } + }() + + idxMap := objectPartIndexMap(mi.Parts) + for i, part := range parts { + partIndex, ok := idxMap[part.PartNumber] + if !ok { + err = s3utils.InvalidPart{ + PartNumber: part.PartNumber, + GotETag: part.ETag, + } + return + } + + gotPart := mi.Parts[partIndex] + + part.ETag = canonicalizeETag(part.ETag) + if gotPart.ETag != part.ETag { + err = s3utils.InvalidPart{ + PartNumber: part.PartNumber, + ExpETag: gotPart.ETag, + GotETag: part.ETag, + } + return + } + + // All parts except the last part has to be at least 5MB. + if (i < len(parts)-1) && !(gotPart.Size >= consts.MinPartSize) { + err = s3utils.PartTooSmall{ + PartNumber: part.PartNumber, + PartSize: gotPart.Size, + PartETag: part.ETag, + } + return + } + + // Save for total objname size. 
+ objectSize += gotPart.Size + + var rdr io.ReadCloser + rdr, err = s.providers.FileStore().Cat(gotPart.Cid) + if err != nil { + return + } + + readers = append(readers, rdr) + } + + cid, err := s.providers.FileStore().Store(io.MultiReader(readers...)) + if err != nil { + return + } + + obj = Object{ + Bucket: bucname, + Name: objname, + ModTime: time.Now().UTC(), + Size: objectSize, + IsDir: false, + ETag: computeCompleteMultipartMD5(parts), + Cid: cid, + VersionID: "", + IsLatest: true, + DeleteMarker: false, + ContentType: mi.MetaData[strings.ToLower(consts.ContentType)], + ContentEncoding: mi.MetaData[strings.ToLower(consts.ContentEncoding)], + SuccessorModTime: time.Now().UTC(), + } + + if exp, ok := mi.MetaData[strings.ToLower(consts.Expires)]; ok { + if t, e := time.Parse(http.TimeFormat, exp); e == nil { + obj.Expires = t.UTC() + } + } + + err = s.providers.StateStore().Put(getObjectKey(bucname, objname), obj) + if err != nil { + return + } + + err = s.removeMultipartInfo(ctx, bucname, objname, uploadID) + if err != nil { + return + } + + return +} + +func (s *service) GetMultipart(ctx context.Context, bucname string, objname string, uploadID string) (mtp Multipart, err error) { + return s.getMultipart(ctx, bucname, objname, uploadID) +} + +func (s *service) getMultipart(ctx context.Context, bucname string, objname string, uploadID string) (mtp Multipart, err error) { + err = s.providers.StateStore().Get(getUploadKey(bucname, objname, uploadID), &mtp) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = ErrUploadNotFound + return + } + return +} + +func (s *service) removeMultipart(ctx context.Context, bucname string, objname string, uploadID string) (err error) { + err = s.providers.StateStore().Delete(getUploadKey(bucname, objname, uploadID)) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = ErrUploadNotFound + return + } + return +} + +func (s *service) removeMultipartInfo(ctx context.Context, bucname string, objname string, 
uploadID string) (err error) { + err = s.providers.StateStore().Delete(getUploadKey(bucname, objname, uploadID)) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = ErrUploadNotFound + return + } + return +} + +func objectPartIndexMap(parts []ObjectPart) map[int]int { + mp := make(map[int]int) + for i, part := range parts { + mp[part.Number] = i + } + return mp +} + +// canonicalizeETag returns ETag with leading and trailing double-quotes removed, +// if any present +func canonicalizeETag(etag string) string { + return etagRegex.ReplaceAllString(etag, "$1") +} + +func computeCompleteMultipartMD5(parts []CompletePart) string { + var finalMD5Bytes []byte + for _, part := range parts { + md5Bytes, err := hex.DecodeString(canonicalizeETag(part.ETag)) + if err != nil { + finalMD5Bytes = append(finalMD5Bytes, []byte(part.ETag)...) + } else { + finalMD5Bytes = append(finalMD5Bytes, md5Bytes...) + } + } + s3MD5 := fmt.Sprintf("%s-%d", etag.Multipart(finalMD5Bytes), len(parts)) + return s3MD5 +} From d0d92c9edf5f2fba19f04c872ca4aa93a79d2074 Mon Sep 17 00:00:00 2001 From: Steve Date: Fri, 1 Sep 2023 04:24:14 +0800 Subject: [PATCH 079/139] refactor: object service --- s3/services/bucket/service_option.go | 3 - s3/services/object/clean_read_closer.go | 41 ++ s3/services/object/proto.go | 24 +- s3/services/object/service.go | 32 +- s3/services/object/service_bucket.go | 9 +- s3/services/object/service_multipart.go | 272 ++++++++ s3/services/object/service_object.go | 790 ++++++++++++------------ 7 files changed, 761 insertions(+), 410 deletions(-) delete mode 100644 s3/services/bucket/service_option.go create mode 100644 s3/services/object/clean_read_closer.go create mode 100644 s3/services/object/service_multipart.go diff --git a/s3/services/bucket/service_option.go b/s3/services/bucket/service_option.go deleted file mode 100644 index e01c02fde..000000000 --- a/s3/services/bucket/service_option.go +++ /dev/null @@ -1,3 +0,0 @@ -package bucket - -type Option func(svc 
*service) diff --git a/s3/services/object/clean_read_closer.go b/s3/services/object/clean_read_closer.go new file mode 100644 index 000000000..ffb5a9fd8 --- /dev/null +++ b/s3/services/object/clean_read_closer.go @@ -0,0 +1,41 @@ +package object + +import ( + "context" + "io" + "time" +) + +func WrapCleanReadCloser(rc io.ReadCloser, timeout time.Duration, afterCloseHooks ...func()) io.ReadCloser { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + arc := &cleanReadCloser{ + rc: rc, + cancel: cancel, + } + go func() { + <-ctx.Done() + _ = rc.Close() + // call after hooks stack + for len(afterCloseHooks) > 0 { + idx := len(afterCloseHooks) - 1 + f := afterCloseHooks[idx] + f() + afterCloseHooks = afterCloseHooks[:idx] + } + }() + return arc +} + +type cleanReadCloser struct { + rc io.ReadCloser + cancel context.CancelFunc +} + +func (h *cleanReadCloser) Read(p []byte) (n int, err error) { + return h.rc.Read(p) +} + +func (h *cleanReadCloser) Close() error { + defer h.cancel() + return h.rc.Close() +} diff --git a/s3/services/object/proto.go b/s3/services/object/proto.go index c9391e78f..f62a6035a 100644 --- a/s3/services/object/proto.go +++ b/s3/services/object/proto.go @@ -34,7 +34,7 @@ type Service interface { GetObject(ctx context.Context, bucket, object string) (Object, io.ReadCloser, error) GetObjectInfo(ctx context.Context, bucket, object string) (Object, error) DeleteObject(ctx context.Context, bucket, object string) error - ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) + ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi Object, err error) ListObjectsV2(ctx context.Context, bucket string, prefix string, continuationToken string, delimiter string, maxKeys int, owner bool, startAfter string) (ListObjectsV2Info, error) // martipart @@ -90,6 +90,28 @@ type ObjectPart struct { ModTime 
time.Time `json:"mod_time"` } +// ListObjectsInfo - container for list objects. +type ObjectsList struct { + // Indicates whether the returned list objects response is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of objects exceeds the limit allowed or specified + // by max keys. + IsTruncated bool + + // When response is truncated (the IsTruncated element value in the response is true), + // you can use the key name in this field as marker in the subsequent + // request to get next set of objects. + // + // NOTE: AWS S3 returns NextMarker only if you have delimiter request parameter specified, + NextMarker string + + // List of objects info for this request. + Objects []*Object + + // List of prefixes for this request. + Prefixes []string +} + type CompletePart struct { PartNumber int ETag string diff --git a/s3/services/object/service.go b/s3/services/object/service.go index d0e9aec66..8ae0d2ae3 100644 --- a/s3/services/object/service.go +++ b/s3/services/object/service.go @@ -13,11 +13,12 @@ import ( ) const ( - defaultKeySeparator = "/" - defaultBucketSpace = "bkt" - defaultObjectSpace = "obj" - defaultUploadSpace = "upl" - defaultOperationTimeout = 5 * time.Minute + defaultKeySeparator = "/" + defaultBucketSpace = "bkt" + defaultObjectSpace = "obj" + defaultUploadSpace = "upl" + defaultOperationTimeout = 5 * time.Minute + defaultReadObjectTimeout = 1 * time.Hour bucketPrefix = "bkt/" objectKeyFormat = "obj/%s/%s" @@ -45,13 +46,14 @@ var _ Service = (*service)(nil) // service captures all bucket metadata for a given cluster. 
type service struct { - providers providers.Providerser - lock ctxmu.MultiCtxRWLocker - keySeparator string - bucketSpace string - objectSpace string - uploadSpace string - operationTimeout time.Duration + providers providers.Providerser + lock ctxmu.MultiCtxRWLocker + keySeparator string + bucketSpace string + objectSpace string + uploadSpace string + operationTimeout time.Duration + readObjectTimeout time.Duration } func NewService(providers providers.Providerser, options ...Option) Service { @@ -73,17 +75,17 @@ func NewService(providers providers.Providerser, options ...Option) Service { // common helper methods func (s *service) getBucketKeyPrefix() (prefix string) { - prefix = strings.Join([]string{s.bucketSpace}, s.keySeparator) + prefix = strings.Join([]string{s.bucketSpace, ""}, s.keySeparator) return } func (s *service) getObjectKeyPrefix(bucname string) (prefix string) { - prefix = strings.Join([]string{s.objectSpace, bucname}, s.keySeparator) + prefix = strings.Join([]string{s.objectSpace, bucname, ""}, s.keySeparator) return } func (s *service) getUploadKeyPrefix(bucname, objname string) (prefix string) { - prefix = strings.Join([]string{s.uploadSpace, bucname, objname}, s.keySeparator) + prefix = strings.Join([]string{s.uploadSpace, bucname, objname, ""}, s.keySeparator) return } diff --git a/s3/services/object/service_bucket.go b/s3/services/object/service_bucket.go index de0e24fef..caed601c5 100644 --- a/s3/services/object/service_bucket.go +++ b/s3/services/object/service_bucket.go @@ -117,8 +117,15 @@ func (s *service) DeleteBucket(ctx context.Context, user, bucname string) (err e } err = s.providers.StateStore().Delete(buckey) + if err != nil { + return + } + + // bucket objects prefix + objectsPrefix := s.getObjectKeyPrefix(bucname) - // todo: delete all objects below to this bucket + // delete all objects of the bucket + err = s.deleteObjectsByPrefix(objectsPrefix) return } diff --git a/s3/services/object/service_multipart.go 
b/s3/services/object/service_multipart.go new file mode 100644 index 000000000..455d5bced --- /dev/null +++ b/s3/services/object/service_multipart.go @@ -0,0 +1,272 @@ +package object + +import ( + "encoding/hex" + "fmt" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/etag" + "github.com/bittorrent/go-btfs/s3/s3utils" + "github.com/bittorrent/go-btfs/s3/utils/hash" + "github.com/google/uuid" + "io" + "net/http" + "strings" + "time" +) + +func (s *service) CreateMultipartUpload(ctx context.Context, bucname string, objname string, meta map[string]string) (mtp Multipart, err error) { + uploadId := uuid.NewString() + mtp = Multipart{ + Bucket: bucname, + Object: objname, + UploadID: uploadId, + MetaData: meta, + Initiated: time.Now().UTC(), + } + + err = s.providers.StateStore().Put(getUploadKey(bucname, objname, uploadId), mtp) + if err != nil { + return + } + + return +} + +func (s *service) UploadPart(ctx context.Context, bucname string, objname string, uploadID string, partID int, reader *hash.Reader, size int64, meta map[string]string) (part ObjectPart, err error) { + cid, err := s.providers.FileStore().Store(reader) + if err != nil { + return + } + + part = ObjectPart{ + Number: partID, + ETag: reader.ETag().String(), + Cid: cid, + Size: size, + ModTime: time.Now().UTC(), + } + + mtp, err := s.getMultipart(ctx, bucname, objname, uploadID) + if err != nil { + return + } + + mtp.Parts = append(mtp.Parts, part) + err = s.providers.StateStore().Put(getUploadKey(bucname, objname, uploadID), mtp) + if err != nil { + return part, err + } + + return +} + +func (s *service) AbortMultipartUpload(ctx context.Context, bucname string, objname string, uploadID string) (err error) { + mtp, err := s.getMultipart(ctx, bucname, objname, uploadID) + if err != nil { + return + } + + for _, part := range mtp.Parts { + err = s.providers.FileStore().Remove(part.Cid) + if err != nil { + return + } + } + + err = s.removeMultipart(ctx, bucname, objname, 
uploadID) + if err != nil { + return + } + + return +} + +func (s *service) CompleteMultiPartUpload(ctx context.Context, bucname string, objname string, uploadID string, parts []CompletePart) (obj Object, err error) { + mi, err := s.getMultipart(ctx, bucname, objname, uploadID) + if err != nil { + return + } + + var ( + readers []io.Reader + objectSize int64 + ) + + defer func() { + for _, rdr := range readers { + _ = rdr.(io.ReadCloser).Close() + } + }() + + idxMap := objectPartIndexMap(mi.Parts) + for i, part := range parts { + partIndex, ok := idxMap[part.PartNumber] + if !ok { + err = s3utils.InvalidPart{ + PartNumber: part.PartNumber, + GotETag: part.ETag, + } + return + } + + gotPart := mi.Parts[partIndex] + + part.ETag = canonicalizeETag(part.ETag) + if gotPart.ETag != part.ETag { + err = s3utils.InvalidPart{ + PartNumber: part.PartNumber, + ExpETag: gotPart.ETag, + GotETag: part.ETag, + } + return + } + + // All parts except the last part has to be at least 5MB. + if (i < len(parts)-1) && !(gotPart.Size >= consts.MinPartSize) { + err = s3utils.PartTooSmall{ + PartNumber: part.PartNumber, + PartSize: gotPart.Size, + PartETag: part.ETag, + } + return + } + + // Save for total objname size. 
+ objectSize += gotPart.Size + + var rdr io.ReadCloser + rdr, err = s.providers.FileStore().Cat(gotPart.Cid) + if err != nil { + return + } + + readers = append(readers, rdr) + } + + cid, err := s.providers.FileStore().Store(io.MultiReader(readers...)) + if err != nil { + return + } + + obj = Object{ + Bucket: bucname, + Name: objname, + ModTime: time.Now().UTC(), + Size: objectSize, + IsDir: false, + ETag: computeCompleteMultipartMD5(parts), + Cid: cid, + VersionID: "", + IsLatest: true, + DeleteMarker: false, + ContentType: mi.MetaData[strings.ToLower(consts.ContentType)], + ContentEncoding: mi.MetaData[strings.ToLower(consts.ContentEncoding)], + SuccessorModTime: time.Now().UTC(), + } + + if exp, ok := mi.MetaData[strings.ToLower(consts.Expires)]; ok { + if t, e := time.Parse(http.TimeFormat, exp); e == nil { + obj.Expires = t.UTC() + } + } + + err = s.providers.StateStore().Put(getObjectKey(bucname, objname), obj) + if err != nil { + return + } + + err = s.removeMultipartInfo(ctx, bucname, objname, uploadID) + if err != nil { + return + } + + return +} + +func (s *service) GetMultipart(ctx context.Context, bucname string, objname string, uploadID string) (mtp Multipart, err error) { + return s.getMultipart(ctx, bucname, objname, uploadID) +} + +func (s *service) getMultipart(ctx context.Context, bucname string, objname string, uploadID string) (mtp Multipart, err error) { + err = s.providers.StateStore().Get(getUploadKey(bucname, objname, uploadID), &mtp) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = ErrUploadNotFound + return + } + return +} + +func (s *service) removeMultipart(ctx context.Context, bucname string, objname string, uploadID string) (err error) { + err = s.providers.StateStore().Delete(getUploadKey(bucname, objname, uploadID)) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = ErrUploadNotFound + return + } + return +} + +func (s *service) removeMultipartInfo(ctx context.Context, bucname string, objname string, 
uploadID string) (err error) { + err = s.providers.StateStore().Delete(getUploadKey(bucname, objname, uploadID)) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = ErrUploadNotFound + return + } + return +} + +func objectPartIndexMap(parts []ObjectPart) map[int]int { + mp := make(map[int]int) + for i, part := range parts { + mp[part.Number] = i + } + return mp +} + +// canonicalizeETag returns ETag with leading and trailing double-quotes removed, +// if any present +func canonicalizeETag(etag string) string { + return etagRegex.ReplaceAllString(etag, "$1") +} + +func computeCompleteMultipartMD5(parts []CompletePart) string { + var finalMD5Bytes []byte + for _, part := range parts { + md5Bytes, err := hex.DecodeString(canonicalizeETag(part.ETag)) + if err != nil { + finalMD5Bytes = append(finalMD5Bytes, []byte(part.ETag)...) + } else { + finalMD5Bytes = append(finalMD5Bytes, md5Bytes...) + } + } + s3MD5 := fmt.Sprintf("%s-%d", etag.Multipart(finalMD5Bytes), len(parts)) + return s3MD5 +} + +func (s *service) getObject(objkey string) (object *Object, err error) { + err = s.providers.StateStore().Get(objkey, object) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = nil + } + return +} + +// deleteObjectsByPrefix delete all objects have common prefix +// it will continue even if one of the objects be deleted fail +func (s *service) deleteObjectsByPrefix(objectPrefix string) (err error) { + err = s.providers.StateStore().Iterate(objectPrefix, func(key, _ []byte) (stop bool, er error) { + keyStr := string(key) + var object *Object + er = s.providers.StateStore().Get(keyStr, object) + if er != nil { + return + } + er = s.providers.FileStore().Remove(object.Cid) + if er != nil { + return + } + er = s.providers.StateStore().Delete(keyStr) + return + }) + + return +} diff --git a/s3/services/object/service_object.go b/s3/services/object/service_object.go index 9ba36ec20..78f4d17d0 100644 --- a/s3/services/object/service_object.go +++ 
b/s3/services/object/service_object.go @@ -2,29 +2,86 @@ package object import ( "context" - "encoding/hex" - "fmt" + "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/etag" - "github.com/bittorrent/go-btfs/s3/s3utils" "github.com/bittorrent/go-btfs/s3/utils/hash" - "github.com/google/uuid" "io" "net/http" "strings" "time" ) -func (s *service) PutObject(ctx context.Context, bucname, objname string, reader *hash.Reader, size int64, meta map[string]string) (obj Object, err error) { +// PutObject put a user specified object +func (s *service) PutObject(ctx context.Context, user string, bucname, objname string, reader *hash.Reader, size int64, meta map[string]string) (object *Object, err error) { + // operation context + ctx, cancel := s.opctx(ctx) + defer cancel() + + // bucket key + buckey := s.getBucketKey(bucname) + + // rlock bucket + err = s.lock.RLock(ctx, buckey) + if err != nil { + return + } + defer s.lock.RUnlock(buckey) + + // get bucket + bucket, err := s.getBucket(buckey) + if err != nil { + return + } + if bucket == nil { + err = ErrBucketNotFound + return + } + + // check acl + allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.PutObjectAction) + if !allow { + err = ErrNotAllowed + return + } + + // object key + objkey := s.getObjectKey(bucname, objname) + + // lock object + err = s.lock.Lock(ctx, objkey) + if err != nil { + return + } + defer s.lock.Unlock(objkey) + + // get old object + oldObject, err := s.getObject(objkey) + if err != nil { + return + } + + // remove old file, if old object exists and put new object successfully + defer func() { + if oldObject != nil && err == nil { + _ = s.providers.FileStore().Remove(oldObject.Cid) + // todo: log this remove error + } + }() + + // store file cid, err := s.providers.FileStore().Store(reader) if err != nil { return } - obj = Object{ + // now + now := time.Now() + + // new object + object = &Object{ Bucket: bucname, Name: 
objname, - ModTime: time.Now().UTC(), + ModTime: now.UTC(), Size: size, IsDir: false, ETag: reader.ETag().String(), @@ -35,510 +92,463 @@ func (s *service) PutObject(ctx context.Context, bucname, objname string, reader Acl: meta[consts.AmzACL], ContentType: meta[strings.ToLower(consts.ContentType)], ContentEncoding: meta[strings.ToLower(consts.ContentEncoding)], - SuccessorModTime: time.Now().UTC(), + SuccessorModTime: now.UTC(), } - // Update expires - if exp, ok := meta[strings.ToLower(consts.Expires)]; ok { - if t, e := time.Parse(http.TimeFormat, exp); e == nil { - obj.Expires = t.UTC() - } + // set object expires + exp, er := time.Parse(http.TimeFormat, meta[strings.ToLower(consts.Expires)]) + if er == nil { + object.Expires = exp.UTC() } - err = s.providers.StateStore().Put(getObjectKey(bucname, objname), obj) - if err != nil { - return - } + // put object + err = s.providers.StateStore().Put(objkey, object) return } -// CopyObject store object -func (s *service) CopyObject(ctx context.Context, bucket, object string, info Object, size int64, meta map[string]string) (Object, error) { - obj := Object{ - Bucket: bucket, - Name: object, - ModTime: time.Now().UTC(), - Size: size, - IsDir: false, - ETag: info.ETag, - Cid: info.Cid, - VersionID: "", - IsLatest: true, - DeleteMarker: false, - ContentType: meta[strings.ToLower(consts.ContentType)], - ContentEncoding: meta[strings.ToLower(consts.ContentEncoding)], - SuccessorModTime: time.Now().UTC(), - } - // Update expires - if exp, ok := meta[strings.ToLower(consts.Expires)]; ok { - if t, e := time.Parse(http.TimeFormat, exp); e == nil { - obj.Expires = t.UTC() - } +// CopyObject copy from a user specified source object to a desert object +func (s *service) CopyObject(ctx context.Context, user string, srcBucname, srcObjname, dstBucname, dstObjname string, meta map[string]string) (dstObject *Object, err error) { + // operation context + ctx, cancel := s.opctx(ctx) + defer cancel() + + // source bucket key + srcBuckey 
:= s.getBucketKey(srcBucname) + + // rlock source bucket + err = s.lock.RLock(ctx, srcBuckey) + if err != nil { + return } + defer s.lock.RUnlock(srcBuckey) - err := s.providers.StateStore().Put(getObjectKey(bucket, object), obj) + // get source bucket + srcBucket, err := s.getBucket(srcBuckey) if err != nil { - return Object{}, err + return + } + if srcBucket == nil { + err = ErrBucketNotFound + return } - return obj, nil -} -// GetObject Get object -func (s *service) GetObject(ctx context.Context, bucket, object string) (Object, io.ReadCloser, error) { - var obj Object - err := s.providers.StateStore().Get(getObjectKey(bucket, object), &obj) - if errors.Is(err, providers.ErrStateStoreNotFound) { - err = ErrObjectNotFound - return Object{}, nil, err + // check source acl + srcAllow := s.checkAcl(srcBucket.Owner, srcBucket.Acl, user, action.GetObjectAction) + if !srcAllow { + err = ErrNotAllowed + return } - reader, err := s.providers.FileStore().Cat(obj.Cid) + // source object key + srcObjkey := s.getObjectKey(srcBucname, srcObjname) + + // rlock source object + err = s.lock.RLock(ctx, srcObjkey) if err != nil { - return Object{}, nil, err + return } + defer s.lock.RUnlock(srcObjkey) - return obj, reader, nil -} - -// GetObjectInfo Get object info -func (s *service) GetObjectInfo(ctx context.Context, bucket, object string) (Object, error) { - var obj Object - err := s.providers.StateStore().Get(getObjectKey(bucket, object), &obj) - if errors.Is(err, providers.ErrStateStoreNotFound) { + // get source object + srcObject, err := s.getObject(srcObjkey) + if err != nil { + return + } + if srcObject == nil { err = ErrObjectNotFound - return Object{}, err + return } - return obj, nil -} + // desert bucket key + dstBuckey := s.getBucketKey(dstBucname) -// DeleteObject delete object -func (s *service) DeleteObject(ctx context.Context, bucket, object string) error { - var obj Object - err := s.providers.StateStore().Get(getObjectKey(bucket, object), &obj) - if 
errors.Is(err, providers.ErrStateStoreNotFound) { - err = ErrObjectNotFound - return err + // rlock desert bucket + err = s.lock.RLock(ctx, dstBuckey) + if err != nil { + return } + defer s.lock.RUnlock(dstBuckey) - if err = s.providers.StateStore().Delete(getObjectKey(bucket, object)); err != nil { - return err + // get desert bucket + dstBucket, err := s.getBucket(dstBuckey) + if err != nil { + return } - - //todo 是否先进性unpin,然后remove? - if err := s.providers.FileStore().Remove(obj.Cid); err != nil { - errMsg := fmt.Sprintf("mark Objet to delete error, bucket:%s, object:%s, cid:%s, error:%v \n", bucket, object, obj.Cid, err) - return errors.New(errMsg) + if dstBucket == nil { + err = ErrBucketNotFound + return } - return nil -} - -func (s *service) CleanObjectsInBucket(ctx context.Context, bucket string) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - prefixKey := fmt.Sprintf(allObjectPrefixFormat, bucket, "") - err := s.providers.StateStore().Iterate(prefixKey, func(key, _ []byte) (stop bool, er error) { - record := &Object{} - er = s.providers.StateStore().Get(string(key), record) - if er != nil { - return - } - if err := s.DeleteObject(ctx, bucket, record.Name); err != nil { - return - } + // check desert acl + dstAllow := s.checkAcl(dstBucket.Owner, dstBucket.Acl, user, action.PutObjectAction) + if !dstAllow { + err = ErrNotAllowed return - }) - - return err -} + } -// ListObjectsInfo - container for list objects. -type ListObjectsInfo struct { - // Indicates whether the returned list objects response is truncated. A - // value of true indicates that the list was truncated. The list can be truncated - // if the number of objects exceeds the limit allowed or specified - // by max keys. - IsTruncated bool - - // When response is truncated (the IsTruncated element value in the response is true), - // you can use the key name in this field as marker in the subsequent - // request to get next set of objects. 
- // - // NOTE: AWS S3 returns NextMarker only if you have delimiter request parameter specified, - NextMarker string - - // List of objects info for this request. - Objects []Object - - // List of prefixes for this request. - Prefixes []string -} + // desert object key + dstObjkey := s.getObjectKey(dstBucname, dstObjname) -// ListObjects list user object -// TODO use more params -func (s *service) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) { - if maxKeys == 0 { - return loi, nil - } - - if len(prefix) > 0 && maxKeys == 1 && delimiter == "" && marker == "" { - // Optimization for certain applications like - // - Cohesity - // - Actifio, Splunk etc. - // which send ListObjects requests where the actual object - // itself is the prefix and max-keys=1 in such scenarios - // we can simply verify locally if such an object exists - // to avoid the need for ListObjects(). - var obj Object - err = s.providers.StateStore().Get(getObjectKey(bucket, prefix), &obj) - if err == nil { - loi.Objects = append(loi.Objects, obj) - return loi, nil - } + // lock desert object + err = s.lock.Lock(ctx, dstObjkey) + if err != nil { + return } + defer s.lock.Unlock(dstObjkey) - ctx, cancel := context.WithCancel(ctx) - defer cancel() + // now + now := time.Now() - seekKey := "" - if marker != "" { - seekKey = fmt.Sprintf(allObjectSeekKeyFormat, bucket, marker) + // desert object + dstObject = &Object{ + Bucket: dstBucname, + Name: dstObjname, + ModTime: now.UTC(), + Size: srcObject.Size, + IsDir: false, + ETag: srcObject.ETag, + Cid: srcObject.Cid, + VersionID: "", + IsLatest: true, + DeleteMarker: false, + ContentType: meta[strings.ToLower(consts.ContentType)], + ContentEncoding: meta[strings.ToLower(consts.ContentEncoding)], + SuccessorModTime: now.UTC(), } - prefixKey := fmt.Sprintf(allObjectPrefixFormat, bucket, prefix) - objPrefix := fmt.Sprintf(objectPrefix, bucket) + // set object 
desert expires + exp, er := time.Parse(http.TimeFormat, strings.ToLower(consts.Expires)) + if er != nil { + dstObject.Expires = exp.UTC() + } - begin := seekKey == "" - nkeys := 0 - seen := make(map[string]bool) - err = s.providers.StateStore().Iterate(prefixKey, func(key, _ []byte) (stop bool, er error) { - objKey := (string(key))[len(objPrefix):] - commonPrefix := prefix + // put desert object + err = s.providers.StateStore().Put(dstObjkey, dstObject) - if delimiter != "" { - idx := strings.Index(objKey[len(prefix):], delimiter) - if idx >= 0 { - commonPrefix = objKey[:idx] + delimiter - } - } + return +} - if !begin && (seekKey == objKey || seekKey == commonPrefix) { - begin = true - return - } +// GetObject get an object for the specified user +func (s *service) GetObject(ctx context.Context, user, bucname, objname string) (object *Object, body io.ReadCloser, err error) { + // operation context + ctx, cancel := s.opctx(ctx) + defer cancel() - if !begin { - return - } + // bucket key + buckey := s.getBucketKey(bucname) - if commonPrefix == prefix { - record := &Object{} - er = s.providers.StateStore().Get(string(key), record) - if er != nil { - return - } - loi.Objects = append(loi.Objects, *record) - loi.NextMarker = record.Name - nkeys++ - } else if !seen[commonPrefix] { - loi.Prefixes = append(loi.Prefixes, commonPrefix) - seen[commonPrefix] = true - loi.NextMarker = commonPrefix - nkeys++ + // rlock bucket + err = s.lock.RLock(ctx, buckey) + if err != nil { + return + } + defer func() { + // rUnlock bucket just if getting failed + if err != nil { + s.lock.RUnlock(buckey) } + }() - if nkeys == maxKeys { - loi.IsTruncated = true - stop = true - } + // get bucket + bucket, err := s.getBucket(buckey) + if err != nil { + return + } + if bucket == nil { + err = ErrBucketNotFound + return + } + // check acl + allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.GetObjectAction) + if !allow { + err = ErrNotAllowed return - }) + } - return loi, nil -} + // 
object key + objkey := s.getObjectKey(bucname, objname) -func (s *service) EmptyBucket(ctx context.Context, bucket string) (bool, error) { - loi, err := s.ListObjects(ctx, bucket, "", "", "", 1) + // rlock object + err = s.lock.RLock(ctx, objkey) if err != nil { - return false, err + return } - return len(loi.Objects) == 0, nil -} - -// ListObjectsV2Info - container for list objects version 2. -type ListObjectsV2Info struct { - // Indicates whether the returned list objects response is truncated. A - // value of true indicates that the list was truncated. The list can be truncated - // if the number of objects exceeds the limit allowed or specified - // by max keys. - IsTruncated bool - - // When response is truncated (the IsTruncated element value in the response - // is true), you can use the key name in this field as marker in the subsequent - // request to get next set of objects. - // - // NOTE: This element is returned only if you have delimiter request parameter - // specified. - ContinuationToken string - NextContinuationToken string - - // List of objects info for this request. - Objects []Object - - // List of prefixes for this request. 
- Prefixes []string -} + defer func() { + // rUnlock object just if getting failed + if err != nil { + s.lock.RUnlock(objkey) + } + }() -// ListObjectsV2 list objects -func (s *service) ListObjectsV2(ctx context.Context, bucket string, prefix string, continuationToken string, delimiter string, maxKeys int, owner bool, startAfter string) (ListObjectsV2Info, error) { - marker := continuationToken - if marker == "" { - marker = startAfter - } - loi, err := s.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys) + // get object + object, err = s.getObject(objkey) if err != nil { - return ListObjectsV2Info{}, err - } - listV2Info := ListObjectsV2Info{ - IsTruncated: loi.IsTruncated, - ContinuationToken: continuationToken, - NextContinuationToken: loi.NextMarker, - Objects: loi.Objects, - Prefixes: loi.Prefixes, + return } - return listV2Info, nil -} - -/*---------------------------------------------------*/ - -func (s *service) CreateMultipartUpload(ctx context.Context, bucname string, objname string, meta map[string]string) (mtp Multipart, err error) { - uploadId := uuid.NewString() - mtp = Multipart{ - Bucket: bucname, - Object: objname, - UploadID: uploadId, - MetaData: meta, - Initiated: time.Now().UTC(), + if object == nil { + err = ErrObjectNotFound + return } - err = s.providers.StateStore().Put(getUploadKey(bucname, objname, uploadId), mtp) + // get object body + body, err = s.providers.FileStore().Cat(object.Cid) if err != nil { return } + // wrap the body with timeout and unlock hooks + // this will enable the bucket and object keep rlocked until + // read timout or read closed. 
Normally, these locks will + // be released as soon as leave from the call + body = WrapCleanReadCloser( + body, + s.readObjectTimeout, + func() { + s.lock.RUnlock(objkey) // note: release object first + s.lock.RUnlock(buckey) + }, + ) + return } -func (s *service) UploadPart(ctx context.Context, bucname string, objname string, uploadID string, partID int, reader *hash.Reader, size int64, meta map[string]string) (part ObjectPart, err error) { - cid, err := s.providers.FileStore().Store(reader) +// DeleteObject delete a user specified object +func (s *service) DeleteObject(ctx context.Context, user, bucname, objname string) (err error) { + // operation context + ctx, cancel := s.opctx(ctx) + defer cancel() + + // bucket key + buckey := s.getBucketKey(bucname) + + // rlock bucket + err = s.lock.RLock(ctx, buckey) if err != nil { return } + defer s.lock.RUnlock(buckey) - part = ObjectPart{ - Number: partID, - ETag: reader.ETag().String(), - Cid: cid, - Size: size, - ModTime: time.Now().UTC(), - } - - mtp, err := s.getMultipart(ctx, bucname, objname, uploadID) + // get bucket + bucket, err := s.getBucket(buckey) if err != nil { return } + if bucket == nil { + err = ErrBucketNotFound + return + } - mtp.Parts = append(mtp.Parts, part) - err = s.providers.StateStore().Put(getUploadKey(bucname, objname, uploadID), mtp) - if err != nil { - return part, err + // check acl + allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.DeleteObjectAction) + if !allow { + err = ErrNotAllowed + return } - return -} + // object key + objkey := s.getObjectKey(bucname, objname) -func (s *service) AbortMultipartUpload(ctx context.Context, bucname string, objname string, uploadID string) (err error) { - mtp, err := s.getMultipart(ctx, bucname, objname, uploadID) + // lock object + err = s.lock.Lock(ctx, objkey) if err != nil { return } + defer s.lock.Unlock(objkey) - for _, part := range mtp.Parts { - err = s.providers.FileStore().Remove(part.Cid) - if err != nil { - return - } + // 
get object + object, err := s.getObject(objkey) + if err != nil { + return + } + if object == nil { + err = ErrObjectNotFound + return } - err = s.removeMultipart(ctx, bucname, objname, uploadID) + // delete object body + err = s.providers.FileStore().Remove(object.Cid) if err != nil { return } + // delete object + err = s.providers.StateStore().Delete(objkey) + return } -func (s *service) CompleteMultiPartUpload(ctx context.Context, bucname string, objname string, uploadID string, parts []CompletePart) (obj Object, err error) { - mi, err := s.getMultipart(ctx, bucname, objname, uploadID) +// ListObjects list user specified objects +func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimiter, marker string, max int) (list *ObjectsList, err error) { + // operation context + ctx, cancel := s.opctx(ctx) + defer cancel() + + // bucket key + buckey := s.getBucketKey(bucname) + + // rlock bucket + err = s.lock.RLock(ctx, buckey) if err != nil { return } + defer s.lock.RUnlock(buckey) - var ( - readers []io.Reader - objectSize int64 - ) + // get bucket + bucket, err := s.getBucket(buckey) + if err != nil { + return + } + if bucket == nil { + err = ErrBucketNotFound + return + } - defer func() { - for _, rdr := range readers { - _ = rdr.(io.ReadCloser).Close() - } - }() + // check acl + allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.ListObjectsAction) + if !allow { + err = ErrNotAllowed + return + } + + // object key prefix + objkeyPrefix := s.getObjectKeyPrefix(bucname) + + // objects key prefix + objskeyPrefix := objkeyPrefix + prefix + + // accumulate count + count := 0 + + // begin collect + begin := marker == "" + + // seen keys + seen := make(map[string]bool) - idxMap := objectPartIndexMap(mi.Parts) - for i, part := range parts { - partIndex, ok := idxMap[part.PartNumber] - if !ok { - err = s3utils.InvalidPart{ - PartNumber: part.PartNumber, - GotETag: part.ETag, + // iterate all objects with the specified prefix to collect and 
group specified range items + err = s.providers.StateStore().Iterate(objskeyPrefix, func(key, _ []byte) (stop bool, er error) { + // object key + objkey := string(key) + + // object name + objname := objkey[len(objkeyPrefix):] + + // common prefix: if the part of object name without prefix include delimiter + // it is the string truncated object name after the delimiter, else + // it is the bucket name itself + commonPrefix := objname + if delimiter != "" { + dl := len(delimiter) + pl := len(prefix) + di := strings.Index(objname[pl:], delimiter) + if di >= 0 { + commonPrefix = objname[:(pl + di + dl)] } + } + + // if collect not begin, check the marker, if it is matched + // with the common prefix, then begin collection from next iterate turn + // and mark this common prefix as seen + // note: common prefix also can be object name, so when marker is + // an object name, the check will be also done correctly + if !begin && marker == commonPrefix { + begin = true + seen[commonPrefix] = true return } - gotPart := mi.Parts[partIndex] + // no begin, jump the item + if !begin { + return + } - part.ETag = canonicalizeETag(part.ETag) - if gotPart.ETag != part.ETag { - err = s3utils.InvalidPart{ - PartNumber: part.PartNumber, - ExpETag: gotPart.ETag, - GotETag: part.ETag, - } + // objects with same common prefix will be grouped into one + // note: the objects without common prefix will present only once, so + // it is not necessary to add these objects names in the seen map + if seen[commonPrefix] { return } - // All parts except the last part has to be at least 5MB. 
- if (i < len(parts)-1) && !(gotPart.Size >= consts.MinPartSize) { - err = s3utils.PartTooSmall{ - PartNumber: part.PartNumber, - PartSize: gotPart.Size, - PartETag: part.ETag, + // objects with common prefix grouped int one + if commonPrefix != objname { + list.Prefixes = append(list.Prefixes, commonPrefix) + list.NextMarker = commonPrefix + seen[commonPrefix] = true + } else { + // object without common prefix + var object *Object + er = s.providers.StateStore().Get(objkey, object) + if er != nil { + return } - return + list.Objects = append(list.Objects, object) + list.NextMarker = objname } - // Save for total objname size. - objectSize += gotPart.Size + // increment collection count + count++ - var rdr io.ReadCloser - rdr, err = s.providers.FileStore().Cat(gotPart.Cid) - if err != nil { - return + // check the count, if it matched the max, means + // the collect is complete, but the items may remain, so stop the + // iteration, and mark the list was truncated + if count == max { + list.IsTruncated = true + stop = true } - readers = append(readers, rdr) - } - - cid, err := s.providers.FileStore().Store(io.MultiReader(readers...)) - if err != nil { return - } + }) - obj = Object{ - Bucket: bucname, - Name: objname, - ModTime: time.Now().UTC(), - Size: objectSize, - IsDir: false, - ETag: computeCompleteMultipartMD5(parts), - Cid: cid, - VersionID: "", - IsLatest: true, - DeleteMarker: false, - ContentType: mi.MetaData[strings.ToLower(consts.ContentType)], - ContentEncoding: mi.MetaData[strings.ToLower(consts.ContentEncoding)], - SuccessorModTime: time.Now().UTC(), - } + return +} - if exp, ok := mi.MetaData[strings.ToLower(consts.Expires)]; ok { - if t, e := time.Parse(http.TimeFormat, exp); e == nil { - obj.Expires = t.UTC() - } - } +// EmptyBucket check if the user specified bucked is empty +func (s *service) EmptyBucket(ctx context.Context, user, bucname string) (empty bool, err error) { + ctx, cancel := s.opctx(ctx) + defer cancel() - err = 
s.providers.StateStore().Put(getObjectKey(bucname, objname), obj) + // bucket key + buckey := s.getBucketKey(bucname) + + // rlock bucket + err = s.lock.RLock(ctx, buckey) if err != nil { return } + defer s.lock.RUnlock(buckey) - err = s.removeMultipartInfo(ctx, bucname, objname, uploadID) + // get bucket + bucket, err := s.getBucket(buckey) if err != nil { return } - - return -} - -func (s *service) GetMultipart(ctx context.Context, bucname string, objname string, uploadID string) (mtp Multipart, err error) { - return s.getMultipart(ctx, bucname, objname, uploadID) -} - -func (s *service) getMultipart(ctx context.Context, bucname string, objname string, uploadID string) (mtp Multipart, err error) { - err = s.providers.StateStore().Get(getUploadKey(bucname, objname, uploadID), &mtp) - if errors.Is(err, providers.ErrStateStoreNotFound) { - err = ErrUploadNotFound + if bucket == nil { + err = ErrBucketNotFound return } - return -} -func (s *service) removeMultipart(ctx context.Context, bucname string, objname string, uploadID string) (err error) { - err = s.providers.StateStore().Delete(getUploadKey(bucname, objname, uploadID)) - if errors.Is(err, providers.ErrStateStoreNotFound) { - err = ErrUploadNotFound + // check acl + allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.HeadBucketAction) + if !allow { + err = ErrNotAllowed return } - return -} -func (s *service) removeMultipartInfo(ctx context.Context, bucname string, objname string, uploadID string) (err error) { - err = s.providers.StateStore().Delete(getUploadKey(bucname, objname, uploadID)) - if errors.Is(err, providers.ErrStateStoreNotFound) { - err = ErrUploadNotFound - return - } - return -} + // object key prefix + objkeyPrefix := s.getObjectKeyPrefix(bucname) -func objectPartIndexMap(parts []ObjectPart) map[int]int { - mp := make(map[int]int) - for i, part := range parts { - mp[part.Number] = i - } - return mp -} + // initially set empty to true + empty = true -// canonicalizeETag returns ETag 
with leading and trailing double-quotes removed, -// if any present -func canonicalizeETag(etag string) string { - return etagRegex.ReplaceAllString(etag, "$1") -} + // iterate the bucket objects, if no item, empty keep true + // if at least one, set empty to false, and stop iterate + err = s.providers.StateStore().Iterate(objkeyPrefix, func(_, _ []byte) (stop bool, er error) { + empty = false + stop = true + return + }) -func computeCompleteMultipartMD5(parts []CompletePart) string { - var finalMD5Bytes []byte - for _, part := range parts { - md5Bytes, err := hex.DecodeString(canonicalizeETag(part.ETag)) - if err != nil { - finalMD5Bytes = append(finalMD5Bytes, []byte(part.ETag)...) - } else { - finalMD5Bytes = append(finalMD5Bytes, md5Bytes...) - } - } - s3MD5 := fmt.Sprintf("%s-%d", etag.Multipart(finalMD5Bytes), len(parts)) - return s3MD5 + return } From 714aa30e6a2eb5be7e6d94a2f5879e8e6b9911ae Mon Sep 17 00:00:00 2001 From: Steve Date: Sat, 2 Sep 2023 03:21:13 +0800 Subject: [PATCH 080/139] refractor: refract object service --- s3/services/object/clean_read_closer.go | 2 +- s3/services/object/options.go | 58 ++++ s3/services/object/proto.go | 47 ++- s3/services/object/service.go | 80 ++--- s3/services/object/service_bucket.go | 206 ++++++++--- s3/services/object/service_multipart.go | 433 ++++++++++++++++++------ s3/services/object/service_object.go | 254 +++++++------- 7 files changed, 718 insertions(+), 362 deletions(-) diff --git a/s3/services/object/clean_read_closer.go b/s3/services/object/clean_read_closer.go index ffb5a9fd8..fd767da8d 100644 --- a/s3/services/object/clean_read_closer.go +++ b/s3/services/object/clean_read_closer.go @@ -15,7 +15,7 @@ func WrapCleanReadCloser(rc io.ReadCloser, timeout time.Duration, afterCloseHook go func() { <-ctx.Done() _ = rc.Close() - // call after hooks stack + // call after hooks by stack order for len(afterCloseHooks) > 0 { idx := len(afterCloseHooks) - 1 f := afterCloseHooks[idx] diff --git 
a/s3/services/object/options.go b/s3/services/object/options.go index 9109d3188..05c4dd9a3 100644 --- a/s3/services/object/options.go +++ b/s3/services/object/options.go @@ -1,3 +1,61 @@ package object +import ( + "github.com/bittorrent/go-btfs/s3/ctxmu" + "time" +) + +const ( + defaultKeySeparator = "/" + defaultBucketSpace = "bkt" + defaultObjectSpace = "obj" + defaultUploadSpace = "upl" + defaultOperationTimeout = 5 * time.Minute + defaultCloseBodyTimeout = 10 * time.Minute +) + +var defaultLock = ctxmu.NewDefaultMultiCtxRWMutex() + type Option func(svc *service) + +func WithKeySeparator(separator string) Option { + return func(svc *service) { + svc.keySeparator = separator + } +} + +func WithBucketSpace(space string) Option { + return func(svc *service) { + svc.bucketSpace = space + } +} + +func WithObjectSpace(space string) Option { + return func(svc *service) { + svc.objectSpace = space + } +} + +func WithUploadSpace(space string) Option { + return func(svc *service) { + svc.uploadSpace = space + } +} + +func WithOperationTimeout(timeout time.Duration) Option { + return func(svc *service) { + svc.operationTimeout = timeout + } +} + +func WithCloseBodyTimeout(timeout time.Duration) Option { + return func(svc *service) { + svc.closeBodyTimeout = timeout + } +} + +func WithLock(lock ctxmu.MultiCtxRWLocker) Option { + return func(svc *service) { + svc.lock = lock + } +} diff --git a/s3/services/object/proto.go b/s3/services/object/proto.go index f62a6035a..eb4a03263 100644 --- a/s3/services/object/proto.go +++ b/s3/services/object/proto.go @@ -17,32 +17,25 @@ var ( ) type Service interface { - // bucket - CreateBucket(ctx context.Context, bucket, region, accessKey, acl string) error - GetBucketMeta(ctx context.Context, bucket string) (meta Bucket, err error) - HasBucket(ctx context.Context, bucket string) bool - SetEmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) - DeleteBucket(ctx context.Context, bucket string) error - 
GetAllBucketsOfUser(username string) (list []*Bucket, err error) - UpdateBucketAcl(ctx context.Context, bucket, acl string) error - GetBucketAcl(ctx context.Context, bucket string) (string, error) - EmptyBucket(emptyBucket func(ctx context.Context, bucket string) (bool, error)) - - // object - PutObject(ctx context.Context, bucname, objname string, reader *hash.Reader, size int64, meta map[string]string) (obj Object, err error) - CopyObject(ctx context.Context, bucket, object string, info Object, size int64, meta map[string]string) (Object, error) - GetObject(ctx context.Context, bucket, object string) (Object, io.ReadCloser, error) - GetObjectInfo(ctx context.Context, bucket, object string) (Object, error) - DeleteObject(ctx context.Context, bucket, object string) error - ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi Object, err error) - ListObjectsV2(ctx context.Context, bucket string, prefix string, continuationToken string, delimiter string, maxKeys int, owner bool, startAfter string) (ListObjectsV2Info, error) - - // martipart - CreateMultipartUpload(ctx context.Context, bucname string, objname string, meta map[string]string) (mtp Multipart, err error) - AbortMultipartUpload(ctx context.Context, bucname string, objname string, uploadID string) (err error) - UploadPart(ctx context.Context, bucname string, objname string, uploadID string, partID int, reader *hash.Reader, size int64, meta map[string]string) (part ObjectPart, err error) - CompleteMultiPartUpload(ctx context.Context, bucname string, objname string, uploadID string, parts []CompletePart) (obj Object, err error) - GetMultipart(ctx context.Context, bucname string, objname string, uploadID string) (mtp Multipart, err error) + CreateBucket(ctx context.Context, user, bucname, region, acl string) (bucket *Bucket, err error) + GetBucket(ctx context.Context, user, bucname string) (bucket *Bucket, err error) + DeleteBucket(ctx 
context.Context, user, bucname string) (err error) + GetAllBuckets(ctx context.Context, user string) (list []*Bucket, err error) + PutBucketAcl(ctx context.Context, user, bucname, acl string) (err error) + GetBucketAcl(ctx context.Context, user, bucname string) (acl string, err error) + EmptyBucket(ctx context.Context, user, bucname string) (empty bool, err error) + + PutObject(ctx context.Context, user string, bucname, objname string, body *hash.Reader, size int64, meta map[string]string) (object *Object, err error) + CopyObject(ctx context.Context, user string, srcBucname, srcObjname, dstBucname, dstObjname string, meta map[string]string) (dstObject *Object, err error) + GetObject(ctx context.Context, user, bucname, objname string) (object *Object, body io.ReadCloser, err error) + DeleteObject(ctx context.Context, user, bucname, objname string) (err error) + // todo: DeleteObjects + ListObjects(ctx context.Context, user, bucname, prefix, delimiter, marker string, max int) (list *ObjectsList, err error) + + CreateMultipartUpload(ctx context.Context, user, bucname, objname string, meta map[string]string) (multipart *Multipart, err error) + UploadPart(ctx context.Context, user, bucname, objname, uplid string, partId int, reader *hash.Reader, size int64, meta map[string]string) (part *ObjectPart, err error) + AbortMultipartUpload(ctx context.Context, user, bucname, objname, uplid string) (err error) + CompleteMultiPartUpload(ctx context.Context, user string, bucname, objname, uplid string, parts []*CompletePart) (object *Object, err error) } // Bucket contains bucket metadata. 
@@ -79,7 +72,7 @@ type Multipart struct { UploadID string Initiated time.Time MetaData map[string]string - Parts []ObjectPart + Parts []*ObjectPart } type ObjectPart struct { diff --git a/s3/services/object/service.go b/s3/services/object/service.go index 8ae0d2ae3..0b45d90b4 100644 --- a/s3/services/object/service.go +++ b/s3/services/object/service.go @@ -5,66 +5,36 @@ import ( "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/ctxmu" "github.com/bittorrent/go-btfs/s3/policy" - "regexp" "strings" "time" "github.com/bittorrent/go-btfs/s3/providers" ) -const ( - defaultKeySeparator = "/" - defaultBucketSpace = "bkt" - defaultObjectSpace = "obj" - defaultUploadSpace = "upl" - defaultOperationTimeout = 5 * time.Minute - defaultReadObjectTimeout = 1 * time.Hour - - bucketPrefix = "bkt/" - objectKeyFormat = "obj/%s/%s" - objectPrefix = "obj/%s/" - allObjectPrefixFormat = "obj/%s/%s" - allObjectSeekKeyFormat = "obj/%s/%s" - - uploadKeyFormat = "uploadObj/%s/%s/%s" - allUploadPrefixFormat = "uploadObj/%s/%s" - allUploadSeekKeyFormat = "uploadObj/%s/%s/%s" - - deleteKeyFormat = "delObj/%s" - allDeletePrefixFormat = "delObj/" - - globalOperationTimeout = 5 * time.Minute - deleteOperationTimeout = 1 * time.Minute - - maxCpuPercent = 60 - maxUsedMemoryPercent = 80 -) - -var etagRegex = regexp.MustCompile("\"*?([^\"]*?)\"*?$") - var _ Service = (*service)(nil) // service captures all bucket metadata for a given cluster. 
type service struct { - providers providers.Providerser - lock ctxmu.MultiCtxRWLocker - keySeparator string - bucketSpace string - objectSpace string - uploadSpace string - operationTimeout time.Duration - readObjectTimeout time.Duration + providers providers.Providerser + lock ctxmu.MultiCtxRWLocker + keySeparator string + bucketSpace string + objectSpace string + uploadSpace string + operationTimeout time.Duration + closeBodyTimeout time.Duration } func NewService(providers providers.Providerser, options ...Option) Service { s := &service{ providers: providers, - lock: ctxmu.NewDefaultMultiCtxRWMutex(), + lock: defaultLock, keySeparator: defaultKeySeparator, bucketSpace: defaultBucketSpace, objectSpace: defaultObjectSpace, uploadSpace: defaultUploadSpace, operationTimeout: defaultOperationTimeout, + closeBodyTimeout: defaultCloseBodyTimeout, } for _, option := range options { option(s) @@ -74,43 +44,43 @@ func NewService(providers providers.Providerser, options ...Option) Service { // common helper methods -func (s *service) getBucketKeyPrefix() (prefix string) { +func (s *service) getAllBucketsKeyPrefix() (prefix string) { prefix = strings.Join([]string{s.bucketSpace, ""}, s.keySeparator) return } -func (s *service) getObjectKeyPrefix(bucname string) (prefix string) { - prefix = strings.Join([]string{s.objectSpace, bucname, ""}, s.keySeparator) +func (s *service) getBucketKey(bucname string) (key string) { + key = s.getAllBucketsKeyPrefix() + bucname return } -func (s *service) getUploadKeyPrefix(bucname, objname string) (prefix string) { - prefix = strings.Join([]string{s.uploadSpace, bucname, objname, ""}, s.keySeparator) +func (s *service) getAllObjectsKeyPrefix(bucname string) (prefix string) { + prefix = strings.Join([]string{s.objectSpace, bucname, ""}, s.keySeparator) return } -func (s *service) getBucketKey(bucname string) (key string) { - key = s.getBucketKeyPrefix() + bucname +func (s *service) getObjectKey(bucname, objname string) (key string) { + key 
= s.getAllObjectsKeyPrefix(bucname) + objname return } -func (s *service) getObjectKey(bucname, objname string) (key string) { - key = s.getObjectKeyPrefix(bucname) + objname +func (s *service) getAllUploadsKeyPrefix(bucname string) (prefix string) { + prefix = strings.Join([]string{s.uploadSpace, bucname, ""}, s.keySeparator) return } func (s *service) getUploadKey(bucname, objname, uploadid string) (key string) { - key = s.getUploadKeyPrefix(bucname, objname) + uploadid + key = strings.Join([]string{s.getAllUploadsKeyPrefix(bucname), objname, uploadid}, s.keySeparator) return } -func (s *service) checkAcl(owner, acl, user string, act action.Action) (allow bool) { - own := user != "" && user == owner - allow = policy.IsAllowed(own, acl, act) +func (s *service) opctx(parent context.Context) (ctx context.Context, cancel context.CancelFunc) { + ctx, cancel = context.WithTimeout(parent, s.operationTimeout) return } -func (s *service) opctx(parent context.Context) (ctx context.Context, cancel context.CancelFunc) { - ctx, cancel = context.WithTimeout(parent, s.operationTimeout) +func (s *service) checkAcl(owner, acl, user string, act action.Action) (allow bool) { + own := user != "" && user == owner + allow = policy.IsAllowed(own, acl, act) return } diff --git a/s3/services/object/service_bucket.go b/s3/services/object/service_bucket.go index caed601c5..13625aa4b 100644 --- a/s3/services/object/service_bucket.go +++ b/s3/services/object/service_bucket.go @@ -11,73 +11,80 @@ import ( ) // CreateBucket create a new bucket for the specified user -func (s *service) CreateBucket(ctx context.Context, user, bucname, region, acl string) (err error) { - buckey := s.getBucketKey(bucname) - +func (s *service) CreateBucket(ctx context.Context, user, bucname, region, acl string) (bucket *Bucket, err error) { + // Operation context ctx, cancel := s.opctx(ctx) defer cancel() + // Bucket key + buckey := s.getBucketKey(bucname) + + // Lock bucket err = s.lock.Lock(ctx, buckey) if err != 
nil { return } - defer s.lock.Unlock(buckey) - allow := s.checkAcl(user, acl, user, action.CreateBucketAction) - if !allow { - err = ErrNotAllowed + // Get old bucket + bucketOld, err := s.getBucket(buckey) + if err == nil { return } - - bucket, err := s.getBucket(buckey) - if err == nil { + if bucketOld != nil { + err = ErrBucketAlreadyExists return } - if bucket != nil { - err = ErrBucketAlreadyExists + // Check action acl + allow := s.checkAcl(user, acl, user, action.CreateBucketAction) + if !allow { + err = ErrNotAllowed return } - err = s.providers.StateStore().Put( - buckey, - &Bucket{ - Name: bucname, - Region: region, - Owner: user, - Acl: acl, - Created: time.Now().UTC(), - }, - ) + // Bucket + bucket = &Bucket{ + Name: bucname, + Region: region, + Owner: user, + Acl: acl, + Created: time.Now().UTC(), + } + + // Put bucket + err = s.providers.StateStore().Put(buckey, bucket) return } -// GetBucket get a bucket for the specified user +// GetBucket get a user specified bucket func (s *service) GetBucket(ctx context.Context, user, bucname string) (bucket *Bucket, err error) { - buckey := s.getBucketKey(bucname) - + // Operation context ctx, cancel := s.opctx(ctx) defer cancel() + // Bucket key + buckey := s.getBucketKey(bucname) + + // RLock bucket err = s.lock.RLock(ctx, buckey) if err != nil { return } - defer s.lock.RUnlock(buckey) + // Get bucket bucket, err = s.getBucket(buckey) if err != nil { return } - if bucket == nil { err = ErrBucketNotFound return } + // Check action acl allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.HeadBucketAction) if !allow { err = ErrNotAllowed @@ -86,82 +93,99 @@ func (s *service) GetBucket(ctx context.Context, user, bucname string) (bucket * return } -// DeleteBucket delete the specified user bucket and all the bucket's objects +// DeleteBucket delete a user specified bucket and clear all bucket objects and uploads func (s *service) DeleteBucket(ctx context.Context, user, bucname string) (err error) { - buckey 
:= s.getBucketKey(bucname) - + // Operation context ctx, cancel := s.opctx(ctx) defer cancel() + // Bucket key + buckey := s.getBucketKey(bucname) + + // Lock bucket err = s.lock.Lock(ctx, buckey) if err != nil { return } - defer s.lock.Unlock(buckey) + // Get bucket bucket, err := s.getBucket(buckey) if err != nil { return } - if bucket == nil { err = ErrBucketNotFound return } + // Check action acl allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.DeleteBucketAction) if !allow { err = ErrNotAllowed return } + // Delete bucket err = s.providers.StateStore().Delete(buckey) if err != nil { return } - // bucket objects prefix - objectsPrefix := s.getObjectKeyPrefix(bucname) + // All bucket objects prefix + objectsPrefix := s.getAllObjectsKeyPrefix(bucname) + + // Try to delete all bucket objects + _ = s.deleteObjectsByPrefix(objectsPrefix) + + // All bucket uploads prefix + uploadsPrefix := s.getAllUploadsKeyPrefix(bucname) - // delete all objects of the bucket - err = s.deleteObjectsByPrefix(objectsPrefix) + // Try to delete all bucket uploads + _ = s.deleteUploadsByPrefix(uploadsPrefix) return } // GetAllBuckets get all buckets of the specified user func (s *service) GetAllBuckets(ctx context.Context, user string) (list []*Bucket, err error) { - bucprefix := s.getBucketKeyPrefix() - + // Operation context ctx, cancel := s.opctx(ctx) defer cancel() + // Check action acl allow := s.checkAcl(user, policy.Private, user, action.ListBucketAction) if !allow { err = ErrNotAllowed return } + // All buckets prefix + bucketsPrefix := s.getAllBucketsKeyPrefix() - err = s.providers.StateStore().Iterate(bucprefix, func(key, _ []byte) (stop bool, er error) { + // Collect user's buckets from all buckets + err = s.providers.StateStore().Iterate(bucketsPrefix, func(key, _ []byte) (stop bool, er error) { + // Stop the iteration if error occurred defer func() { if er != nil { stop = true } }() - er = ctx.Err() + // Bucket key + buckey := string(key) + + // Get Bucket + 
bucket, er := s.getBucket(buckey) if er != nil { return } - var bucket *Bucket - - er = s.providers.StateStore().Get(string(key), bucket) - if er != nil { + // Bucket has been deleted + if bucket == nil { return } + // Collect user's bucket if bucket.Owner == user { list = append(list, bucket) } @@ -172,78 +196,152 @@ func (s *service) GetAllBuckets(ctx context.Context, user string) (list []*Bucke return } -// PutBucketAcl update the acl field value of the specified user's bucket +// PutBucketAcl update user specified bucket's acl field value func (s *service) PutBucketAcl(ctx context.Context, user, bucname, acl string) (err error) { - buckey := s.getBucketKey(bucname) - + // Operation context ctx, cancel := s.opctx(ctx) defer cancel() + // Bucket key + buckey := s.getBucketKey(bucname) + + // Lock bucket err = s.lock.Lock(ctx, buckey) if err != nil { return } - defer s.lock.Unlock(buckey) + // Get bucket bucket, err := s.getBucket(buckey) if err != nil { return } - if bucket == nil { err = ErrBucketNotFound return } + // Check action acl allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.PutBucketAclAction) if !allow { err = ErrNotAllowed return } + // Update bucket acl bucket.Acl = acl + // Put bucket err = s.providers.StateStore().Put(buckey, bucket) return } -// GetBucketAcl get the acl field value of the specified user's bucket +// GetBucketAcl get user specified bucket acl field value func (s *service) GetBucketAcl(ctx context.Context, user, bucname string) (acl string, err error) { - buckey := s.getBucketKey(bucname) - + // Operation context ctx, cancel := s.opctx(ctx) defer cancel() + // Bucket key + buckey := s.getBucketKey(bucname) + + // RLock bucket err = s.lock.RLock(ctx, buckey) if err != nil { return } - defer s.lock.RUnlock(buckey) + // Get bucket bucket, err := s.getBucket(buckey) if err != nil { return } - if bucket == nil { err = ErrBucketNotFound return } + // Check action acl allow := s.checkAcl(bucket.Owner, bucket.Acl, user, 
action.GetBucketAclAction) if !allow { err = ErrNotAllowed return } + // Get acl field value acl = bucket.Acl return } +// EmptyBucket check if the user specified bucked is empty +func (s *service) EmptyBucket(ctx context.Context, user, bucname string) (empty bool, err error) { + ctx, cancel := s.opctx(ctx) + defer cancel() + + // Bucket key + buckey := s.getBucketKey(bucname) + + // RLock bucket + err = s.lock.RLock(ctx, buckey) + if err != nil { + return + } + defer s.lock.RUnlock(buckey) + + // Get bucket + bucket, err := s.getBucket(buckey) + if err != nil { + return + } + if bucket == nil { + err = ErrBucketNotFound + return + } + + // Check action acl + allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.ListObjectsAction) + if !allow { + err = ErrNotAllowed + return + } + + // All bucket objects prefix + objectsPrefix := s.getAllObjectsKeyPrefix(bucname) + + // Initially set empty to true + empty = true + + // Iterate the bucket objects, if no item, empty keep true + // if at least one, set empty to false, and stop iterate + err = s.providers.StateStore().Iterate(objectsPrefix, func(_, _ []byte) (stop bool, er error) { + empty = false + stop = true + return + }) + + // If bucket have at least one object, return not empty, else check if bucket + // have at least one upload + if !empty { + return + } + + // All bucket uploads prefix + uploadsPrefix := s.getAllUploadsKeyPrefix(bucname) + + // Set empty to false if bucket has at least one upload + err = s.providers.StateStore().Iterate(uploadsPrefix, func(_, _ []byte) (stop bool, er error) { + empty = false + stop = true + return + }) + + return +} + func (s *service) getBucket(buckey string) (bucket *Bucket, err error) { err = s.providers.StateStore().Get(buckey, bucket) if errors.Is(err, providers.ErrStateStoreNotFound) { diff --git a/s3/services/object/service_multipart.go b/s3/services/object/service_multipart.go index 455d5bced..592d45c4e 100644 --- a/s3/services/object/service_multipart.go +++ 
b/s3/services/object/service_multipart.go @@ -1,106 +1,338 @@ package object import ( + "context" "encoding/hex" + "errors" "fmt" + "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/etag" + "github.com/bittorrent/go-btfs/s3/providers" "github.com/bittorrent/go-btfs/s3/s3utils" "github.com/bittorrent/go-btfs/s3/utils/hash" "github.com/google/uuid" "io" "net/http" + "regexp" "strings" "time" ) -func (s *service) CreateMultipartUpload(ctx context.Context, bucname string, objname string, meta map[string]string) (mtp Multipart, err error) { - uploadId := uuid.NewString() - mtp = Multipart{ +// CreateMultipartUpload create user specified multipart upload +func (s *service) CreateMultipartUpload(ctx context.Context, user, bucname, objname string, meta map[string]string) (multipart *Multipart, err error) { + // Operation context + ctx, cancel := s.opctx(ctx) + defer cancel() + + // Bucket key + buckey := s.getBucketKey(bucname) + + // RLock bucket + err = s.lock.RLock(ctx, buckey) + if err != nil { + return + } + defer s.lock.RUnlock(buckey) + + // Get bucket + bucket, err := s.getBucket(buckey) + if err != nil { + return + } + if bucket == nil { + err = ErrBucketNotFound + return + } + + // Check action acl + allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.CreateMultipartUploadAction) + if !allow { + err = ErrNotAllowed + return + } + + // Upload id + uplid := uuid.NewString() + + // upload key + uplkey := s.getUploadKey(bucname, objname, uplid) + + // Lock upload + err = s.lock.Lock(ctx, uplkey) + if err != nil { + return + } + defer s.lock.Unlock(uplkey) + + // Multipart upload + multipart = &Multipart{ Bucket: bucname, Object: objname, - UploadID: uploadId, + UploadID: uplid, MetaData: meta, Initiated: time.Now().UTC(), } - err = s.providers.StateStore().Put(getUploadKey(bucname, objname, uploadId), mtp) + // Put multipart upload + err = s.providers.StateStore().Put(uplkey, multipart) + + 
return +} + +// UploadPart upload user specified multipart part +func (s *service) UploadPart(ctx context.Context, user, bucname, objname, uplid string, partId int, body *hash.Reader, size int64, meta map[string]string) (part *ObjectPart, err error) { + // Operation context + ctx, cancel := s.opctx(ctx) + defer cancel() + + // Bucket key + buckey := s.getBucketKey(bucname) + + // RLock bucket + err = s.lock.RLock(ctx, buckey) if err != nil { return } + defer s.lock.RUnlock(buckey) - return -} + // Get bucket + bucket, err := s.getBucket(buckey) + if err != nil { + return + } + if bucket == nil { + err = ErrBucketNotFound + return + } -func (s *service) UploadPart(ctx context.Context, bucname string, objname string, uploadID string, partID int, reader *hash.Reader, size int64, meta map[string]string) (part ObjectPart, err error) { - cid, err := s.providers.FileStore().Store(reader) + // Check acl + allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.UploadPartAction) + if !allow { + err = ErrNotAllowed + return + } + + // Upload key + uplkey := s.getUploadKey(bucname, objname, uplid) + + // Lock upload + err = s.lock.Lock(ctx, uplkey) if err != nil { return } + defer s.lock.Unlock(uplkey) - part = ObjectPart{ - Number: partID, - ETag: reader.ETag().String(), + // Get multipart upload + multipart, err := s.getMultipart(uplkey) + if err != nil { + return + } + if multipart == nil { + err = ErrUploadNotFound + return + } + + // Store part body + cid, err := s.providers.FileStore().Store(body) + if err != nil { + return + } + + // Init a flag to mark if the part body should be removed, this + // flag will be set to false if the multipart has been successfully put + var removePartBody = true + + // Try to remove the part body + defer func() { + if removePartBody { + _ = s.providers.FileStore().Remove(cid) + } + }() + + // Part + part = &ObjectPart{ + Number: partId, + ETag: body.ETag().String(), Cid: cid, Size: size, ModTime: time.Now().UTC(), } - mtp, err := 
s.getMultipart(ctx, bucname, objname, uploadID) + // Append part + multipart.Parts = append(multipart.Parts, part) + + // Put multipart upload + err = s.providers.StateStore().Put(uplkey, multipart) if err != nil { return } - mtp.Parts = append(mtp.Parts, part) - err = s.providers.StateStore().Put(getUploadKey(bucname, objname, uploadID), mtp) - if err != nil { - return part, err - } + // Set remove part body flag to false, because this part body has been referenced by the upload + removePartBody = false return } -func (s *service) AbortMultipartUpload(ctx context.Context, bucname string, objname string, uploadID string) (err error) { - mtp, err := s.getMultipart(ctx, bucname, objname, uploadID) +// AbortMultipartUpload abort user specified multipart upload +func (s *service) AbortMultipartUpload(ctx context.Context, user, bucname, objname, uplid string) (err error) { + // Operation context + ctx, cancel := s.opctx(ctx) + defer cancel() + + // Bucket key + buckey := s.getBucketKey(bucname) + + // RLock bucket + err = s.lock.RLock(ctx, buckey) if err != nil { return } + defer s.lock.RUnlock(buckey) - for _, part := range mtp.Parts { - err = s.providers.FileStore().Remove(part.Cid) - if err != nil { - return - } + // Get bucket + bucket, err := s.getBucket(buckey) + if err != nil { + return + } + if bucket == nil { + err = ErrBucketNotFound + return } - err = s.removeMultipart(ctx, bucname, objname, uploadID) + // Check action acl + allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.AbortMultipartUploadAction) + if !allow { + err = ErrNotAllowed + return + } + + // Multipart upload key + uplkey := s.getUploadKey(bucname, objname, uplid) + + // Lock upload + err = s.lock.Lock(ctx, uplkey) if err != nil { return } + defer s.lock.Unlock(uplkey) + + // Get multipart upload + multipart, err := s.getMultipart(uplkey) + if err != nil { + return + } + if multipart == nil { + err = ErrUploadNotFound + return + } + + // Delete multipart upload + err = 
s.providers.StateStore().Delete(uplkey) + if err != nil { + return + } + + // Try to remove all parts body + for _, part := range multipart.Parts { + _ = s.providers.FileStore().Remove(part.Cid) + } return } -func (s *service) CompleteMultiPartUpload(ctx context.Context, bucname string, objname string, uploadID string, parts []CompletePart) (obj Object, err error) { - mi, err := s.getMultipart(ctx, bucname, objname, uploadID) +// CompleteMultiPartUpload complete user specified multipart upload +func (s *service) CompleteMultiPartUpload(ctx context.Context, user string, bucname, objname, uplid string, parts []*CompletePart) (object *Object, err error) { + // Operation context + ctx, cancel := s.opctx(ctx) + defer cancel() + + // Bucket key + buckey := s.getBucketKey(bucname) + + // RLock bucket + err = s.lock.RLock(ctx, buckey) + if err != nil { + return + } + defer s.lock.RUnlock(buckey) + + // Get bucket + bucket, err := s.getBucket(buckey) + if err != nil { + return + } + if bucket == nil { + err = ErrBucketNotFound + return + } + + // Check acl + allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.CompleteMultipartUploadAction) + if !allow { + err = ErrNotAllowed + return + } + + // Object key + objkey := s.getObjectKey(bucname, objname) + + // Lock object + err = s.lock.Lock(ctx, objkey) + if err != nil { + return + } + defer s.lock.Unlock(objkey) + + // Get old object for try to remove the old body + objectOld, err := s.getObject(objkey) if err != nil { return } - var ( - readers []io.Reader - objectSize int64 - ) + // Upload key + uplkey := s.getUploadKey(bucname, objname, uplid) + + // Lock upload + err = s.lock.Lock(ctx, uplkey) + if err != nil { + return + } + defer s.lock.Unlock(uplkey) + + // Get multipart upload + multipart, err := s.getMultipart(uplkey) + if err != nil { + return + } + if multipart == nil { + err = ErrUploadNotFound + return + } + + // All parts body readers + var readers []io.Reader + // Try to close all parts body readers, 
because some or all of + // these body may not be used defer func() { for _, rdr := range readers { _ = rdr.(io.ReadCloser).Close() } }() - idxMap := objectPartIndexMap(mi.Parts) + // Total object size + var size int64 + + // Mapping of part number to part index in multipart.Parts + idxmp := s.partIdxMap(multipart.Parts) + + // Iterate all parts to collect all body readers for i, part := range parts { - partIndex, ok := idxMap[part.PartNumber] + // Index in multipart.Parts + partIndex, ok := idxmp[part.PartNumber] + + // Part not exists in multipart if !ok { err = s3utils.InvalidPart{ PartNumber: part.PartNumber, @@ -109,9 +341,13 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, bucname string, o return } - gotPart := mi.Parts[partIndex] + // Got part in multipart.Parts + gotPart := multipart.Parts[partIndex] - part.ETag = canonicalizeETag(part.ETag) + // Canonicalize part etag + part.ETag = s.canonicalizeETag(part.ETag) + + // Check got part etag with part etag if gotPart.ETag != part.ETag { err = s3utils.InvalidPart{ PartNumber: part.PartNumber, @@ -131,90 +367,100 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, bucname string, o return } - // Save for total objname size. - objectSize += gotPart.Size + // Save for total object size. + size += gotPart.Size + // Get part body reader var rdr io.ReadCloser rdr, err = s.providers.FileStore().Cat(gotPart.Cid) if err != nil { return } + // Collect part body reader readers = append(readers, rdr) } - cid, err := s.providers.FileStore().Store(io.MultiReader(readers...)) + // Concat all parts body to one + body := io.MultiReader(readers...) 
+ + // Store object body + cid, err := s.providers.FileStore().Store(body) if err != nil { return } - obj = Object{ + // Init a flag to mark if the object body should be removed, this + // flag will be set to false if the object has been successfully put + var removeObjectBody = true + + // Try to remove stored body if put object failed + defer func() { + if removeObjectBody { + _ = s.providers.FileStore().Remove(cid) + } + }() + + // Object + object = &Object{ Bucket: bucname, Name: objname, ModTime: time.Now().UTC(), - Size: objectSize, + Size: size, IsDir: false, - ETag: computeCompleteMultipartMD5(parts), + ETag: s.computeMultipartMD5(parts), Cid: cid, VersionID: "", IsLatest: true, DeleteMarker: false, - ContentType: mi.MetaData[strings.ToLower(consts.ContentType)], - ContentEncoding: mi.MetaData[strings.ToLower(consts.ContentEncoding)], + ContentType: multipart.MetaData[strings.ToLower(consts.ContentType)], + ContentEncoding: multipart.MetaData[strings.ToLower(consts.ContentEncoding)], SuccessorModTime: time.Now().UTC(), } - if exp, ok := mi.MetaData[strings.ToLower(consts.Expires)]; ok { - if t, e := time.Parse(http.TimeFormat, exp); e == nil { - obj.Expires = t.UTC() - } + // Set object expires + exp, e := time.Parse(http.TimeFormat, multipart.MetaData[strings.ToLower(consts.Expires)]) + if e == nil { + object.Expires = exp.UTC() } - err = s.providers.StateStore().Put(getObjectKey(bucname, objname), obj) + // Put object + err = s.providers.StateStore().Put(objkey, object) if err != nil { return } - err = s.removeMultipartInfo(ctx, bucname, objname, uploadID) - if err != nil { - return - } + // Set remove object body flag to false, because it has been referenced by the object + removeObjectBody = false - return -} - -func (s *service) GetMultipart(ctx context.Context, bucname string, objname string, uploadID string) (mtp Multipart, err error) { - return s.getMultipart(ctx, bucname, objname, uploadID) -} + // Try to remove old object body if exists, because 
it has been covered by new one + if objectOld != nil { + _ = s.providers.FileStore().Remove(objectOld.Cid) + } -func (s *service) getMultipart(ctx context.Context, bucname string, objname string, uploadID string) (mtp Multipart, err error) { - err = s.providers.StateStore().Get(getUploadKey(bucname, objname, uploadID), &mtp) - if errors.Is(err, providers.ErrStateStoreNotFound) { - err = ErrUploadNotFound + // Remove multipart upload + err = s.providers.StateStore().Delete(uplkey) + if err != nil { return } - return -} -func (s *service) removeMultipart(ctx context.Context, bucname string, objname string, uploadID string) (err error) { - err = s.providers.StateStore().Delete(getUploadKey(bucname, objname, uploadID)) - if errors.Is(err, providers.ErrStateStoreNotFound) { - err = ErrUploadNotFound - return + // Try to remove all parts body, because they are no longer be referenced + for _, part := range multipart.Parts { + _ = s.providers.FileStore().Remove(part.Cid) } + return } -func (s *service) removeMultipartInfo(ctx context.Context, bucname string, objname string, uploadID string) (err error) { - err = s.providers.StateStore().Delete(getUploadKey(bucname, objname, uploadID)) +func (s *service) getMultipart(uplkey string) (multipart *Multipart, err error) { + err = s.providers.StateStore().Get(uplkey, multipart) if errors.Is(err, providers.ErrStateStoreNotFound) { - err = ErrUploadNotFound - return + err = nil } return } -func objectPartIndexMap(parts []ObjectPart) map[int]int { +func (s *service) partIdxMap(parts []*ObjectPart) map[int]int { mp := make(map[int]int) for i, part := range parts { mp[part.Number] = i @@ -222,49 +468,44 @@ func objectPartIndexMap(parts []ObjectPart) map[int]int { return mp } +var etagRegex = regexp.MustCompile("\"*?([^\"]*?)\"*?$") + // canonicalizeETag returns ETag with leading and trailing double-quotes removed, // if any present -func canonicalizeETag(etag string) string { +func (s *service) canonicalizeETag(etag string) string { 
return etagRegex.ReplaceAllString(etag, "$1") } -func computeCompleteMultipartMD5(parts []CompletePart) string { +func (s *service) computeMultipartMD5(parts []*CompletePart) (md5 string) { var finalMD5Bytes []byte for _, part := range parts { - md5Bytes, err := hex.DecodeString(canonicalizeETag(part.ETag)) + md5Bytes, err := hex.DecodeString(s.canonicalizeETag(part.ETag)) if err != nil { finalMD5Bytes = append(finalMD5Bytes, []byte(part.ETag)...) } else { finalMD5Bytes = append(finalMD5Bytes, md5Bytes...) } } - s3MD5 := fmt.Sprintf("%s-%d", etag.Multipart(finalMD5Bytes), len(parts)) - return s3MD5 -} - -func (s *service) getObject(objkey string) (object *Object, err error) { - err = s.providers.StateStore().Get(objkey, object) - if errors.Is(err, providers.ErrStateStoreNotFound) { - err = nil - } + md5 = fmt.Sprintf("%s-%d", etag.Multipart(finalMD5Bytes), len(parts)) return } -// deleteObjectsByPrefix delete all objects have common prefix -// it will continue even if one of the objects be deleted fail -func (s *service) deleteObjectsByPrefix(objectPrefix string) (err error) { - err = s.providers.StateStore().Iterate(objectPrefix, func(key, _ []byte) (stop bool, er error) { - keyStr := string(key) - var object *Object - er = s.providers.StateStore().Get(keyStr, object) +// deleteUploadsByPrefix try to delete all multipart uploads with the specified common prefix +func (s *service) deleteUploadsByPrefix(uploadsPrefix string) (err error) { + err = s.providers.StateStore().Iterate(uploadsPrefix, func(key, _ []byte) (stop bool, er error) { + uplkey := string(key) + var multipart *Multipart + er = s.providers.StateStore().Get(uplkey, multipart) if er != nil { return } - er = s.providers.FileStore().Remove(object.Cid) + er = s.providers.StateStore().Delete(uplkey) if er != nil { return } - er = s.providers.StateStore().Delete(keyStr) + for _, part := range multipart.Parts { + _ = s.providers.FileStore().Remove(part.Cid) + } return }) diff --git 
a/s3/services/object/service_object.go b/s3/services/object/service_object.go index 78f4d17d0..a0bb6be73 100644 --- a/s3/services/object/service_object.go +++ b/s3/services/object/service_object.go @@ -2,8 +2,10 @@ package object import ( "context" + "errors" "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/providers" "github.com/bittorrent/go-btfs/s3/utils/hash" "io" "net/http" @@ -12,22 +14,22 @@ import ( ) // PutObject put a user specified object -func (s *service) PutObject(ctx context.Context, user string, bucname, objname string, reader *hash.Reader, size int64, meta map[string]string) (object *Object, err error) { - // operation context +func (s *service) PutObject(ctx context.Context, user string, bucname, objname string, body *hash.Reader, size int64, meta map[string]string) (object *Object, err error) { + // Operation context ctx, cancel := s.opctx(ctx) defer cancel() - // bucket key + // Bucket key buckey := s.getBucketKey(bucname) - // rlock bucket + // RLock bucket err = s.lock.RLock(ctx, buckey) if err != nil { return } defer s.lock.RUnlock(buckey) - // get bucket + // Get bucket bucket, err := s.getBucket(buckey) if err != nil { return @@ -37,43 +39,46 @@ func (s *service) PutObject(ctx context.Context, user string, bucname, objname s return } - // check acl + // Check action acl allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.PutObjectAction) if !allow { err = ErrNotAllowed return } - // object key + // Object key objkey := s.getObjectKey(bucname, objname) - // lock object + // Lock object err = s.lock.Lock(ctx, objkey) if err != nil { return } defer s.lock.Unlock(objkey) - // get old object - oldObject, err := s.getObject(objkey) + // Get old object + objectOld, err := s.getObject(objkey) if err != nil { return } - // remove old file, if old object exists and put new object successfully - defer func() { - if oldObject != nil && err == nil { - _ = 
s.providers.FileStore().Remove(oldObject.Cid) - // todo: log this remove error - } - }() - - // store file - cid, err := s.providers.FileStore().Store(reader) + // Store object body + cid, err := s.providers.FileStore().Store(body) if err != nil { return } + // Init a flag to mark if the object body should be removed, this + // flag will be set to false if the object has been successfully put + var removeObjectBody = true + + // Try to remove stored body if put object failed + defer func() { + if removeObjectBody { + _ = s.providers.FileStore().Remove(cid) + } + }() + // now now := time.Now() @@ -84,7 +89,7 @@ func (s *service) PutObject(ctx context.Context, user string, bucname, objname s ModTime: now.UTC(), Size: size, IsDir: false, - ETag: reader.ETag().String(), + ETag: body.ETag().String(), Cid: cid, VersionID: "", IsLatest: true, @@ -103,27 +108,38 @@ func (s *service) PutObject(ctx context.Context, user string, bucname, objname s // put object err = s.providers.StateStore().Put(objkey, object) + if err != nil { + return + } + + // Set remove object body flag to false, because it has been referenced by the object + removeObjectBody = false + + // Try to remove old object body if exists, because it has been covered by new one + if objectOld != nil { + _ = s.providers.FileStore().Remove(objectOld.Cid) + } return } // CopyObject copy from a user specified source object to a desert object func (s *service) CopyObject(ctx context.Context, user string, srcBucname, srcObjname, dstBucname, dstObjname string, meta map[string]string) (dstObject *Object, err error) { - // operation context + // Operation context ctx, cancel := s.opctx(ctx) defer cancel() - // source bucket key + // Source bucket key srcBuckey := s.getBucketKey(srcBucname) - // rlock source bucket + // RLock source bucket err = s.lock.RLock(ctx, srcBuckey) if err != nil { return } defer s.lock.RUnlock(srcBuckey) - // get source bucket + // Get source bucket srcBucket, err := s.getBucket(srcBuckey) if err 
!= nil { return @@ -133,24 +149,24 @@ func (s *service) CopyObject(ctx context.Context, user string, srcBucname, srcOb return } - // check source acl + // Check source action acl srcAllow := s.checkAcl(srcBucket.Owner, srcBucket.Acl, user, action.GetObjectAction) if !srcAllow { err = ErrNotAllowed return } - // source object key + // Source object key srcObjkey := s.getObjectKey(srcBucname, srcObjname) - // rlock source object + // RLock source object err = s.lock.RLock(ctx, srcObjkey) if err != nil { return } defer s.lock.RUnlock(srcObjkey) - // get source object + // Get source object srcObject, err := s.getObject(srcObjkey) if err != nil { return @@ -160,17 +176,17 @@ func (s *service) CopyObject(ctx context.Context, user string, srcBucname, srcOb return } - // desert bucket key + // Desert bucket key dstBuckey := s.getBucketKey(dstBucname) - // rlock desert bucket + // RLock destination bucket err = s.lock.RLock(ctx, dstBuckey) if err != nil { return } defer s.lock.RUnlock(dstBuckey) - // get desert bucket + // Get destination bucket dstBucket, err := s.getBucket(dstBuckey) if err != nil { return @@ -180,17 +196,17 @@ func (s *service) CopyObject(ctx context.Context, user string, srcBucname, srcOb return } - // check desert acl + // Check destination action acl dstAllow := s.checkAcl(dstBucket.Owner, dstBucket.Acl, user, action.PutObjectAction) if !dstAllow { err = ErrNotAllowed return } - // desert object key + // Destination object key dstObjkey := s.getObjectKey(dstBucname, dstObjname) - // lock desert object + // Lock Destination object err = s.lock.Lock(ctx, dstObjkey) if err != nil { return @@ -200,7 +216,7 @@ func (s *service) CopyObject(ctx context.Context, user string, srcBucname, srcOb // now now := time.Now() - // desert object + // Destination object dstObject = &Object{ Bucket: dstBucname, Name: dstObjname, @@ -217,40 +233,40 @@ func (s *service) CopyObject(ctx context.Context, user string, srcBucname, srcOb SuccessorModTime: now.UTC(), } - // set 
object desert expires + // Set destination object expires exp, er := time.Parse(http.TimeFormat, strings.ToLower(consts.Expires)) if er != nil { dstObject.Expires = exp.UTC() } - // put desert object + // Put destination object err = s.providers.StateStore().Put(dstObjkey, dstObject) return } -// GetObject get an object for the specified user +// GetObject get a user specified object func (s *service) GetObject(ctx context.Context, user, bucname, objname string) (object *Object, body io.ReadCloser, err error) { - // operation context + // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // bucket key buckey := s.getBucketKey(bucname) - // rlock bucket + // RLock bucket err = s.lock.RLock(ctx, buckey) if err != nil { return } defer func() { - // rUnlock bucket just if getting failed + // RUnlock bucket just if getting failed if err != nil { s.lock.RUnlock(buckey) } }() - // get bucket + // Get bucket bucket, err := s.getBucket(buckey) if err != nil { return @@ -260,29 +276,29 @@ func (s *service) GetObject(ctx context.Context, user, bucname, objname string) return } - // check acl + // Check action acl allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.GetObjectAction) if !allow { err = ErrNotAllowed return } - // object key + // Object key objkey := s.getObjectKey(bucname, objname) - // rlock object + // RLock object err = s.lock.RLock(ctx, objkey) if err != nil { return } defer func() { - // rUnlock object just if getting failed + // RUnlock object just if getting failed if err != nil { s.lock.RUnlock(objkey) } }() - // get object + // Get object object, err = s.getObject(objkey) if err != nil { return @@ -292,21 +308,21 @@ func (s *service) GetObject(ctx context.Context, user, bucname, objname string) return } - // get object body + // Get object body body, err = s.providers.FileStore().Cat(object.Cid) if err != nil { return } - // wrap the body with timeout and unlock hooks + // Wrap the body with timeout and unlock hooks, // this will enable 
the bucket and object keep rlocked until // read timout or read closed. Normally, these locks will // be released as soon as leave from the call body = WrapCleanReadCloser( body, - s.readObjectTimeout, + s.closeBodyTimeout, func() { - s.lock.RUnlock(objkey) // note: release object first + s.lock.RUnlock(objkey) // Note: Release object first s.lock.RUnlock(buckey) }, ) @@ -316,21 +332,21 @@ func (s *service) GetObject(ctx context.Context, user, bucname, objname string) // DeleteObject delete a user specified object func (s *service) DeleteObject(ctx context.Context, user, bucname, objname string) (err error) { - // operation context + // Operation context ctx, cancel := s.opctx(ctx) defer cancel() - // bucket key + // Bucket key buckey := s.getBucketKey(bucname) - // rlock bucket + // RLock bucket err = s.lock.RLock(ctx, buckey) if err != nil { return } defer s.lock.RUnlock(buckey) - // get bucket + // Get bucket bucket, err := s.getBucket(buckey) if err != nil { return @@ -340,24 +356,24 @@ func (s *service) DeleteObject(ctx context.Context, user, bucname, objname strin return } - // check acl + // Check action acl allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.DeleteObjectAction) if !allow { err = ErrNotAllowed return } - // object key + // Object key objkey := s.getObjectKey(bucname, objname) - // lock object + // Lock object err = s.lock.Lock(ctx, objkey) if err != nil { return } defer s.lock.Unlock(objkey) - // get object + // Get object object, err := s.getObject(objkey) if err != nil { return @@ -367,35 +383,35 @@ func (s *service) DeleteObject(ctx context.Context, user, bucname, objname strin return } - // delete object body - err = s.providers.FileStore().Remove(object.Cid) + // Delete object + err = s.providers.StateStore().Delete(objkey) if err != nil { return } - // delete object - err = s.providers.StateStore().Delete(objkey) + // Try to delete object body + _ = s.providers.FileStore().Remove(object.Cid) return } // ListObjects list user 
specified objects func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimiter, marker string, max int) (list *ObjectsList, err error) { - // operation context + // Operation context ctx, cancel := s.opctx(ctx) defer cancel() - // bucket key + // Bucket key buckey := s.getBucketKey(bucname) - // rlock bucket + // RLock bucket err = s.lock.RLock(ctx, buckey) if err != nil { return } defer s.lock.RUnlock(buckey) - // get bucket + // Get bucket bucket, err := s.getBucket(buckey) if err != nil { return @@ -405,37 +421,38 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi return } - // check acl + // Check action acl allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.ListObjectsAction) if !allow { err = ErrNotAllowed return } - // object key prefix - objkeyPrefix := s.getObjectKeyPrefix(bucname) + // All bucket objects key prefix + allObjectsKeyPrefix := s.getAllObjectsKeyPrefix(bucname) - // objects key prefix - objskeyPrefix := objkeyPrefix + prefix + // List objects key prefix + listObjectsKeyPrefix := allObjectsKeyPrefix + prefix - // accumulate count + // Accumulate count count := 0 - // begin collect + // Flag mark if begin collect, it initialized to true if + // marker is "" begin := marker == "" - // seen keys + // Seen keys, used to group common keys seen := make(map[string]bool) - // iterate all objects with the specified prefix to collect and group specified range items - err = s.providers.StateStore().Iterate(objskeyPrefix, func(key, _ []byte) (stop bool, er error) { - // object key + // Iterate all objects with the specified prefix to collect and group specified range items + err = s.providers.StateStore().Iterate(listObjectsKeyPrefix, func(key, _ []byte) (stop bool, er error) { + // Object key objkey := string(key) - // object name - objname := objkey[len(objkeyPrefix):] + // Object name + objname := strings.TrimPrefix(objkey, allObjectsKeyPrefix) - // common prefix: if the part of object name 
without prefix include delimiter + // Common prefix: if the part of object name without prefix include delimiter // it is the string truncated object name after the delimiter, else // it is the bucket name itself commonPrefix := objname @@ -448,7 +465,7 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi } } - // if collect not begin, check the marker, if it is matched + // If collect not begin, check the marker, if it is matched // with the common prefix, then begin collection from next iterate turn // and mark this common prefix as seen // note: common prefix also can be object name, so when marker is @@ -459,19 +476,19 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi return } - // no begin, jump the item + // Not begin, jump the item if !begin { return } - // objects with same common prefix will be grouped into one + // Objects with same common prefix will be grouped into one // note: the objects without common prefix will present only once, so // it is not necessary to add these objects names in the seen map if seen[commonPrefix] { return } - // objects with common prefix grouped int one + // Objects with common prefix grouped int one if commonPrefix != objname { list.Prefixes = append(list.Prefixes, commonPrefix) list.NextMarker = commonPrefix @@ -487,10 +504,10 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi list.NextMarker = objname } - // increment collection count + // Increment collection count count++ - // check the count, if it matched the max, means + // Check the count, if it matched the max, means // the collect is complete, but the items may remain, so stop the // iteration, and mark the list was truncated if count == max { @@ -504,49 +521,28 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi return } -// EmptyBucket check if the user specified bucked is empty -func (s *service) EmptyBucket(ctx context.Context, 
user, bucname string) (empty bool, err error) { - ctx, cancel := s.opctx(ctx) - defer cancel() - - // bucket key - buckey := s.getBucketKey(bucname) - - // rlock bucket - err = s.lock.RLock(ctx, buckey) - if err != nil { - return - } - defer s.lock.RUnlock(buckey) - - // get bucket - bucket, err := s.getBucket(buckey) - if err != nil { - return - } - if bucket == nil { - err = ErrBucketNotFound - return - } - - // check acl - allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.HeadBucketAction) - if !allow { - err = ErrNotAllowed - return +func (s *service) getObject(objkey string) (object *Object, err error) { + err = s.providers.StateStore().Get(objkey, object) + if errors.Is(err, providers.ErrStateStoreNotFound) { + err = nil } + return +} - // object key prefix - objkeyPrefix := s.getObjectKeyPrefix(bucname) - - // initially set empty to true - empty = true - - // iterate the bucket objects, if no item, empty keep true - // if at least one, set empty to false, and stop iterate - err = s.providers.StateStore().Iterate(objkeyPrefix, func(_, _ []byte) (stop bool, er error) { - empty = false - stop = true +// deleteObjectsByPrefix try to delete all objects with the specified common prefix +func (s *service) deleteObjectsByPrefix(objectsPrefix string) (err error) { + err = s.providers.StateStore().Iterate(objectsPrefix, func(key, _ []byte) (stop bool, er error) { + objkey := string(key) + var object *Object + er = s.providers.StateStore().Get(objkey, object) + if er != nil { + return + } + er = s.providers.StateStore().Delete(objkey) + if er != nil { + return + } + _ = s.providers.FileStore().Remove(object.Cid) return }) From 8097ebb6a21fff69129d953ab07c6afac3cf74bc Mon Sep 17 00:00:00 2001 From: Steve Date: Sat, 2 Sep 2023 04:52:02 +0800 Subject: [PATCH 081/139] refractor: handlers --- s3/action/action.go | 4 +- s3/consts/consts.go | 1 + s3/handlers/handlers.go | 4 +- s3/handlers/handlers_bucket.go | 54 ++++++++---------- s3/handlers/proto.go | 12 ++-- 
s3/requests/parsers.go | 76 +++++-------------------- s3/requests/parsers_common.go | 61 ++++++++++++++++++-- s3/requests/types.go | 1 + s3/requests/types_common.go | 8 +++ s3/responses/wirters.go | 3 + s3/responses/writers_common.go | 9 +++ s3/services/object/proto.go | 8 +-- s3/services/object/service_bucket.go | 2 +- s3/services/object/service_multipart.go | 2 +- s3/services/object/service_object.go | 4 +- 15 files changed, 136 insertions(+), 113 deletions(-) create mode 100644 s3/requests/types_common.go diff --git a/s3/action/action.go b/s3/action/action.go index 8a13e97db..49809b2c6 100644 --- a/s3/action/action.go +++ b/s3/action/action.go @@ -11,8 +11,8 @@ type Action string const ( //--- bucket - // CreateBucketAction - CreateBucket Rest API action. - CreateBucketAction = "s3:CreateBucket" + // CreateBucketAction - PutBucket Rest API action. + CreateBucketAction = "s3:PutBucket" // HeadBucketAction - HeadBucket Rest API action. HeadBucketAction = "s3:HeadBucket" diff --git a/s3/consts/consts.go b/s3/consts/consts.go index 4f0449b05..5ddaa2995 100644 --- a/s3/consts/consts.go +++ b/s3/consts/consts.go @@ -17,6 +17,7 @@ const ( EmptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" StsRequestBodyLimit = 10 * (1 << 20) // 10 MiB DefaultRegion = "" + DefaultAcl = "public-read" SlashSeparator = "/" MaxSkewTime = 15 * time.Minute // 15 minutes skew allowed. 
diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index ea1c72329..3e88a76d7 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -90,7 +90,7 @@ func (h *Handlers) lock(ctx context.Context, key string, w http.ResponseWriter, } // Parse object url queries -func (h *Handlers) getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string, rerr *responses.Error) { +func (h *Handlers) getObjectResources(values url.Values) (uploadId string, partNumberMarker, maxParts int, encodingType string, rerr *responses.Error) { var err error if values.Get("max-parts") != "" { if maxParts, err = strconv.Atoi(values.Get("max-parts")); err != nil { @@ -108,7 +108,7 @@ func (h *Handlers) getObjectResources(values url.Values) (uploadID string, partN } } - uploadID = values.Get("uploadId") + uploadId = values.Get("uploadId") encodingType = values.Get("encoding-type") return } diff --git a/s3/handlers/handlers_bucket.go b/s3/handlers/handlers_bucket.go index 2299c3d7d..a36b023b3 100644 --- a/s3/handlers/handlers_bucket.go +++ b/s3/handlers/handlers_bucket.go @@ -4,58 +4,50 @@ import ( "errors" s3action "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/cctx" - "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/s3utils" "github.com/bittorrent/go-btfs/s3/services/object" "net/http" ) +var errToRespErr = map[error]*responses.Error{ + object.ErrBucketNotFound: responses.ErrNoSuchBucket, + object.ErrObjectNotFound: responses.ErrNoSuchKey, + object.ErrUploadNotFound: responses.ErrNoSuchUpload, + object.ErrBucketAlreadyExists: responses.ErrBucketAlreadyExists, + object.ErrNotAllowed: responses.ErrAccessDenied, +} + +func (h *Handlers) respErr(err error) (rerr *responses.Error) { + rerr, ok := errToRespErr[err] + if !ok { + err = responses.ErrInternalError + } + return +} + func (h 
*Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - req, err := requests.ParsePutBucketRequest(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) - return - } - - // issue: lock for check ctx := r.Context() - ack := cctx.GetAccessKey(r) - - if err = s3utils.CheckValidBucketNameStrict(req.Bucket); err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidBucketName) - return - } - if !requests.CheckAclPermissionType(&req.ACL) { - err = responses.ErrNotImplemented - responses.WriteErrorResponse(w, r, responses.ErrNotImplemented) + req, rerr := requests.ParsePutBucketRequest(r) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) return } - if ok := h.bucsvc.HasBucket(ctx, req.Bucket); ok { - err = responses.ErrBucketAlreadyExists - responses.WriteErrorResponseHeadersOnly(w, r, responses.ErrBucketAlreadyExists) - return - } - - err = h.bucsvc.CreateBucket(ctx, req.Bucket, req.Region, ack, req.ACL) + _, err = h.objsvc.PutBucket(ctx, req.User, req.Bucket, req.Region, req.ACL) if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInternalError) + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) return } - // Make sure to add Location information here only for bucket - if cp := requests.PathClean(r.URL.Path); cp != "" { - w.Header().Set(consts.Location, cp) // Clean any trailing slashes. 
- } - responses.WritePutBucketResponse(w, r) return diff --git a/s3/handlers/proto.go b/s3/handlers/proto.go index 1eb28fdcf..d4dd4c350 100644 --- a/s3/handlers/proto.go +++ b/s3/handlers/proto.go @@ -5,12 +5,14 @@ import ( ) type Handlerser interface { - // middlewares + // Middlewares + Cors(handler http.Handler) http.Handler Sign(handler http.Handler) http.Handler Log(handler http.Handler) http.Handler - // bucket + // Bucket + PutBucketHandler(w http.ResponseWriter, r *http.Request) HeadBucketHandler(w http.ResponseWriter, r *http.Request) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) @@ -18,7 +20,8 @@ type Handlerser interface { GetBucketAclHandler(w http.ResponseWriter, r *http.Request) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) - // object + // Object + PutObjectHandler(w http.ResponseWriter, r *http.Request) HeadObjectHandler(w http.ResponseWriter, r *http.Request) CopyObjectHandler(w http.ResponseWriter, r *http.Request) @@ -28,7 +31,8 @@ type Handlerser interface { ListObjectsHandler(w http.ResponseWriter, r *http.Request) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) - // multipart + // Multipart + CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) UploadPartHandler(w http.ResponseWriter, r *http.Request) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) diff --git a/s3/requests/parsers.go b/s3/requests/parsers.go index 98368fcf1..abccb8f05 100644 --- a/s3/requests/parsers.go +++ b/s3/requests/parsers.go @@ -1,14 +1,12 @@ package requests import ( - "encoding/xml" + "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/responses" "net/http" "path" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/policy" - "github.com/bittorrent/go-btfs/s3/utils" "github.com/gorilla/mux" ) @@ -22,25 +20,18 @@ import ( // return //} -func ParsePutBucketRequest(r *http.Request) (req *PutBucketRequest, err error) { +func ParsePutBucketRequest(r 
*http.Request) (req *PutBucketRequest, rerr *responses.Error) { req = &PutBucketRequest{} - - vars := mux.Vars(r) - bucket := vars["bucket"] - - region, _ := parseLocationConstraint(r) - - acl := r.Header.Get(consts.AmzACL) - - //set request - req.Bucket = bucket - req.ACL = acl - req.Region = region - - if req.ACL == "" { - req.ACL = policy.PublicRead + req.User = cctx.GetAccessKey(r) + req.Bucket, rerr = parseBucket(r) + if rerr != nil { + return } - + req.ACL, rerr = parseAcl(r) + if rerr != nil { + return + } + req.Region, rerr = parseLocationConstraint(r) return } @@ -118,33 +109,6 @@ func ParsePutBucketAclRequest(r *http.Request) (req *PutBucketAclRequest, err er return } -/*********************************/ - -// Parses location constraint from the incoming reader. -func parseLocationConstraint(r *http.Request) (location string, s3Error *responses.Error) { - // If the request has no body with content-length set to 0, - // we do not have to validate location constraint. Bucket will - // be created at default region. - locationConstraint := createBucketLocationConfiguration{} - err := utils.XmlDecoder(r.Body, &locationConstraint, r.ContentLength) - if err != nil && r.ContentLength != 0 { - // Treat all other failures as XML parsing errors. - return "", responses.ErrMalformedXML - } // else for both err as nil or io.EOF - location = locationConstraint.Location - if location == "" { - location = consts.DefaultRegion - } - return location, nil -} - -// createBucketConfiguration container for bucket configuration request from client. -// Used for parsing the location from the request body for Makebucket. -type createBucketLocationConfiguration struct { - XMLName xml.Name `xml:"CreateBucketConfiguration" json:"-"` - Location string `xml:"LocationConstraint"` -} - // pathClean is like path.Clean but does not return "." for // empty inputs, instead returns "empty" as is. 
func PathClean(p string) string { @@ -170,19 +134,7 @@ func PathClean(p string) string { // return tagging, nil //} -func CheckAclPermissionType(s *string) bool { - if len(*s) == 0 { - *s = policy.PublicRead - return true - } - - switch *s { - case policy.PublicRead: - return true - case policy.PublicReadWrite: - return true - case policy.Private: - return true - } - return false +func checkAcl(acl string) (ok bool) { + _, ok = supportAcls[acl] + return } diff --git a/s3/requests/parsers_common.go b/s3/requests/parsers_common.go index 2042eb217..0c4374e80 100644 --- a/s3/requests/parsers_common.go +++ b/s3/requests/parsers_common.go @@ -1,16 +1,69 @@ package requests import ( + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/policy" + "github.com/bittorrent/go-btfs/s3/responses" + "github.com/bittorrent/go-btfs/s3/s3utils" + "github.com/bittorrent/go-btfs/s3/utils" "github.com/gorilla/mux" "net/http" "net/url" "path" ) -func ParseBucketAndObject(r *http.Request) (bucket, object string, err error) { - vars := mux.Vars(r) - bucket = vars["bucket"] - object, err = unescapePath(vars["object"]) +func parseBucket(r *http.Request) (bucket string, rerr *responses.Error) { + bucket = mux.Vars(r)["bucket"] + err := s3utils.CheckValidBucketNameStrict(bucket) + if err != nil { + rerr = responses.ErrInvalidBucketName + } + return +} + +func parseObject(r *http.Request) (object string, rerr *responses.Error) { + object, err := unescapePath(mux.Vars(r)["object"]) + if err != nil { + rerr = responses.ErrInvalidRequestParameter + } + return +} + +// Parses location constraint from the incoming reader. +func parseLocationConstraint(r *http.Request) (location string, rerr *responses.Error) { + // If the request has no body with content-length set to 0, + // we do not have to validate location constraint. Bucket will + // be created at default region. 
+ locationConstraint := createBucketLocationConfiguration{} + err := utils.XmlDecoder(r.Body, &locationConstraint, r.ContentLength) + if err != nil && r.ContentLength != 0 { + rerr = responses.ErrMalformedXML + return + } // else for both err as nil or io.EOF + + location = locationConstraint.Location + if location == "" { + location = consts.DefaultRegion + } + + return +} + +var supportAcls = map[string]struct{}{ + policy.Private: {}, + policy.PublicRead: {}, + policy.PublicReadWrite: {}, +} + +func parseAcl(r *http.Request) (acl string, rerr *responses.Error) { + acl = r.Header.Get(consts.AmzACL) + if acl == "" { + acl = consts.DefaultAcl + } + _, ok := supportAcls[acl] + if !ok { + rerr = responses.ErrNotImplemented + } return } diff --git a/s3/requests/types.go b/s3/requests/types.go index 2d257b844..cd88036dd 100644 --- a/s3/requests/types.go +++ b/s3/requests/types.go @@ -2,6 +2,7 @@ package requests // PutBucketRequest . type PutBucketRequest struct { + User string Bucket string ACL string Region string diff --git a/s3/requests/types_common.go b/s3/requests/types_common.go new file mode 100644 index 000000000..116ba81ad --- /dev/null +++ b/s3/requests/types_common.go @@ -0,0 +1,8 @@ +package requests + +// createBucketConfiguration container for bucket configuration request from client. +// Used for parsing the location from the request body for Makebucket. 
+type createBucketLocationConfiguration struct { + XMLName xml.Name `xml:"CreateBucketConfiguration" json:"-"` + Location string `xml:"LocationConstraint"` +} diff --git a/s3/responses/wirters.go b/s3/responses/wirters.go index f47b8dd79..465e5e09d 100644 --- a/s3/responses/wirters.go +++ b/s3/responses/wirters.go @@ -10,6 +10,9 @@ import ( ) func WritePutBucketResponse(w http.ResponseWriter, r *http.Request) { + if cp := pathClean(r.URL.Path); cp != "" { + w.Header().Set(consts.Location, cp) + } WriteSuccessResponse(w, r) return } diff --git a/s3/responses/writers_common.go b/s3/responses/writers_common.go index 8b5d4347d..f9b3ff914 100644 --- a/s3/responses/writers_common.go +++ b/s3/responses/writers_common.go @@ -11,6 +11,7 @@ import ( logging "github.com/ipfs/go-log/v2" "net/http" "net/url" + "path" "strconv" "time" ) @@ -206,3 +207,11 @@ func setPutObjHeaders(w http.ResponseWriter, etag, cid string, delete bool) { w.Header()[consts.CID] = []string{cid} } } + +func pathClean(p string) string { + cp := path.Clean(p) + if cp == "." 
{ + return "" + } + return cp +} diff --git a/s3/services/object/proto.go b/s3/services/object/proto.go index eb4a03263..5269a87b2 100644 --- a/s3/services/object/proto.go +++ b/s3/services/object/proto.go @@ -17,7 +17,7 @@ var ( ) type Service interface { - CreateBucket(ctx context.Context, user, bucname, region, acl string) (bucket *Bucket, err error) + PutBucket(ctx context.Context, user, bucname, region, acl string) (bucket *Bucket, err error) GetBucket(ctx context.Context, user, bucname string) (bucket *Bucket, err error) DeleteBucket(ctx context.Context, user, bucname string) (err error) GetAllBuckets(ctx context.Context, user string) (list []*Bucket, err error) @@ -25,8 +25,8 @@ type Service interface { GetBucketAcl(ctx context.Context, user, bucname string) (acl string, err error) EmptyBucket(ctx context.Context, user, bucname string) (empty bool, err error) - PutObject(ctx context.Context, user string, bucname, objname string, body *hash.Reader, size int64, meta map[string]string) (object *Object, err error) - CopyObject(ctx context.Context, user string, srcBucname, srcObjname, dstBucname, dstObjname string, meta map[string]string) (dstObject *Object, err error) + PutObject(ctx context.Context, user, bucname, objname string, body *hash.Reader, size int64, meta map[string]string) (object *Object, err error) + CopyObject(ctx context.Context, user, srcBucname, srcObjname, dstBucname, dstObjname string, meta map[string]string) (dstObject *Object, err error) GetObject(ctx context.Context, user, bucname, objname string) (object *Object, body io.ReadCloser, err error) DeleteObject(ctx context.Context, user, bucname, objname string) (err error) // todo: DeleteObjects @@ -35,7 +35,7 @@ type Service interface { CreateMultipartUpload(ctx context.Context, user, bucname, objname string, meta map[string]string) (multipart *Multipart, err error) UploadPart(ctx context.Context, user, bucname, objname, uplid string, partId int, reader *hash.Reader, size int64, meta 
map[string]string) (part *ObjectPart, err error) AbortMultipartUpload(ctx context.Context, user, bucname, objname, uplid string) (err error) - CompleteMultiPartUpload(ctx context.Context, user string, bucname, objname, uplid string, parts []*CompletePart) (object *Object, err error) + CompleteMultiPartUpload(ctx context.Context, user, bucname, objname, uplid string, parts []*CompletePart) (object *Object, err error) } // Bucket contains bucket metadata. diff --git a/s3/services/object/service_bucket.go b/s3/services/object/service_bucket.go index 13625aa4b..2bd6a3417 100644 --- a/s3/services/object/service_bucket.go +++ b/s3/services/object/service_bucket.go @@ -11,7 +11,7 @@ import ( ) // CreateBucket create a new bucket for the specified user -func (s *service) CreateBucket(ctx context.Context, user, bucname, region, acl string) (bucket *Bucket, err error) { +func (s *service) PutBucket(ctx context.Context, user, bucname, region, acl string) (bucket *Bucket, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() diff --git a/s3/services/object/service_multipart.go b/s3/services/object/service_multipart.go index 592d45c4e..b9dcaaa8c 100644 --- a/s3/services/object/service_multipart.go +++ b/s3/services/object/service_multipart.go @@ -242,7 +242,7 @@ func (s *service) AbortMultipartUpload(ctx context.Context, user, bucname, objna } // CompleteMultiPartUpload complete user specified multipart upload -func (s *service) CompleteMultiPartUpload(ctx context.Context, user string, bucname, objname, uplid string, parts []*CompletePart) (object *Object, err error) { +func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, objname, uplid string, parts []*CompletePart) (object *Object, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() diff --git a/s3/services/object/service_object.go b/s3/services/object/service_object.go index a0bb6be73..9d5af7970 100644 --- a/s3/services/object/service_object.go +++ 
b/s3/services/object/service_object.go @@ -14,7 +14,7 @@ import ( ) // PutObject put a user specified object -func (s *service) PutObject(ctx context.Context, user string, bucname, objname string, body *hash.Reader, size int64, meta map[string]string) (object *Object, err error) { +func (s *service) PutObject(ctx context.Context, user, bucname, objname string, body *hash.Reader, size int64, meta map[string]string) (object *Object, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() @@ -124,7 +124,7 @@ func (s *service) PutObject(ctx context.Context, user string, bucname, objname s } // CopyObject copy from a user specified source object to a desert object -func (s *service) CopyObject(ctx context.Context, user string, srcBucname, srcObjname, dstBucname, dstObjname string, meta map[string]string) (dstObject *Object, err error) { +func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, dstBucname, dstObjname string, meta map[string]string) (dstObject *Object, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() From b7eca106316a132a834065e88981d3051b4ef4ad Mon Sep 17 00:00:00 2001 From: Steve Date: Sat, 2 Sep 2023 18:35:34 +0800 Subject: [PATCH 082/139] refractor: bucket handler --- s3/action/action.go | 4 +- s3/handlers/handlers.go | 54 +- s3/handlers/handlers_bucket.go | 138 +- s3/handlers/handlers_multipart.go | 647 +++++----- s3/handlers/handlers_object.go | 1547 +++++++++++------------ s3/handlers/proto.go | 32 +- s3/requests/parsers.go | 97 +- s3/requests/types.go | 14 - s3/requests/types_common.go | 2 + s3/responses/types.go | 3 +- s3/responses/wirters.go | 67 +- s3/responses/writers_common.go | 13 +- s3/routers/routers.go | 56 +- s3/s3.go | 9 +- s3/services/object/proto.go | 2 +- s3/services/object/service_bucket.go | 6 +- s3/services/object/service_multipart.go | 2 +- s3/services/object/service_object.go | 2 +- 18 files changed, 1268 insertions(+), 1427 deletions(-) delete mode 100644 
s3/requests/types.go diff --git a/s3/action/action.go b/s3/action/action.go index 49809b2c6..8a13e97db 100644 --- a/s3/action/action.go +++ b/s3/action/action.go @@ -11,8 +11,8 @@ type Action string const ( //--- bucket - // CreateBucketAction - PutBucket Rest API action. - CreateBucketAction = "s3:PutBucket" + // CreateBucketAction - CreateBucket Rest API action. + CreateBucketAction = "s3:CreateBucket" // HeadBucketAction - HeadBucket Rest API action. HeadBucketAction = "s3:HeadBucket" diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 3e88a76d7..f47c7dcdf 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -2,14 +2,11 @@ package handlers import ( - "context" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/ctxmu" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/services/accesskey" "github.com/bittorrent/go-btfs/s3/services/object" "github.com/bittorrent/go-btfs/s3/services/sign" - "net/http" "net/url" "runtime" "strconv" @@ -21,27 +18,16 @@ var _ Handlerser = (*Handlers)(nil) type Handlers struct { headers map[string][]string - nslock ctxmu.MultiCtxRWLocker - - acksvc accesskey.Service - sigsvc sign.Service - bucsvc object.Service - objsvc object.Service + acksvc accesskey.Service + sigsvc sign.Service + objsvc object.Service } -func NewHandlers( - acksvc accesskey.Service, - sigsvc sign.Service, - bucsvc object.Service, - objsvc object.Service, - options ...Option, -) (handlers *Handlers) { +func NewHandlers(acksvc accesskey.Service, sigsvc sign.Service, objsvc object.Service, options ...Option) (handlers *Handlers) { handlers = &Handlers{ headers: defaultHeaders, - nslock: ctxmu.NewDefaultMultiCtxRWMutex(), acksvc: acksvc, sigsvc: sigsvc, - bucsvc: bucsvc, objsvc: objsvc, } for _, option := range options { @@ -57,38 +43,6 @@ func (h *Handlers) name() string { return f.Name() } -func (h *Handlers) rlock(ctx context.Context, key string, w http.ResponseWriter, r 
*http.Request) (runlock func(), err error) { - key = lockPrefix + key - ctx, cancel := context.WithTimeout(ctx, lockWaitTimeout) - err = h.nslock.RLock(ctx, key) - if err != nil { - responses.WriteErrorResponse(w, r, err) - cancel() - return - } - runlock = func() { - h.nslock.RUnlock(key) - cancel() - } - return -} - -func (h *Handlers) lock(ctx context.Context, key string, w http.ResponseWriter, r *http.Request) (unlock func(), err error) { - key = lockPrefix + key - ctx, cancel := context.WithTimeout(ctx, lockWaitTimeout) - err = h.nslock.Lock(ctx, key) - if err != nil { - responses.WriteErrorResponse(w, r, err) - cancel() - return - } - unlock = func() { - h.nslock.Unlock(key) - cancel() - } - return -} - // Parse object url queries func (h *Handlers) getObjectResources(values url.Values) (uploadId string, partNumberMarker, maxParts int, encodingType string, rerr *responses.Error) { var err error diff --git a/s3/handlers/handlers_bucket.go b/s3/handlers/handlers_bucket.go index a36b023b3..038dbe0f1 100644 --- a/s3/handlers/handlers_bucket.go +++ b/s3/handlers/handlers_bucket.go @@ -1,8 +1,6 @@ package handlers import ( - "errors" - s3action "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" @@ -21,19 +19,17 @@ var errToRespErr = map[error]*responses.Error{ func (h *Handlers) respErr(err error) (rerr *responses.Error) { rerr, ok := errToRespErr[err] if !ok { - err = responses.ErrInternalError + rerr = responses.ErrInternalError } return } -func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { +func (h *Handlers) CreateBucketHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - ctx := r.Context() - req, rerr := requests.ParsePutBucketRequest(r) if rerr != nil { err = rerr @@ -41,7 +37,7 @@ func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r 
*http.Request) { return } - _, err = h.objsvc.PutBucket(ctx, req.User, req.Bucket, req.Region, req.ACL) + _, err = h.objsvc.CreateBucket(r.Context(), req.AccessKey, req.Bucket, req.Region, req.ACL) if err != nil { rerr = h.respErr(err) responses.WriteErrorResponse(w, r, rerr) @@ -53,155 +49,123 @@ func (h *Handlers) PutBucketHandler(w http.ResponseWriter, r *http.Request) { return } -func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { +func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - req, err := requests.ParseDeleteBucketRequest(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) + req, rerr := requests.ParseHeadBucketRequest(r) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) return } - ctx := r.Context() - ack := cctx.GetAccessKey(r) - - err = h.bucsvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) + _, err = h.objsvc.GetBucket(r.Context(), req.AccessKey, req.Bucket) if err != nil { - responses.WriteErrorResponse(w, r, err) + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) return } - //todo check all errors. 
- err = h.bucsvc.DeleteBucket(ctx, req.Bucket) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } + responses.WriteHeadBucketResponse(w, r) - responses.WriteDeleteBucketResponse(w) + return } -func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { +func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - ack := cctx.GetAccessKey(r) - if ack == "" { - responses.WriteErrorResponse(w, r, responses.ErrNoAccessKey) + req, rerr := requests.ParseDeleteBucketRequest(r) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) return } - //todo check all errors - bucketMetas, err := h.bucsvc.GetAllBucketsOfUser(ack) + err = h.objsvc.DeleteBucket(r.Context(), req.AccessKey, req.Bucket) if err != nil { - responses.WriteErrorResponse(w, r, err) + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) return } - responses.WriteListBucketsResponse(w, r, bucketMetas) + responses.WriteDeleteBucketResponse(w) + + return } -func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { +func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - req, err := requests.ParseGetBucketAclRequest(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) - return - } - - ctx := r.Context() - ack := cctx.GetAccessKey(r) - - if !h.bucsvc.HasBucket(ctx, req.Bucket) { - responses.WriteErrorResponseHeadersOnly(w, r, responses.ErrNoSuchBucket) + req, rerr := requests.ParseListBucketsRequest(r) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) return } - err = h.bucsvc.CheckACL(ack, req.Bucket, s3action.GetBucketAclAction) + list, err := h.objsvc.GetAllBuckets(r.Context(), req.AccessKey) if err != nil { - responses.WriteErrorResponse(w, r, err) + rerr = 
h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) return } - //todo check all errors - acl, err := h.bucsvc.GetBucketAcl(ctx, req.Bucket) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } + responses.WriteListBucketsResponse(w, r, req.AccessKey, "", list) - responses.WriteGetBucketAclResponse(w, r, ack, acl) + return } -func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { +func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - req, err := requests.ParsePutBucketAclRequest(r) - if err != nil || len(req.ACL) == 0 || len(req.Bucket) == 0 { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) - return - } - - ctx := r.Context() - ack := cctx.GetAccessKey(r) - - err = h.bucsvc.CheckACL(ack, req.Bucket, s3action.PutBucketAclAction) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - if !requests.CheckAclPermissionType(&req.ACL) { - responses.WriteErrorResponse(w, r, responses.ErrNotImplemented) + req, rerr := requests.ParseGetBucketAclRequest(r) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) return } - //todo check all errors - err = h.bucsvc.UpdateBucketAcl(ctx, req.Bucket, req.ACL) + acl, err := h.objsvc.GetBucketAcl(r.Context(), req.AccessKey, req.Bucket) if err != nil { - responses.WriteErrorResponse(w, r, err) + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) return } - //todo check no return? 
- responses.WritePutBucketAclResponse(w, r) + responses.WriteGetBucketAclResponse(w, r, req.AccessKey, "", acl) } -func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { +func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - req, err := requests.ParseHeadBucketRequest(r) + req, rerr := requests.ParsePutBucketAclRequest(r) if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestBody) + err = rerr + responses.WriteErrorResponse(w, r, rerr) return } - ack := cctx.GetAccessKey(r) - - err = h.bucsvc.CheckACL(ack, req.Bucket, s3action.HeadBucketAction) - if errors.Is(err, object.ErrBucketNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) - return - } + err = h.objsvc.PutBucketAcl(r.Context(), req.AccessKey, req.Bucket, req.ACL) if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrAccessDenied) + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) return } - responses.WriteHeadBucketResponse(w, r) + responses.WritePutBucketAclResponse(w, r) } diff --git a/s3/handlers/handlers_multipart.go b/s3/handlers/handlers_multipart.go index e4fc77015..ecc3870de 100644 --- a/s3/handlers/handlers_multipart.go +++ b/s3/handlers/handlers_multipart.go @@ -1,333 +1,318 @@ package handlers -import ( - "errors" - "github.com/bittorrent/go-btfs/s3/action" - "github.com/bittorrent/go-btfs/s3/cctx" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/requests" - "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/s3utils" - "github.com/bittorrent/go-btfs/s3/services/object" - "github.com/bittorrent/go-btfs/s3/utils" - "github.com/bittorrent/go-btfs/s3/utils/hash" - "net/http" - "sort" - "strconv" -) - -func (h *Handlers) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - 
var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - bucname, objname, err := requests.ParseBucketAndObject(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) - return - } - - err = s3utils.CheckNewMultipartArgs(ctx, bucname, objname) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - meta, err := extractMetadata(ctx, r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequest) - return - } - - // rlock bucket - runlock, err := h.rlock(ctx, bucname, w, r) - if err != nil { - return - } - defer runlock() - - // lock object - unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) - if err != nil { - return - } - defer unlock() - - err = h.bucsvc.CheckACL(ack, bucname, action.CreateMultipartUploadAction) - if errors.Is(err, object.ErrBucketNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - mtp, err := h.objsvc.CreateMultipartUpload(ctx, bucname, objname, meta) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - responses.WriteCreateMultipartUploadResponse(w, r, bucname, objname, mtp.UploadID) - - return -} - -func (h *Handlers) UploadPartHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - // X-Amz-Copy-Source shouldn't be set for this call. 
- if _, ok := r.Header[consts.AmzCopySource]; ok { - err = errors.New("shouldn't be copy") - responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) - return - } - - bucname, objname, err := requests.ParseBucketAndObject(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) - return - } - - err = s3utils.CheckPutObjectPartArgs(ctx, bucname, objname) - if err != nil { // todo: convert error - responses.WriteErrorResponse(w, r, err) - return - } - - uploadID := r.Form.Get(consts.UploadID) - partIDString := r.Form.Get(consts.PartNumber) - partID, err := strconv.Atoi(partIDString) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidPart) - return - } - if partID > consts.MaxPartID { - responses.WriteErrorResponse(w, r, responses.ErrInvalidMaxParts) - return - } - - if r.ContentLength == 0 { - responses.WriteErrorResponse(w, r, responses.ErrEntityTooSmall) - return - } - - if r.ContentLength > consts.MaxPartSize { - responses.WriteErrorResponse(w, r, responses.ErrEntityTooLarge) - return - } - - hrdr, ok := r.Body.(*hash.Reader) - if !ok { - responses.WriteErrorResponse(w, r, responses.ErrInternalError) - return - } - - mtp, err := h.objsvc.GetMultipart(ctx, bucname, objname, uploadID) - if errors.Is(err, object.ErrUploadNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchUpload) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - // rlock bucket - runlock, err := h.rlock(ctx, bucname, w, r) - if err != nil { - return - } - defer runlock() - - // lock object - unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) - if err != nil { - return - } - defer unlock() - - err = h.bucsvc.CheckACL(ack, bucname, action.PutObjectAction) - if errors.Is(err, object.ErrBucketNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - part, err := 
h.objsvc.UploadPart(ctx, bucname, objname, uploadID, partID, hrdr, r.ContentLength, mtp.MetaData) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - responses.WriteUploadPartResponse(w, r, part) - - return -} - -func (h *Handlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - bucname, objname, err := requests.ParseBucketAndObject(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) - return - } - - err = s3utils.CheckAbortMultipartArgs(ctx, bucname, objname) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - uploadID, _, _, _, rerr := h.getObjectResources(r.Form) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, err) - return - } - - // rlock bucket - runlock, err := h.rlock(ctx, bucname, w, r) - if err != nil { - return - } - defer runlock() - - // rlock object - unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) - if err != nil { - return - } - defer unlock() - - err = h.bucsvc.CheckACL(ack, bucname, action.AbortMultipartUploadAction) - if errors.Is(err, object.ErrBucketNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - err = h.objsvc.AbortMultipartUpload(ctx, bucname, objname, uploadID) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - responses.WriteAbortMultipartUploadResponse(w, r) - - return -} - -func (h *Handlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - bucname, objname, err := requests.ParseBucketAndObject(r) - if err != nil { - responses.WriteErrorResponse(w, r, 
responses.ErrInvalidRequestParameter) - return - } - - err = s3utils.CheckCompleteMultipartArgs(ctx, bucname, objname) - if err != nil { // todo: convert error - responses.WriteErrorResponse(w, r, err) - return - } - - // Content-Length is required and should be non-zero - if r.ContentLength <= 0 { - responses.WriteErrorResponse(w, r, responses.ErrMissingContentLength) - return - } - - // Get upload id. - uploadID, _, _, _, rerr := h.getObjectResources(r.Form) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - complMultipartUpload := &object.CompleteMultipartUpload{} - if err = utils.XmlDecoder(r.Body, complMultipartUpload, r.ContentLength); err != nil { - responses.WriteErrorResponse(w, r, responses.ErrMalformedXML) - return - } - if len(complMultipartUpload.Parts) == 0 { - responses.WriteErrorResponse(w, r, responses.ErrMalformedXML) - return - } - if !sort.IsSorted(object.CompletedParts(complMultipartUpload.Parts)) { - responses.WriteErrorResponse(w, r, responses.ErrInvalidPartOrder) - return - } - - // rlock bucket - runlock, err := h.rlock(ctx, bucname, w, r) - if err != nil { - return - } - defer runlock() - - // rlock object - unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) - if err != nil { - return - } - defer unlock() - - err = h.bucsvc.CheckACL(ack, bucname, action.CompleteMultipartUploadAction) - if errors.Is(err, object.ErrBucketNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - obj, err := h.objsvc.CompleteMultiPartUpload(ctx, bucname, objname, uploadID, complMultipartUpload.Parts) - if errors.Is(err, object.ErrUploadNotFound) { - rerr = responses.ErrNoSuchUpload - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - buc, err := h.bucsvc.GetBucketMeta(ctx, bucname) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - 
responses.WriteCompleteMultipartUploadResponse(w, r, bucname, objname, buc.Region, obj) - - return -} +//func (h *Handlers) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { +// ctx := r.Context() +// ack := cctx.GetAccessKey(r) +// var err error +// defer func() { +// cctx.SetHandleInf(r, h.name(), err) +// }() +// +// bucname, objname, err := requests.ParseBucketAndObject(r) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) +// return +// } +// +// err = s3utils.CheckNewMultipartArgs(ctx, bucname, objname) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// meta, err := extractMetadata(ctx, r) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequest) +// return +// } +// +// // rlock bucket +// runlock, err := h.rlock(ctx, bucname, w, r) +// if err != nil { +// return +// } +// defer runlock() +// +// // lock object +// unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) +// if err != nil { +// return +// } +// defer unlock() +// +// err = h.bucsvc.CheckACL(ack, bucname, action.CreateMultipartUploadAction) +// if errors.Is(err, object.ErrBucketNotFound) { +// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) +// return +// } +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// mtp, err := h.objsvc.CreateMultipartUpload(ctx, bucname, objname, meta) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// responses.WriteCreateMultipartUploadResponse(w, r, bucname, objname, mtp.UploadID) +// +// return +//} +// +//func (h *Handlers) UploadPartHandler(w http.ResponseWriter, r *http.Request) { +// ctx := r.Context() +// ack := cctx.GetAccessKey(r) +// var err error +// defer func() { +// cctx.SetHandleInf(r, h.name(), err) +// }() +// +// // X-Amz-Copy-Source shouldn't be set for this call. 
+// if _, ok := r.Header[consts.AmzCopySource]; ok { +// err = errors.New("shouldn't be copy") +// responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) +// return +// } +// +// bucname, objname, err := requests.ParseBucketAndObject(r) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) +// return +// } +// +// err = s3utils.CheckPutObjectPartArgs(ctx, bucname, objname) +// if err != nil { // todo: convert error +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// uploadID := r.Form.Get(consts.UploadID) +// partIDString := r.Form.Get(consts.PartNumber) +// partID, err := strconv.Atoi(partIDString) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidPart) +// return +// } +// if partID > consts.MaxPartID { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidMaxParts) +// return +// } +// +// if r.ContentLength == 0 { +// responses.WriteErrorResponse(w, r, responses.ErrEntityTooSmall) +// return +// } +// +// if r.ContentLength > consts.MaxPartSize { +// responses.WriteErrorResponse(w, r, responses.ErrEntityTooLarge) +// return +// } +// +// hrdr, ok := r.Body.(*hash.Reader) +// if !ok { +// responses.WriteErrorResponse(w, r, responses.ErrInternalError) +// return +// } +// +// mtp, err := h.objsvc.GetMultipart(ctx, bucname, objname, uploadID) +// if errors.Is(err, object.ErrUploadNotFound) { +// responses.WriteErrorResponse(w, r, responses.ErrNoSuchUpload) +// return +// } +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// // rlock bucket +// runlock, err := h.rlock(ctx, bucname, w, r) +// if err != nil { +// return +// } +// defer runlock() +// +// // lock object +// unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) +// if err != nil { +// return +// } +// defer unlock() +// +// err = h.bucsvc.CheckACL(ack, bucname, action.PutObjectAction) +// if errors.Is(err, object.ErrBucketNotFound) { +// 
responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) +// return +// } +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// part, err := h.objsvc.UploadPart(ctx, bucname, objname, uploadID, partID, hrdr, r.ContentLength, mtp.MetaData) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// responses.WriteUploadPartResponse(w, r, part) +// +// return +//} +// +//func (h *Handlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { +// ctx := r.Context() +// ack := cctx.GetAccessKey(r) +// var err error +// defer func() { +// cctx.SetHandleInf(r, h.name(), err) +// }() +// +// bucname, objname, err := requests.ParseBucketAndObject(r) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) +// return +// } +// +// err = s3utils.CheckAbortMultipartArgs(ctx, bucname, objname) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// uploadID, _, _, _, rerr := h.getObjectResources(r.Form) +// if rerr != nil { +// err = rerr +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// // rlock bucket +// runlock, err := h.rlock(ctx, bucname, w, r) +// if err != nil { +// return +// } +// defer runlock() +// +// // rlock object +// unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) +// if err != nil { +// return +// } +// defer unlock() +// +// err = h.bucsvc.CheckACL(ack, bucname, action.AbortMultipartUploadAction) +// if errors.Is(err, object.ErrBucketNotFound) { +// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) +// return +// } +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// err = h.objsvc.AbortMultipartUpload(ctx, bucname, objname, uploadID) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// responses.WriteAbortMultipartUploadResponse(w, r) +// +// return +//} +// +//func (h *Handlers) 
CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { +// ctx := r.Context() +// ack := cctx.GetAccessKey(r) +// var err error +// defer func() { +// cctx.SetHandleInf(r, h.name(), err) +// }() +// +// bucname, objname, err := requests.ParseBucketAndObject(r) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) +// return +// } +// +// err = s3utils.CheckCompleteMultipartArgs(ctx, bucname, objname) +// if err != nil { // todo: convert error +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// // Content-Length is required and should be non-zero +// if r.ContentLength <= 0 { +// responses.WriteErrorResponse(w, r, responses.ErrMissingContentLength) +// return +// } +// +// // Get upload id. +// uploadID, _, _, _, rerr := h.getObjectResources(r.Form) +// if rerr != nil { +// err = rerr +// responses.WriteErrorResponse(w, r, rerr) +// return +// } +// +// complMultipartUpload := &object.CompleteMultipartUpload{} +// if err = utils.XmlDecoder(r.Body, complMultipartUpload, r.ContentLength); err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrMalformedXML) +// return +// } +// if len(complMultipartUpload.Parts) == 0 { +// responses.WriteErrorResponse(w, r, responses.ErrMalformedXML) +// return +// } +// if !sort.IsSorted(object.CompletedParts(complMultipartUpload.Parts)) { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidPartOrder) +// return +// } +// +// // rlock bucket +// runlock, err := h.rlock(ctx, bucname, w, r) +// if err != nil { +// return +// } +// defer runlock() +// +// // rlock object +// unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) +// if err != nil { +// return +// } +// defer unlock() +// +// err = h.bucsvc.CheckACL(ack, bucname, action.CompleteMultipartUploadAction) +// if errors.Is(err, object.ErrBucketNotFound) { +// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) +// return +// } +// if err != nil { +// 
responses.WriteErrorResponse(w, r, err) +// return +// } +// +// obj, err := h.objsvc.CompleteMultiPartUpload(ctx, bucname, objname, uploadID, complMultipartUpload.Parts) +// if errors.Is(err, object.ErrUploadNotFound) { +// rerr = responses.ErrNoSuchUpload +// return +// } +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// buc, err := h.bucsvc.GetBucketMeta(ctx, bucname) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// responses.WriteCompleteMultipartUploadResponse(w, r, bucname, objname, buc.Region, obj) +// +// return +//} +// diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index 559e37b2d..c7c89ff4d 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -1,790 +1,773 @@ package handlers import ( - "encoding/base64" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "path" - "strconv" - "strings" "time" - - "github.com/bittorrent/go-btfs/s3/action" - "github.com/bittorrent/go-btfs/s3/cctx" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/requests" - "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/s3utils" - "github.com/bittorrent/go-btfs/s3/services/object" - "github.com/bittorrent/go-btfs/s3/utils/hash" ) const lockWaitTimeout = 5 * time.Minute -func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - // X-Amz-Copy-Source shouldn't be set for this call. 
- if _, ok := r.Header[consts.AmzCopySource]; ok { - err = errors.New("shouldn't be copy") - responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) - return - } - - aclHeader := r.Header.Get(consts.AmzACL) - if aclHeader != "" { - err = errors.New("object acl can only set to default") - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) - return - } - - bucname, objname, err := requests.ParseBucketAndObject(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) - return - } - - err = s3utils.CheckPutObjectArgs(ctx, bucname, objname) - if err != nil { // todo: convert error - responses.WriteErrorResponse(w, r, err) - return - } - - meta, err := extractMetadata(ctx, r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequest) - return - } - - if r.ContentLength == 0 { - responses.WriteErrorResponse(w, r, responses.ErrEntityTooSmall) - return - } - - hrdr, ok := r.Body.(*hash.Reader) - if !ok { - responses.WriteErrorResponse(w, r, responses.ErrInternalError) - return - } - - // rlock bucket - runlock, err := h.rlock(ctx, bucname, w, r) - if err != nil { - return - } - defer runlock() - - // lock object - unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) - if err != nil { - return - } - defer unlock() - - err = h.bucsvc.CheckACL(ack, bucname, action.PutObjectAction) - if errors.Is(err, object.ErrBucketNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - //objsvc - obj, err := h.objsvc.PutObject(ctx, bucname, objname, hrdr, r.ContentLength, meta) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - responses.WritePutObjectResponse(w, r, obj) - - return -} - -// HeadObjectHandler - HEAD Object -func (h *Handlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err 
error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - bucname, objname, err := requests.ParseBucketAndObject(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) - return - } - - if err := s3utils.CheckGetObjArgs(ctx, bucname, objname); err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) - return - } - - err = h.bucsvc.CheckACL(ack, bucname, action.HeadObjectAction) - if errors.Is(err, object.ErrBucketNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - // rlock bucket - runlock, err := h.rlock(ctx, bucname, w, r) - if err != nil { - return - } - defer runlock() - - // rlock object - runlockObj, err := h.rlock(ctx, bucname+"/"+objname, w, r) - if err != nil { - return - } - defer runlockObj() - - //objsvc - obj, err := h.objsvc.GetObjectInfo(ctx, bucname, objname) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - w.Header().Set(consts.AmzServerSideEncryption, consts.AmzEncryptionAES) - - // Set standard object headers. - responses.SetObjectHeaders(w, r, obj) - // Set any additional requested response headers. - responses.SetHeadGetRespHeaders(w, r.Form) - - // Successful response. 
- w.WriteHeader(http.StatusOK) -} - -// CopyObjectHandler - Copy Object -func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - dstBucket, dstObject, err := requests.ParseBucketAndObject(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) - return - } - if err := s3utils.CheckPutObjectArgs(ctx, dstBucket, dstObject); err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - err = h.bucsvc.CheckACL(ack, dstBucket, action.CopyObjectAction) - if errors.Is(err, object.ErrBucketNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - // Copy source path. - cpSrcPath, err := url.QueryUnescape(r.Header.Get(consts.AmzCopySource)) - if err != nil { - // Save unescaped string as is. - cpSrcPath = r.Header.Get(consts.AmzCopySource) - } - srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) - // If source object is empty or bucket is empty, reply back invalid copy source. 
- if srcObject == "" || srcBucket == "" { - responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) - return - } - if err = s3utils.CheckGetObjArgs(ctx, srcBucket, srcObject); err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - if srcBucket == dstBucket && srcObject == dstObject { - responses.WriteErrorResponse(w, r, responses.ErrInvalidCopyDest) - return - } - err = h.bucsvc.CheckACL(ack, srcBucket, action.CopyObjectAction) - if errors.Is(err, object.ErrBucketNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - fmt.Printf("CopyObjectHandler %s %s => %s %s \n", srcBucket, srcObject, dstBucket, dstObject) - - // rlock bucket 1 - runlock1, err := h.rlock(ctx, srcBucket, w, r) - if err != nil { - return - } - defer runlock1() - - // rlock object 1 - runlockObj1, err := h.rlock(ctx, srcBucket+"/"+srcObject, w, r) - if err != nil { - return - } - defer runlockObj1() - - // rlock bucket 2 - runlock2, err := h.rlock(ctx, dstBucket, w, r) - if err != nil { - return - } - defer runlock2() - - // lock object 2 - unlockObj2, err := h.lock(ctx, dstBucket+"/"+dstObject, w, r) - if err != nil { - return - } - defer unlockObj2() - - //objsvc - srcObjInfo, err := h.objsvc.GetObjectInfo(ctx, srcBucket, srcObject) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - metadata := make(map[string]string) - metadata[strings.ToLower(consts.ContentType)] = srcObjInfo.ContentType - metadata[strings.ToLower(consts.ContentEncoding)] = srcObjInfo.ContentEncoding - if isReplace(r) { - inputMeta, err := extractMetadata(ctx, r) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - for key, val := range inputMeta { - metadata[key] = val - } - } - - //objsvc - obj, err := h.objsvc.CopyObject(ctx, dstBucket, dstObject, srcObjInfo, srcObjInfo.Size, metadata) - if err != nil { - responses.WriteErrorResponse(w, r, 
err) - return - } - - resp := responses.CopyObjectResult{ - ETag: "\"" + obj.ETag + "\"", - LastModified: obj.ModTime.UTC().Format(consts.Iso8601TimeFormat), - } - - setPutObjHeaders(w, obj, false) - - responses.WriteSuccessResponseXML(w, r, resp) -} - -// DeleteObjectHandler - delete an object -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html -func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - bucname, objname, err := requests.ParseBucketAndObject(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) - return - } - if err := s3utils.CheckDelObjArgs(ctx, bucname, objname); err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - err = h.bucsvc.CheckACL(ack, bucname, action.DeleteObjectAction) - if errors.Is(err, object.ErrBucketNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - // rlock bucket - runlock, err := h.rlock(ctx, bucname, w, r) - if err != nil { - return - } - defer runlock() - - // lock object - unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) - if err != nil { - return - } - defer unlock() - - //objsvc - obj, err := h.objsvc.GetObjectInfo(ctx, bucname, objname) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - //objsvc - err = h.objsvc.DeleteObject(ctx, bucname, objname) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - setPutObjHeaders(w, obj, true) - responses.WriteSuccessNoContent(w) -} - -// DeleteObjectsHandler - delete objects -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html -func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err 
error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - bucname, objname, err := requests.ParseBucketAndObject(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) - return - } - if err := s3utils.CheckDelObjArgs(ctx, bucname, objname); err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - err = h.bucsvc.CheckACL(ack, bucname, action.DeleteObjectAction) - if errors.Is(err, object.ErrBucketNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - // rlock bucket - runlock, err := h.rlock(ctx, bucname, w, r) - if err != nil { - return - } - defer runlock() - - // lock object - unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) - if err != nil { - return - } - defer unlock() - - //objsvc - obj, err := h.objsvc.GetObjectInfo(ctx, bucname, objname) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - //objsvc - err = h.objsvc.DeleteObject(ctx, bucname, objname) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - setPutObjHeaders(w, obj, true) - responses.WriteSuccessNoContent(w) -} - -// GetObjectHandler - GET Object -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - bucname, objname, err := requests.ParseBucketAndObject(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) - return - } - if err = s3utils.CheckGetObjArgs(ctx, bucname, objname); err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - err = h.bucsvc.CheckACL(ack, bucname, action.GetObjectAction) - if errors.Is(err, object.ErrBucketNotFound) { - responses.WriteErrorResponse(w, r, 
responses.ErrNoSuchBucket) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - // rlock bucket - runlock, err := h.rlock(ctx, bucname, w, r) - if err != nil { - return - } - defer runlock() - - // rlock object - runlockObj, err := h.rlock(ctx, bucname+"/"+objname, w, r) - if err != nil { - return - } - defer runlockObj() - - //objsvc - obj, reader, err := h.objsvc.GetObject(ctx, bucname, objname) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - //w.Header().Set(consts.AmzServerSideEncryption, consts.AmzEncryptionAES) - - responses.SetObjectHeaders(w, r, obj) - w.Header().Set(consts.ContentLength, strconv.FormatInt(obj.Size, 10)) - responses.SetHeadGetRespHeaders(w, r.Form) - _, err = io.Copy(w, reader) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInternalError) - return - } -} - -// GetObjectACLHandler - GET Object ACL -func (h *Handlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - bucname, _, err := requests.ParseBucketAndObject(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) - return - } - - err = h.bucsvc.CheckACL(ack, bucname, action.GetBucketAclAction) - if errors.Is(err, object.ErrBucketNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - // rlock bucket - runlock, err := h.rlock(ctx, bucname, w, r) - if err != nil { - return - } - defer runlock() - - acl, err := h.bucsvc.GetBucketAcl(ctx, bucname) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - responses.WriteGetBucketAclResponse(w, r, ack, acl) -} - -func (h *Handlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err 
error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - bucname, _, err := requests.ParseBucketAndObject(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) - return - } - - // Extract all the litsObjectsV1 query params to their native values. - prefix, marker, delimiter, maxKeys, encodingType, s3Error := getListObjectsV1Args(r.Form) - if s3Error != nil { - responses.WriteErrorResponse(w, r, s3Error) - return - } - - if err := s3utils.CheckListObjsArgs(ctx, bucname, prefix, marker); err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - // rlock bucket - runlock, err := h.rlock(ctx, bucname, w, r) - if err != nil { - return - } - defer runlock() - - err = h.bucsvc.CheckACL(ack, bucname, action.ListObjectsAction) - if errors.Is(err, object.ErrBucketNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - //objsvc - objs, err := h.objsvc.ListObjects(ctx, bucname, prefix, marker, delimiter, maxKeys) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - resp := responses.GenerateListObjectsV1Response(bucname, prefix, marker, delimiter, encodingType, maxKeys, objs) - // Write success response. 
- responses.WriteSuccessResponseXML(w, r, resp) -} - -func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - bucname, _, err := requests.ParseBucketAndObject(r) - if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) - return - } - - err = h.bucsvc.CheckACL(ack, bucname, action.ListObjectsAction) - if errors.Is(err, object.ErrBucketNotFound) { - responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) - return - } - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - urlValues := r.Form - // Extract all the listObjectsV2 query params to their native values. - prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(urlValues) - if errCode != nil { - responses.WriteErrorResponse(w, r, errCode) - return - } - - marker := token - if marker == "" { - marker = startAfter - } - if err := s3utils.CheckListObjsArgs(ctx, bucname, prefix, marker); err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - // Validate the query params before beginning to serve the request. - // fetch-owner is not validated since it is a boolean - s3Error := validateListObjectsArgs(token, delimiter, encodingType, maxKeys) - if s3Error != nil { - responses.WriteErrorResponse(w, r, s3Error) - return - } - - // rlock bucket - runlock, err := h.rlock(ctx, bucname, w, r) - if err != nil { - return - } - defer runlock() - - // Initiate a list objects operation based on the input params. - // On success would return back ListObjectsInfo object to be - // marshaled into S3 compatible XML header. 
- //objsvc - listObjectsV2Info, err := h.objsvc.ListObjectsV2(ctx, bucname, prefix, token, delimiter, - maxKeys, fetchOwner, startAfter) - if err != nil { - responses.WriteErrorResponse(w, r, err) - return - } - - resp := responses.GenerateListObjectsV2Response( - bucname, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter, - delimiter, encodingType, listObjectsV2Info.IsTruncated, - maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes) - - // Write success response. - responses.WriteSuccessResponseXML(w, r, resp) -} - -// setPutObjHeaders sets all the necessary headers returned back -// upon a success Put/Copy/CompleteMultipart/Delete requests -// to activate delete only headers set delete as true -func setPutObjHeaders(w http.ResponseWriter, obj object.Object, delete bool) { - // We must not use the http.Header().Set method here because some (broken) - // clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive). - // Therefore, we have to set the ETag directly as map entry. - if obj.ETag != "" && !delete { - w.Header()[consts.ETag] = []string{`"` + obj.ETag + `"`} - } - - // Set the relevant version ID as part of the response header. 
- if obj.VersionID != "" { - w.Header()[consts.AmzVersionID] = []string{obj.VersionID} - // If version is a deleted marker, set this header as well - if obj.DeleteMarker && delete { // only returned during delete object - w.Header()[consts.AmzDeleteMarker] = []string{strconv.FormatBool(obj.DeleteMarker)} - } - } - - if obj.Bucket != "" && obj.Name != "" { - // do something - } -} - -func pathToBucketAndObject(path string) (bucket, object string) { - path = strings.TrimPrefix(path, consts.SlashSeparator) - idx := strings.Index(path, consts.SlashSeparator) - if idx < 0 { - return path, "" - } - return path[:idx], path[idx+len(consts.SlashSeparator):] -} - -func isReplace(r *http.Request) bool { - return r.Header.Get("X-Amz-Metadata-Directive") == "REPLACE" -} - -// Parse bucket url queries -func getListObjectsV1Args(values url.Values) ( - prefix, marker, delimiter string, maxkeys int, encodingType string, errCode error) { - - if values.Get("max-keys") != "" { - var err error - if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil { - errCode = responses.ErrInvalidMaxKeys - return - } - } else { - maxkeys = consts.MaxObjectList - } - - prefix = trimLeadingSlash(values.Get("prefix")) - marker = trimLeadingSlash(values.Get("marker")) - delimiter = values.Get("delimiter") - encodingType = values.Get("encoding-type") - return -} - -// Parse bucket url queries for ListObjects V2. -func getListObjectsV2Args(values url.Values) ( - prefix, token, startAfter, delimiter string, - fetchOwner bool, maxkeys int, encodingType string, errCode error) { - - // The continuation-token cannot be empty. - if val, ok := values["continuation-token"]; ok { - if len(val[0]) == 0 { - errCode = responses.ErrInvalidToken - return - } - } - - if values.Get("max-keys") != "" { - var err error - if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil { - errCode = responses.ErrInvalidMaxKeys - return - } - // Over flowing count - reset to maxObjectList. 
- if maxkeys > consts.MaxObjectList { - maxkeys = consts.MaxObjectList - } - } else { - maxkeys = consts.MaxObjectList - } - - prefix = trimLeadingSlash(values.Get("prefix")) - startAfter = trimLeadingSlash(values.Get("start-after")) - delimiter = values.Get("delimiter") - fetchOwner = values.Get("fetch-owner") == "true" - encodingType = values.Get("encoding-type") - - if token = values.Get("continuation-token"); token != "" { - decodedToken, err := base64.StdEncoding.DecodeString(token) - if err != nil { - errCode = responses.ErrIncorrectContinuationToken - return - } - token = string(decodedToken) - } - return -} - -func trimLeadingSlash(ep string) string { - if len(ep) > 0 && ep[0] == '/' { - // Path ends with '/' preserve it - if ep[len(ep)-1] == '/' && len(ep) > 1 { - ep = path.Clean(ep) - ep += "/" - } else { - ep = path.Clean(ep) - } - ep = ep[1:] - } - return ep -} - -// Validate all the ListObjects query arguments, returns an APIErrorCode -// if one of the args do not meet the required conditions. -// - delimiter if set should be equal to '/', otherwise the request is rejected. -// - marker if set should have a common prefix with 'prefix' param, otherwise -// the request is rejected. -func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int) error { - // Max keys cannot be negative. - if maxKeys < 0 { - return responses.ErrInvalidMaxKeys - } - - if encodingType != "" { - // AWS S3 spec only supports 'url' encoding type - if !strings.EqualFold(encodingType, "url") { - return responses.ErrInvalidEncodingMethod - } - } - - return nil -} +//func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { +// ctx := r.Context() +// ack := cctx.GetAccessKey(r) +// var err error +// defer func() { +// cctx.SetHandleInf(r, h.name(), err) +// }() +// +// // X-Amz-Copy-Source shouldn't be set for this call. 
+// if _, ok := r.Header[consts.AmzCopySource]; ok { +// err = errors.New("shouldn't be copy") +// responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) +// return +// } +// +// aclHeader := r.Header.Get(consts.AmzACL) +// if aclHeader != "" { +// err = errors.New("object acl can only set to default") +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) +// return +// } +// +// bucname, objname, err := requests.ParseBucketAndObject(r) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) +// return +// } +// +// err = s3utils.CheckPutObjectArgs(ctx, bucname, objname) +// if err != nil { // todo: convert error +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// meta, err := extractMetadata(ctx, r) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequest) +// return +// } +// +// if r.ContentLength == 0 { +// responses.WriteErrorResponse(w, r, responses.ErrEntityTooSmall) +// return +// } +// +// hrdr, ok := r.Body.(*hash.Reader) +// if !ok { +// responses.WriteErrorResponse(w, r, responses.ErrInternalError) +// return +// } +// +// // rlock bucket +// runlock, err := h.rlock(ctx, bucname, w, r) +// if err != nil { +// return +// } +// defer runlock() +// +// // lock object +// unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) +// if err != nil { +// return +// } +// defer unlock() +// +// err = h.bucsvc.CheckACL(ack, bucname, action.PutObjectAction) +// if errors.Is(err, object.ErrBucketNotFound) { +// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) +// return +// } +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// //objsvc +// obj, err := h.objsvc.PutObject(ctx, bucname, objname, hrdr, r.ContentLength, meta) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// responses.WritePutObjectResponse(w, r, obj) +// +// return +//} +// +//// 
HeadObjectHandler - HEAD Object +//func (h *Handlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { +// ctx := r.Context() +// ack := cctx.GetAccessKey(r) +// var err error +// defer func() { +// cctx.SetHandleInf(r, h.name(), err) +// }() +// +// bucname, objname, err := requests.ParseBucketAndObject(r) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) +// return +// } +// +// if err := s3utils.CheckGetObjArgs(ctx, bucname, objname); err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) +// return +// } +// +// err = h.bucsvc.CheckACL(ack, bucname, action.HeadObjectAction) +// if errors.Is(err, object.ErrBucketNotFound) { +// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) +// return +// } +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// // rlock bucket +// runlock, err := h.rlock(ctx, bucname, w, r) +// if err != nil { +// return +// } +// defer runlock() +// +// // rlock object +// runlockObj, err := h.rlock(ctx, bucname+"/"+objname, w, r) +// if err != nil { +// return +// } +// defer runlockObj() +// +// //objsvc +// obj, err := h.objsvc.GetObjectInfo(ctx, bucname, objname) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// w.Header().Set(consts.AmzServerSideEncryption, consts.AmzEncryptionAES) +// +// // Set standard object headers. +// responses.SetObjectHeaders(w, r, obj) +// // Set any additional requested response headers. +// responses.SetHeadGetRespHeaders(w, r.Form) +// +// // Successful response. 
+// w.WriteHeader(http.StatusOK) +//} +// +//// CopyObjectHandler - Copy Object +//func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { +// ctx := r.Context() +// ack := cctx.GetAccessKey(r) +// var err error +// defer func() { +// cctx.SetHandleInf(r, h.name(), err) +// }() +// +// dstBucket, dstObject, err := requests.ParseBucketAndObject(r) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) +// return +// } +// if err := s3utils.CheckPutObjectArgs(ctx, dstBucket, dstObject); err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// err = h.bucsvc.CheckACL(ack, dstBucket, action.CopyObjectAction) +// if errors.Is(err, object.ErrBucketNotFound) { +// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) +// return +// } +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// // Copy source path. +// cpSrcPath, err := url.QueryUnescape(r.Header.Get(consts.AmzCopySource)) +// if err != nil { +// // Save unescaped string as is. +// cpSrcPath = r.Header.Get(consts.AmzCopySource) +// } +// srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) +// // If source object is empty or bucket is empty, reply back invalid copy source. 
+// if srcObject == "" || srcBucket == "" { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) +// return +// } +// if err = s3utils.CheckGetObjArgs(ctx, srcBucket, srcObject); err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// if srcBucket == dstBucket && srcObject == dstObject { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidCopyDest) +// return +// } +// err = h.bucsvc.CheckACL(ack, srcBucket, action.CopyObjectAction) +// if errors.Is(err, object.ErrBucketNotFound) { +// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) +// return +// } +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// fmt.Printf("CopyObjectHandler %s %s => %s %s \n", srcBucket, srcObject, dstBucket, dstObject) +// +// // rlock bucket 1 +// runlock1, err := h.rlock(ctx, srcBucket, w, r) +// if err != nil { +// return +// } +// defer runlock1() +// +// // rlock object 1 +// runlockObj1, err := h.rlock(ctx, srcBucket+"/"+srcObject, w, r) +// if err != nil { +// return +// } +// defer runlockObj1() +// +// // rlock bucket 2 +// runlock2, err := h.rlock(ctx, dstBucket, w, r) +// if err != nil { +// return +// } +// defer runlock2() +// +// // lock object 2 +// unlockObj2, err := h.lock(ctx, dstBucket+"/"+dstObject, w, r) +// if err != nil { +// return +// } +// defer unlockObj2() +// +// //objsvc +// srcObjInfo, err := h.objsvc.GetObjectInfo(ctx, srcBucket, srcObject) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// metadata := make(map[string]string) +// metadata[strings.ToLower(consts.ContentType)] = srcObjInfo.ContentType +// metadata[strings.ToLower(consts.ContentEncoding)] = srcObjInfo.ContentEncoding +// if isReplace(r) { +// inputMeta, err := extractMetadata(ctx, r) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// for key, val := range inputMeta { +// metadata[key] = val +// } +// } +// +// //objsvc +// obj, 
err := h.objsvc.CopyObject(ctx, dstBucket, dstObject, srcObjInfo, srcObjInfo.Size, metadata) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// resp := responses.CopyObjectResult{ +// ETag: "\"" + obj.ETag + "\"", +// LastModified: obj.ModTime.UTC().Format(consts.Iso8601TimeFormat), +// } +// +// setPutObjHeaders(w, obj, false) +// +// responses.WriteSuccessResponseXML(w, r, resp) +//} +// +//// DeleteObjectHandler - delete an object +//// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +//func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { +// ctx := r.Context() +// ack := cctx.GetAccessKey(r) +// var err error +// defer func() { +// cctx.SetHandleInf(r, h.name(), err) +// }() +// +// bucname, objname, err := requests.ParseBucketAndObject(r) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) +// return +// } +// if err := s3utils.CheckDelObjArgs(ctx, bucname, objname); err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// err = h.bucsvc.CheckACL(ack, bucname, action.DeleteObjectAction) +// if errors.Is(err, object.ErrBucketNotFound) { +// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) +// return +// } +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// // rlock bucket +// runlock, err := h.rlock(ctx, bucname, w, r) +// if err != nil { +// return +// } +// defer runlock() +// +// // lock object +// unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) +// if err != nil { +// return +// } +// defer unlock() +// +// //objsvc +// obj, err := h.objsvc.GetObjectInfo(ctx, bucname, objname) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// //objsvc +// err = h.objsvc.DeleteObject(ctx, bucname, objname) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// setPutObjHeaders(w, obj, true) +// 
responses.WriteSuccessNoContent(w) +//} +// +//// DeleteObjectsHandler - delete objects +//// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html +//func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) { +// ctx := r.Context() +// ack := cctx.GetAccessKey(r) +// var err error +// defer func() { +// cctx.SetHandleInf(r, h.name(), err) +// }() +// +// bucname, objname, err := requests.ParseBucketAndObject(r) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) +// return +// } +// if err := s3utils.CheckDelObjArgs(ctx, bucname, objname); err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// err = h.bucsvc.CheckACL(ack, bucname, action.DeleteObjectAction) +// if errors.Is(err, object.ErrBucketNotFound) { +// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) +// return +// } +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// // rlock bucket +// runlock, err := h.rlock(ctx, bucname, w, r) +// if err != nil { +// return +// } +// defer runlock() +// +// // lock object +// unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) +// if err != nil { +// return +// } +// defer unlock() +// +// //objsvc +// obj, err := h.objsvc.GetObjectInfo(ctx, bucname, objname) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// //objsvc +// err = h.objsvc.DeleteObject(ctx, bucname, objname) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// setPutObjHeaders(w, obj, true) +// responses.WriteSuccessNoContent(w) +//} +// +//// GetObjectHandler - GET Object +//// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +//func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { +// ctx := r.Context() +// ack := cctx.GetAccessKey(r) +// var err error +// defer func() { +// cctx.SetHandleInf(r, h.name(), err) +// }() +// +// bucname, 
objname, err := requests.ParseBucketAndObject(r) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) +// return +// } +// if err = s3utils.CheckGetObjArgs(ctx, bucname, objname); err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// err = h.bucsvc.CheckACL(ack, bucname, action.GetObjectAction) +// if errors.Is(err, object.ErrBucketNotFound) { +// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) +// return +// } +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// // rlock bucket +// runlock, err := h.rlock(ctx, bucname, w, r) +// if err != nil { +// return +// } +// defer runlock() +// +// // rlock object +// runlockObj, err := h.rlock(ctx, bucname+"/"+objname, w, r) +// if err != nil { +// return +// } +// defer runlockObj() +// +// //objsvc +// obj, reader, err := h.objsvc.GetObject(ctx, bucname, objname) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// //w.Header().Set(consts.AmzServerSideEncryption, consts.AmzEncryptionAES) +// +// responses.SetObjectHeaders(w, r, obj) +// w.Header().Set(consts.ContentLength, strconv.FormatInt(obj.Size, 10)) +// responses.SetHeadGetRespHeaders(w, r.Form) +// _, err = io.Copy(w, reader) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInternalError) +// return +// } +//} +// +//// GetObjectACLHandler - GET Object ACL +//func (h *Handlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) { +// ctx := r.Context() +// ack := cctx.GetAccessKey(r) +// var err error +// defer func() { +// cctx.SetHandleInf(r, h.name(), err) +// }() +// +// bucname, _, err := requests.ParseBucketAndObject(r) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) +// return +// } +// +// err = h.bucsvc.CheckACL(ack, bucname, action.GetBucketAclAction) +// if errors.Is(err, object.ErrBucketNotFound) { +// 
responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) +// return +// } +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// // rlock bucket +// runlock, err := h.rlock(ctx, bucname, w, r) +// if err != nil { +// return +// } +// defer runlock() +// +// acl, err := h.bucsvc.GetBucketAcl(ctx, bucname) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// responses.WriteGetBucketAclResponse(w, r, ack, acl) +//} +// +//func (h *Handlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) { +// ctx := r.Context() +// ack := cctx.GetAccessKey(r) +// var err error +// defer func() { +// cctx.SetHandleInf(r, h.name(), err) +// }() +// +// bucname, _, err := requests.ParseBucketAndObject(r) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) +// return +// } +// +// // Extract all the litsObjectsV1 query params to their native values. +// prefix, marker, delimiter, maxKeys, encodingType, s3Error := getListObjectsV1Args(r.Form) +// if s3Error != nil { +// responses.WriteErrorResponse(w, r, s3Error) +// return +// } +// +// if err := s3utils.CheckListObjsArgs(ctx, bucname, prefix, marker); err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// // rlock bucket +// runlock, err := h.rlock(ctx, bucname, w, r) +// if err != nil { +// return +// } +// defer runlock() +// +// err = h.bucsvc.CheckACL(ack, bucname, action.ListObjectsAction) +// if errors.Is(err, object.ErrBucketNotFound) { +// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) +// return +// } +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// //objsvc +// objs, err := h.objsvc.ListObjects(ctx, bucname, prefix, marker, delimiter, maxKeys) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// resp := responses.GenerateListObjectsV1Response(bucname, prefix, marker, delimiter, 
encodingType, maxKeys, objs) +// // Write success response. +// responses.WriteSuccessResponseXML(w, r, resp) +//} +// +//func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { +// ctx := r.Context() +// ack := cctx.GetAccessKey(r) +// var err error +// defer func() { +// cctx.SetHandleInf(r, h.name(), err) +// }() +// +// bucname, _, err := requests.ParseBucketAndObject(r) +// if err != nil { +// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) +// return +// } +// +// err = h.bucsvc.CheckACL(ack, bucname, action.ListObjectsAction) +// if errors.Is(err, object.ErrBucketNotFound) { +// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) +// return +// } +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// urlValues := r.Form +// // Extract all the listObjectsV2 query params to their native values. +// prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(urlValues) +// if errCode != nil { +// responses.WriteErrorResponse(w, r, errCode) +// return +// } +// +// marker := token +// if marker == "" { +// marker = startAfter +// } +// if err := s3utils.CheckListObjsArgs(ctx, bucname, prefix, marker); err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// // Validate the query params before beginning to serve the request. +// // fetch-owner is not validated since it is a boolean +// s3Error := validateListObjectsArgs(token, delimiter, encodingType, maxKeys) +// if s3Error != nil { +// responses.WriteErrorResponse(w, r, s3Error) +// return +// } +// +// // rlock bucket +// runlock, err := h.rlock(ctx, bucname, w, r) +// if err != nil { +// return +// } +// defer runlock() +// +// // Initiate a list objects operation based on the input params. +// // On success would return back ListObjectsInfo object to be +// // marshaled into S3 compatible XML header. 
+// //objsvc +// listObjectsV2Info, err := h.objsvc.ListObjectsV2(ctx, bucname, prefix, token, delimiter, +// maxKeys, fetchOwner, startAfter) +// if err != nil { +// responses.WriteErrorResponse(w, r, err) +// return +// } +// +// resp := responses.GenerateListObjectsV2Response( +// bucname, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter, +// delimiter, encodingType, listObjectsV2Info.IsTruncated, +// maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes) +// +// // Write success response. +// responses.WriteSuccessResponseXML(w, r, resp) +//} +// +//// setPutObjHeaders sets all the necessary headers returned back +//// upon a success Put/Copy/CompleteMultipart/Delete requests +//// to activate delete only headers set delete as true +//func setPutObjHeaders(w http.ResponseWriter, obj object.Object, delete bool) { +// // We must not use the http.Header().Set method here because some (broken) +// // clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive). +// // Therefore, we have to set the ETag directly as map entry. +// if obj.ETag != "" && !delete { +// w.Header()[consts.ETag] = []string{`"` + obj.ETag + `"`} +// } +// +// // Set the relevant version ID as part of the response header. 
+// if obj.VersionID != "" { +// w.Header()[consts.AmzVersionID] = []string{obj.VersionID} +// // If version is a deleted marker, set this header as well +// if obj.DeleteMarker && delete { // only returned during delete object +// w.Header()[consts.AmzDeleteMarker] = []string{strconv.FormatBool(obj.DeleteMarker)} +// } +// } +// +// if obj.Bucket != "" && obj.Name != "" { +// // do something +// } +//} +// +//func pathToBucketAndObject(path string) (bucket, object string) { +// path = strings.TrimPrefix(path, consts.SlashSeparator) +// idx := strings.Index(path, consts.SlashSeparator) +// if idx < 0 { +// return path, "" +// } +// return path[:idx], path[idx+len(consts.SlashSeparator):] +//} +// +//func isReplace(r *http.Request) bool { +// return r.Header.Get("X-Amz-Metadata-Directive") == "REPLACE" +//} +// +//// Parse bucket url queries +//func getListObjectsV1Args(values url.Values) ( +// prefix, marker, delimiter string, maxkeys int, encodingType string, errCode error) { +// +// if values.Get("max-keys") != "" { +// var err error +// if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil { +// errCode = responses.ErrInvalidMaxKeys +// return +// } +// } else { +// maxkeys = consts.MaxObjectList +// } +// +// prefix = trimLeadingSlash(values.Get("prefix")) +// marker = trimLeadingSlash(values.Get("marker")) +// delimiter = values.Get("delimiter") +// encodingType = values.Get("encoding-type") +// return +//} +// +//// Parse bucket url queries for ListObjects V2. +//func getListObjectsV2Args(values url.Values) ( +// prefix, token, startAfter, delimiter string, +// fetchOwner bool, maxkeys int, encodingType string, errCode error) { +// +// // The continuation-token cannot be empty. 
+// if val, ok := values["continuation-token"]; ok { +// if len(val[0]) == 0 { +// errCode = responses.ErrInvalidToken +// return +// } +// } +// +// if values.Get("max-keys") != "" { +// var err error +// if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil { +// errCode = responses.ErrInvalidMaxKeys +// return +// } +// // Over flowing count - reset to maxObjectList. +// if maxkeys > consts.MaxObjectList { +// maxkeys = consts.MaxObjectList +// } +// } else { +// maxkeys = consts.MaxObjectList +// } +// +// prefix = trimLeadingSlash(values.Get("prefix")) +// startAfter = trimLeadingSlash(values.Get("start-after")) +// delimiter = values.Get("delimiter") +// fetchOwner = values.Get("fetch-owner") == "true" +// encodingType = values.Get("encoding-type") +// +// if token = values.Get("continuation-token"); token != "" { +// decodedToken, err := base64.StdEncoding.DecodeString(token) +// if err != nil { +// errCode = responses.ErrIncorrectContinuationToken +// return +// } +// token = string(decodedToken) +// } +// return +//} +// +//func trimLeadingSlash(ep string) string { +// if len(ep) > 0 && ep[0] == '/' { +// // Path ends with '/' preserve it +// if ep[len(ep)-1] == '/' && len(ep) > 1 { +// ep = path.Clean(ep) +// ep += "/" +// } else { +// ep = path.Clean(ep) +// } +// ep = ep[1:] +// } +// return ep +//} +// +//// Validate all the ListObjects query arguments, returns an APIErrorCode +//// if one of the args do not meet the required conditions. +//// - delimiter if set should be equal to '/', otherwise the request is rejected. +//// - marker if set should have a common prefix with 'prefix' param, otherwise +//// the request is rejected. +//func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int) error { +// // Max keys cannot be negative. 
+// if maxKeys < 0 { +// return responses.ErrInvalidMaxKeys +// } +// +// if encodingType != "" { +// // AWS S3 spec only supports 'url' encoding type +// if !strings.EqualFold(encodingType, "url") { +// return responses.ErrInvalidEncodingMethod +// } +// } +// +// return nil +//} +// diff --git a/s3/handlers/proto.go b/s3/handlers/proto.go index d4dd4c350..25f80d424 100644 --- a/s3/handlers/proto.go +++ b/s3/handlers/proto.go @@ -13,7 +13,7 @@ type Handlerser interface { // Bucket - PutBucketHandler(w http.ResponseWriter, r *http.Request) + CreateBucketHandler(w http.ResponseWriter, r *http.Request) HeadBucketHandler(w http.ResponseWriter, r *http.Request) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) ListBucketsHandler(w http.ResponseWriter, r *http.Request) @@ -22,19 +22,19 @@ type Handlerser interface { // Object - PutObjectHandler(w http.ResponseWriter, r *http.Request) - HeadObjectHandler(w http.ResponseWriter, r *http.Request) - CopyObjectHandler(w http.ResponseWriter, r *http.Request) - DeleteObjectHandler(w http.ResponseWriter, r *http.Request) - GetObjectHandler(w http.ResponseWriter, r *http.Request) - GetObjectACLHandler(w http.ResponseWriter, r *http.Request) - ListObjectsHandler(w http.ResponseWriter, r *http.Request) - ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) - - // Multipart - - CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) - UploadPartHandler(w http.ResponseWriter, r *http.Request) - AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) - CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) + //PutObjectHandler(w http.ResponseWriter, r *http.Request) + //HeadObjectHandler(w http.ResponseWriter, r *http.Request) + //CopyObjectHandler(w http.ResponseWriter, r *http.Request) + //DeleteObjectHandler(w http.ResponseWriter, r *http.Request) + //GetObjectHandler(w http.ResponseWriter, r *http.Request) + //GetObjectACLHandler(w http.ResponseWriter, r *http.Request) + 
//ListObjectsHandler(w http.ResponseWriter, r *http.Request) + //ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) + + //// Multipart + + //CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) + //UploadPartHandler(w http.ResponseWriter, r *http.Request) + //AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) + //CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) } diff --git a/s3/requests/parsers.go b/s3/requests/parsers.go index abccb8f05..5e55f7262 100644 --- a/s3/requests/parsers.go +++ b/s3/requests/parsers.go @@ -5,24 +5,19 @@ import ( "github.com/bittorrent/go-btfs/s3/responses" "net/http" "path" - - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/gorilla/mux" ) -//type PutObjectRequest struct { -// Bucket string -// Object string -// Body io.Reader -//} -// -//func (req *PutObjectRequest) Bind(r *http.Request) (err error) { -// return -//} +// PutBucketRequest . +type PutBucketRequest struct { + AccessKey string + Bucket string + ACL string + Region string +} func ParsePutBucketRequest(r *http.Request) (req *PutBucketRequest, rerr *responses.Error) { req = &PutBucketRequest{} - req.User = cctx.GetAccessKey(r) + req.AccessKey = cctx.GetAccessKey(r) req.Bucket, rerr = parseBucket(r) if rerr != nil { return @@ -35,77 +30,71 @@ func ParsePutBucketRequest(r *http.Request) (req *PutBucketRequest, rerr *respon return } -func ParseHeadBucketRequest(r *http.Request) (req *HeadBucketRequest, err error) { - req = &HeadBucketRequest{} - vars := mux.Vars(r) - bucket := vars["bucket"] +// DeleteBucketRequest . +type DeleteBucketRequest struct { + AccessKey string + Bucket string +} - //set request - req.Bucket = bucket +func ParseDeleteBucketRequest(r *http.Request) (req *DeleteBucketRequest, rerr *responses.Error) { + req = &DeleteBucketRequest{} + req.AccessKey = cctx.GetAccessKey(r) + req.Bucket, rerr = parseBucket(r) return } -// DeleteBucketRequest . 
-type DeleteBucketRequest struct { - Bucket string +// HeadBucketRequest . +type HeadBucketRequest struct { + AccessKey string + Bucket string } -func ParseDeleteBucketRequest(r *http.Request) (req *DeleteBucketRequest, err error) { - vars := mux.Vars(r) - bucket := vars["bucket"] - - //set request - req = &DeleteBucketRequest{} - req.Bucket = bucket +func ParseHeadBucketRequest(r *http.Request) (req *HeadBucketRequest, rerr *responses.Error) { + req = &HeadBucketRequest{} + req.AccessKey = cctx.GetAccessKey(r) + req.Bucket, rerr = parseBucket(r) return } // ListBucketsRequest . type ListBucketsRequest struct { - Bucket string + AccessKey string } -func ParseListBucketsRequest(r *http.Request) (req *ListBucketsRequest, err error) { - vars := mux.Vars(r) - bucket := vars["bucket"] - - //set request +func ParseListBucketsRequest(r *http.Request) (req *ListBucketsRequest, rerr *responses.Error) { req = &ListBucketsRequest{} - req.Bucket = bucket + req.AccessKey = cctx.GetAccessKey(r) return } // GetBucketAclRequest . type GetBucketAclRequest struct { - Bucket string + AccessKey string + Bucket string } -func ParseGetBucketAclRequest(r *http.Request) (req *GetBucketAclRequest, err error) { - vars := mux.Vars(r) - bucket := vars["bucket"] - - //set request +func ParseGetBucketAclRequest(r *http.Request) (req *GetBucketAclRequest, rerr *responses.Error) { req = &GetBucketAclRequest{} - req.Bucket = bucket + req.AccessKey = cctx.GetAccessKey(r) + req.Bucket, rerr = parseBucket(r) return } // PutBucketAclRequest . 
type PutBucketAclRequest struct { - Bucket string - ACL string + AccessKey string + Bucket string + ACL string } -func ParsePutBucketAclRequest(r *http.Request) (req *PutBucketAclRequest, err error) { - vars := mux.Vars(r) - bucket := vars["bucket"] - - acl := r.Header.Get(consts.AmzACL) - - //set request +func ParsePutBucketAclRequest(r *http.Request) (req *PutBucketAclRequest, rerr *responses.Error) { req = &PutBucketAclRequest{} - req.Bucket = bucket - req.ACL = acl + req.AccessKey = cctx.GetAccessKey(r) + req.Bucket, rerr = parseBucket(r) + if rerr != nil { + return + } + req.ACL, rerr = parseAcl(r) return } diff --git a/s3/requests/types.go b/s3/requests/types.go deleted file mode 100644 index cd88036dd..000000000 --- a/s3/requests/types.go +++ /dev/null @@ -1,14 +0,0 @@ -package requests - -// PutBucketRequest . -type PutBucketRequest struct { - User string - Bucket string - ACL string - Region string -} - -// HeadBucketRequest . -type HeadBucketRequest struct { - Bucket string -} diff --git a/s3/requests/types_common.go b/s3/requests/types_common.go index 116ba81ad..e2107f405 100644 --- a/s3/requests/types_common.go +++ b/s3/requests/types_common.go @@ -1,5 +1,7 @@ package requests +import "encoding/xml" + // createBucketConfiguration container for bucket configuration request from client. // Used for parsing the location from the request body for Makebucket. 
type createBucketLocationConfiguration struct { diff --git a/s3/responses/types.go b/s3/responses/types.go index bdc7eccd6..76298df4f 100644 --- a/s3/responses/types.go +++ b/s3/responses/types.go @@ -30,6 +30,7 @@ type GetBucketAclResponse AccessControlPolicy // // type AccessControlPolicy struct { + s3.AccessControlPolicy Owner canonicalUser `xml:"Owner"` AccessControlList accessControlList `xml:"AccessControlList"` } @@ -288,7 +289,7 @@ func GenerateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, } // generates an ListObjectsV1 response for the said bucket with other enumerated options. -func GenerateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp object.ListObjectsInfo) ListObjectsResponse { +func GenerateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp object.ObjectsList) ListObjectsResponse { contents := make([]Object, 0, len(resp.Objects)) id := consts.DefaultOwnerID name := consts.DisplayName diff --git a/s3/responses/wirters.go b/s3/responses/wirters.go index 465e5e09d..fb77fb46c 100644 --- a/s3/responses/wirters.go +++ b/s3/responses/wirters.go @@ -1,7 +1,6 @@ package responses import ( - "fmt" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/consts" @@ -27,52 +26,44 @@ func WriteDeleteBucketResponse(w http.ResponseWriter) { return } -func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, bucketMetas []*object.Bucket) { - var buckets []*s3.Bucket - for _, b := range bucketMetas { - buckets = append(buckets, &s3.Bucket{ - Name: aws.String(b.Name), - CreationDate: aws.Time(b.Created), - }) - } - - resp := ListAllMyBucketsResult{ +func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, userId, username string, buckets []*object.Bucket) { + resp := s3.ListBucketsOutput{ Owner: &s3.Owner{ - ID: aws.String(consts.DefaultOwnerID), - DisplayName: 
aws.String(consts.DisplayName), + ID: aws.String(userId), + DisplayName: aws.String(username), }, - Buckets: buckets, + Buckets: []*s3.Bucket{}, + } + + for _, buc := range buckets { + resp.Buckets = append(resp.Buckets, &s3.Bucket{ + Name: aws.String(buc.Name), + CreationDate: aws.Time(buc.Created), + }) } WriteSuccessResponseXML(w, r, resp) + return } -func WriteGetBucketAclResponse(w http.ResponseWriter, r *http.Request, key string, acl string) { - resp := GetBucketAclResponse{} - fmt.Printf(" -1- get acl resp: %+v \n", resp) - - id := key - if resp.Owner.DisplayName == "" { - resp.Owner.DisplayName = key - resp.Owner.ID = id +func WriteGetBucketAclResponse(w http.ResponseWriter, r *http.Request, userId, username, acl string) { + resp := s3.GetBucketAclOutput{ + Owner: &s3.Owner{ + ID: aws.String(userId), + DisplayName: aws.String(username), + }, + Grants: []*s3.Grant{ + { + Grantee: &s3.Grantee{ + ID: aws.String(userId), + DisplayName: aws.String(userId), + Type: aws.String("CanonicalUser"), + }, + Permission: aws.String("public-read"), + }, + }, } - fmt.Printf(" -2- get acl resp: %+v \n", resp) - - resp.AccessControlList.Grant = make([]Grant, 0) - resp.AccessControlList.Grant = append(resp.AccessControlList.Grant, Grant{ - Grantee: Grantee{ - ID: id, - DisplayName: key, - Type: "CanonicalUser", - XMLXSI: "CanonicalUser", - XMLNS: "http://www.w3.org/2001/XMLSchema-instance"}, - Permission: Permission(acl), //todo change - }) - fmt.Printf(" -3- get acl resp: %+v \n", resp) - - fmt.Printf("get acl resp: %+v \n", resp) - WriteSuccessResponseXML(w, r, resp) return } diff --git a/s3/responses/writers_common.go b/s3/responses/writers_common.go index f9b3ff914..ad2cb80b4 100644 --- a/s3/responses/writers_common.go +++ b/s3/responses/writers_common.go @@ -67,18 +67,11 @@ func WriteErrorResponseHeadersOnly(w http.ResponseWriter, r *http.Request, err e } // WriteErrorResponse write ErrorResponse -func WriteErrorResponse(w http.ResponseWriter, r *http.Request, err error) 
{ - var rerr *Error - if !errors.As(err, &rerr) { - rerr = ErrInternalError - } - vars := mux.Vars(r) - bucket := vars["bucket"] - object := vars["object"] +func WriteErrorResponse(w http.ResponseWriter, r *http.Request, rerr *Error) { errorResponse := RESTErrorResponse{ Code: rerr.Code(), - BucketName: bucket, - Key: object, + BucketName: mux.Vars(r)["bucket"], + Key: mux.Vars(r)["object"], Message: rerr.Description(), Resource: r.URL.Path, RequestID: fmt.Sprintf("%d", time.Now().UnixNano()), diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 189d80050..0090fe0e1 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -34,41 +34,41 @@ func (routers *Routers) Register() http.Handler { // multipart object... // CreateMultipart - bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(hs.CreateMultipartUploadHandler).Queries("uploads", "") - // UploadPart - bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(hs.UploadPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") - // CompleteMultipartUpload - bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(hs.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") - // AbortMultipart - bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + //bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(hs.CreateMultipartUploadHandler).Queries("uploads", "") + //// UploadPart + //bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(hs.UploadPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") + //// CompleteMultipartUpload + //bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(hs.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + //// AbortMultipart + 
//bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") - //object... - // ListObjectsV2 - bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsV2Handler).Queries("list-type", "2") - // ListObjects - bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsHandler) - // HeadObject - bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(hs.HeadObjectHandler) - // PutObject - bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(hs.PutObjectHandler) - // CopyObject - bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(hs.CopyObjectHandler) - // DeleteObject - bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.DeleteObjectHandler) - //todo DeleteObjects new ? - bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.DeleteObjectHandler) - // GetObject - bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectHandler) - // GetObjectACL - bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectACLHandler).Queries("acl", "") + ////object... + //// ListObjectsV2 + //bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsV2Handler).Queries("list-type", "2") + //// ListObjects + //bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsHandler) + //// HeadObject + //bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(hs.HeadObjectHandler) + //// PutObject + //bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(hs.PutObjectHandler) + //// CopyObject + //bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(hs.CopyObjectHandler) + //// DeleteObject + //bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.DeleteObjectHandler) + ////todo DeleteObjects new ? 
+ //bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.DeleteObjectHandler) + //// GetObject + //bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectHandler) + //// GetObjectACL + //bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectACLHandler).Queries("acl", "") //bucket... // GetBucketAcl bucket.Methods(http.MethodGet).HandlerFunc(hs.GetBucketAclHandler).Queries("acl", "") // PutBucketAcl bucket.Methods(http.MethodPut).HandlerFunc(hs.PutBucketAclHandler).Queries("acl", "") - // PutBucket - bucket.Methods(http.MethodPut).HandlerFunc(hs.PutBucketHandler) + // CreateBucket + bucket.Methods(http.MethodPut).HandlerFunc(hs.CreateBucketHandler) // HeadBucket bucket.Methods(http.MethodHead).HandlerFunc(hs.HeadBucketHandler) // DeleteBucket diff --git a/s3/s3.go b/s3/s3.go index 5ab6b184a..a37ebd7a5 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -8,7 +8,6 @@ import ( "github.com/bittorrent/go-btfs/s3/routers" "github.com/bittorrent/go-btfs/s3/server" "github.com/bittorrent/go-btfs/s3/services/accesskey" - "github.com/bittorrent/go-btfs/s3/services/bucket" "github.com/bittorrent/go-btfs/s3/services/object" "github.com/bittorrent/go-btfs/s3/services/sign" "sync" @@ -40,16 +39,10 @@ func NewServer(cfg config.S3CompatibleAPI) *server.Server { acksvc := accesskey.NewService(ps) sigsvc := sign.NewService() objsvc := object.NewService(ps) - bucsvc := bucket.NewService(ps) - bucsvc.EmptyBucket(objsvc.EmptyBucket) // handlers hs := handlers.NewHandlers( - acksvc, - sigsvc, - bucsvc, - objsvc, - handlers.WithHeaders(cfg.HTTPHeaders), + acksvc, sigsvc, objsvc, handlers.WithHeaders(cfg.HTTPHeaders), ) // routers diff --git a/s3/services/object/proto.go b/s3/services/object/proto.go index 5269a87b2..2927cc02d 100644 --- a/s3/services/object/proto.go +++ b/s3/services/object/proto.go @@ -17,7 +17,7 @@ var ( ) type Service interface { - PutBucket(ctx context.Context, user, bucname, region, acl string) (bucket *Bucket, err 
error) + CreateBucket(ctx context.Context, user, bucname, region, acl string) (bucket *Bucket, err error) GetBucket(ctx context.Context, user, bucname string) (bucket *Bucket, err error) DeleteBucket(ctx context.Context, user, bucname string) (err error) GetAllBuckets(ctx context.Context, user string) (list []*Bucket, err error) diff --git a/s3/services/object/service_bucket.go b/s3/services/object/service_bucket.go index 2bd6a3417..62ed4107b 100644 --- a/s3/services/object/service_bucket.go +++ b/s3/services/object/service_bucket.go @@ -11,7 +11,7 @@ import ( ) // CreateBucket create a new bucket for the specified user -func (s *service) PutBucket(ctx context.Context, user, bucname, region, acl string) (bucket *Bucket, err error) { +func (s *service) CreateBucket(ctx context.Context, user, bucname, region, acl string) (bucket *Bucket, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() @@ -28,7 +28,7 @@ func (s *service) PutBucket(ctx context.Context, user, bucname, region, acl stri // Get old bucket bucketOld, err := s.getBucket(buckey) - if err == nil { + if err != nil { return } if bucketOld != nil { @@ -343,7 +343,7 @@ func (s *service) EmptyBucket(ctx context.Context, user, bucname string) (empty } func (s *service) getBucket(buckey string) (bucket *Bucket, err error) { - err = s.providers.StateStore().Get(buckey, bucket) + err = s.providers.StateStore().Get(buckey, &bucket) if errors.Is(err, providers.ErrStateStoreNotFound) { err = nil } diff --git a/s3/services/object/service_multipart.go b/s3/services/object/service_multipart.go index b9dcaaa8c..7cc73765f 100644 --- a/s3/services/object/service_multipart.go +++ b/s3/services/object/service_multipart.go @@ -453,7 +453,7 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob } func (s *service) getMultipart(uplkey string) (multipart *Multipart, err error) { - err = s.providers.StateStore().Get(uplkey, multipart) + err = 
s.providers.StateStore().Get(uplkey, &multipart) if errors.Is(err, providers.ErrStateStoreNotFound) { err = nil } diff --git a/s3/services/object/service_object.go b/s3/services/object/service_object.go index 9d5af7970..bfcbc1e4c 100644 --- a/s3/services/object/service_object.go +++ b/s3/services/object/service_object.go @@ -522,7 +522,7 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi } func (s *service) getObject(objkey string) (object *Object, err error) { - err = s.providers.StateStore().Get(objkey, object) + err = s.providers.StateStore().Get(objkey, &object) if errors.Is(err, providers.ErrStateStoreNotFound) { err = nil } From 187f740ba85d7399cc55cf61a907ff2e4bdee740 Mon Sep 17 00:00:00 2001 From: Steve Date: Sun, 3 Sep 2023 05:40:10 +0800 Subject: [PATCH 083/139] refractor: bucket handler --- blocks/blockstoreutil/remove.go | 4 +- core/commands/cid.go | 2 +- core/commands/cmdenv/cidbase.go | 2 +- core/commands/files.go | 4 +- core/commands/filestore.go | 4 +- core/commands/refs.go | 2 +- core/corehttp/gateway/gateway.go | 2 +- core/corehttp/gateway/gateway_test.go | 6 +- go.mod | 4 +- go.sum | 12 +- s3/action/action.go | 8 +- s3/consts/consts.go | 24 +- s3/handlers/handlers_bucket.go | 12 +- s3/handlers/handlers_object.go | 2 +- s3/requests/parsers.go | 81 +++---- s3/requests/parsers_common.go | 61 ++--- .../{writers_common.go => response.go} | 30 ++- s3/responses/response_bucket.go | 71 ++++++ s3/responses/response_multipart.go | 26 +++ s3/responses/response_object.go | 11 + s3/responses/types.go | 221 ++++++++---------- s3/responses/wirters.go | 99 -------- s3/routers/routers.go | 4 +- s3/services/object/proto.go | 39 +--- s3/services/object/service.go | 2 +- s3/services/object/service_bucket.go | 46 ++-- s3/services/object/service_multipart.go | 36 +-- s3/services/object/service_object.go | 38 +-- s3/services/sign/signature-v4-utils.go | 4 +- s3/utils/signature.go | 2 +- 30 files changed, 430 insertions(+), 429 
deletions(-) rename s3/responses/{writers_common.go => response.go} (86%) create mode 100644 s3/responses/response_bucket.go create mode 100644 s3/responses/response_multipart.go create mode 100644 s3/responses/response_object.go delete mode 100644 s3/responses/wirters.go diff --git a/blocks/blockstoreutil/remove.go b/blocks/blockstoreutil/remove.go index e08a95d47..81780c124 100644 --- a/blocks/blockstoreutil/remove.go +++ b/blocks/blockstoreutil/remove.go @@ -71,8 +71,8 @@ func RmBlocks(ctx context.Context, blocks bs.GCBlockstore, pins pin.Pinner, cids } // FilterPinned takes a slice of Cids and returns it with the pinned Cids -// removed. If a Cid is pinned, it will place RemovedBlock objects in the given -// out channel, with an error which indicates that the Cid is pinned. +// removed. If a CID is pinned, it will place RemovedBlock objects in the given +// out channel, with an error which indicates that the CID is pinned. // This function is used in RmBlocks to filter out any blocks which are not // to be removed (because they are pinned). func FilterPinned(ctx context.Context, pins pin.Pinner, out chan<- interface{}, cids []cid.Cid) []cid.Cid { diff --git a/core/commands/cid.go b/core/commands/cid.go index 0bbc3ce26..16d972985 100644 --- a/core/commands/cid.go +++ b/core/commands/cid.go @@ -117,7 +117,7 @@ The optional format string is a printf style format string: } type CidFormatRes struct { - CidStr string // Original Cid String passed in + CidStr string // Original CID String passed in Formatted string // Formatted Result ErrorMsg string // Error } diff --git a/core/commands/cmdenv/cidbase.go b/core/commands/cmdenv/cidbase.go index 0e0822af1..445705d28 100644 --- a/core/commands/cmdenv/cidbase.go +++ b/core/commands/cmdenv/cidbase.go @@ -58,7 +58,7 @@ func CidBaseDefined(req *cmds.Request) bool { } // CidEncoderFromPath creates a new encoder that is influenced from -// the encoded Cid in a Path. 
For CidV0 the multibase from the base +// the encoded CID in a Path. For CidV0 the multibase from the base // encoder is used and automatic upgrades are disabled. For CidV1 the // multibase from the CID is used and upgrades are enabled. // diff --git a/core/commands/files.go b/core/commands/files.go index 1b9768bf1..950132efe 100644 --- a/core/commands/files.go +++ b/core/commands/files.go @@ -87,8 +87,8 @@ const ( filesHashOptionName = "hash" ) -var cidVersionOption = cmds.IntOption(filesCidVersionOptionName, "cid-ver", "Cid version to use. (experimental)") -var hashOption = cmds.StringOption(filesHashOptionName, "Hash function to use. Will set Cid version to 1 if used. (experimental)") +var cidVersionOption = cmds.IntOption(filesCidVersionOptionName, "cid-ver", "CID version to use. (experimental)") +var hashOption = cmds.StringOption(filesHashOptionName, "Hash function to use. Will set CID version to 1 if used. (experimental)") var errFormat = errors.New("format was set by multiple options. Only one format option is allowed") diff --git a/core/commands/filestore.go b/core/commands/filestore.go index e6520984e..808bf2d9d 100644 --- a/core/commands/filestore.go +++ b/core/commands/filestore.go @@ -45,7 +45,7 @@ The output is: `, }, Arguments: []cmds.Argument{ - cmds.StringArg("obj", false, true, "Cid of objects to list."), + cmds.StringArg("obj", false, true, "CID of objects to list."), }, Options: []cmds.Option{ cmds.BoolOption(fileOrderOptionName, "sort the results based on the path of the backing file"), @@ -122,7 +122,7 @@ For ERROR entries the error will also be printed to stderr. 
`, }, Arguments: []cmds.Argument{ - cmds.StringArg("obj", false, true, "Cid of objects to verify."), + cmds.StringArg("obj", false, true, "CID of objects to verify."), }, Options: []cmds.Option{ cmds.BoolOption(fileOrderOptionName, "verify the objects based on the order of the backing file"), diff --git a/core/commands/refs.go b/core/commands/refs.go index 9f564297c..bda6b2c73 100644 --- a/core/commands/refs.go +++ b/core/commands/refs.go @@ -296,7 +296,7 @@ func (rw *RefWriter) visit(c cid.Cid, depth int) (bool, bool) { // Unique == true && depth < MaxDepth (or unlimited) from this point // Branch pruning cases: - // - We saw the Cid before and either: + // - We saw the CID before and either: // - Depth is unlimited (MaxDepth = -1) // - We saw it higher (smaller depth) in the DAG (means we must have // explored deep enough before) diff --git a/core/corehttp/gateway/gateway.go b/core/corehttp/gateway/gateway.go index 1b9423f72..9e8e946e6 100644 --- a/core/corehttp/gateway/gateway.go +++ b/core/corehttp/gateway/gateway.go @@ -92,7 +92,7 @@ type IPFSBackend interface { // Get returns a GetResponse with UnixFS file, directory or a block in IPLD // format e.g., (DAG-)CBOR/JSON. // - // Returned Directories are preferably a minimum info required for enumeration: Name, Size, and Cid. + // Returned Directories are preferably a minimum info required for enumeration: Name, Size, and CID. // // Optional ranges follow [HTTP Byte Ranges] notation and can be used for // pre-fetching specific sections of a file or a block. 
diff --git a/core/corehttp/gateway/gateway_test.go b/core/corehttp/gateway/gateway_test.go index 304051a95..96d47180a 100644 --- a/core/corehttp/gateway/gateway_test.go +++ b/core/corehttp/gateway/gateway_test.go @@ -418,7 +418,7 @@ func TestIPNSHostnameRedirect(t *testing.T) { // assert.Contains(t, s, "", "expected backlink in directory listing") // assert.Contains(t, s, "", "expected file in directory listing") -// assert.Contains(t, s, s, k2.Cid().String(), "expected hash in directory listing") +// assert.Contains(t, s, s, k2.CID().String(), "expected hash in directory listing") // // make request to directory listing at root // req, err = http.NewRequest(http.MethodGet, ts.URL, nil) @@ -440,7 +440,7 @@ func TestIPNSHostnameRedirect(t *testing.T) { // assert.Contains(t, s, "", "expected file in directory listing") // // https://github.com/btfs/dir-index-html/issues/42 // assert.Contains(t, s, "example.net/foo? #<'/bar"), "expected a path in directory listing") // assert.Contains(t, s, "", "expected backlink in directory listing") // assert.Contains(t, s, "", "expected file in directory listing") -// assert.Contains(t, s, k3.Cid().String(), "expected hash in directory listing") +// assert.Contains(t, s, k3.CID().String(), "expected hash in directory listing") // } func TestPretty404(t *testing.T) { diff --git a/go.mod b/go.mod index e93c84df0..8bb429e1a 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.18 require ( bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 - github.com/aws/aws-sdk-go v1.27.0 + github.com/aws/aws-sdk-go v1.45.2 github.com/bittorrent/go-btfs-api v0.5.0 github.com/bittorrent/go-btfs-chunker v0.4.0 github.com/bittorrent/go-btfs-cmds v0.3.0 @@ -181,7 +181,7 @@ require ( github.com/ipfs/go-ipld-legacy v0.1.1 // indirect github.com/ipfs/go-ipns v0.3.0 // indirect github.com/ipld/edelweiss v0.2.0 // indirect - github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // 
indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/libp2p/go-libp2p-core v0.20.1 // indirect github.com/libp2p/go-libp2p-xor v0.1.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.0 // indirect diff --git a/go.sum b/go.sum index 25113f808..ba79092d7 100644 --- a/go.sum +++ b/go.sum @@ -180,8 +180,9 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0 h1:0xphMHGMLBrPMfxR2AmVjZKcMEESEgWF8Kru94BNByk= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.45.2 h1:hTong9YUklQKqzrGk3WnKABReb5R8GjbG4Y6dEQfjnk= +github.com/aws/aws-sdk-go v1.45.2/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= @@ -894,8 +895,11 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath 
v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= @@ -1960,6 +1964,7 @@ golang.org/x/net v0.0.0-20220531201128-c960675eff93/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220920183852-bf014ff85ad5/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -2089,6 +2094,7 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= @@ -2096,6 +2102,7 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2106,6 +2113,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/s3/action/action.go b/s3/action/action.go index 8a13e97db..c5d6ad30c 100644 --- a/s3/action/action.go +++ b/s3/action/action.go @@ -23,11 +23,11 @@ const ( // DeleteBucketAction - DeleteBucket Rest API action. DeleteBucketAction = "s3:DeleteBucket" - // PutBucketAclAction - PutBucketAcl Rest API action. - PutBucketAclAction = "s3:PutBucketAcl" + // PutBucketAclAction - PutBucketACL Rest API action. 
+ PutBucketAclAction = "s3:PutBucketACL" - // GetBucketAclAction - GetBucketAcl Rest API action. - GetBucketAclAction = "s3:GetBucketAcl" + // GetBucketAclAction - GetBucketACL Rest API action. + GetBucketAclAction = "s3:GetBucketACL" //--- object diff --git a/s3/consts/consts.go b/s3/consts/consts.go index 5ddaa2995..09c388843 100644 --- a/s3/consts/consts.go +++ b/s3/consts/consts.go @@ -1,6 +1,7 @@ package consts import ( + "github.com/aws/aws-sdk-go/service/s3" "github.com/dustin/go-humanize" "time" ) @@ -16,8 +17,6 @@ const ( MaxLocationConstraintSize = 3 * humanize.MiByte EmptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" StsRequestBodyLimit = 10 * (1 << 20) // 10 MiB - DefaultRegion = "" - DefaultAcl = "public-read" SlashSeparator = "/" MaxSkewTime = 15 * time.Minute // 15 minutes skew allowed. @@ -29,11 +28,26 @@ const ( AssumeRole = "AssumeRole" SignV4Algorithm = "AWS4-HMAC-SHA256" - DefaultOwnerID = "02d6176db174dc93cb1b899f7c6078f08654445fe8cf1b6ce98d8855f66bdbf4" - DisplayName = "FileDagStorage" - DefaultStorageClass = "DAGSTORE" + DefaultLocation = "us-east-1" + DefaultBucketACL = s3.BucketCannedACLPublicRead + DefaultObjectACL = "" + AllUsersURI = "http://acs.amazonaws.com/groups/global/AllUsers" ) +var SupportedLocations = map[string]bool{ + DefaultLocation: true, +} + +var SupportedBucketACLs = map[string]bool{ + s3.BucketCannedACLPrivate: true, + s3.BucketCannedACLPublicRead: true, + s3.BucketCannedACLPublicReadWrite: true, +} + +var SupportedObjectACLs = map[string]bool{ + DefaultObjectACL: true, +} + // Standard S3 HTTP request constants const ( IfModifiedSince = "If-Modified-Since" diff --git a/s3/handlers/handlers_bucket.go b/s3/handlers/handlers_bucket.go index 038dbe0f1..ddf26b4a1 100644 --- a/s3/handlers/handlers_bucket.go +++ b/s3/handlers/handlers_bucket.go @@ -30,7 +30,7 @@ func (h *Handlers) CreateBucketHandler(w http.ResponseWriter, r *http.Request) { cctx.SetHandleInf(r, h.name(), err) }() - req, rerr := 
requests.ParsePutBucketRequest(r) + req, rerr := requests.ParseCreateBucketRequest(r) if rerr != nil { err = rerr responses.WriteErrorResponse(w, r, rerr) @@ -119,7 +119,7 @@ func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { return } - responses.WriteListBucketsResponse(w, r, req.AccessKey, "", list) + responses.WriteListBucketsResponse(w, r, req.AccessKey, list) return } @@ -130,21 +130,21 @@ func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { cctx.SetHandleInf(r, h.name(), err) }() - req, rerr := requests.ParseGetBucketAclRequest(r) + req, rerr := requests.ParseGetBucketACLRequest(r) if rerr != nil { err = rerr responses.WriteErrorResponse(w, r, rerr) return } - acl, err := h.objsvc.GetBucketAcl(r.Context(), req.AccessKey, req.Bucket) + acl, err := h.objsvc.GetBucketACL(r.Context(), req.AccessKey, req.Bucket) if err != nil { rerr = h.respErr(err) responses.WriteErrorResponse(w, r, rerr) return } - responses.WriteGetBucketAclResponse(w, r, req.AccessKey, "", acl) + responses.WriteGetBucketACLResponse(w, r, req.AccessKey, acl) } func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { @@ -160,7 +160,7 @@ func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { return } - err = h.objsvc.PutBucketAcl(r.Context(), req.AccessKey, req.Bucket, req.ACL) + err = h.objsvc.PutBucketACL(r.Context(), req.AccessKey, req.Bucket, req.ACL) if err != nil { rerr = h.respErr(err) responses.WriteErrorResponse(w, r, rerr) diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index c7c89ff4d..991f4617c 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -494,7 +494,7 @@ const lockWaitTimeout = 5 * time.Minute // } // defer runlock() // -// acl, err := h.bucsvc.GetBucketAcl(ctx, bucname) +// acl, err := h.bucsvc.GetBucketACL(ctx, bucname) // if err != nil { // responses.WriteErrorResponse(w, r, err) // return diff --git 
a/s3/requests/parsers.go b/s3/requests/parsers.go index 5e55f7262..e0ac814a6 100644 --- a/s3/requests/parsers.go +++ b/s3/requests/parsers.go @@ -1,32 +1,51 @@ package requests import ( + "errors" + "fmt" "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/responses" "net/http" - "path" + "reflect" ) -// PutBucketRequest . -type PutBucketRequest struct { +// CreateBucketRequest . +type CreateBucketRequest struct { AccessKey string Bucket string ACL string Region string } -func ParsePutBucketRequest(r *http.Request) (req *PutBucketRequest, rerr *responses.Error) { - req = &PutBucketRequest{} +// todo: parse aws request use aws struct +func ParseS3Request(r *http.Request, v interface{}) (err error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Pointer || rv.IsNil() { + err = errors.New("invalid value must be non nil pointer") + return + } + + rt := reflect.TypeOf(v).Elem() + n := rt.NumField() + for i := 0; i < n; i++ { + f := rt.Field(i) + fmt.Println(f) + } + return +} + +func ParseCreateBucketRequest(r *http.Request) (req *CreateBucketRequest, rerr *responses.Error) { + req = &CreateBucketRequest{} req.AccessKey = cctx.GetAccessKey(r) req.Bucket, rerr = parseBucket(r) if rerr != nil { return } - req.ACL, rerr = parseAcl(r) + req.ACL, rerr = parseBucketACL(r) if rerr != nil { return } - req.Region, rerr = parseLocationConstraint(r) + req.Region, rerr = parseLocation(r) return } @@ -67,63 +86,33 @@ func ParseListBucketsRequest(r *http.Request) (req *ListBucketsRequest, rerr *re return } -// GetBucketAclRequest . -type GetBucketAclRequest struct { +// GetBucketACLRequest . 
+type GetBucketACLRequest struct { AccessKey string Bucket string } -func ParseGetBucketAclRequest(r *http.Request) (req *GetBucketAclRequest, rerr *responses.Error) { - req = &GetBucketAclRequest{} +func ParseGetBucketACLRequest(r *http.Request) (req *GetBucketACLRequest, rerr *responses.Error) { + req = &GetBucketACLRequest{} req.AccessKey = cctx.GetAccessKey(r) req.Bucket, rerr = parseBucket(r) return } -// PutBucketAclRequest . -type PutBucketAclRequest struct { +// PutBucketACLRequest . +type PutBucketACLRequest struct { AccessKey string Bucket string ACL string } -func ParsePutBucketAclRequest(r *http.Request) (req *PutBucketAclRequest, rerr *responses.Error) { - req = &PutBucketAclRequest{} +func ParsePutBucketAclRequest(r *http.Request) (req *PutBucketACLRequest, rerr *responses.Error) { + req = &PutBucketACLRequest{} req.AccessKey = cctx.GetAccessKey(r) req.Bucket, rerr = parseBucket(r) if rerr != nil { return } - req.ACL, rerr = parseAcl(r) - return -} - -// pathClean is like path.Clean but does not return "." for -// empty inputs, instead returns "empty" as is. -func PathClean(p string) string { - cp := path.Clean(p) - if cp == "." 
{ - return "" - } - return cp -} - -//func unmarshalXML(reader io.Reader, isObject bool) (*store.Tags, error) { -// tagging := &store.Tags{ -// TagSet: &store.TagSet{ -// TagMap: make(map[string]string), -// IsObject: isObject, -// }, -// } -// -// if err := xml.NewDecoder(reader).Decode(tagging); err != nil { -// return nil, err -// } -// -// return tagging, nil -//} - -func checkAcl(acl string) (ok bool) { - _, ok = supportAcls[acl] + req.ACL, rerr = parseBucketACL(r) return } diff --git a/s3/requests/parsers_common.go b/s3/requests/parsers_common.go index 0c4374e80..488da5791 100644 --- a/s3/requests/parsers_common.go +++ b/s3/requests/parsers_common.go @@ -1,11 +1,12 @@ package requests import ( + "encoding/xml" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/policy" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/s3utils" - "github.com/bittorrent/go-btfs/s3/utils" "github.com/gorilla/mux" "net/http" "net/url" @@ -13,8 +14,7 @@ import ( ) func parseBucket(r *http.Request) (bucket string, rerr *responses.Error) { - bucket = mux.Vars(r)["bucket"] - err := s3utils.CheckValidBucketNameStrict(bucket) + err := s3utils.CheckValidBucketNameStrict(mux.Vars(r)["bucket"]) if err != nil { rerr = responses.ErrInvalidBucketName } @@ -29,39 +29,44 @@ func parseObject(r *http.Request) (object string, rerr *responses.Error) { return } -// Parses location constraint from the incoming reader. -func parseLocationConstraint(r *http.Request) (location string, rerr *responses.Error) { - // If the request has no body with content-length set to 0, - // we do not have to validate location constraint. Bucket will - // be created at default region. 
- locationConstraint := createBucketLocationConfiguration{} - err := utils.XmlDecoder(r.Body, &locationConstraint, r.ContentLength) - if err != nil && r.ContentLength != 0 { - rerr = responses.ErrMalformedXML - return - } // else for both err as nil or io.EOF - - location = locationConstraint.Location - if location == "" { - location = consts.DefaultRegion +func parseLocation(r *http.Request) (location string, rerr *responses.Error) { + if r.ContentLength != 0 { + locationCfg := s3.CreateBucketConfiguration{} + decoder := xml.NewDecoder(r.Body) + err := xmlutil.UnmarshalXML(&locationCfg, decoder, "") + if err != nil { + rerr = responses.ErrMalformedXML + return + } + location = *locationCfg.LocationConstraint + } + if len(location) == 0 { + location = consts.DefaultLocation + } + if !consts.SupportedLocations[location] { + rerr = responses.ErrNotImplemented } return } -var supportAcls = map[string]struct{}{ - policy.Private: {}, - policy.PublicRead: {}, - policy.PublicReadWrite: {}, +func parseBucketACL(r *http.Request) (acl string, rerr *responses.Error) { + acl = r.Header.Get(consts.AmzACL) + if len(acl) == 0 { + acl = consts.DefaultBucketACL + } + if !consts.SupportedBucketACLs[acl] { + rerr = responses.ErrNotImplemented + } + return } -func parseAcl(r *http.Request) (acl string, rerr *responses.Error) { +func parseObjectACL(r *http.Request) (acl string, rerr *responses.Error) { acl = r.Header.Get(consts.AmzACL) - if acl == "" { - acl = consts.DefaultAcl + if len(acl) == 0 { + acl = consts.DefaultObjectACL } - _, ok := supportAcls[acl] - if !ok { + if !consts.SupportedObjectACLs[acl] { rerr = responses.ErrNotImplemented } return diff --git a/s3/responses/writers_common.go b/s3/responses/response.go similarity index 86% rename from s3/responses/writers_common.go rename to s3/responses/response.go index ad2cb80b4..04e2cedb4 100644 --- a/s3/responses/writers_common.go +++ b/s3/responses/response.go @@ -6,6 +6,8 @@ import ( "encoding/xml" "errors" "fmt" + 
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/consts" "github.com/gorilla/mux" logging "github.com/ipfs/go-log/v2" @@ -27,6 +29,19 @@ const ( mimeXML mimeType = " application/xml" ) +func owner(accessKey string) *s3.Owner { + return new(s3.Owner).SetID(accessKey).SetDisplayName(accessKey) +} + +func ownerFullControlGrant(accessKey string) *s3.Grant { + return new(s3.Grant).SetGrantee(new(s3.Grantee).SetType(s3.TypeCanonicalUser).SetID(accessKey).SetDisplayName(accessKey)).SetPermission(s3.PermissionFullControl) +} + +var ( + allUsersReadGrant = new(s3.Grant).SetGrantee(new(s3.Grantee).SetType(s3.TypeGroup).SetURI(consts.AllUsersURI)).SetPermission(s3.PermissionRead) + allUsersWriteGrant = new(s3.Grant).SetGrantee(new(s3.Grantee).SetType(s3.TypeGroup).SetURI(consts.AllUsersURI)).SetPermission(s3.PermissionWrite) +) + // APIErrorResponse - error response format type APIErrorResponse struct { XMLName xml.Name `xml:"Error" json:"-"` @@ -109,7 +124,6 @@ func writeResponse(w http.ResponseWriter, r *http.Request, statusCode int, respo } w.WriteHeader(statusCode) if response != nil { - log.Debugf("status %d %s: %s", statusCode, mType, string(response)) _, err := w.Write(response) if err != nil { log.Errorf("write err: %v", err) @@ -130,11 +144,15 @@ func setCommonHeaders(w http.ResponseWriter, r *http.Request) { // encodeXMLResponse Encodes the response headers into XML format. 
func encodeXMLResponse(response interface{}) []byte { - var bytesBuffer bytes.Buffer - bytesBuffer.WriteString(xml.Header) - e := xml.NewEncoder(&bytesBuffer) - e.Encode(response) - return bytesBuffer.Bytes() + var buf bytes.Buffer + buf.WriteString(xml.Header) + err := xmlutil.BuildXML(response, xml.NewEncoder(&buf)) + if err != nil { + panic(err) + } + bs := buf.Bytes() + fmt.Println(string(bs)) + return bs } // WriteErrorResponseJSON - writes error response in JSON format; diff --git a/s3/responses/response_bucket.go b/s3/responses/response_bucket.go new file mode 100644 index 000000000..9bced083f --- /dev/null +++ b/s3/responses/response_bucket.go @@ -0,0 +1,71 @@ +package responses + +import ( + "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/services/object" + "net/http" +) + +func WritePutBucketResponse(w http.ResponseWriter, r *http.Request) { + if cp := pathClean(r.URL.Path); cp != "" { + w.Header().Set(consts.Location, cp) + } + WriteSuccessResponse(w, r) + return +} + +func WriteHeadBucketResponse(w http.ResponseWriter, r *http.Request) { + WriteSuccessResponse(w, r) + return +} + +func WriteDeleteBucketResponse(w http.ResponseWriter) { + WriteSuccessNoContent(w) + return +} + +type ListBucketResponse struct { + ListAllMyBucketsResult s3.ListBucketsOutput `xml:"ListAllMyBucketsResult"` +} + +func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, accessKey string, buckets []*object.Bucket) { + resp := &ListBucketResponse{} + resp.ListAllMyBucketsResult.SetOwner(owner(accessKey)) + s3Buckets := make([]*s3.Bucket, 0) + for _, buc := range buckets { + s3Bucket := new(s3.Bucket).SetName(buc.Name).SetCreationDate(buc.Created) + s3Buckets = append(s3Buckets, s3Bucket) + } + resp.ListAllMyBucketsResult.SetBuckets(s3Buckets) + WriteSuccessResponseXML(w, r, resp) + return +} + +func WritePutBucketAclResponse(w http.ResponseWriter, r *http.Request) { + WriteSuccessResponse(w, r) + 
return +} + +type GetBucketACLResponse struct { + AccessControlPolicy s3.GetBucketAclOutput `xml:"AccessControlPolicy"` +} + +func WriteGetBucketACLResponse(w http.ResponseWriter, r *http.Request, accessKey string, acl string) { + resp := GetBucketACLResponse{} + resp.AccessControlPolicy.SetOwner(owner(accessKey)) + grants := make([]*s3.Grant, 0) + grants = append(grants, ownerFullControlGrant(accessKey)) + switch acl { + case s3.BucketCannedACLPrivate: + case s3.BucketCannedACLPublicRead: + grants = append(grants, allUsersReadGrant) + case s3.BucketCannedACLPublicReadWrite: + grants = append(grants, allUsersReadGrant, allUsersWriteGrant) + default: + panic("unknown acl") + } + resp.AccessControlPolicy.SetGrants(grants) + WriteSuccessResponseXML(w, r, resp) + return +} diff --git a/s3/responses/response_multipart.go b/s3/responses/response_multipart.go new file mode 100644 index 000000000..62188e8ab --- /dev/null +++ b/s3/responses/response_multipart.go @@ -0,0 +1,26 @@ +package responses + +import ( + "github.com/bittorrent/go-btfs/s3/services/object" + "net/http" +) + +func WriteCreateMultipartUploadResponse(w http.ResponseWriter, r *http.Request, bucname, objname, uploadID string) { + resp := GenerateInitiateMultipartUploadResponse(bucname, objname, uploadID) + WriteSuccessResponseXML(w, r, resp) +} + +func WriteAbortMultipartUploadResponse(w http.ResponseWriter, r *http.Request) { + WriteSuccessNoContent(w) +} + +func WriteUploadPartResponse(w http.ResponseWriter, r *http.Request, part object.Part) { + setPutObjHeaders(w, part.ETag, part.CID, false) + WriteSuccessResponseHeadersOnly(w, r) +} + +func WriteCompleteMultipartUploadResponse(w http.ResponseWriter, r *http.Request, bucname, objname, region string, obj object.Object) { + resp := GenerateCompleteMultipartUploadResponse(bucname, objname, region, obj) + setPutObjHeaders(w, obj.ETag, obj.CID, false) + WriteSuccessResponseXML(w, r, resp) +} diff --git a/s3/responses/response_object.go 
b/s3/responses/response_object.go new file mode 100644 index 000000000..34bbf0073 --- /dev/null +++ b/s3/responses/response_object.go @@ -0,0 +1,11 @@ +package responses + +import ( + "github.com/bittorrent/go-btfs/s3/services/object" + "net/http" +) + +func WritePutObjectResponse(w http.ResponseWriter, r *http.Request, obj object.Object) { + setPutObjHeaders(w, obj.ETag, obj.CID, false) + WriteSuccessResponseHeadersOnly(w, r) +} diff --git a/s3/responses/types.go b/s3/responses/types.go index 76298df4f..55987629e 100644 --- a/s3/responses/types.go +++ b/s3/responses/types.go @@ -1,44 +1,16 @@ package responses import ( - "encoding/base64" "encoding/xml" "github.com/aws/aws-sdk-go/service/s3" - "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/services/object" - "github.com/bittorrent/go-btfs/s3/utils" ) -type GetBucketAclResponse AccessControlPolicy - -// AccessControlPolicy -// -// -// 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a -// CustomersName@amazon.com -// -// -// -// -// 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a -// CustomersName@amazon.com -// -// FULL_CONTROL -// -// -// -// -type AccessControlPolicy struct { - s3.AccessControlPolicy - Owner canonicalUser `xml:"Owner"` - AccessControlList accessControlList `xml:"AccessControlList"` +type AccessControlList struct { + Grant []*s3.Grant `xml:"Grant,omitempty"` } -type accessControlList struct { - Grant []Grant `xml:"Grant,omitempty"` -} -type canonicalUser struct { +type CanonicalUser struct { ID string `xml:"ID"` DisplayName string `xml:"DisplayName,omitempty"` } @@ -240,98 +212,99 @@ func GenerateCompleteMultipartUploadResponse(bucname, objname, location string, } // GenerateListObjectsV2Response Generates an ListObjectsV2 response for the said bucket with other enumerated options. 
-func GenerateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, isTruncated bool, maxKeys int, objects []object.Object, prefixes []string) ListObjectsV2Response { - contents := make([]Object, 0, len(objects)) - id := consts.DefaultOwnerID - name := consts.DisplayName - owner := s3.Owner{ - ID: &id, - DisplayName: &name, - } - data := ListObjectsV2Response{} - - for _, object := range objects { - content := Object{} - if object.Name == "" { - continue - } - content.Key = utils.S3EncodeName(object.Name, encodingType) - content.LastModified = object.ModTime.UTC().Format(consts.Iso8601TimeFormat) - if object.ETag != "" { - content.ETag = "\"" + object.ETag + "\"" - } - content.Size = object.Size - content.Owner = owner - content.CID = object.Cid - contents = append(contents, content) - } - data.Name = bucket - data.Contents = contents - - data.EncodingType = encodingType - data.StartAfter = utils.S3EncodeName(startAfter, encodingType) - data.Delimiter = utils.S3EncodeName(delimiter, encodingType) - data.Prefix = utils.S3EncodeName(prefix, encodingType) - data.MaxKeys = maxKeys - data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token)) - data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken)) - data.IsTruncated = isTruncated - - commonPrefixes := make([]CommonPrefix, 0, len(prefixes)) - for _, prefix := range prefixes { - prefixItem := CommonPrefix{} - prefixItem.Prefix = utils.S3EncodeName(prefix, encodingType) - commonPrefixes = append(commonPrefixes, prefixItem) - } - data.CommonPrefixes = commonPrefixes - data.KeyCount = len(data.Contents) + len(data.CommonPrefixes) - return data -} +//func GenerateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, isTruncated bool, maxKeys int, objects []object.Object, prefixes []string) ListObjectsV2Response { +// contents := make([]Object, 0, len(objects)) +// id := consts.DefaultOwnerID 
+// name := consts.DisplayName +// owner := s3.Owner{ +// ID: &id, +// DisplayName: &name, +// } +// data := ListObjectsV2Response{} +// +// for _, object := range objects { +// content := Object{} +// if object.Name == "" { +// continue +// } +// content.Key = utils.S3EncodeName(object.Name, encodingType) +// content.LastModified = object.ModTime.UTC().Format(consts.Iso8601TimeFormat) +// if object.ETag != "" { +// content.ETag = "\"" + object.ETag + "\"" +// } +// content.Size = object.Size +// content.Owner = owner +// content.CID = object.CID +// contents = append(contents, content) +// } +// data.Name = bucket +// data.Contents = contents +// +// data.EncodingType = encodingType +// data.StartAfter = utils.S3EncodeName(startAfter, encodingType) +// data.Delimiter = utils.S3EncodeName(delimiter, encodingType) +// data.Prefix = utils.S3EncodeName(prefix, encodingType) +// data.MaxKeys = maxKeys +// data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token)) +// data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken)) +// data.IsTruncated = isTruncated +// +// commonPrefixes := make([]CommonPrefix, 0, len(prefixes)) +// for _, prefix := range prefixes { +// prefixItem := CommonPrefix{} +// prefixItem.Prefix = utils.S3EncodeName(prefix, encodingType) +// commonPrefixes = append(commonPrefixes, prefixItem) +// } +// data.CommonPrefixes = commonPrefixes +// data.KeyCount = len(data.Contents) + len(data.CommonPrefixes) +// return data +//} // generates an ListObjectsV1 response for the said bucket with other enumerated options. 
-func GenerateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp object.ObjectsList) ListObjectsResponse { - contents := make([]Object, 0, len(resp.Objects)) - id := consts.DefaultOwnerID - name := consts.DisplayName - owner := s3.Owner{ - ID: &id, - DisplayName: &name, - } - data := ListObjectsResponse{} - - for _, object := range resp.Objects { - content := Object{} - if object.Name == "" { - continue - } - content.Key = utils.S3EncodeName(object.Name, encodingType) - content.LastModified = object.ModTime.UTC().Format(consts.Iso8601TimeFormat) - if object.ETag != "" { - content.ETag = "\"" + object.ETag + "\"" - } - content.CID = object.Cid - content.Size = object.Size - content.StorageClass = "" - content.Owner = owner - contents = append(contents, content) - } - data.Name = bucket - data.Contents = contents - - data.EncodingType = encodingType - data.Prefix = utils.S3EncodeName(prefix, encodingType) - data.Marker = utils.S3EncodeName(marker, encodingType) - data.Delimiter = utils.S3EncodeName(delimiter, encodingType) - data.MaxKeys = maxKeys - data.NextMarker = utils.S3EncodeName(resp.NextMarker, encodingType) - data.IsTruncated = resp.IsTruncated - - prefixes := make([]CommonPrefix, 0, len(resp.Prefixes)) - for _, prefix := range resp.Prefixes { - prefixItem := CommonPrefix{} - prefixItem.Prefix = utils.S3EncodeName(prefix, encodingType) - prefixes = append(prefixes, prefixItem) - } - data.CommonPrefixes = prefixes - return data -} +//func GenerateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp object.ObjectsList) ListObjectsResponse { +// contents := make([]Object, 0, len(resp.Objects)) +// id := consts.DefaultOwnerID +// name := consts.DisplayName +// owner := s3.Owner{ +// ID: &id, +// DisplayName: &name, +// } +// data := ListObjectsResponse{} +// +// for _, object := range resp.Objects { +// content := Object{} +// if object.Name == "" { +// continue +// } +// 
content.Key = utils.S3EncodeName(object.Name, encodingType) +// content.LastModified = object.ModTime.UTC().Format(consts.Iso8601TimeFormat) +// if object.ETag != "" { +// content.ETag = "\"" + object.ETag + "\"" +// } +// content.CID = object.CID +// content.Size = object.Size +// content.StorageClass = "" +// content.Owner = owner +// contents = append(contents, content) +// } +// data.Name = bucket +// data.Contents = contents +// +// data.EncodingType = encodingType +// data.Prefix = utils.S3EncodeName(prefix, encodingType) +// data.Marker = utils.S3EncodeName(marker, encodingType) +// data.Delimiter = utils.S3EncodeName(delimiter, encodingType) +// data.MaxKeys = maxKeys +// data.NextMarker = utils.S3EncodeName(resp.NextMarker, encodingType) +// data.IsTruncated = resp.IsTruncated +// +// prefixes := make([]CommonPrefix, 0, len(resp.Prefixes)) +// for _, prefix := range resp.Prefixes { +// prefixItem := CommonPrefix{} +// prefixItem.Prefix = utils.S3EncodeName(prefix, encodingType) +// prefixes = append(prefixes, prefixItem) +// } +// data.CommonPrefixes = prefixes +// return data +//} +// diff --git a/s3/responses/wirters.go b/s3/responses/wirters.go deleted file mode 100644 index fb77fb46c..000000000 --- a/s3/responses/wirters.go +++ /dev/null @@ -1,99 +0,0 @@ -package responses - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/services/object" - "net/http" -) - -func WritePutBucketResponse(w http.ResponseWriter, r *http.Request) { - if cp := pathClean(r.URL.Path); cp != "" { - w.Header().Set(consts.Location, cp) - } - WriteSuccessResponse(w, r) - return -} - -func WriteHeadBucketResponse(w http.ResponseWriter, r *http.Request) { - WriteSuccessResponse(w, r) - return -} - -func WriteDeleteBucketResponse(w http.ResponseWriter) { - WriteSuccessNoContent(w) - return -} - -func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, 
userId, username string, buckets []*object.Bucket) { - resp := s3.ListBucketsOutput{ - Owner: &s3.Owner{ - ID: aws.String(userId), - DisplayName: aws.String(username), - }, - Buckets: []*s3.Bucket{}, - } - - for _, buc := range buckets { - resp.Buckets = append(resp.Buckets, &s3.Bucket{ - Name: aws.String(buc.Name), - CreationDate: aws.Time(buc.Created), - }) - } - - WriteSuccessResponseXML(w, r, resp) - - return -} - -func WriteGetBucketAclResponse(w http.ResponseWriter, r *http.Request, userId, username, acl string) { - resp := s3.GetBucketAclOutput{ - Owner: &s3.Owner{ - ID: aws.String(userId), - DisplayName: aws.String(username), - }, - Grants: []*s3.Grant{ - { - Grantee: &s3.Grantee{ - ID: aws.String(userId), - DisplayName: aws.String(userId), - Type: aws.String("CanonicalUser"), - }, - Permission: aws.String("public-read"), - }, - }, - } - WriteSuccessResponseXML(w, r, resp) - return -} - -func WritePutBucketAclResponse(w http.ResponseWriter, r *http.Request) { - WriteSuccessResponse(w, r) - return -} - -func WritePutObjectResponse(w http.ResponseWriter, r *http.Request, obj object.Object) { - setPutObjHeaders(w, obj.ETag, obj.Cid, false) - WriteSuccessResponseHeadersOnly(w, r) -} - -func WriteCreateMultipartUploadResponse(w http.ResponseWriter, r *http.Request, bucname, objname, uploadID string) { - resp := GenerateInitiateMultipartUploadResponse(bucname, objname, uploadID) - WriteSuccessResponseXML(w, r, resp) -} - -func WriteAbortMultipartUploadResponse(w http.ResponseWriter, r *http.Request) { - WriteSuccessNoContent(w) -} - -func WriteUploadPartResponse(w http.ResponseWriter, r *http.Request, part object.ObjectPart) { - setPutObjHeaders(w, part.ETag, part.Cid, false) - WriteSuccessResponseHeadersOnly(w, r) -} - -func WriteCompleteMultipartUploadResponse(w http.ResponseWriter, r *http.Request, bucname, objname, region string, obj object.Object) { - resp := GenerateCompleteMultipartUploadResponse(bucname, objname, region, obj) - setPutObjHeaders(w, 
obj.ETag, obj.Cid, false) - WriteSuccessResponseXML(w, r, resp) -} diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 0090fe0e1..786a05133 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -63,9 +63,9 @@ func (routers *Routers) Register() http.Handler { //bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectACLHandler).Queries("acl", "") //bucket... - // GetBucketAcl + // GetBucketACL bucket.Methods(http.MethodGet).HandlerFunc(hs.GetBucketAclHandler).Queries("acl", "") - // PutBucketAcl + // PutBucketACL bucket.Methods(http.MethodPut).HandlerFunc(hs.PutBucketAclHandler).Queries("acl", "") // CreateBucket bucket.Methods(http.MethodPut).HandlerFunc(hs.CreateBucketHandler) diff --git a/s3/services/object/proto.go b/s3/services/object/proto.go index 2927cc02d..529d2c437 100644 --- a/s3/services/object/proto.go +++ b/s3/services/object/proto.go @@ -21,8 +21,8 @@ type Service interface { GetBucket(ctx context.Context, user, bucname string) (bucket *Bucket, err error) DeleteBucket(ctx context.Context, user, bucname string) (err error) GetAllBuckets(ctx context.Context, user string) (list []*Bucket, err error) - PutBucketAcl(ctx context.Context, user, bucname, acl string) (err error) - GetBucketAcl(ctx context.Context, user, bucname string) (acl string, err error) + PutBucketACL(ctx context.Context, user, bucname, acl string) (err error) + GetBucketACL(ctx context.Context, user, bucname string) (acl string, err error) EmptyBucket(ctx context.Context, user, bucname string) (empty bool, err error) PutObject(ctx context.Context, user, bucname, objname string, body *hash.Reader, size int64, meta map[string]string) (object *Object, err error) @@ -33,7 +33,7 @@ type Service interface { ListObjects(ctx context.Context, user, bucname, prefix, delimiter, marker string, max int) (list *ObjectsList, err error) CreateMultipartUpload(ctx context.Context, user, bucname, objname string, meta map[string]string) (multipart *Multipart, 
err error) - UploadPart(ctx context.Context, user, bucname, objname, uplid string, partId int, reader *hash.Reader, size int64, meta map[string]string) (part *ObjectPart, err error) + UploadPart(ctx context.Context, user, bucname, objname, uplid string, partId int, reader *hash.Reader, size int64, meta map[string]string) (part *Part, err error) AbortMultipartUpload(ctx context.Context, user, bucname, objname, uplid string) (err error) CompleteMultiPartUpload(ctx context.Context, user, bucname, objname, uplid string, parts []*CompletePart) (object *Object, err error) } @@ -43,7 +43,7 @@ type Bucket struct { Name string Region string Owner string - Acl string + ACL string Created time.Time } @@ -54,8 +54,8 @@ type Object struct { Size int64 IsDir bool ETag string - Cid string - Acl string + CID string + ACL string VersionID string IsLatest bool DeleteMarker bool @@ -72,37 +72,22 @@ type Multipart struct { UploadID string Initiated time.Time MetaData map[string]string - Parts []*ObjectPart + Parts []*Part } -type ObjectPart struct { +type Part struct { ETag string `json:"etag,omitempty"` - Cid string `json:"cid,omitempty"` + CID string `json:"cid,omitempty"` Number int `json:"number"` Size int64 `json:"size"` ModTime time.Time `json:"mod_time"` } -// ListObjectsInfo - container for list objects. type ObjectsList struct { - // Indicates whether the returned list objects response is truncated. A - // value of true indicates that the list was truncated. The list can be truncated - // if the number of objects exceeds the limit allowed or specified - // by max keys. IsTruncated bool - - // When response is truncated (the IsTruncated element value in the response is true), - // you can use the key name in this field as marker in the subsequent - // request to get next set of objects. - // - // NOTE: AWS S3 returns NextMarker only if you have delimiter request parameter specified, - NextMarker string - - // List of objects info for this request. 
- Objects []*Object - - // List of prefixes for this request. - Prefixes []string + NextMarker string + Objects []*Object + Prefixes []string } type CompletePart struct { diff --git a/s3/services/object/service.go b/s3/services/object/service.go index 0b45d90b4..6d44114c0 100644 --- a/s3/services/object/service.go +++ b/s3/services/object/service.go @@ -79,7 +79,7 @@ func (s *service) opctx(parent context.Context) (ctx context.Context, cancel con return } -func (s *service) checkAcl(owner, acl, user string, act action.Action) (allow bool) { +func (s *service) checkACL(owner, acl, user string, act action.Action) (allow bool) { own := user != "" && user == owner allow = policy.IsAllowed(own, acl, act) return diff --git a/s3/services/object/service_bucket.go b/s3/services/object/service_bucket.go index 62ed4107b..1f3f7ad6d 100644 --- a/s3/services/object/service_bucket.go +++ b/s3/services/object/service_bucket.go @@ -36,8 +36,8 @@ func (s *service) CreateBucket(ctx context.Context, user, bucname, region, acl s return } - // Check action acl - allow := s.checkAcl(user, acl, user, action.CreateBucketAction) + // Check action ACL + allow := s.checkACL(user, policy.Private, user, action.CreateBucketAction) if !allow { err = ErrNotAllowed return @@ -48,7 +48,7 @@ func (s *service) CreateBucket(ctx context.Context, user, bucname, region, acl s Name: bucname, Region: region, Owner: user, - Acl: acl, + ACL: acl, Created: time.Now().UTC(), } @@ -84,8 +84,8 @@ func (s *service) GetBucket(ctx context.Context, user, bucname string) (bucket * return } - // Check action acl - allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.HeadBucketAction) + // Check action ACL + allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.HeadBucketAction) if !allow { err = ErrNotAllowed } @@ -119,8 +119,8 @@ func (s *service) DeleteBucket(ctx context.Context, user, bucname string) (err e return } - // Check action acl - allow := s.checkAcl(bucket.Owner, bucket.Acl, user, 
action.DeleteBucketAction) + // Check action ACL + allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.DeleteBucketAction) if !allow { err = ErrNotAllowed return @@ -153,8 +153,8 @@ func (s *service) GetAllBuckets(ctx context.Context, user string) (list []*Bucke ctx, cancel := s.opctx(ctx) defer cancel() - // Check action acl - allow := s.checkAcl(user, policy.Private, user, action.ListBucketAction) + // Check action ACL + allow := s.checkACL(user, policy.Private, user, action.ListBucketAction) if !allow { err = ErrNotAllowed return @@ -196,8 +196,8 @@ func (s *service) GetAllBuckets(ctx context.Context, user string) (list []*Bucke return } -// PutBucketAcl update user specified bucket's acl field value -func (s *service) PutBucketAcl(ctx context.Context, user, bucname, acl string) (err error) { +// PutBucketACL update user specified bucket's ACL field value +func (s *service) PutBucketACL(ctx context.Context, user, bucname, acl string) (err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() @@ -222,15 +222,15 @@ func (s *service) PutBucketAcl(ctx context.Context, user, bucname, acl string) ( return } - // Check action acl - allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.PutBucketAclAction) + // Check action ACL + allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.PutBucketAclAction) if !allow { err = ErrNotAllowed return } - // Update bucket acl - bucket.Acl = acl + // Update bucket ACL + bucket.ACL = acl // Put bucket err = s.providers.StateStore().Put(buckey, bucket) @@ -238,8 +238,8 @@ func (s *service) PutBucketAcl(ctx context.Context, user, bucname, acl string) ( return } -// GetBucketAcl get user specified bucket acl field value -func (s *service) GetBucketAcl(ctx context.Context, user, bucname string) (acl string, err error) { +// GetBucketACL get user specified bucket ACL field value +func (s *service) GetBucketACL(ctx context.Context, user, bucname string) (acl string, err error) { // Operation context ctx, 
cancel := s.opctx(ctx) defer cancel() @@ -264,15 +264,15 @@ func (s *service) GetBucketAcl(ctx context.Context, user, bucname string) (acl s return } - // Check action acl - allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.GetBucketAclAction) + // Check action ACL + allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.GetBucketAclAction) if !allow { err = ErrNotAllowed return } - // Get acl field value - acl = bucket.Acl + // Get ACL field value + acl = bucket.ACL return } @@ -302,8 +302,8 @@ func (s *service) EmptyBucket(ctx context.Context, user, bucname string) (empty return } - // Check action acl - allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.ListObjectsAction) + // Check action ACL + allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.ListObjectsAction) if !allow { err = ErrNotAllowed return diff --git a/s3/services/object/service_multipart.go b/s3/services/object/service_multipart.go index 7cc73765f..4ae5f27a1 100644 --- a/s3/services/object/service_multipart.go +++ b/s3/services/object/service_multipart.go @@ -45,8 +45,8 @@ func (s *service) CreateMultipartUpload(ctx context.Context, user, bucname, objn return } - // Check action acl - allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.CreateMultipartUploadAction) + // Check action ACL + allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.CreateMultipartUploadAction) if !allow { err = ErrNotAllowed return @@ -81,7 +81,7 @@ func (s *service) CreateMultipartUpload(ctx context.Context, user, bucname, objn } // UploadPart upload user specified multipart part -func (s *service) UploadPart(ctx context.Context, user, bucname, objname, uplid string, partId int, body *hash.Reader, size int64, meta map[string]string) (part *ObjectPart, err error) { +func (s *service) UploadPart(ctx context.Context, user, bucname, objname, uplid string, partId int, body *hash.Reader, size int64, meta map[string]string) (part *Part, err error) { // Operation context ctx, cancel := 
s.opctx(ctx) defer cancel() @@ -106,8 +106,8 @@ func (s *service) UploadPart(ctx context.Context, user, bucname, objname, uplid return } - // Check acl - allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.UploadPartAction) + // Check action ACL + allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.UploadPartAction) if !allow { err = ErrNotAllowed return @@ -151,10 +151,10 @@ func (s *service) UploadPart(ctx context.Context, user, bucname, objname, uplid }() // Part - part = &ObjectPart{ + part = &Part{ Number: partId, ETag: body.ETag().String(), - Cid: cid, + CID: cid, Size: size, ModTime: time.Now().UTC(), } @@ -200,8 +200,8 @@ func (s *service) AbortMultipartUpload(ctx context.Context, user, bucname, objna return } - // Check action acl - allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.AbortMultipartUploadAction) + // Check action ACL + allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.AbortMultipartUploadAction) if !allow { err = ErrNotAllowed return @@ -235,7 +235,7 @@ func (s *service) AbortMultipartUpload(ctx context.Context, user, bucname, objna // Try to remove all parts body for _, part := range multipart.Parts { - _ = s.providers.FileStore().Remove(part.Cid) + _ = s.providers.FileStore().Remove(part.CID) } return @@ -267,8 +267,8 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob return } - // Check acl - allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.CompleteMultipartUploadAction) + // Check action ACL + allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.CompleteMultipartUploadAction) if !allow { err = ErrNotAllowed return @@ -372,7 +372,7 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob // Get part body reader var rdr io.ReadCloser - rdr, err = s.providers.FileStore().Cat(gotPart.Cid) + rdr, err = s.providers.FileStore().Cat(gotPart.CID) if err != nil { return } @@ -409,7 +409,7 @@ func (s *service) CompleteMultiPartUpload(ctx 
context.Context, user, bucname, ob Size: size, IsDir: false, ETag: s.computeMultipartMD5(parts), - Cid: cid, + CID: cid, VersionID: "", IsLatest: true, DeleteMarker: false, @@ -435,7 +435,7 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob // Try to remove old object body if exists, because it has been covered by new one if objectOld != nil { - _ = s.providers.FileStore().Remove(objectOld.Cid) + _ = s.providers.FileStore().Remove(objectOld.CID) } // Remove multipart upload @@ -446,7 +446,7 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob // Try to remove all parts body, because they are no longer be referenced for _, part := range multipart.Parts { - _ = s.providers.FileStore().Remove(part.Cid) + _ = s.providers.FileStore().Remove(part.CID) } return @@ -460,7 +460,7 @@ func (s *service) getMultipart(uplkey string) (multipart *Multipart, err error) return } -func (s *service) partIdxMap(parts []*ObjectPart) map[int]int { +func (s *service) partIdxMap(parts []*Part) map[int]int { mp := make(map[int]int) for i, part := range parts { mp[part.Number] = i @@ -504,7 +504,7 @@ func (s *service) deleteUploadsByPrefix(uploadsPrefix string) (err error) { return } for _, part := range multipart.Parts { - _ = s.providers.FileStore().Remove(part.Cid) + _ = s.providers.FileStore().Remove(part.CID) } return }) diff --git a/s3/services/object/service_object.go b/s3/services/object/service_object.go index bfcbc1e4c..9b9f996e0 100644 --- a/s3/services/object/service_object.go +++ b/s3/services/object/service_object.go @@ -39,8 +39,8 @@ func (s *service) PutObject(ctx context.Context, user, bucname, objname string, return } - // Check action acl - allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.PutObjectAction) + // Check action ACL + allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.PutObjectAction) if !allow { err = ErrNotAllowed return @@ -90,11 +90,11 @@ func (s *service) PutObject(ctx 
context.Context, user, bucname, objname string, Size: size, IsDir: false, ETag: body.ETag().String(), - Cid: cid, + CID: cid, VersionID: "", IsLatest: true, DeleteMarker: false, - Acl: meta[consts.AmzACL], + ACL: meta[consts.AmzACL], ContentType: meta[strings.ToLower(consts.ContentType)], ContentEncoding: meta[strings.ToLower(consts.ContentEncoding)], SuccessorModTime: now.UTC(), @@ -117,7 +117,7 @@ func (s *service) PutObject(ctx context.Context, user, bucname, objname string, // Try to remove old object body if exists, because it has been covered by new one if objectOld != nil { - _ = s.providers.FileStore().Remove(objectOld.Cid) + _ = s.providers.FileStore().Remove(objectOld.CID) } return @@ -149,8 +149,8 @@ func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, return } - // Check source action acl - srcAllow := s.checkAcl(srcBucket.Owner, srcBucket.Acl, user, action.GetObjectAction) + // Check source action ACL + srcAllow := s.checkACL(srcBucket.Owner, srcBucket.ACL, user, action.GetObjectAction) if !srcAllow { err = ErrNotAllowed return @@ -196,8 +196,8 @@ func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, return } - // Check destination action acl - dstAllow := s.checkAcl(dstBucket.Owner, dstBucket.Acl, user, action.PutObjectAction) + // Check destination action ACL + dstAllow := s.checkACL(dstBucket.Owner, dstBucket.ACL, user, action.PutObjectAction) if !dstAllow { err = ErrNotAllowed return @@ -224,7 +224,7 @@ func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, Size: srcObject.Size, IsDir: false, ETag: srcObject.ETag, - Cid: srcObject.Cid, + CID: srcObject.CID, VersionID: "", IsLatest: true, DeleteMarker: false, @@ -276,8 +276,8 @@ func (s *service) GetObject(ctx context.Context, user, bucname, objname string) return } - // Check action acl - allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.GetObjectAction) + // Check action ACL + allow := s.checkACL(bucket.Owner, 
bucket.ACL, user, action.GetObjectAction) if !allow { err = ErrNotAllowed return @@ -309,7 +309,7 @@ func (s *service) GetObject(ctx context.Context, user, bucname, objname string) } // Get object body - body, err = s.providers.FileStore().Cat(object.Cid) + body, err = s.providers.FileStore().Cat(object.CID) if err != nil { return } @@ -356,8 +356,8 @@ func (s *service) DeleteObject(ctx context.Context, user, bucname, objname strin return } - // Check action acl - allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.DeleteObjectAction) + // Check action ACL + allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.DeleteObjectAction) if !allow { err = ErrNotAllowed return @@ -390,7 +390,7 @@ func (s *service) DeleteObject(ctx context.Context, user, bucname, objname strin } // Try to delete object body - _ = s.providers.FileStore().Remove(object.Cid) + _ = s.providers.FileStore().Remove(object.CID) return } @@ -421,8 +421,8 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi return } - // Check action acl - allow := s.checkAcl(bucket.Owner, bucket.Acl, user, action.ListObjectsAction) + // Check action ACL + allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.ListObjectsAction) if !allow { err = ErrNotAllowed return @@ -542,7 +542,7 @@ func (s *service) deleteObjectsByPrefix(objectsPrefix string) (err error) { if er != nil { return } - _ = s.providers.FileStore().Remove(object.Cid) + _ = s.providers.FileStore().Remove(object.CID) return }) diff --git a/s3/services/sign/signature-v4-utils.go b/s3/services/sign/signature-v4-utils.go index 1239b9ff6..e615a9b73 100644 --- a/s3/services/sign/signature-v4-utils.go +++ b/s3/services/sign/signature-v4-utils.go @@ -126,12 +126,12 @@ func isValidRegion(reqRegion string, confRegion string) bool { return true } if confRegion == "US" { - confRegion = consts.DefaultRegion + confRegion = consts.DefaultLocation } // Some older s3 clients set region as "US" instead of // 
globalDefaultRegion, handle it. if reqRegion == "US" { - reqRegion = consts.DefaultRegion + reqRegion = consts.DefaultLocation } return reqRegion == confRegion } diff --git a/s3/utils/signature.go b/s3/utils/signature.go index 571730535..674b25505 100644 --- a/s3/utils/signature.go +++ b/s3/utils/signature.go @@ -103,7 +103,7 @@ func SignRequestV4(req *http.Request, accessKey, secretKey string, st ServiceTyp //req.Form.Add(b,string(a)) //queryStr := req.Form.Encode() queryStr := req.URL.Query().Encode() - region := consts.DefaultRegion + region := consts.DefaultLocation // Get scope. scope := strings.Join([]string{ currTime.Format(yyyymmdd), From ec4bb379d002b76e3da6ec7b80650010605a13cb Mon Sep 17 00:00:00 2001 From: Steve Date: Mon, 4 Sep 2023 07:04:27 +0800 Subject: [PATCH 084/139] refractor: response --- cmd/btfs/daemon.go | 3 + core/commands/files.go | 2 +- core/commands/object/object.go | 10 +- core/commands/object/patch.go | 2 +- fuse/ipns/ipns_test.go | 8 +- fuse/readonly/readonly_unix.go | 2 +- s3/consts/consts.go | 9 +- s3/handlers/handlers.go | 2 - s3/handlers/handlers_bucket.go | 4 +- s3/handlers/{handlers_utils.go => utils.go} | 0 s3/requests/parsers.go | 20 - s3/requests/parsers_common.go | 3 +- s3/requests/types_common.go | 10 - s3/responses/object_header.go | 64 --- s3/responses/response.go | 228 ----------- s3/responses/response_multipart.go | 26 -- s3/responses/responses.go | 364 ++++++++++++++++++ ...response_bucket.go => responses_bucket.go} | 43 +-- s3/responses/responses_common.go | 81 ++++ s3/responses/responses_multipart.go | 21 + ...response_object.go => responses_object.go} | 2 +- s3/responses/types.go | 310 --------------- s3/responses/types_common.go | 1 - s3/routers/{routers_options.go => options.go} | 0 s3/s3.go | 19 +- s3/server/{server_options.go => options.go} | 0 .../{service_instance.go => instance.go} | 0 s3/services/accesskey/options.go | 40 ++ s3/services/accesskey/service.go | 33 +- s3/services/accesskey/service_options.go | 15 
- s3/services/object/options.go | 6 +- .../sign/{service_options.go => options.go} | 0 settlement/swap/vault/cashout.go | 4 +- settlement/swap/vault/factory_test.go | 4 +- settlement/swap/vault/vault.go | 2 +- 35 files changed, 581 insertions(+), 757 deletions(-) rename s3/handlers/{handlers_utils.go => utils.go} (100%) delete mode 100644 s3/requests/types_common.go delete mode 100644 s3/responses/object_header.go delete mode 100644 s3/responses/response.go delete mode 100644 s3/responses/response_multipart.go create mode 100644 s3/responses/responses.go rename s3/responses/{response_bucket.go => responses_bucket.go} (57%) create mode 100644 s3/responses/responses_common.go create mode 100644 s3/responses/responses_multipart.go rename s3/responses/{response_object.go => responses_object.go} (85%) delete mode 100644 s3/responses/types.go delete mode 100644 s3/responses/types_common.go rename s3/routers/{routers_options.go => options.go} (100%) rename s3/server/{server_options.go => options.go} (100%) rename s3/services/accesskey/{service_instance.go => instance.go} (100%) create mode 100644 s3/services/accesskey/options.go delete mode 100644 s3/services/accesskey/service_options.go rename s3/services/sign/{service_options.go => options.go} (100%) diff --git a/cmd/btfs/daemon.go b/cmd/btfs/daemon.go index d5dc0790e..a220315a3 100644 --- a/cmd/btfs/daemon.go +++ b/cmd/btfs/daemon.go @@ -717,6 +717,9 @@ If the user need to start multiple nodes on the same machine, the configuration functest(cfg.Services.OnlineServerDomain, cfg.Identity.PeerID, hValue) } + // init s3 providers + s3.InitProviders(statestore) + // access-key init accesskey.InitService(s3.GetProviders()) diff --git a/core/commands/files.go b/core/commands/files.go index 950132efe..ae9272310 100644 --- a/core/commands/files.go +++ b/core/commands/files.go @@ -735,7 +735,7 @@ stat' on the file or any of its ancestors. 
}, Arguments: []cmds.Argument{ cmds.StringArg("path", true, false, "Path to write to."), - cmds.FileArg("data", true, false, "Data to write.").EnableStdin(), + cmds.FileArg("data", true, false, "data to write.").EnableStdin(), }, Options: []cmds.Option{ cmds.Int64Option(filesOffsetOptionName, "o", "Byte offset to begin writing at."), diff --git a/core/commands/object/object.go b/core/commands/object/object.go index 524e490f0..3a9d4c28b 100644 --- a/core/commands/object/object.go +++ b/core/commands/object/object.go @@ -270,7 +270,7 @@ Supported values are: Type: Node{}, Encoders: cmds.EncoderMap{ cmds.Protobuf: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *Node) error { - // deserialize the Data field as text as this was the standard behaviour + // deserialize the data field as text as this was the standard behaviour object, err := deserializeNode(out, "text") if err != nil { return nil @@ -371,20 +371,20 @@ It reads from stdin, and the output is a base58 encoded multihash. 'btfs object put' is a plumbing command for storing DAG nodes. It reads from stdin, and the output is a base58 encoded multihash. -Data should be in the format specified by the --inputenc flag. +data should be in the format specified by the --inputenc flag. --inputenc may be one of the following: * "protobuf" * "json" (default) Examples: - $ echo '{ "Data": "abc" }' | btfs object put + $ echo '{ "data": "abc" }' | btfs object put This creates a node with the data 'abc' and no links. 
For an object with links, create a file named 'node.json' with the contents: { - "Data": "another", + "data": "another", "Links": [ { "Name": "some link", "Hash": "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V", @@ -399,7 +399,7 @@ And then run: }, Arguments: []cmds.Argument{ - cmds.FileArg("data", true, false, "Data to be stored as a DAG object.").EnableStdin(), + cmds.FileArg("data", true, false, "data to be stored as a DAG object.").EnableStdin(), }, Options: []cmds.Option{ cmds.StringOption(inputencOptionName, "Encoding type of input data. One of: {\"protobuf\", \"json\"}.").WithDefault("json"), diff --git a/core/commands/object/patch.go b/core/commands/object/patch.go index f2eb0dc4e..ff2c6933e 100644 --- a/core/commands/object/patch.go +++ b/core/commands/object/patch.go @@ -46,7 +46,7 @@ the limit will not be respected by the network. }, Arguments: []cmds.Argument{ cmds.StringArg("root", true, false, "The hash of the node to modify."), - cmds.FileArg("data", true, false, "Data to append.").EnableStdin(), + cmds.FileArg("data", true, false, "data to append.").EnableStdin(), }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { api, err := cmdenv.GetApi(env, req) diff --git a/fuse/ipns/ipns_test.go b/fuse/ipns/ipns_test.go index 276f6a0dc..9ac110d60 100644 --- a/fuse/ipns/ipns_test.go +++ b/fuse/ipns/ipns_test.go @@ -66,10 +66,10 @@ func verifyFile(t *testing.T, path string, wantData []byte) { t.Fatal(err) } if len(isData) != len(wantData) { - t.Fatal("Data not equal - length check failed") + t.Fatal("data not equal - length check failed") } if !bytes.Equal(isData, wantData) { - t.Fatal("Data not equal") + t.Fatal("data not equal") } } @@ -328,7 +328,7 @@ func TestAppendFile(t *testing.T) { t.Fatal(err) } if !bytes.Equal(rbuf, data) { - t.Fatal("Data inconsistent!") + t.Fatal("data inconsistent!") } } @@ -458,7 +458,7 @@ func TestFSThrash(t *testing.T) { } if !bytes.Equal(data, out) { - t.Errorf("Data didn't match in %s: 
expected %v, got %v", name, data, out) + t.Errorf("data didn't match in %s: expected %v, got %v", name, data, out) } } } diff --git a/fuse/readonly/readonly_unix.go b/fuse/readonly/readonly_unix.go index 7e92aa6bf..dc7451d42 100644 --- a/fuse/readonly/readonly_unix.go +++ b/fuse/readonly/readonly_unix.go @@ -272,7 +272,7 @@ func (s *Node) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadR if err != nil { return err } - // Data has a capacity of Size + // data has a capacity of Size buf := resp.Data[:int(req.Size)] n, err := io.ReadFull(r, buf) resp.Data = buf[:n] diff --git a/s3/consts/consts.go b/s3/consts/consts.go index 09c388843..53c163e49 100644 --- a/s3/consts/consts.go +++ b/s3/consts/consts.go @@ -28,10 +28,11 @@ const ( AssumeRole = "AssumeRole" SignV4Algorithm = "AWS4-HMAC-SHA256" - DefaultLocation = "us-east-1" - DefaultBucketACL = s3.BucketCannedACLPublicRead - DefaultObjectACL = "" - AllUsersURI = "http://acs.amazonaws.com/groups/global/AllUsers" + DefaultServerInfo = "BTFS" + DefaultLocation = "us-east-1" + DefaultBucketACL = s3.BucketCannedACLPublicRead + DefaultObjectACL = "" + AllUsersURI = "http://acs.amazonaws.com/groups/global/AllUsers" ) var SupportedLocations = map[string]bool{ diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index f47c7dcdf..bd82b4349 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -12,8 +12,6 @@ import ( "strconv" ) -const lockPrefix = "s3:lock/" - var _ Handlerser = (*Handlers)(nil) type Handlers struct { diff --git a/s3/handlers/handlers_bucket.go b/s3/handlers/handlers_bucket.go index ddf26b4a1..e0c246b96 100644 --- a/s3/handlers/handlers_bucket.go +++ b/s3/handlers/handlers_bucket.go @@ -44,7 +44,7 @@ func (h *Handlers) CreateBucketHandler(w http.ResponseWriter, r *http.Request) { return } - responses.WritePutBucketResponse(w, r) + responses.WriteCreateBucketResponse(w, r) return } @@ -154,7 +154,7 @@ func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r 
*http.Request) { }() req, rerr := requests.ParsePutBucketAclRequest(r) - if err != nil { + if rerr != nil { err = rerr responses.WriteErrorResponse(w, r, rerr) return diff --git a/s3/handlers/handlers_utils.go b/s3/handlers/utils.go similarity index 100% rename from s3/handlers/handlers_utils.go rename to s3/handlers/utils.go diff --git a/s3/requests/parsers.go b/s3/requests/parsers.go index e0ac814a6..09175e04f 100644 --- a/s3/requests/parsers.go +++ b/s3/requests/parsers.go @@ -1,12 +1,9 @@ package requests import ( - "errors" - "fmt" "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/responses" "net/http" - "reflect" ) // CreateBucketRequest . @@ -17,23 +14,6 @@ type CreateBucketRequest struct { Region string } -// todo: parse aws request use aws struct -func ParseS3Request(r *http.Request, v interface{}) (err error) { - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Pointer || rv.IsNil() { - err = errors.New("invalid value must be non nil pointer") - return - } - - rt := reflect.TypeOf(v).Elem() - n := rt.NumField() - for i := 0; i < n; i++ { - f := rt.Field(i) - fmt.Println(f) - } - return -} - func ParseCreateBucketRequest(r *http.Request) (req *CreateBucketRequest, rerr *responses.Error) { req = &CreateBucketRequest{} req.AccessKey = cctx.GetAccessKey(r) diff --git a/s3/requests/parsers_common.go b/s3/requests/parsers_common.go index 488da5791..8bdf75b65 100644 --- a/s3/requests/parsers_common.go +++ b/s3/requests/parsers_common.go @@ -14,7 +14,8 @@ import ( ) func parseBucket(r *http.Request) (bucket string, rerr *responses.Error) { - err := s3utils.CheckValidBucketNameStrict(mux.Vars(r)["bucket"]) + bucket = mux.Vars(r)["bucket"] + err := s3utils.CheckValidBucketNameStrict(bucket) if err != nil { rerr = responses.ErrInvalidBucketName } diff --git a/s3/requests/types_common.go b/s3/requests/types_common.go deleted file mode 100644 index e2107f405..000000000 --- a/s3/requests/types_common.go +++ /dev/null @@ -1,10 +0,0 @@ -package 
requests - -import "encoding/xml" - -// createBucketConfiguration container for bucket configuration request from client. -// Used for parsing the location from the request body for Makebucket. -type createBucketLocationConfiguration struct { - XMLName xml.Name `xml:"CreateBucketConfiguration" json:"-"` - Location string `xml:"LocationConstraint"` -} diff --git a/s3/responses/object_header.go b/s3/responses/object_header.go deleted file mode 100644 index 2f66c84fe..000000000 --- a/s3/responses/object_header.go +++ /dev/null @@ -1,64 +0,0 @@ -package responses - -import ( - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/services/object" - "net/http" - "net/url" - "strconv" - "strings" -) - -// SetObjectHeaders Write object header -func SetObjectHeaders(w http.ResponseWriter, r *http.Request, objInfo object.Object) { - // set common headers - setCommonHeaders(w, r) - - // Set last modified time. - lastModified := objInfo.ModTime.UTC().Format(http.TimeFormat) - w.Header().Set(consts.LastModified, lastModified) - - // Set Etag if available. - if objInfo.ETag != "" { - w.Header()[consts.ETag] = []string{"\"" + objInfo.ETag + "\""} - } - - if objInfo.ContentType != "" { - w.Header().Set(consts.ContentType, objInfo.ContentType) - } - - if objInfo.ContentEncoding != "" { - w.Header().Set(consts.ContentEncoding, objInfo.ContentEncoding) - } - - if !objInfo.Expires.IsZero() { - w.Header().Set(consts.Expires, objInfo.Expires.UTC().Format(http.TimeFormat)) - } - - // Set content length - w.Header().Set(consts.ContentLength, strconv.FormatInt(objInfo.Size, 10)) - - // Set the relevant version ID as part of the response header. - if objInfo.VersionID != "" { - w.Header()[consts.AmzVersionID] = []string{objInfo.VersionID} - } - -} - -// SetHeadGetRespHeaders - set any requested parameters as response headers. 
-func SetHeadGetRespHeaders(w http.ResponseWriter, reqParams url.Values) { - for k, v := range reqParams { - if header, ok := supportedHeadGetReqParams[strings.ToLower(k)]; ok { - w.Header()[header] = v - } - } -} - -// supportedHeadGetReqParams - supported request parameters for GET and HEAD presigned request. -var supportedHeadGetReqParams = map[string]string{ - "response-expires": consts.Expires, - "response-content-type": consts.ContentType, - "response-content-encoding": consts.ContentEncoding, - "response-content-language": consts.ContentLanguage, - "response-content-disposition": consts.ContentDisposition, -} diff --git a/s3/responses/response.go b/s3/responses/response.go deleted file mode 100644 index 04e2cedb4..000000000 --- a/s3/responses/response.go +++ /dev/null @@ -1,228 +0,0 @@ -package responses - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "errors" - "fmt" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/gorilla/mux" - logging "github.com/ipfs/go-log/v2" - "net/http" - "net/url" - "path" - "strconv" - "time" -) - -var log = logging.Logger("resp") - -type mimeType string - -const ( - mimeNone mimeType = "" - mimeJSON mimeType = "application/json" - //mimeXML application/xml UTF-8 - mimeXML mimeType = " application/xml" -) - -func owner(accessKey string) *s3.Owner { - return new(s3.Owner).SetID(accessKey).SetDisplayName(accessKey) -} - -func ownerFullControlGrant(accessKey string) *s3.Grant { - return new(s3.Grant).SetGrantee(new(s3.Grantee).SetType(s3.TypeCanonicalUser).SetID(accessKey).SetDisplayName(accessKey)).SetPermission(s3.PermissionFullControl) -} - -var ( - allUsersReadGrant = new(s3.Grant).SetGrantee(new(s3.Grantee).SetType(s3.TypeGroup).SetURI(consts.AllUsersURI)).SetPermission(s3.PermissionRead) - allUsersWriteGrant = 
new(s3.Grant).SetGrantee(new(s3.Grantee).SetType(s3.TypeGroup).SetURI(consts.AllUsersURI)).SetPermission(s3.PermissionWrite) -) - -// APIErrorResponse - error response format -type APIErrorResponse struct { - XMLName xml.Name `xml:"Error" json:"-"` - Code string - Message string - Resource string - RequestID string `xml:"RequestId" json:"RequestId"` - HostID string `xml:"HostId" json:"HostId"` -} - -type RESTErrorResponse struct { - XMLName xml.Name `xml:"Error" json:"-"` - Code string `xml:"Code" json:"Code"` - Message string `xml:"Message" json:"Message"` - Resource string `xml:"Resource" json:"Resource"` - RequestID string `xml:"RequestId" json:"RequestId"` - Key string `xml:"Key,omitempty" json:"Key,omitempty"` - BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"` -} - -func getRESTErrorResponse(err *Error, resource string, bucket, object string) RESTErrorResponse { - return RESTErrorResponse{ - Code: err.Code(), - BucketName: bucket, - Key: object, - Message: err.Description(), - Resource: resource, - RequestID: fmt.Sprintf("%d", time.Now().UnixNano()), - } -} - -func WriteErrorResponseHeadersOnly(w http.ResponseWriter, r *http.Request, err error) { - var rerr *Error - if !errors.As(err, &rerr) { - rerr = ErrInternalError - } - writeResponse(w, r, rerr.HTTPStatusCode(), nil, mimeNone) -} - -// WriteErrorResponse write ErrorResponse -func WriteErrorResponse(w http.ResponseWriter, r *http.Request, rerr *Error) { - errorResponse := RESTErrorResponse{ - Code: rerr.Code(), - BucketName: mux.Vars(r)["bucket"], - Key: mux.Vars(r)["object"], - Message: rerr.Description(), - Resource: r.URL.Path, - RequestID: fmt.Sprintf("%d", time.Now().UnixNano()), - } - WriteXMLResponse(w, r, rerr.HTTPStatusCode(), errorResponse) -} - -// WriteSuccessResponseHeadersOnly write SuccessResponseHeadersOnly -func WriteSuccessResponseHeadersOnly(w http.ResponseWriter, r *http.Request) { - writeResponse(w, r, http.StatusOK, nil, mimeNone) -} - -// 
WriteSuccessResponse write SuccessResponseHeadersOnly -func WriteSuccessResponse(w http.ResponseWriter, r *http.Request) { - writeResponse(w, r, http.StatusOK, nil, mimeNone) -} - -// WriteSuccessResponseXML Write Success Response XML -func WriteSuccessResponseXML(w http.ResponseWriter, r *http.Request, response interface{}) { - WriteXMLResponse(w, r, http.StatusOK, response) -} - -// WriteXMLResponse Write XMLResponse -func WriteXMLResponse(w http.ResponseWriter, r *http.Request, statusCode int, response interface{}) { - writeResponse(w, r, statusCode, encodeXMLResponse(response), mimeXML) -} - -func writeResponse(w http.ResponseWriter, r *http.Request, statusCode int, response []byte, mType mimeType) { - setCommonHeaders(w, r) - if response != nil { - w.Header().Set(consts.ContentLength, strconv.Itoa(len(response))) - } - if mType != mimeNone { - w.Header().Set(consts.ContentType, string(mType)) - } - w.WriteHeader(statusCode) - if response != nil { - _, err := w.Write(response) - if err != nil { - log.Errorf("write err: %v", err) - } - w.(http.Flusher).Flush() - } -} - -func setCommonHeaders(w http.ResponseWriter, r *http.Request) { - w.Header().Set(consts.ServerInfo, "FDS") - w.Header().Set(consts.AmzRequestID, fmt.Sprintf("%d", time.Now().UnixNano())) - w.Header().Set(consts.AcceptRanges, "bytes") - if r.Header.Get("Origin") != "" { - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Access-Control-Allow-Credentials", "true") - } -} - -// encodeXMLResponse Encodes the response headers into XML format. -func encodeXMLResponse(response interface{}) []byte { - var buf bytes.Buffer - buf.WriteString(xml.Header) - err := xmlutil.BuildXML(response, xml.NewEncoder(&buf)) - if err != nil { - panic(err) - } - bs := buf.Bytes() - fmt.Println(string(bs)) - return bs -} - -// WriteErrorResponseJSON - writes error response in JSON format; -// useful for admin APIs. 
-func WriteErrorResponseJSON(w http.ResponseWriter, err error, reqURL *url.URL, host string) { - var rerr *Error - if !errors.As(err, &rerr) { - rerr = ErrInternalError - } - // Generate error response. - errorResponse := getAPIErrorResponse(rerr, reqURL.Path, w.Header().Get(consts.AmzRequestID), host) - encodedErrorResponse := encodeResponseJSON(errorResponse) - writeResponseSimple(w, rerr.HTTPStatusCode(), encodedErrorResponse, mimeJSON) -} - -// getErrorResponse gets in standard error and resource value and -// provides a encodable populated response values -func getAPIErrorResponse(err *Error, resource, requestID, hostID string) APIErrorResponse { - return APIErrorResponse{ - Code: err.Code(), - Message: err.Description(), - Resource: resource, - RequestID: requestID, - HostID: hostID, - } -} - -// Encodes the response headers into JSON format. -func encodeResponseJSON(response interface{}) []byte { - var bytesBuffer bytes.Buffer - e := json.NewEncoder(&bytesBuffer) - e.Encode(response) - return bytesBuffer.Bytes() -} - -// WriteSuccessResponseJSON writes success headers and response if any, -// with content-type set to `application/json`. 
-func WriteSuccessResponseJSON(w http.ResponseWriter, response []byte) { - writeResponseSimple(w, http.StatusOK, response, mimeJSON) -} - -func writeResponseSimple(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) { - if mType != mimeNone { - w.Header().Set(consts.ContentType, string(mType)) - } - w.Header().Set(consts.ContentLength, strconv.Itoa(len(response))) - w.WriteHeader(statusCode) - if response != nil { - w.Write(response) - } -} - -// WriteSuccessNoContent writes success headers with http status 204 -func WriteSuccessNoContent(w http.ResponseWriter) { - writeResponseSimple(w, http.StatusNoContent, nil, mimeNone) -} - -func setPutObjHeaders(w http.ResponseWriter, etag, cid string, delete bool) { - if etag != "" && !delete { - w.Header()[consts.ETag] = []string{`"` + etag + `"`} - } - if cid != "" { - w.Header()[consts.CID] = []string{cid} - } -} - -func pathClean(p string) string { - cp := path.Clean(p) - if cp == "." { - return "" - } - return cp -} diff --git a/s3/responses/response_multipart.go b/s3/responses/response_multipart.go deleted file mode 100644 index 62188e8ab..000000000 --- a/s3/responses/response_multipart.go +++ /dev/null @@ -1,26 +0,0 @@ -package responses - -import ( - "github.com/bittorrent/go-btfs/s3/services/object" - "net/http" -) - -func WriteCreateMultipartUploadResponse(w http.ResponseWriter, r *http.Request, bucname, objname, uploadID string) { - resp := GenerateInitiateMultipartUploadResponse(bucname, objname, uploadID) - WriteSuccessResponseXML(w, r, resp) -} - -func WriteAbortMultipartUploadResponse(w http.ResponseWriter, r *http.Request) { - WriteSuccessNoContent(w) -} - -func WriteUploadPartResponse(w http.ResponseWriter, r *http.Request, part object.Part) { - setPutObjHeaders(w, part.ETag, part.CID, false) - WriteSuccessResponseHeadersOnly(w, r) -} - -func WriteCompleteMultipartUploadResponse(w http.ResponseWriter, r *http.Request, bucname, objname, region string, obj object.Object) { - resp := 
GenerateCompleteMultipartUploadResponse(bucname, objname, region, obj) - setPutObjHeaders(w, obj.ETag, obj.CID, false) - WriteSuccessResponseXML(w, r, resp) -} diff --git a/s3/responses/responses.go b/s3/responses/responses.go new file mode 100644 index 000000000..f9f2ac782 --- /dev/null +++ b/s3/responses/responses.go @@ -0,0 +1,364 @@ +package responses + +import ( + "bytes" + "encoding/base64" + "encoding/xml" + "errors" + "fmt" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/bittorrent/go-btfs/s3/consts" + "io" + "math" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + mimeTypeXml = "application/xml" + noPayload = "nopayload" +) + +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + +var errValueNotSet = fmt.Errorf("value not set") + +var byteSliceType = reflect.TypeOf([]byte{}) + +func WriteResponse(w http.ResponseWriter, statusCode int, output interface{}, locationName string) (err error) { + if locationName != "" { + output = wrapOutput(output, locationName) + } + + defer func() { + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + } + }() + + if !outputFilled(output) { + w.WriteHeader(statusCode) + return + } + + typ := getPayloadType(output) + + if typ == noPayload { + err = buildHeader(w.Header(), output) + if err != nil { + return + } + w.WriteHeader(statusCode) + return + } + + var ( + body io.ReadCloser + contentLength int + contentType string + ) + defer func() { + if body != nil { + _ = body.Close() + } + }() + + switch typ { + case "structure", "": + body, contentLength, contentType, err = buildXMLBody(output) + default: + body, contentLength, contentType, err = buildRESTBody(output) + } + if err != nil { + return + } + + if contentLength != -1 { + w.Header().Set(consts.ContentLength, fmt.Sprintf("%d", contentLength)) + } + + if contentType != "" { + 
w.Header().Set(consts.ContentType, contentType) + } + + err = buildHeader(w.Header(), output) + if err != nil { + return + } + + w.WriteHeader(statusCode) + + if body != nil { + _, err = io.Copy(w, body) + } + + return +} + +func wrapOutput(v interface{}, locationName string) (wrapper interface{}) { + outputTag := fmt.Sprintf(`locationName:"%s" type:"structure"`, locationName) + fields := []reflect.StructField{ + { + Name: "_", + Type: reflect.TypeOf(struct{}{}), + Tag: `payload:"Output" type:"structure"`, + PkgPath: "responses", + }, + { + Name: "Output", + Type: reflect.TypeOf(v), + Tag: reflect.StructTag(outputTag), + }, + } + wrapperTyp := reflect.StructOf(fields) + wrapperVal := reflect.New(wrapperTyp) + wrapperVal.Elem().Field(1).Set(reflect.ValueOf(v)) + wrapper = wrapperVal.Interface() + return +} + +func outputFilled(output interface{}) bool { + return reflect.Indirect(reflect.ValueOf(output)).IsValid() +} + +func getPayloadType(output interface{}) (typ string) { + typ = noPayload + v := reflect.Indirect(reflect.ValueOf(output)) + if !v.IsValid() { + return + } + field, ok := v.Type().FieldByName("_") + if !ok { + return + } + noPayloadValue := field.Tag.Get(noPayload) + if noPayloadValue != "" { + return + } + payloadName := field.Tag.Get("payload") + if payloadName == "" { + return + } + member, ok := v.Type().FieldByName(payloadName) + if !ok { + return + } + typ = member.Tag.Get("type") + return +} + +func buildXMLBody(output interface{}) (body io.ReadCloser, contentLength int, contentType string, err error) { + var buf bytes.Buffer + buf.WriteString(xml.Header) + err = xmlutil.BuildXML(output, xml.NewEncoder(&buf)) + if err != nil { + return + } + body = io.NopCloser(&buf) + contentLength = buf.Len() + contentType = mimeTypeXml + return +} + +func buildRESTBody(output interface{}) (body io.ReadCloser, contentLength int, contentType string, err error) { + v := reflect.Indirect(reflect.ValueOf(output)) + field, _ := v.Type().FieldByName("_") + 
payloadName := field.Tag.Get("payload") + payload := reflect.Indirect(v.FieldByName(payloadName)) + if !payload.IsValid() || payload.Interface() == nil { + return + } + switch pIface := payload.Interface().(type) { + case io.ReadCloser: + body = pIface + contentLength = -1 + case []byte: + body = io.NopCloser(bytes.NewBuffer(pIface)) + contentLength = len(pIface) + case string: + body = io.NopCloser(bytes.NewBufferString(pIface)) + contentLength = len(pIface) + default: + err = fmt.Errorf( + "unknown payload type %s", + payload.Type(), + ) + } + return +} + +func buildHeader(header http.Header, output interface{}) (err error) { + v := reflect.ValueOf(output).Elem() + for i := 0; i < v.NumField(); i++ { + ft := v.Type().Field(i) + fv := v.Field(i) + fk := fv.Kind() + + if !fv.IsValid() { + continue + } + + if n := ft.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if fk == reflect.Ptr { + fv = fv.Elem() + fk = fv.Kind() + if !fv.IsValid() { + continue + } + } else if fk == reflect.Interface { + if !fv.Elem().IsValid() { + continue + } + } + + if ft.Tag.Get("ignore") != "" { + continue + } + + if ft.Tag.Get("marshal-as") == "blob" { + fv = fv.Convert(byteSliceType) + } + + switch ft.Tag.Get("location") { + case "headers": + err = writeHeaderMap(&header, fv, ft.Tag) + case "header": + name := ft.Tag.Get("locationName") + if name == "" { + name = ft.Name + } + err = writeHeader(&header, fv, name, ft.Tag) + } + + if err != nil { + return + } + } + + return +} + +func writeHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) (err error) { + str, err := convertType(v, tag) + if errors.Is(err, errValueNotSet) { + err = nil + return + } + if err != nil { + return + } + name = strings.TrimSpace(name) + str = strings.TrimSpace(str) + header.Add(name, str) + return +} + +func writeHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) (err error) { + prefix := tag.Get("locationName") + for _, key := range v.MapKeys() { + 
var str string + str, err = convertType(v.MapIndex(key), tag) + if errors.Is(err, errValueNotSet) { + err = nil + continue + } + if err != nil { + return + } + keyStr := strings.TrimSpace(key.String()) + str = strings.TrimSpace(str) + header.Add(prefix+keyStr, str) + } + return +} + +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { + v = reflect.Indirect(v) + if !v.IsValid() { + err = errValueNotSet + return + } + + switch value := v.Interface().(type) { + case string: + if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" { + value = base64.StdEncoding.EncodeToString([]byte(value)) + } + str = value + case []*string: + if tag.Get("location") != "header" || tag.Get("enum") == "" { + return "", fmt.Errorf("%T is only supported with location header and enum shapes", value) + } + if len(value) == 0 { + return "", errValueNotSet + } + + buff := &bytes.Buffer{} + for i, sv := range value { + if sv == nil || len(*sv) == 0 { + continue + } + if i != 0 { + buff.WriteRune(',') + } + item := *sv + if strings.Index(item, `,`) != -1 || strings.Index(item, `"`) != -1 { + item = strconv.Quote(item) + } + buff.WriteString(item) + } + str = string(buff.Bytes()) + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + switch { + case math.IsNaN(value): + str = floatNaN + case math.IsInf(value, 1): + str = floatInf + case math.IsInf(value, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(value, 'f', -1, 64) + } + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) + case aws.JSONValue: + if len(value) == 0 { + return "", errValueNotSet + } + escaping := protocol.NoEscape + if 
tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + default: + err = fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) + } + + return +} diff --git a/s3/responses/response_bucket.go b/s3/responses/responses_bucket.go similarity index 57% rename from s3/responses/response_bucket.go rename to s3/responses/responses_bucket.go index 9bced083f..8430c284e 100644 --- a/s3/responses/response_bucket.go +++ b/s3/responses/responses_bucket.go @@ -2,58 +2,51 @@ package responses import ( "github.com/aws/aws-sdk-go/service/s3" - "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/services/object" "net/http" ) -func WritePutBucketResponse(w http.ResponseWriter, r *http.Request) { - if cp := pathClean(r.URL.Path); cp != "" { - w.Header().Set(consts.Location, cp) - } - WriteSuccessResponse(w, r) +func WriteCreateBucketResponse(w http.ResponseWriter, r *http.Request) { + output := new(s3.CreateBucketOutput).SetLocation(pathClean(r.URL.Path)) + WriteSuccessResponse(w, output, "") return } func WriteHeadBucketResponse(w http.ResponseWriter, r *http.Request) { - WriteSuccessResponse(w, r) + output := new(s3.HeadBucketOutput) + WriteSuccessResponse(w, output, "") return } + func WriteDeleteBucketResponse(w http.ResponseWriter) { - WriteSuccessNoContent(w) + output := new(s3.DeleteBucketOutput) + _ = WriteResponse(w, http.StatusOK, output, "") return } -type ListBucketResponse struct { - ListAllMyBucketsResult s3.ListBucketsOutput `xml:"ListAllMyBucketsResult"` -} - func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, accessKey string, buckets []*object.Bucket) { - resp := &ListBucketResponse{} - resp.ListAllMyBucketsResult.SetOwner(owner(accessKey)) + output := new(s3.ListBucketsOutput) + output.SetOwner(owner(accessKey)) s3Buckets := make([]*s3.Bucket, 0) for _, buc := range buckets { s3Bucket := 
new(s3.Bucket).SetName(buc.Name).SetCreationDate(buc.Created) s3Buckets = append(s3Buckets, s3Bucket) } - resp.ListAllMyBucketsResult.SetBuckets(s3Buckets) - WriteSuccessResponseXML(w, r, resp) + output.SetBuckets(s3Buckets) + WriteSuccessResponse(w, output, "ListAllMyBucketsResult") return } func WritePutBucketAclResponse(w http.ResponseWriter, r *http.Request) { - WriteSuccessResponse(w, r) + output := new(s3.PutBucketAclOutput) + WriteSuccessResponse(w, output, "") return } -type GetBucketACLResponse struct { - AccessControlPolicy s3.GetBucketAclOutput `xml:"AccessControlPolicy"` -} - func WriteGetBucketACLResponse(w http.ResponseWriter, r *http.Request, accessKey string, acl string) { - resp := GetBucketACLResponse{} - resp.AccessControlPolicy.SetOwner(owner(accessKey)) + output := new(s3.GetBucketAclOutput) + output.SetOwner(owner(accessKey)) grants := make([]*s3.Grant, 0) grants = append(grants, ownerFullControlGrant(accessKey)) switch acl { @@ -65,7 +58,7 @@ func WriteGetBucketACLResponse(w http.ResponseWriter, r *http.Request, accessKey default: panic("unknown acl") } - resp.AccessControlPolicy.SetGrants(grants) - WriteSuccessResponseXML(w, r, resp) + output.SetGrants(grants) + WriteSuccessResponse(w, output, "AccessControlPolicy") return } diff --git a/s3/responses/responses_common.go b/s3/responses/responses_common.go new file mode 100644 index 000000000..7bc1864a4 --- /dev/null +++ b/s3/responses/responses_common.go @@ -0,0 +1,81 @@ +package responses + +import ( + "fmt" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/consts" + "net/http" + "path" + "time" +) + +func owner(accessKey string) *s3.Owner { + return new(s3.Owner).SetID(accessKey).SetDisplayName(accessKey) +} + +func ownerFullControlGrant(accessKey string) *s3.Grant { + return new(s3.Grant).SetGrantee(new(s3.Grantee).SetType(s3.TypeCanonicalUser).SetID(accessKey).SetDisplayName(accessKey)).SetPermission(s3.PermissionFullControl) +} + +var ( + allUsersReadGrant = 
new(s3.Grant).SetGrantee(new(s3.Grantee).SetType(s3.TypeGroup).SetURI(consts.AllUsersURI)).SetPermission(s3.PermissionRead) + allUsersWriteGrant = new(s3.Grant).SetGrantee(new(s3.Grantee).SetType(s3.TypeGroup).SetURI(consts.AllUsersURI)).SetPermission(s3.PermissionWrite) +) + +func getRequestID() string { + return fmt.Sprintf("%d", time.Now().UnixNano()) +} + +func setCommonHeader(w http.ResponseWriter, requestId string) { + w.Header().Set(consts.ServerInfo, consts.DefaultServerInfo) + w.Header().Set(consts.AmzRequestID, requestId) + w.Header().Set(consts.AcceptRanges, "bytes") +} + +type ErrorOutput struct { + _ struct{} `type:"structure"` + Code string `locationName:"Code" type:"string"` + Message string `locationName:"Message" type:"string"` + Resource string `locationName:"Resource" type:"string"` + RequestID string `locationName:"RequestID" type:"string"` +} + +func WriteErrorResponse(w http.ResponseWriter, r *http.Request, rerr *Error) { + reqID := getRequestID() + setCommonHeader(w, reqID) + output := &ErrorOutput{ + Code: rerr.Code(), + Message: rerr.Description(), + Resource: pathClean(r.URL.Path), + RequestID: reqID, + } + err := WriteResponse(w, rerr.HTTPStatusCode(), output, "Error") + if err != nil { + fmt.Println("write response: ", err) + } +} + +func WriteSuccessResponse(w http.ResponseWriter, output interface{}, locationName string) { + setCommonHeader(w, getRequestID()) + err := WriteResponse(w, http.StatusOK, output, locationName) + if err != nil { + fmt.Println("write response: ", err) + } +} + +func setPutObjHeaders(w http.ResponseWriter, etag, cid string, delete bool) { + if etag != "" && !delete { + w.Header()[consts.ETag] = []string{`"` + etag + `"`} + } + if cid != "" { + w.Header()[consts.CID] = []string{cid} + } +} + +func pathClean(p string) string { + cp := path.Clean(p) + if cp == "." 
{ + return "" + } + return cp +} diff --git a/s3/responses/responses_multipart.go b/s3/responses/responses_multipart.go new file mode 100644 index 000000000..fd7aff5d2 --- /dev/null +++ b/s3/responses/responses_multipart.go @@ -0,0 +1,21 @@ +package responses + +//func WriteCreateMultipartUploadResponse(w http.ResponseWriter, r *http.Request, bucname, objname, uploadID string) { +// resp := GenerateInitiateMultipartUploadResponse(bucname, objname, uploadID) +// WriteSuccessResponse(w, resp, "") +//} +// +//func WriteAbortMultipartUploadResponse(w http.ResponseWriter, r *http.Request) { +// WriteSuccessResponse(w, nil, "") +//} +// +//func WriteUploadPartResponse(w http.ResponseWriter, r *http.Request, part object.Part) { +// setPutObjHeaders(w, part.ETag, part.CID, false) +// WriteSuccessResponse(w, nil, "") +//} +// +//func WriteCompleteMultipartUploadResponse(w http.ResponseWriter, r *http.Request, bucname, objname, region string, obj object.Object) { +// resp := GenerateCompleteMultipartUploadResponse(bucname, objname, region, obj) +// setPutObjHeaders(w, obj.ETag, obj.CID, false) +// WriteSuccessResponse(w, resp, "") +//} diff --git a/s3/responses/response_object.go b/s3/responses/responses_object.go similarity index 85% rename from s3/responses/response_object.go rename to s3/responses/responses_object.go index 34bbf0073..edffad81d 100644 --- a/s3/responses/response_object.go +++ b/s3/responses/responses_object.go @@ -7,5 +7,5 @@ import ( func WritePutObjectResponse(w http.ResponseWriter, r *http.Request, obj object.Object) { setPutObjHeaders(w, obj.ETag, obj.CID, false) - WriteSuccessResponseHeadersOnly(w, r) + WriteSuccessResponse(w, nil, "") } diff --git a/s3/responses/types.go b/s3/responses/types.go deleted file mode 100644 index 55987629e..000000000 --- a/s3/responses/types.go +++ /dev/null @@ -1,310 +0,0 @@ -package responses - -import ( - "encoding/xml" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/bittorrent/go-btfs/s3/services/object" -) - 
-type AccessControlList struct { - Grant []*s3.Grant `xml:"Grant,omitempty"` -} - -type CanonicalUser struct { - ID string `xml:"ID"` - DisplayName string `xml:"DisplayName,omitempty"` -} - -// Grant grant -type Grant struct { - Grantee Grantee `xml:"Grantee"` - Permission Permission `xml:"Permission"` -} - -// Grantee grant -type Grantee struct { - XMLNS string `xml:"xmlns:xsi,attr"` - XMLXSI string `xml:"xsi:type,attr"` - Type string `xml:"Type"` - ID string `xml:"ID,omitempty"` - DisplayName string `xml:"DisplayName,omitempty"` - URI string `xml:"URI,omitempty"` -} - -// Permission May be one of READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL -type Permission string - -// ListAllMyBucketsResult List All Buckets Result -type ListAllMyBucketsResult struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult"` - Owner *s3.Owner - Buckets []*s3.Bucket `xml:"Buckets>Bucket"` -} - -type CopyObjectResponse struct { - CopyObjectResult CopyObjectResult `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult"` -} - -type CopyObjectResult struct { - LastModified string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LastModified"` - ETag string `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ETag"` -} - -// LocationResponse - format for location response. -type LocationResponse struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint" json:"-"` - Location string `xml:",chardata"` -} - -// ListObjectsResponse - format for list objects response. -type ListObjectsResponse struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"` - - Name string - Prefix string - Marker string - - // When response is truncated (the IsTruncated element value in the response - // is true), you can use the key name in this field as marker in the subsequent - // request to get next set of objects. 
Server lists objects in alphabetical - // order Note: This element is returned only if you have delimiter request parameter - // specified. If response does not include the NextMaker and it is truncated, - // you can use the value of the last Key in the response as the marker in the - // subsequent request to get the next set of object keys. - NextMarker string `xml:"NextMarker,omitempty"` - - MaxKeys int - Delimiter string - // A flag that indicates whether or not ListObjects returned all of the results - // that satisfied the search criteria. - IsTruncated bool - - Contents []Object - CommonPrefixes []CommonPrefix - - // Encoding type used to encode object keys in the response. - EncodingType string `xml:"EncodingType,omitempty"` -} - -// ListObjectsV2Response - format for list objects response. -type ListObjectsV2Response struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"` - - Name string - Prefix string - StartAfter string `xml:"StartAfter,omitempty"` - // When response is truncated (the IsTruncated element value in the response - // is true), you can use the key name in this field as marker in the subsequent - // request to get next set of objects. Server lists objects in alphabetical - // order Note: This element is returned only if you have delimiter request parameter - // specified. If response does not include the NextMaker and it is truncated, - // you can use the value of the last Key in the response as the marker in the - // subsequent request to get the next set of object keys. - ContinuationToken string `xml:"ContinuationToken,omitempty"` - NextContinuationToken string `xml:"NextContinuationToken,omitempty"` - - KeyCount int - MaxKeys int - Delimiter string - // A flag that indicates whether or not ListObjects returned all of the results - // that satisfied the search criteria. 
- IsTruncated bool - - Contents []Object - CommonPrefixes []CommonPrefix - - // Encoding type used to encode object keys in the response. - EncodingType string `xml:"EncodingType,omitempty"` -} - -// Object container for object metadata -type Object struct { - Key string - LastModified string // time string of format "2006-01-02T15:04:05.000Z" - ETag string - CID string // CID - Size int64 - - // Owner of the object. - Owner s3.Owner - - // The class of storage used to store the object. - StorageClass string - - // UserMetadata user-defined metadata - UserMetadata StringMap `xml:"UserMetadata,omitempty"` -} - -// StringMap is a map[string]string -type StringMap map[string]string - -// MarshalXML - StringMap marshals into XML. -func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - tokens := []xml.Token{start} - - for key, value := range s { - t := xml.StartElement{} - t.Name = xml.Name{ - Space: "", - Local: key, - } - tokens = append(tokens, t, xml.CharData(value), xml.EndElement{Name: t.Name}) - } - - tokens = append(tokens, xml.EndElement{ - Name: start.Name, - }) - - for _, t := range tokens { - if err := e.EncodeToken(t); err != nil { - return err - } - } - - // flush to ensure tokens are written - return e.Flush() -} - -// CommonPrefix container for prefix response in ListObjectsResponse -type CommonPrefix struct { - Prefix string -} - -type InitiateMultipartUploadResponse struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult" json:"-"` - - Bucket string - Key string - UploadID string `xml:"UploadId"` -} - -func GenerateInitiateMultipartUploadResponse(bucname, objname, uploadID string) InitiateMultipartUploadResponse { - return InitiateMultipartUploadResponse{ - Bucket: bucname, - Key: objname, - UploadID: uploadID, - } -} - -type CompleteMultipartUploadResponse struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult" json:"-"` - - 
Location string - Bucket string - Key string - ETag string - - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string -} - -func GenerateCompleteMultipartUploadResponse(bucname, objname, location string, obj object.Object) CompleteMultipartUploadResponse { - c := CompleteMultipartUploadResponse{ - Location: location, - Bucket: bucname, - Key: objname, - // AWS S3 quotes the ETag in XML, make sure we are compatible here. - ETag: "\"" + obj.ETag + "\"", - } - return c -} - -// GenerateListObjectsV2Response Generates an ListObjectsV2 response for the said bucket with other enumerated options. -//func GenerateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, isTruncated bool, maxKeys int, objects []object.Object, prefixes []string) ListObjectsV2Response { -// contents := make([]Object, 0, len(objects)) -// id := consts.DefaultOwnerID -// name := consts.DisplayName -// owner := s3.Owner{ -// ID: &id, -// DisplayName: &name, -// } -// data := ListObjectsV2Response{} -// -// for _, object := range objects { -// content := Object{} -// if object.Name == "" { -// continue -// } -// content.Key = utils.S3EncodeName(object.Name, encodingType) -// content.LastModified = object.ModTime.UTC().Format(consts.Iso8601TimeFormat) -// if object.ETag != "" { -// content.ETag = "\"" + object.ETag + "\"" -// } -// content.Size = object.Size -// content.Owner = owner -// content.CID = object.CID -// contents = append(contents, content) -// } -// data.Name = bucket -// data.Contents = contents -// -// data.EncodingType = encodingType -// data.StartAfter = utils.S3EncodeName(startAfter, encodingType) -// data.Delimiter = utils.S3EncodeName(delimiter, encodingType) -// data.Prefix = utils.S3EncodeName(prefix, encodingType) -// data.MaxKeys = maxKeys -// data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token)) -// data.NextContinuationToken = 
base64.StdEncoding.EncodeToString([]byte(nextToken)) -// data.IsTruncated = isTruncated -// -// commonPrefixes := make([]CommonPrefix, 0, len(prefixes)) -// for _, prefix := range prefixes { -// prefixItem := CommonPrefix{} -// prefixItem.Prefix = utils.S3EncodeName(prefix, encodingType) -// commonPrefixes = append(commonPrefixes, prefixItem) -// } -// data.CommonPrefixes = commonPrefixes -// data.KeyCount = len(data.Contents) + len(data.CommonPrefixes) -// return data -//} - -// generates an ListObjectsV1 response for the said bucket with other enumerated options. -//func GenerateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp object.ObjectsList) ListObjectsResponse { -// contents := make([]Object, 0, len(resp.Objects)) -// id := consts.DefaultOwnerID -// name := consts.DisplayName -// owner := s3.Owner{ -// ID: &id, -// DisplayName: &name, -// } -// data := ListObjectsResponse{} -// -// for _, object := range resp.Objects { -// content := Object{} -// if object.Name == "" { -// continue -// } -// content.Key = utils.S3EncodeName(object.Name, encodingType) -// content.LastModified = object.ModTime.UTC().Format(consts.Iso8601TimeFormat) -// if object.ETag != "" { -// content.ETag = "\"" + object.ETag + "\"" -// } -// content.CID = object.CID -// content.Size = object.Size -// content.StorageClass = "" -// content.Owner = owner -// contents = append(contents, content) -// } -// data.Name = bucket -// data.Contents = contents -// -// data.EncodingType = encodingType -// data.Prefix = utils.S3EncodeName(prefix, encodingType) -// data.Marker = utils.S3EncodeName(marker, encodingType) -// data.Delimiter = utils.S3EncodeName(delimiter, encodingType) -// data.MaxKeys = maxKeys -// data.NextMarker = utils.S3EncodeName(resp.NextMarker, encodingType) -// data.IsTruncated = resp.IsTruncated -// -// prefixes := make([]CommonPrefix, 0, len(resp.Prefixes)) -// for _, prefix := range resp.Prefixes { -// prefixItem := CommonPrefix{} 
-// prefixItem.Prefix = utils.S3EncodeName(prefix, encodingType) -// prefixes = append(prefixes, prefixItem) -// } -// data.CommonPrefixes = prefixes -// return data -//} -// diff --git a/s3/responses/types_common.go b/s3/responses/types_common.go deleted file mode 100644 index 66522789b..000000000 --- a/s3/responses/types_common.go +++ /dev/null @@ -1 +0,0 @@ -package responses diff --git a/s3/routers/routers_options.go b/s3/routers/options.go similarity index 100% rename from s3/routers/routers_options.go rename to s3/routers/options.go diff --git a/s3/s3.go b/s3/s3.go index a37ebd7a5..6ab69c45e 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -2,7 +2,7 @@ package s3 import ( config "github.com/bittorrent/go-btfs-config" - "github.com/bittorrent/go-btfs/chain" + "github.com/bittorrent/go-btfs/s3/ctxmu" "github.com/bittorrent/go-btfs/s3/handlers" "github.com/bittorrent/go-btfs/s3/providers" "github.com/bittorrent/go-btfs/s3/routers" @@ -10,6 +10,7 @@ import ( "github.com/bittorrent/go-btfs/s3/services/accesskey" "github.com/bittorrent/go-btfs/s3/services/object" "github.com/bittorrent/go-btfs/s3/services/sign" + "github.com/bittorrent/go-btfs/transaction/storage" "sync" ) @@ -18,31 +19,31 @@ var ( once sync.Once ) -func initProviders() { +func InitProviders(stateStore storage.StateStorer) { once.Do(func() { - sstore := providers.NewStorageStateStoreProxy(chain.StateStore) + sstore := providers.NewStorageStateStoreProxy(stateStore) fstore := providers.NewBtfsAPI("") ps = providers.NewProviders(sstore, fstore) }) } func GetProviders() *providers.Providers { - initProviders() return ps } func NewServer(cfg config.S3CompatibleAPI) *server.Server { - // providers - initProviders() + // lock global multiple keys read write lock + lock := ctxmu.NewDefaultMultiCtxRWMutex() // services - acksvc := accesskey.NewService(ps) sigsvc := sign.NewService() - objsvc := object.NewService(ps) + acksvc := accesskey.NewService(ps, accesskey.WithLock(lock)) + objsvc := object.NewService(ps, 
object.WithLock(lock)) // handlers hs := handlers.NewHandlers( - acksvc, sigsvc, objsvc, handlers.WithHeaders(cfg.HTTPHeaders), + acksvc, sigsvc, objsvc, + handlers.WithHeaders(cfg.HTTPHeaders), ) // routers diff --git a/s3/server/server_options.go b/s3/server/options.go similarity index 100% rename from s3/server/server_options.go rename to s3/server/options.go diff --git a/s3/services/accesskey/service_instance.go b/s3/services/accesskey/instance.go similarity index 100% rename from s3/services/accesskey/service_instance.go rename to s3/services/accesskey/instance.go diff --git a/s3/services/accesskey/options.go b/s3/services/accesskey/options.go new file mode 100644 index 000000000..f959d6560 --- /dev/null +++ b/s3/services/accesskey/options.go @@ -0,0 +1,40 @@ +package accesskey + +import ( + "github.com/bittorrent/go-btfs/s3/ctxmu" + "time" +) + +const ( + defaultSecretLength = 32 + defaultStoreKeyPrefix = "access-keys:" + defaultWaitLockTimout = 2 * time.Minute +) + +var defaultLock = ctxmu.NewDefaultMultiCtxRWMutex() + +type Option func(svc *service) + +func WithSecretLength(length int) Option { + return func(svc *service) { + svc.secretLength = length + } +} + +func WithStoreKeyPrefix(prefix string) Option { + return func(svc *service) { + svc.storeKeyPrefix = prefix + } +} + +func WithWaitLockTimout(timout time.Duration) Option { + return func(svc *service) { + svc.waitLockTimeout = timout + } +} + +func WithLock(lock ctxmu.MultiCtxRWLocker) Option { + return func(svc *service) { + svc.lock = lock + } +} diff --git a/s3/services/accesskey/service.go b/s3/services/accesskey/service.go index 6ba1f91e2..5a7f5c3ee 100644 --- a/s3/services/accesskey/service.go +++ b/s3/services/accesskey/service.go @@ -11,29 +11,24 @@ import ( "time" ) -const ( - defaultSecretLength = 32 - defaultStoreKeyPrefix = "access-keys:" - defaultUpdateTimeoutMS = 200 -) - var _ Service = (*service)(nil) type service struct { - providers providers.Providerser - secretLength int - 
storeKeyPrefix string - locks *ctxmu.MultiCtxRWMutex - updateTimeout time.Duration + providers providers.Providerser + secretLength int + storeKeyPrefix string + lock ctxmu.MultiCtxRWLocker + waitLockTimeout time.Duration } + func NewService(providers providers.Providerser, options ...Option) Service { svc := &service{ - providers: providers, - secretLength: defaultSecretLength, - storeKeyPrefix: defaultStoreKeyPrefix, - locks: ctxmu.NewDefaultMultiCtxRWMutex(), - updateTimeout: time.Duration(defaultUpdateTimeoutMS) * time.Millisecond, + providers: providers, + secretLength: defaultSecretLength, + storeKeyPrefix: defaultStoreKeyPrefix, + lock: defaultLock, + waitLockTimeout: defaultWaitLockTimout, } for _, option := range options { option(svc) @@ -137,14 +132,14 @@ type updateArgs struct { } func (svc *service) update(key string, args *updateArgs) (err error) { - ctx, cancel := context.WithTimeout(context.Background(), svc.updateTimeout) + ctx, cancel := context.WithTimeout(context.Background(), svc.waitLockTimeout) defer cancel() - err = svc.locks.Lock(ctx, key) + err = svc.lock.Lock(ctx, key) if err != nil { return } - defer svc.locks.Unlock(key) + defer svc.lock.Unlock(key) record := &AccessKey{} stk := svc.getStoreKey(key) diff --git a/s3/services/accesskey/service_options.go b/s3/services/accesskey/service_options.go deleted file mode 100644 index 25f1617a6..000000000 --- a/s3/services/accesskey/service_options.go +++ /dev/null @@ -1,15 +0,0 @@ -package accesskey - -type Option func(svc *service) - -func WithSecretLength(length int) Option { - return func(svc *service) { - svc.secretLength = length - } -} - -func WithStoreKeyPrefix(prefix string) Option { - return func(svc *service) { - svc.storeKeyPrefix = prefix - } -} diff --git a/s3/services/object/options.go b/s3/services/object/options.go index 05c4dd9a3..7e6b80f58 100644 --- a/s3/services/object/options.go +++ b/s3/services/object/options.go @@ -7,9 +7,9 @@ import ( const ( defaultKeySeparator = "/" - 
defaultBucketSpace = "bkt" - defaultObjectSpace = "obj" - defaultUploadSpace = "upl" + defaultBucketSpace = "s3:bkt" + defaultObjectSpace = "s3:obj" + defaultUploadSpace = "s3:upl" defaultOperationTimeout = 5 * time.Minute defaultCloseBodyTimeout = 10 * time.Minute ) diff --git a/s3/services/sign/service_options.go b/s3/services/sign/options.go similarity index 100% rename from s3/services/sign/service_options.go rename to s3/services/sign/options.go diff --git a/settlement/swap/vault/cashout.go b/settlement/swap/vault/cashout.go index c6ee34bb5..89540735d 100644 --- a/settlement/swap/vault/cashout.go +++ b/settlement/swap/vault/cashout.go @@ -131,7 +131,7 @@ func cashoutActionKey(vault common.Address, token common.Address) string { // // output, err := s.transactionService.Call(ctx, &transaction.TxRequest{ // To: &vault, -// Data: callData, +// data: callData, // }) // if err != nil { // return nil, err @@ -191,7 +191,7 @@ func (s *cashoutService) CashCheque(ctx context.Context, vault, recipient common //} //request := &transaction.TxRequest{ // To: &vault, - // Data: callData, + // data: callData, // Value: big.NewInt(0), // Description: "cheque cashout", //} diff --git a/settlement/swap/vault/factory_test.go b/settlement/swap/vault/factory_test.go index 652eb87e3..f52c352ec 100644 --- a/settlement/swap/vault/factory_test.go +++ b/settlement/swap/vault/factory_test.go @@ -207,12 +207,12 @@ func TestFactoryVerifyVault(t *testing.T) { // Status: 1, // Logs: []*types.Log{ // { -// Data: logData, +// data: logData, // }, // { // Address: factoryAddress, // Topics: []common.Hash{simpleSwapDeployedEvent.ID}, -// Data: logData, +// data: logData, // }, // }, // }, nil diff --git a/settlement/swap/vault/vault.go b/settlement/swap/vault/vault.go index d8fe95800..d4733a3d2 100644 --- a/settlement/swap/vault/vault.go +++ b/settlement/swap/vault/vault.go @@ -526,7 +526,7 @@ func (s *service) LastCheques(token common.Address) (map[common.Address]*SignedC // // request := 
&transaction.TxRequest{ // To: &s.address, -// Data: callData, +// data: callData, // Value: big.NewInt(0), // Description: fmt.Sprintf("vault withdrawal of %d WBTT", amount), // } From ac30bbe41936719e0300bc1f7e8575583874ec9c Mon Sep 17 00:00:00 2001 From: Steve Date: Mon, 4 Sep 2023 15:52:17 +0800 Subject: [PATCH 085/139] refractor: response func --- s3/responses/responses.go | 158 +++++++++++++++++--------------------- 1 file changed, 72 insertions(+), 86 deletions(-) diff --git a/s3/responses/responses.go b/s3/responses/responses.go index f9f2ac782..7fc418039 100644 --- a/s3/responses/responses.go +++ b/s3/responses/responses.go @@ -46,15 +46,17 @@ func WriteResponse(w http.ResponseWriter, statusCode int, output interface{}, lo } }() - if !outputFilled(output) { + if !valid(output) { w.WriteHeader(statusCode) return } - typ := getPayloadType(output) - - if typ == noPayload { - err = buildHeader(w.Header(), output) + body, clen, ctyp, err := extractBody(output) + if err != nil { + return + } + if body == nil { + err = extractHeaders(w.Header(), output) if err != nil { return } @@ -62,45 +64,21 @@ func WriteResponse(w http.ResponseWriter, statusCode int, output interface{}, lo return } - var ( - body io.ReadCloser - contentLength int - contentType string - ) defer func() { - if body != nil { - _ = body.Close() - } + _ = body.Close() }() - switch typ { - case "structure", "": - body, contentLength, contentType, err = buildXMLBody(output) - default: - body, contentLength, contentType, err = buildRESTBody(output) - } - if err != nil { - return - } - - if contentLength != -1 { - w.Header().Set(consts.ContentLength, fmt.Sprintf("%d", contentLength)) - } - - if contentType != "" { - w.Header().Set(consts.ContentType, contentType) - } + w.Header().Set(consts.ContentLength, fmt.Sprintf("%d", clen)) + w.Header().Set(consts.ContentType, ctyp) - err = buildHeader(w.Header(), output) + err = extractHeaders(w.Header(), output) if err != nil { return } 
w.WriteHeader(statusCode) - if body != nil { - _, err = io.Copy(w, body) - } + _, err = io.Copy(w, body) return } @@ -127,12 +105,51 @@ func wrapOutput(v interface{}, locationName string) (wrapper interface{}) { return } -func outputFilled(output interface{}) bool { - return reflect.Indirect(reflect.ValueOf(output)).IsValid() +func extractBody(output interface{}) (body io.ReadCloser, clen int, ctyp string, err error) { + ptyp, plod := getPayload(output) + if ptyp == noPayload { + return + } + + if ptyp == "structure" || ptyp == "" { + var buf bytes.Buffer + buf.WriteString(xml.Header) + err = xmlutil.BuildXML(output, xml.NewEncoder(&buf)) + if err != nil { + return + } + body = io.NopCloser(&buf) + clen = buf.Len() + ctyp = mimeTypeXml + return + } + + if plod.Interface() == nil { + return + } + + switch pifc := plod.Interface().(type) { + case io.ReadCloser: + body = pifc + clen = -1 + case []byte: + body = io.NopCloser(bytes.NewBuffer(pifc)) + clen = len(pifc) + case string: + body = io.NopCloser(bytes.NewBufferString(pifc)) + clen = len(pifc) + default: + err = fmt.Errorf( + "unknown payload type %s", + plod.Type(), + ) + } + + return } -func getPayloadType(output interface{}) (typ string) { - typ = noPayload +func getPayload(output interface{}) (ptyp string, plod reflect.Value) { + ptyp = noPayload v := reflect.Indirect(reflect.ValueOf(output)) if !v.IsValid() { return @@ -153,51 +170,12 @@ func getPayloadType(output interface{}) (typ string) { if !ok { return } - typ = member.Tag.Get("type") - return -} - -func buildXMLBody(output interface{}) (body io.ReadCloser, contentLength int, contentType string, err error) { - var buf bytes.Buffer - buf.WriteString(xml.Header) - err = xmlutil.BuildXML(output, xml.NewEncoder(&buf)) - if err != nil { - return - } - body = io.NopCloser(&buf) - contentLength = buf.Len() - contentType = mimeTypeXml - return -} - -func buildRESTBody(output interface{}) (body io.ReadCloser, contentLength int, contentType string, err error) { - 
v := reflect.Indirect(reflect.ValueOf(output)) - field, _ := v.Type().FieldByName("_") - payloadName := field.Tag.Get("payload") - payload := reflect.Indirect(v.FieldByName(payloadName)) - if !payload.IsValid() || payload.Interface() == nil { - return - } - switch pIface := payload.Interface().(type) { - case io.ReadCloser: - body = pIface - contentLength = -1 - case []byte: - body = io.NopCloser(bytes.NewBuffer(pIface)) - contentLength = len(pIface) - case string: - body = io.NopCloser(bytes.NewBufferString(pIface)) - contentLength = len(pIface) - default: - err = fmt.Errorf( - "unknown payload type %s", - payload.Type(), - ) - } + ptyp = member.Tag.Get("type") + plod = reflect.Indirect(v.FieldByName(payloadName)) return } -func buildHeader(header http.Header, output interface{}) (err error) { +func extractHeaders(header http.Header, output interface{}) (err error) { v := reflect.ValueOf(output).Elem() for i := 0; i < v.NumField(); i++ { ft := v.Type().Field(i) @@ -233,14 +211,11 @@ func buildHeader(header http.Header, output interface{}) (err error) { } switch ft.Tag.Get("location") { - case "headers": - err = writeHeaderMap(&header, fv, ft.Tag) case "header": - name := ft.Tag.Get("locationName") - if name == "" { - name = ft.Name - } + name := ifemp(ft.Tag.Get("locationName"), ft.Name) err = writeHeader(&header, fv, name, ft.Tag) + case "headers": + err = writeHeaderMap(&header, fv, ft.Tag) } if err != nil { @@ -362,3 +337,14 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) return } + +func ifemp(a, b string) string { + if a != "" { + return a + } + return b +} + +func valid(ifce interface{}) bool { + return reflect.Indirect(reflect.ValueOf(ifce)).IsValid() +} From 2fb46c1cc8253931c9d8ae7111836df46ebcfc1d Mon Sep 17 00:00:00 2001 From: Steve Date: Tue, 5 Sep 2023 04:51:30 +0800 Subject: [PATCH 086/139] refractor: response --- s3/responses/responses.go | 168 ++++++++++++++++--------------- s3/responses/responses_common.go | 28 
++---- s3/routers/routers.go | 2 +- s3/server/options.go | 2 +- 4 files changed, 99 insertions(+), 101 deletions(-) diff --git a/s3/responses/responses.go b/s3/responses/responses.go index 7fc418039..bc10ce995 100644 --- a/s3/responses/responses.go +++ b/s3/responses/responses.go @@ -33,11 +33,13 @@ const ( var errValueNotSet = fmt.Errorf("value not set") -var byteSliceType = reflect.TypeOf([]byte{}) - func WriteResponse(w http.ResponseWriter, statusCode int, output interface{}, locationName string) (err error) { - if locationName != "" { - output = wrapOutput(output, locationName) + setCommonHeaders(w.Header()) + + outv := reflect.Indirect(reflect.ValueOf(wrapOutput(output, locationName))) + if !outv.IsValid() { + w.WriteHeader(statusCode) + return } defer func() { @@ -46,17 +48,18 @@ func WriteResponse(w http.ResponseWriter, statusCode int, output interface{}, lo } }() - if !valid(output) { - w.WriteHeader(statusCode) + err = setFieldRequestID(w.Header(), outv) + if err != nil { return } - body, clen, ctyp, err := extractBody(output) + body, clen, ctyp, err := extractBody(outv) if err != nil { return } + if body == nil { - err = extractHeaders(w.Header(), output) + err = setLocationHeaders(w.Header(), outv) if err != nil { return } @@ -64,14 +67,12 @@ func WriteResponse(w http.ResponseWriter, statusCode int, output interface{}, lo return } - defer func() { - _ = body.Close() - }() + defer body.Close() w.Header().Set(consts.ContentLength, fmt.Sprintf("%d", clen)) w.Header().Set(consts.ContentType, ctyp) - err = extractHeaders(w.Header(), output) + err = setLocationHeaders(w.Header(), outv) if err != nil { return } @@ -84,6 +85,11 @@ func WriteResponse(w http.ResponseWriter, statusCode int, output interface{}, lo } func wrapOutput(v interface{}, locationName string) (wrapper interface{}) { + if locationName == "" { + wrapper = v + return + } + outputTag := fmt.Sprintf(`locationName:"%s" type:"structure"`, locationName) fields := []reflect.StructField{ { @@ -105,8 
+111,8 @@ func wrapOutput(v interface{}, locationName string) (wrapper interface{}) { return } -func extractBody(output interface{}) (body io.ReadCloser, clen int, ctyp string, err error) { - ptyp, plod := getPayload(output) +func extractBody(v reflect.Value) (body io.ReadCloser, clen int, ctyp string, err error) { + ptyp, plod := getPayload(v) if ptyp == noPayload { return } @@ -114,7 +120,7 @@ func extractBody(output interface{}) (body io.ReadCloser, clen int, ctyp string, if ptyp == "structure" || ptyp == "" { var buf bytes.Buffer buf.WriteString(xml.Header) - err = xmlutil.BuildXML(output, xml.NewEncoder(&buf)) + err = xmlutil.BuildXML(v.Interface(), xml.NewEncoder(&buf)) if err != nil { return } @@ -132,6 +138,15 @@ func extractBody(output interface{}) (body io.ReadCloser, clen int, ctyp string, case io.ReadCloser: body = pifc clen = -1 + case io.ReadSeeker: + var bs []byte + bs, err = io.ReadAll(pifc) + if err != nil { + return + } + body = io.NopCloser(bytes.NewBuffer(bs)) + clen = len(bs) + ctyp = http.DetectContentType(bs) case []byte: body = io.NopCloser(bytes.NewBuffer(pifc)) clen = len(pifc) @@ -148,45 +163,44 @@ func extractBody(output interface{}) (body io.ReadCloser, clen int, ctyp string, return } -func getPayload(output interface{}) (ptyp string, plod reflect.Value) { - ptyp = noPayload - v := reflect.Indirect(reflect.ValueOf(output)) - if !v.IsValid() { - return - } - field, ok := v.Type().FieldByName("_") - if !ok { - return - } - noPayloadValue := field.Tag.Get(noPayload) - if noPayloadValue != "" { - return - } - payloadName := field.Tag.Get("payload") - if payloadName == "" { +func setFieldRequestID(headers http.Header, outv reflect.Value) (err error) { + reqId := headers.Get(consts.AmzRequestID) + + idv := outv.FieldByName("RequestID") + if !idv.IsValid() { return } - member, ok := v.Type().FieldByName(payloadName) - if !ok { - return + + switch idv.Interface().(type) { + case *string: + idv.Set(reflect.ValueOf(&reqId)) + case string: + 
idv.Set(reflect.ValueOf(reqId)) + default: + err = errValueNotSet } - ptyp = member.Tag.Get("type") - plod = reflect.Indirect(v.FieldByName(payloadName)) + return } -func extractHeaders(header http.Header, output interface{}) (err error) { - v := reflect.ValueOf(output).Elem() +func setCommonHeaders(headers http.Header) { + reqId := getRequestID() + headers.Set(consts.ServerInfo, consts.DefaultServerInfo) + headers.Set(consts.AcceptRanges, "bytes") + headers.Set(consts.AmzRequestID, reqId) +} + +func setLocationHeaders(header http.Header, v reflect.Value) (err error) { for i := 0; i < v.NumField(); i++ { - ft := v.Type().Field(i) fv := v.Field(i) + ft := v.Type().Field(i) fk := fv.Kind() - if !fv.IsValid() { + if n := ft.Name; n[0:1] == strings.ToLower(n[0:1]) { continue } - if n := ft.Name; n[0:1] == strings.ToLower(n[0:1]) { + if !fv.IsValid() { continue } @@ -202,20 +216,12 @@ func extractHeaders(header http.Header, output interface{}) (err error) { } } - if ft.Tag.Get("ignore") != "" { - continue - } - - if ft.Tag.Get("marshal-as") == "blob" { - fv = fv.Convert(byteSliceType) - } - switch ft.Tag.Get("location") { case "header": name := ifemp(ft.Tag.Get("locationName"), ft.Name) - err = writeHeader(&header, fv, name, ft.Tag) + err = setHeaders(&header, fv, name, ft.Tag) case "headers": - err = writeHeaderMap(&header, fv, ft.Tag) + err = setHeadersMap(&header, fv, ft.Tag) } if err != nil { @@ -226,7 +232,7 @@ func extractHeaders(header http.Header, output interface{}) (err error) { return } -func writeHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) (err error) { +func setHeaders(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) (err error) { str, err := convertType(v, tag) if errors.Is(err, errValueNotSet) { err = nil @@ -241,7 +247,7 @@ func writeHeader(header *http.Header, v reflect.Value, name string, tag reflect. 
return } -func writeHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) (err error) { +func setHeadersMap(header *http.Header, v reflect.Value, tag reflect.StructTag) (err error) { prefix := tag.Get("locationName") for _, key := range v.MapKeys() { var str string @@ -260,6 +266,35 @@ func writeHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) return } +func getPayload(v reflect.Value) (ptyp string, plod reflect.Value) { + ptyp = noPayload + + field, ok := v.Type().FieldByName("_") + if !ok { + return + } + + noPayloadValue := field.Tag.Get(noPayload) + if noPayloadValue != "" { + return + } + + payloadName := field.Tag.Get("payload") + if payloadName == "" { + return + } + + member, ok := v.Type().FieldByName(payloadName) + if !ok { + return + } + + ptyp = member.Tag.Get("type") + plod = reflect.Indirect(v.FieldByName(payloadName)) + + return +} + func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { v = reflect.Indirect(v) if !v.IsValid() { @@ -273,29 +308,6 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) value = base64.StdEncoding.EncodeToString([]byte(value)) } str = value - case []*string: - if tag.Get("location") != "header" || tag.Get("enum") == "" { - return "", fmt.Errorf("%T is only supported with location header and enum shapes", value) - } - if len(value) == 0 { - return "", errValueNotSet - } - - buff := &bytes.Buffer{} - for i, sv := range value { - if sv == nil || len(*sv) == 0 { - continue - } - if i != 0 { - buff.WriteRune(',') - } - item := *sv - if strings.Index(item, `,`) != -1 || strings.Index(item, `"`) != -1 { - item = strconv.Quote(item) - } - buff.WriteString(item) - } - str = string(buff.Bytes()) case []byte: str = base64.StdEncoding.EncodeToString(value) case bool: @@ -344,7 +356,3 @@ func ifemp(a, b string) string { } return b } - -func valid(ifce interface{}) bool { - return reflect.Indirect(reflect.ValueOf(ifce)).IsValid() -} diff 
--git a/s3/responses/responses_common.go b/s3/responses/responses_common.go index 7bc1864a4..0178f44d3 100644 --- a/s3/responses/responses_common.go +++ b/s3/responses/responses_common.go @@ -26,11 +26,6 @@ func getRequestID() string { return fmt.Sprintf("%d", time.Now().UnixNano()) } -func setCommonHeader(w http.ResponseWriter, requestId string) { - w.Header().Set(consts.ServerInfo, consts.DefaultServerInfo) - w.Header().Set(consts.AmzRequestID, requestId) - w.Header().Set(consts.AcceptRanges, "bytes") -} type ErrorOutput struct { _ struct{} `type:"structure"` @@ -40,27 +35,22 @@ type ErrorOutput struct { RequestID string `locationName:"RequestID" type:"string"` } -func WriteErrorResponse(w http.ResponseWriter, r *http.Request, rerr *Error) { - reqID := getRequestID() - setCommonHeader(w, reqID) - output := &ErrorOutput{ +func NewErrOutput(r *http.Request, rerr *Error) *ErrorOutput { + return &ErrorOutput{ Code: rerr.Code(), Message: rerr.Description(), Resource: pathClean(r.URL.Path), - RequestID: reqID, - } - err := WriteResponse(w, rerr.HTTPStatusCode(), output, "Error") - if err != nil { - fmt.Println("write response: ", err) + RequestID: "", // this field value will be automatically filled } } +func WriteErrorResponse(w http.ResponseWriter, r *http.Request, rerr *Error) { + output := NewErrOutput(r, rerr) + _ = WriteResponse(w, rerr.HTTPStatusCode(), output, "Error") +} + func WriteSuccessResponse(w http.ResponseWriter, output interface{}, locationName string) { - setCommonHeader(w, getRequestID()) - err := WriteResponse(w, http.StatusOK, output, locationName) - if err != nil { - fmt.Println("write response: ", err) - } + _ = WriteResponse(w, http.StatusOK, output, locationName) } func setPutObjHeaders(w http.ResponseWriter, etag, cid string, delete bool) { diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 786a05133..080c1f970 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -30,7 +30,7 @@ func (routers *Routers) Register() 
http.Handler { hs.Sign, ) - bucket := root.PathPrefix("/{bucket}").Subrouter() + bucket := root.PathPrefix("/{Bucket}").Subrouter() // multipart object... // CreateMultipart diff --git a/s3/server/options.go b/s3/server/options.go index 2c8ec3f81..7d34ed0bb 100644 --- a/s3/server/options.go +++ b/s3/server/options.go @@ -1,6 +1,6 @@ package server -const defaultServerAddress = "127.0.0.1:15001" +const defaultServerAddress = "127.0.0.1:6001" type Option func(*Server) From 59577be812b9842586917c50d39ba028b71e3e8a Mon Sep 17 00:00:00 2001 From: Steve Date: Tue, 5 Sep 2023 05:30:00 +0800 Subject: [PATCH 087/139] refractor: object --- s3/handlers/handlers_bucket.go | 17 +++- s3/handlers/handlers_object.go | 166 +++++++++++++++---------------- s3/handlers/proto.go | 2 +- s3/requests/parsers.go | 16 +-- s3/requests/parsers_common.go | 10 +- s3/responses/responses_object.go | 10 +- s3/routers/routers.go | 6 +- 7 files changed, 119 insertions(+), 108 deletions(-) diff --git a/s3/handlers/handlers_bucket.go b/s3/handlers/handlers_bucket.go index e0c246b96..ea0559d3a 100644 --- a/s3/handlers/handlers_bucket.go +++ b/s3/handlers/handlers_bucket.go @@ -4,6 +4,7 @@ import ( "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" + "github.com/bittorrent/go-btfs/s3/s3utils" "github.com/bittorrent/go-btfs/s3/services/object" "net/http" ) @@ -18,7 +19,21 @@ var errToRespErr = map[error]*responses.Error{ func (h *Handlers) respErr(err error) (rerr *responses.Error) { rerr, ok := errToRespErr[err] - if !ok { + if ok { + return + } + switch err.(type) { + case s3utils.BucketNameInvalid: + rerr = responses.ErrInvalidBucketName + case s3utils.ObjectNameInvalid: + rerr = responses.ErrInvalidObjectName + case s3utils.InvalidPart: + rerr = responses.ErrInvalidPart + case s3utils.InvalidUploadID: + rerr = responses.ErrNoSuchUpload + case s3utils.InvalidMarkerPrefixCombination: + rerr = 
responses.ErrInvalidRequestParameter + default: rerr = responses.ErrInternalError } return diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index 991f4617c..f7a3ffd92 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -1,98 +1,90 @@ package handlers import ( + "errors" + "github.com/bittorrent/go-btfs/s3/cctx" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/requests" + "github.com/bittorrent/go-btfs/s3/responses" + "github.com/bittorrent/go-btfs/s3/s3utils" + "github.com/bittorrent/go-btfs/s3/utils/hash" + "net/http" "time" ) const lockWaitTimeout = 5 * time.Minute -//func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { -// ctx := r.Context() -// ack := cctx.GetAccessKey(r) -// var err error -// defer func() { -// cctx.SetHandleInf(r, h.name(), err) -// }() -// -// // X-Amz-Copy-Source shouldn't be set for this call. -// if _, ok := r.Header[consts.AmzCopySource]; ok { -// err = errors.New("shouldn't be copy") -// responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) -// return -// } -// -// aclHeader := r.Header.Get(consts.AmzACL) -// if aclHeader != "" { -// err = errors.New("object acl can only set to default") -// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) -// return -// } -// -// bucname, objname, err := requests.ParseBucketAndObject(r) -// if err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) -// return -// } -// -// err = s3utils.CheckPutObjectArgs(ctx, bucname, objname) -// if err != nil { // todo: convert error -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// meta, err := extractMetadata(ctx, r) -// if err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequest) -// return -// } -// -// if r.ContentLength == 0 { -// responses.WriteErrorResponse(w, r, responses.ErrEntityTooSmall) -// return -// } -// -// hrdr, ok := 
r.Body.(*hash.Reader) -// if !ok { -// responses.WriteErrorResponse(w, r, responses.ErrInternalError) -// return -// } -// -// // rlock bucket -// runlock, err := h.rlock(ctx, bucname, w, r) -// if err != nil { -// return -// } -// defer runlock() -// -// // lock object -// unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) -// if err != nil { -// return -// } -// defer unlock() -// -// err = h.bucsvc.CheckACL(ack, bucname, action.PutObjectAction) -// if errors.Is(err, object.ErrBucketNotFound) { -// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) -// return -// } -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// //objsvc -// obj, err := h.objsvc.PutObject(ctx, bucname, objname, hrdr, r.ContentLength, meta) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// responses.WritePutObjectResponse(w, r, obj) -// -// return -//} -// +func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + // X-Amz-Copy-Source shouldn't be set for this call. 
+ if _, ok := r.Header[consts.AmzCopySource]; ok { + err = errors.New("shouldn't be copy") + responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) + return + } + + _, rerr := requests.ParseObjectACL(r) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) + return + } + + bucname, rerr := requests.ParseBucket(r) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) + return + } + + objname, rerr := requests.ParseObject(r) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) + } + + err = s3utils.CheckPutObjectArgs(ctx, bucname, objname) + if err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + meta, err := extractMetadata(ctx, r) + if err != nil { + responses.WriteErrorResponse(w, r, responses.ErrInvalidRequest) + return + } + + if r.ContentLength == 0 { + responses.WriteErrorResponse(w, r, responses.ErrEntityTooSmall) + return + } + + hrdr, ok := r.Body.(*hash.Reader) + if !ok { + responses.WriteErrorResponse(w, r, responses.ErrInternalError) + return + } + + obj, err := h.objsvc.PutObject(ctx, ack, bucname, objname, hrdr, r.ContentLength, meta) + if err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + responses.WritePutObjectResponse(w, r, obj) + + return +} + //// HeadObjectHandler - HEAD Object //func (h *Handlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { // ctx := r.Context() diff --git a/s3/handlers/proto.go b/s3/handlers/proto.go index 25f80d424..812a7d3dc 100644 --- a/s3/handlers/proto.go +++ b/s3/handlers/proto.go @@ -22,7 +22,7 @@ type Handlerser interface { // Object - //PutObjectHandler(w http.ResponseWriter, r *http.Request) + PutObjectHandler(w http.ResponseWriter, r *http.Request) //HeadObjectHandler(w http.ResponseWriter, r *http.Request) //CopyObjectHandler(w http.ResponseWriter, r *http.Request) //DeleteObjectHandler(w http.ResponseWriter, r *http.Request) diff --git 
a/s3/requests/parsers.go b/s3/requests/parsers.go index 09175e04f..d53e4af6a 100644 --- a/s3/requests/parsers.go +++ b/s3/requests/parsers.go @@ -17,15 +17,15 @@ type CreateBucketRequest struct { func ParseCreateBucketRequest(r *http.Request) (req *CreateBucketRequest, rerr *responses.Error) { req = &CreateBucketRequest{} req.AccessKey = cctx.GetAccessKey(r) - req.Bucket, rerr = parseBucket(r) + req.Bucket, rerr = ParseBucket(r) if rerr != nil { return } - req.ACL, rerr = parseBucketACL(r) + req.ACL, rerr = ParseBucketACL(r) if rerr != nil { return } - req.Region, rerr = parseLocation(r) + req.Region, rerr = ParseLocation(r) return } @@ -38,7 +38,7 @@ type DeleteBucketRequest struct { func ParseDeleteBucketRequest(r *http.Request) (req *DeleteBucketRequest, rerr *responses.Error) { req = &DeleteBucketRequest{} req.AccessKey = cctx.GetAccessKey(r) - req.Bucket, rerr = parseBucket(r) + req.Bucket, rerr = ParseBucket(r) return } @@ -51,7 +51,7 @@ type HeadBucketRequest struct { func ParseHeadBucketRequest(r *http.Request) (req *HeadBucketRequest, rerr *responses.Error) { req = &HeadBucketRequest{} req.AccessKey = cctx.GetAccessKey(r) - req.Bucket, rerr = parseBucket(r) + req.Bucket, rerr = ParseBucket(r) return } @@ -75,7 +75,7 @@ type GetBucketACLRequest struct { func ParseGetBucketACLRequest(r *http.Request) (req *GetBucketACLRequest, rerr *responses.Error) { req = &GetBucketACLRequest{} req.AccessKey = cctx.GetAccessKey(r) - req.Bucket, rerr = parseBucket(r) + req.Bucket, rerr = ParseBucket(r) return } @@ -89,10 +89,10 @@ type PutBucketACLRequest struct { func ParsePutBucketAclRequest(r *http.Request) (req *PutBucketACLRequest, rerr *responses.Error) { req = &PutBucketACLRequest{} req.AccessKey = cctx.GetAccessKey(r) - req.Bucket, rerr = parseBucket(r) + req.Bucket, rerr = ParseBucket(r) if rerr != nil { return } - req.ACL, rerr = parseBucketACL(r) + req.ACL, rerr = ParseBucketACL(r) return } diff --git a/s3/requests/parsers_common.go 
b/s3/requests/parsers_common.go index 8bdf75b65..f4f2cac0e 100644 --- a/s3/requests/parsers_common.go +++ b/s3/requests/parsers_common.go @@ -13,7 +13,7 @@ import ( "path" ) -func parseBucket(r *http.Request) (bucket string, rerr *responses.Error) { +func ParseBucket(r *http.Request) (bucket string, rerr *responses.Error) { bucket = mux.Vars(r)["bucket"] err := s3utils.CheckValidBucketNameStrict(bucket) if err != nil { @@ -22,7 +22,7 @@ func parseBucket(r *http.Request) (bucket string, rerr *responses.Error) { return } -func parseObject(r *http.Request) (object string, rerr *responses.Error) { +func ParseObject(r *http.Request) (object string, rerr *responses.Error) { object, err := unescapePath(mux.Vars(r)["object"]) if err != nil { rerr = responses.ErrInvalidRequestParameter @@ -30,7 +30,7 @@ func parseObject(r *http.Request) (object string, rerr *responses.Error) { return } -func parseLocation(r *http.Request) (location string, rerr *responses.Error) { +func ParseLocation(r *http.Request) (location string, rerr *responses.Error) { if r.ContentLength != 0 { locationCfg := s3.CreateBucketConfiguration{} decoder := xml.NewDecoder(r.Body) @@ -51,7 +51,7 @@ func parseLocation(r *http.Request) (location string, rerr *responses.Error) { return } -func parseBucketACL(r *http.Request) (acl string, rerr *responses.Error) { +func ParseBucketACL(r *http.Request) (acl string, rerr *responses.Error) { acl = r.Header.Get(consts.AmzACL) if len(acl) == 0 { acl = consts.DefaultBucketACL @@ -62,7 +62,7 @@ func parseBucketACL(r *http.Request) (acl string, rerr *responses.Error) { return } -func parseObjectACL(r *http.Request) (acl string, rerr *responses.Error) { +func ParseObjectACL(r *http.Request) (acl string, rerr *responses.Error) { acl = r.Header.Get(consts.AmzACL) if len(acl) == 0 { acl = consts.DefaultObjectACL diff --git a/s3/responses/responses_object.go b/s3/responses/responses_object.go index edffad81d..860b7a397 100644 --- a/s3/responses/responses_object.go +++ 
b/s3/responses/responses_object.go @@ -1,11 +1,15 @@ package responses import ( + "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/services/object" "net/http" ) -func WritePutObjectResponse(w http.ResponseWriter, r *http.Request, obj object.Object) { - setPutObjHeaders(w, obj.ETag, obj.CID, false) - WriteSuccessResponse(w, nil, "") +func WritePutObjectResponse(w http.ResponseWriter, r *http.Request, obj *object.Object) { + output := new(s3.PutObjectOutput) + output.SetETag(`"` + obj.ETag + `"`) + w.Header().Set(consts.CID, obj.CID) + WriteSuccessResponse(w, output, "") } diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 080c1f970..6d62e216e 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -30,7 +30,7 @@ func (routers *Routers) Register() http.Handler { hs.Sign, ) - bucket := root.PathPrefix("/{Bucket}").Subrouter() + bucket := root.PathPrefix("/{bucket}").Subrouter() // multipart object... 
// CreateMultipart @@ -49,8 +49,8 @@ func (routers *Routers) Register() http.Handler { //bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsHandler) //// HeadObject //bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(hs.HeadObjectHandler) - //// PutObject - //bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(hs.PutObjectHandler) + // PutObject + bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(hs.PutObjectHandler) //// CopyObject //bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(hs.CopyObjectHandler) //// DeleteObject From 1bbb0364f968b24fb44b8aba30de41e618f0d9b0 Mon Sep 17 00:00:00 2001 From: Shawn-Huang-Tron <107823650+Shawn-Huang-Tron@users.noreply.github.com> Date: Tue, 5 Sep 2023 15:34:57 +0800 Subject: [PATCH 088/139] feat: add backup and recovery command (#348) * feat: add backup and recovery command * feat: beautify the log * feat: init add recovery option * fix: format error --- cmd/btfs/init.go | 26 +++ core/commands/backup.go | 339 +++++++++++++++++++++++++++++++++ core/commands/commands_test.go | 2 + core/commands/root.go | 2 + 4 files changed, 369 insertions(+) create mode 100644 core/commands/backup.go diff --git a/cmd/btfs/init.go b/cmd/btfs/init.go index 1261778a2..63950b5c9 100644 --- a/cmd/btfs/init.go +++ b/cmd/btfs/init.go @@ -10,6 +10,7 @@ import ( "path/filepath" "strconv" "strings" + "time" "github.com/bittorrent/go-btfs/assets" "github.com/bittorrent/go-btfs/chain" @@ -36,6 +37,7 @@ const ( rmOnUnpinOptionName = "rm-on-unpin" seedOptionName = "seed" simpleMode = "simple-mode" + recoveryOptionName = "recovery" /* passWordOptionName = "password" passwordFileoptionName = "password-file" @@ -72,6 +74,7 @@ environment variable: cmds.BoolOption(rmOnUnpinOptionName, "r", "Remove unpinned files.").WithDefault(false), cmds.StringOption(seedOptionName, "s", "Import seed phrase"), cmds.BoolOption(simpleMode, "sm", "init with simple 
mode or not."), + cmds.StringOption(recoveryOptionName, "Recovery data from a backup"), /* cmds.StringOption(passWordOptionName, "", "password for decrypting keys."), cmds.StringOption(passwordFileoptionName, "", "path to a file that contains password for decrypting keys"), @@ -140,7 +143,30 @@ environment variable: password, _ := req.Options[passWordOptionName].(string) passwordFile, _ := req.Options[passwordFileoptionName].(string) */ + backupPath, ok := req.Options[recoveryOptionName].(string) + if ok { + btfsPath := env.(*oldcmds.Context).ConfigRoot + dstPath := filepath.Dir(btfsPath) + if fsrepo.IsInitialized(btfsPath) { + newPath := filepath.Join(dstPath, fmt.Sprintf(".btfs_backup_%d", time.Now().Unix())) + // newPath := filepath.Join(filepath.Dir(btfsPath), backup) + err := os.Rename(btfsPath, newPath) + if err != nil { + return err + } + fmt.Println("btfs configuration file already exists!") + fmt.Println("We have renamed it to ", newPath) + } + if err := commands.UnTar(backupPath, dstPath); err != nil { + err = commands.UnZip(backupPath, dstPath) + if err != nil { + return errors.New("your file format is not tar.gz or zip, please check again") + } + } + fmt.Println("Recovery successful!") + return nil + } return doInit(os.Stdout, cctx.ConfigRoot, empty, nBitsForKeypair, profile, conf, keyType, importKey, seedPhrase, rmOnUnpin, simpleModeIn) }, } diff --git a/core/commands/backup.go b/core/commands/backup.go new file mode 100644 index 000000000..989c569e2 --- /dev/null +++ b/core/commands/backup.go @@ -0,0 +1,339 @@ +package commands + +import ( + "archive/tar" + "archive/zip" + "compress/gzip" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "time" + + cmds "github.com/bittorrent/go-btfs-cmds" + commands "github.com/bittorrent/go-btfs/commands" + fsrepo "github.com/bittorrent/go-btfs/repo/fsrepo" +) + +const ( + outputFileOption = "o" + compressOption = "a" + backupPathOption = "r" + excludeOption = "exclude" +) + +var BackupCmd = 
&cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Back up BTFS's data", + LongDescription: ` +This command will create a backup of the data from the current BTFS node. +`, + }, + Arguments: []cmds.Argument{ + cmds.FileArg("file", true, false, "data to encode").EnableStdin(), + }, + Options: []cmds.Option{ + cmds.StringOption(outputFileOption, "backup output file path"), + cmds.StringOption(compressOption, "gz or zip").WithDefault("gz"), + cmds.StringsOption(excludeOption, "exclude backup output file path"), + }, + Run: func(req *cmds.Request, resp cmds.ResponseEmitter, env cmds.Environment) error { + r, err := fsrepo.Open(env.(*commands.Context).ConfigRoot) + if err != nil { + return err + } + defer r.Close() + + var fileName = fmt.Sprintf("btfs_backup_%d", time.Now().Unix()) + + outputName, ok := req.Options[outputFileOption].(string) + if ok { + fileName = outputName + } + btfsPath, err := fsrepo.BestKnownPath() + if err != nil { + return err + } + + excludePath, _ := req.Options[excludeOption].([]string) + for _, v := range excludePath { + // TODO + if v != "config" && v != "statestore" && v != "datastore" { + return errors.New("-exclude only support config, statestore or datastore") + } + } + // exclude the repo.lock to avoid dead lock + excludePath = append(excludePath, "repo.lock") + compressWay, _ := req.Options[compressOption].(string) + // TODO + if compressWay != "gz" && compressWay != "zip" { + return errors.New("-a only support zip or gz, gz is default") + } + absPath, err := filepath.Abs(fileName) + if err != nil { + return err + } + if compressWay == "zip" { + absPath += ".zip" + err = Zip(btfsPath, absPath, excludePath) + } else { + absPath += ".tar.gz" + err = Tar(btfsPath, absPath, excludePath) + } + if err != nil { + return err + } + fmt.Printf("Backup successful! 
The backup path is %s\n", absPath) + return nil + }, +} + +var RecoveryCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "Recover BTFS's data from a archived file of backup", + LongDescription: `This command will recover data from a previously created backup file`, + }, + Options: []cmds.Option{ + cmds.StringOption(backupPathOption, "backup output file path"), + }, + Run: func(req *cmds.Request, resp cmds.ResponseEmitter, env cmds.Environment) error { + backupPath, ok := req.Options[backupPathOption].(string) + if !ok { + return errors.New("you need to specify -r to indicate the path you want to recover") + } + btfsPath := env.(*commands.Context).ConfigRoot + dstPath := filepath.Dir(btfsPath) + if fsrepo.IsInitialized(btfsPath) { + newPath := filepath.Join(dstPath, fmt.Sprintf(".btfs_backup_%d", time.Now().Unix())) + // newPath := filepath.Join(filepath.Dir(btfsPath), backup) + err := os.Rename(btfsPath, newPath) + if err != nil { + return err + } + fmt.Println("btfs configuration file already exists!") + fmt.Println("We have renamed it to ", newPath) + } + + if err := UnTar(backupPath, dstPath); err != nil { + err = UnZip(backupPath, dstPath) + if err != nil { + return errors.New("your file format is not tar.gz or zip, please check again") + } + } + fmt.Println("Recovery successful!") + return nil + }, +} + +func Tar(src, dst string, excludePath []string) (err error) { + fw, err := os.Create(dst) + if err != nil { + return + } + defer fw.Close() + + gw := gzip.NewWriter(fw) + defer gw.Close() + + tw := tar.NewWriter(gw) + defer tw.Close() + + basePath := filepath.Dir(src) + filepath.Walk(src, func(fileAbsPath string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + for _, v := range excludePath { + excludeAbsPath := filepath.Join(src, v) + if v != "" && strings.HasPrefix(fileAbsPath, excludeAbsPath) { + return nil + } + } + rel, err := filepath.Rel(basePath, fileAbsPath) + if err != nil { + return err + } + hdr, err := 
tar.FileInfoHeader(fi, "") + if err != nil { + return err + } + hdr.Name = rel + + // 写入文件信息 + if err = tw.WriteHeader(hdr); err != nil { + return err + } + + if fi.IsDir() { + return nil + } + + fr, err := os.Open(fileAbsPath) + if err != nil { + return err + } + defer fr.Close() + + // copy 文件数据到 tw + _, err = io.Copy(tw, fr) + if err != nil { + return err + } + return nil + }) + tw.Flush() + gw.Flush() + return +} + +func UnTar(src, dst string) (err error) { + fr, err := os.Open(src) + if err != nil { + return err + } + defer fr.Close() + gr, err := gzip.NewReader(fr) + if err != nil { + return err + } + defer gr.Close() + // tar read + tr := tar.NewReader(gr) + // 读取文件 + for { + h, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if h.FileInfo().IsDir() { + err = os.MkdirAll(filepath.Join(dst, h.Name), h.FileInfo().Mode()) + if err != nil { + return err + } + continue + } + + fw, err := os.OpenFile(filepath.Join(dst, h.Name), os.O_CREATE|os.O_RDWR|os.O_TRUNC, os.FileMode(h.Mode)) + if err != nil { + return err + } + defer fw.Close() + // 写文件 + _, err = io.Copy(fw, tr) + if err != nil { + return err + } + } + return +} + +func Zip(src, dst string, excludePath []string) (err error) { + fw, err := os.Create(dst) + if err != nil { + return err + } + defer fw.Close() + + zw := zip.NewWriter(fw) + defer func() { + if err := zw.Close(); err != nil { + log.Fatalln(err) + } + }() + basePath := filepath.Dir(src) + filepath.Walk(src, func(fileAbsPath string, fi os.FileInfo, errBack error) (err error) { + if errBack != nil { + return errBack + } + for _, v := range excludePath { + excludeAbsPath := filepath.Join(src, v) + if v != "" && strings.HasPrefix(fileAbsPath, excludeAbsPath) { + return nil + } + } + fh, err := zip.FileInfoHeader(fi) + if err != nil { + return + } + + rel, err := filepath.Rel(basePath, fileAbsPath) + if err != nil { + return err + } + fh.Name = rel + + if fi.IsDir() { + fh.Name += "/" + } + + w, err := 
zw.CreateHeader(fh) + if err != nil { + return + } + + if !fh.Mode().IsRegular() { + return nil + } + + fr, err := os.Open(fileAbsPath) + if err != nil { + return + } + defer fr.Close() + + _, err = io.Copy(w, fr) + if err != nil { + return + } + return nil + }) + zw.Flush() + return +} + +func UnZip(src, dst string) (err error) { + zr, err := zip.OpenReader(src) + if err != nil { + return + } + defer zr.Close() + + for _, file := range zr.File { + err = persistZipFile(dst, file) + if err != nil { + return + } + } + return nil +} + +func persistZipFile(dst string, file *zip.File) (err error) { + path := filepath.Join(dst, file.Name) + + if file.FileInfo().IsDir() { + return os.MkdirAll(path, file.Mode()) + } + + fr, err := file.Open() + if err != nil { + return err + } + defer fr.Close() + + fw, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_TRUNC, file.Mode()) + if err != nil { + return err + } + defer fw.Close() + + _, err = io.Copy(fw, fr) + if err != nil { + return err + } + return +} diff --git a/core/commands/commands_test.go b/core/commands/commands_test.go index 1366d2963..5b4eadcc1 100644 --- a/core/commands/commands_test.go +++ b/core/commands/commands_test.go @@ -347,6 +347,8 @@ func TestCommands(t *testing.T) { "/multibase/decode", "/multibase/transcode", "/multibase/list", + "/backup", + "/recovery", } cmdSet := make(map[string]struct{}) diff --git a/core/commands/root.go b/core/commands/root.go index 22508bece..ee7775ada 100644 --- a/core/commands/root.go +++ b/core/commands/root.go @@ -180,6 +180,8 @@ var rootSubcommands = map[string]*cmds.Command{ "statuscontract": StatusContractCmd, "bittorrent": bittorrentCmd, "multibase": MbaseCmd, + "backup": BackupCmd, + "recovery": RecoveryCmd, } // RootRO is the readonly version of Root From 12f68bf2738fa9bc9e2e10c39e37b643df6b25e5 Mon Sep 17 00:00:00 2001 From: Steve Date: Wed, 6 Sep 2023 10:37:51 +0800 Subject: [PATCH 089/139] refractor: object --- s3/etag/etag.go | 4 +- s3/etag/reader.go | 13 +- 
s3/handlers/handlers_bucket.go | 72 ++- s3/handlers/handlers_object.go | 920 +++++++++++---------------- s3/handlers/proto.go | 12 +- s3/requests/parsers.go | 13 + s3/requests/parsers_common.go | 9 + s3/responses/object_header.go | 61 ++ s3/responses/request.go | 415 ++++++++++++ s3/responses/responses.go | 57 +- s3/responses/responses_bucket.go | 1 - s3/responses/responses_common.go | 7 - s3/responses/responses_object.go | 86 +++ s3/routers/routers.go | 24 +- s3/s3utils/request_test.go | 27 + s3/services/accesskey/service.go | 1 - s3/services/object/proto.go | 17 +- s3/services/object/service_object.go | 39 +- s3/set/match.go | 2 +- s3/utils/encode.go | 4 +- 20 files changed, 1127 insertions(+), 657 deletions(-) create mode 100644 s3/responses/object_header.go create mode 100644 s3/responses/request.go create mode 100644 s3/s3utils/request_test.go diff --git a/s3/etag/etag.go b/s3/etag/etag.go index d68e73814..ccfafaab2 100644 --- a/s3/etag/etag.go +++ b/s3/etag/etag.go @@ -134,8 +134,8 @@ func Parse(s string) (ETag, error) { // parse parse s as an S3 ETag, returning the result. // It operates in one of two modes: -// - strict -// - non-strict +// - strict +// - non-strict // // In strict mode, parse only accepts ETags that // are AWS S3 compatible. In particular, an AWS diff --git a/s3/etag/reader.go b/s3/etag/reader.go index c19f7733f..532e5e973 100644 --- a/s3/etag/reader.go +++ b/s3/etag/reader.go @@ -39,15 +39,14 @@ func (r wrapReader) ETag() ETag { // It is mainly used to provide a high-level io.Reader // access to the ETag computed by a low-level io.Reader: // -// content := etag.NewReader(r.Body, nil) +// content := etag.NewReader(r.Body, nil) // -// compressedContent := Compress(content) -// encryptedContent := Encrypt(compressedContent) -// -// // Now, we need an io.Reader that can access -// // the ETag computed over the content. 
-// reader := etag.Wrap(encryptedContent, content) +// compressedContent := Compress(content) +// encryptedContent := Encrypt(compressedContent) // +// // Now, we need an io.Reader that can access +// // the ETag computed over the content. +// reader := etag.Wrap(encryptedContent, content) func Wrap(wrapped, content io.Reader) io.Reader { if t, ok := content.(Tagger); ok { return wrapReader{ diff --git a/s3/handlers/handlers_bucket.go b/s3/handlers/handlers_bucket.go index ea0559d3a..8f26b417e 100644 --- a/s3/handlers/handlers_bucket.go +++ b/s3/handlers/handlers_bucket.go @@ -1,40 +1,66 @@ package handlers import ( + "context" "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/s3utils" "github.com/bittorrent/go-btfs/s3/services/object" + "github.com/bittorrent/go-btfs/s3/utils/hash" "net/http" + "net/url" ) -var errToRespErr = map[error]*responses.Error{ - object.ErrBucketNotFound: responses.ErrNoSuchBucket, - object.ErrObjectNotFound: responses.ErrNoSuchKey, - object.ErrUploadNotFound: responses.ErrNoSuchUpload, - object.ErrBucketAlreadyExists: responses.ErrBucketAlreadyExists, - object.ErrNotAllowed: responses.ErrAccessDenied, -} - func (h *Handlers) respErr(err error) (rerr *responses.Error) { - rerr, ok := errToRespErr[err] - if ok { - return - } - switch err.(type) { - case s3utils.BucketNameInvalid: - rerr = responses.ErrInvalidBucketName - case s3utils.ObjectNameInvalid: - rerr = responses.ErrInvalidObjectName - case s3utils.InvalidPart: - rerr = responses.ErrInvalidPart - case s3utils.InvalidUploadID: + switch err { + case object.ErrBucketNotFound: + rerr = responses.ErrNoSuchBucket + case object.ErrObjectNotFound: + rerr = responses.ErrNoSuchKey + case object.ErrUploadNotFound: rerr = responses.ErrNoSuchUpload - case s3utils.InvalidMarkerPrefixCombination: - rerr = responses.ErrInvalidRequestParameter + case object.ErrBucketAlreadyExists: + rerr 
= responses.ErrBucketAlreadyExists + case object.ErrNotAllowed: + rerr = responses.ErrAccessDenied + case context.Canceled: + rerr = responses.ErrClientDisconnected + case context.DeadlineExceeded: + rerr = responses.ErrOperationTimedOut default: - rerr = responses.ErrInternalError + switch err.(type) { + case hash.SHA256Mismatch: + rerr = responses.ErrContentSHA256Mismatch + case hash.BadDigest: + rerr = responses.ErrBadDigest + case s3utils.BucketNameInvalid: + rerr = responses.ErrInvalidBucketName + case s3utils.ObjectNameInvalid: + rerr = responses.ErrInvalidObjectName + case s3utils.ObjectNameTooLong: + rerr = responses.ErrKeyTooLongError + case s3utils.ObjectNamePrefixAsSlash: + rerr = responses.ErrInvalidObjectNamePrefixSlash + case s3utils.InvalidUploadIDKeyCombination: + rerr = responses.ErrNotImplemented + case s3utils.InvalidMarkerPrefixCombination: + rerr = responses.ErrNotImplemented + case s3utils.MalformedUploadID: + rerr = responses.ErrNoSuchUpload + case s3utils.InvalidUploadID: + rerr = responses.ErrNoSuchUpload + case s3utils.InvalidPart: + rerr = responses.ErrInvalidPart + case s3utils.PartTooSmall: + rerr = responses.ErrEntityTooSmall + case s3utils.PartTooBig: + rerr = responses.ErrEntityTooLarge + case url.EscapeError: + rerr = responses.ErrInvalidObjectName + default: + rerr = responses.ErrInternalError + } } return } diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index f7a3ffd92..c7a31300f 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -1,6 +1,7 @@ package handlers import ( + "encoding/base64" "errors" "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/consts" @@ -9,11 +10,12 @@ import ( "github.com/bittorrent/go-btfs/s3/s3utils" "github.com/bittorrent/go-btfs/s3/utils/hash" "net/http" - "time" + "net/url" + "path" + "strconv" + "strings" ) -const lockWaitTimeout = 5 * time.Minute - func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r 
*http.Request) { ctx := r.Context() ack := cctx.GetAccessKey(r) @@ -22,33 +24,26 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { cctx.SetHandleInf(r, h.name(), err) }() - // X-Amz-Copy-Source shouldn't be set for this call. if _, ok := r.Header[consts.AmzCopySource]; ok { err = errors.New("shouldn't be copy") responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) return } - _, rerr := requests.ParseObjectACL(r) + bucname, objname, rerr := requests.ParseBucketAndObject(r) if rerr != nil { err = rerr responses.WriteErrorResponse(w, r, rerr) return } - bucname, rerr := requests.ParseBucket(r) + _, rerr = requests.ParseObjectACL(r) if rerr != nil { err = rerr responses.WriteErrorResponse(w, r, rerr) return } - objname, rerr := requests.ParseObject(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) - } - err = s3utils.CheckPutObjectArgs(ctx, bucname, objname) if err != nil { rerr = h.respErr(err) @@ -67,13 +62,13 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { return } - hrdr, ok := r.Body.(*hash.Reader) + body, ok := r.Body.(*hash.Reader) if !ok { responses.WriteErrorResponse(w, r, responses.ErrInternalError) return } - obj, err := h.objsvc.PutObject(ctx, ack, bucname, objname, hrdr, r.ContentLength, meta) + obj, err := h.objsvc.PutObject(ctx, ack, bucname, objname, body, r.ContentLength, meta) if err != nil { rerr = h.respErr(err) responses.WriteErrorResponse(w, r, rerr) @@ -85,316 +80,150 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { return } -//// HeadObjectHandler - HEAD Object -//func (h *Handlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { -// ctx := r.Context() -// ack := cctx.GetAccessKey(r) -// var err error -// defer func() { -// cctx.SetHandleInf(r, h.name(), err) -// }() -// -// bucname, objname, err := requests.ParseBucketAndObject(r) -// if err != nil { -// responses.WriteErrorResponse(w, r, 
responses.ErrInvalidRequestParameter) -// return -// } -// -// if err := s3utils.CheckGetObjArgs(ctx, bucname, objname); err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) -// return -// } -// -// err = h.bucsvc.CheckACL(ack, bucname, action.HeadObjectAction) -// if errors.Is(err, object.ErrBucketNotFound) { -// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) -// return -// } -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// // rlock bucket -// runlock, err := h.rlock(ctx, bucname, w, r) -// if err != nil { -// return -// } -// defer runlock() -// -// // rlock object -// runlockObj, err := h.rlock(ctx, bucname+"/"+objname, w, r) -// if err != nil { -// return -// } -// defer runlockObj() -// -// //objsvc -// obj, err := h.objsvc.GetObjectInfo(ctx, bucname, objname) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// w.Header().Set(consts.AmzServerSideEncryption, consts.AmzEncryptionAES) -// -// // Set standard object headers. -// responses.SetObjectHeaders(w, r, obj) -// // Set any additional requested response headers. -// responses.SetHeadGetRespHeaders(w, r.Form) -// -// // Successful response. 
-// w.WriteHeader(http.StatusOK) -//} -// -//// CopyObjectHandler - Copy Object -//func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { -// ctx := r.Context() -// ack := cctx.GetAccessKey(r) -// var err error -// defer func() { -// cctx.SetHandleInf(r, h.name(), err) -// }() -// -// dstBucket, dstObject, err := requests.ParseBucketAndObject(r) -// if err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) -// return -// } -// if err := s3utils.CheckPutObjectArgs(ctx, dstBucket, dstObject); err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// err = h.bucsvc.CheckACL(ack, dstBucket, action.CopyObjectAction) -// if errors.Is(err, object.ErrBucketNotFound) { -// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) -// return -// } -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// // Copy source path. -// cpSrcPath, err := url.QueryUnescape(r.Header.Get(consts.AmzCopySource)) -// if err != nil { -// // Save unescaped string as is. -// cpSrcPath = r.Header.Get(consts.AmzCopySource) -// } -// srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) -// // If source object is empty or bucket is empty, reply back invalid copy source. 
-// if srcObject == "" || srcBucket == "" { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) -// return -// } -// if err = s3utils.CheckGetObjArgs(ctx, srcBucket, srcObject); err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// if srcBucket == dstBucket && srcObject == dstObject { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidCopyDest) -// return -// } -// err = h.bucsvc.CheckACL(ack, srcBucket, action.CopyObjectAction) -// if errors.Is(err, object.ErrBucketNotFound) { -// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) -// return -// } -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// fmt.Printf("CopyObjectHandler %s %s => %s %s \n", srcBucket, srcObject, dstBucket, dstObject) -// -// // rlock bucket 1 -// runlock1, err := h.rlock(ctx, srcBucket, w, r) -// if err != nil { -// return -// } -// defer runlock1() -// -// // rlock object 1 -// runlockObj1, err := h.rlock(ctx, srcBucket+"/"+srcObject, w, r) -// if err != nil { -// return -// } -// defer runlockObj1() -// -// // rlock bucket 2 -// runlock2, err := h.rlock(ctx, dstBucket, w, r) -// if err != nil { -// return -// } -// defer runlock2() -// -// // lock object 2 -// unlockObj2, err := h.lock(ctx, dstBucket+"/"+dstObject, w, r) -// if err != nil { -// return -// } -// defer unlockObj2() -// -// //objsvc -// srcObjInfo, err := h.objsvc.GetObjectInfo(ctx, srcBucket, srcObject) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// metadata := make(map[string]string) -// metadata[strings.ToLower(consts.ContentType)] = srcObjInfo.ContentType -// metadata[strings.ToLower(consts.ContentEncoding)] = srcObjInfo.ContentEncoding -// if isReplace(r) { -// inputMeta, err := extractMetadata(ctx, r) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// for key, val := range inputMeta { -// metadata[key] = val -// } -// } -// -// //objsvc -// obj, 
err := h.objsvc.CopyObject(ctx, dstBucket, dstObject, srcObjInfo, srcObjInfo.Size, metadata) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// resp := responses.CopyObjectResult{ -// ETag: "\"" + obj.ETag + "\"", -// LastModified: obj.ModTime.UTC().Format(consts.Iso8601TimeFormat), -// } -// -// setPutObjHeaders(w, obj, false) -// -// responses.WriteSuccessResponseXML(w, r, resp) -//} -// -//// DeleteObjectHandler - delete an object -//// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html -//func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { -// ctx := r.Context() -// ack := cctx.GetAccessKey(r) -// var err error -// defer func() { -// cctx.SetHandleInf(r, h.name(), err) -// }() -// -// bucname, objname, err := requests.ParseBucketAndObject(r) -// if err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) -// return -// } -// if err := s3utils.CheckDelObjArgs(ctx, bucname, objname); err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// err = h.bucsvc.CheckACL(ack, bucname, action.DeleteObjectAction) -// if errors.Is(err, object.ErrBucketNotFound) { -// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) -// return -// } -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// // rlock bucket -// runlock, err := h.rlock(ctx, bucname, w, r) -// if err != nil { -// return -// } -// defer runlock() -// -// // lock object -// unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) -// if err != nil { -// return -// } -// defer unlock() -// -// //objsvc -// obj, err := h.objsvc.GetObjectInfo(ctx, bucname, objname) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// //objsvc -// err = h.objsvc.DeleteObject(ctx, bucname, objname) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// setPutObjHeaders(w, obj, true) -// 
responses.WriteSuccessNoContent(w) -//} -// -//// DeleteObjectsHandler - delete objects -//// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html -//func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) { -// ctx := r.Context() -// ack := cctx.GetAccessKey(r) -// var err error -// defer func() { -// cctx.SetHandleInf(r, h.name(), err) -// }() -// -// bucname, objname, err := requests.ParseBucketAndObject(r) -// if err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) -// return -// } -// if err := s3utils.CheckDelObjArgs(ctx, bucname, objname); err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// err = h.bucsvc.CheckACL(ack, bucname, action.DeleteObjectAction) -// if errors.Is(err, object.ErrBucketNotFound) { -// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) -// return -// } -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// // rlock bucket -// runlock, err := h.rlock(ctx, bucname, w, r) -// if err != nil { -// return -// } -// defer runlock() -// -// // lock object -// unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) -// if err != nil { -// return -// } -// defer unlock() -// -// //objsvc -// obj, err := h.objsvc.GetObjectInfo(ctx, bucname, objname) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// //objsvc -// err = h.objsvc.DeleteObject(ctx, bucname, objname) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// setPutObjHeaders(w, obj, true) -// responses.WriteSuccessNoContent(w) -//} -// -//// GetObjectHandler - GET Object -//// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -//func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { +func (h *Handlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer 
func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + bucname, objname, rerr := requests.ParseBucketAndObject(r) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) + return + } + + err = s3utils.CheckGetObjArgs(ctx, bucname, objname) + if err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + //objsvc + obj, _, err := h.objsvc.GetObject(ctx, ack, bucname, objname, false) + if err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + responses.WriteHeadObjectResponse(w, r, obj) +} + +func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + dstBucket, dstObject, rerr := requests.ParseBucketAndObject(r) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) + return + } + + err = s3utils.CheckPutObjectArgs(ctx, dstBucket, dstObject) + if err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + // Copy source path. + cpSrcPath, err := url.QueryUnescape(r.Header.Get(consts.AmzCopySource)) + if err != nil { + // Save unescaped string as is. + cpSrcPath = r.Header.Get(consts.AmzCopySource) + err = nil + } + + srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) + // If source object is empty or bucket is empty, reply back invalid copy source. 
+ if srcObject == "" || srcBucket == "" { + err = responses.ErrInvalidCopySource + responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) + return + } + if err = s3utils.CheckGetObjArgs(ctx, srcBucket, srcObject); err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + if srcBucket == dstBucket && srcObject == dstObject { + err = responses.ErrInvalidCopySource + responses.WriteErrorResponse(w, r, responses.ErrInvalidCopyDest) + return + } + + metadata := make(map[string]string) + if isReplace(r) { + var inputMeta map[string]string + inputMeta, err = extractMetadata(ctx, r) + if err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + for key, val := range inputMeta { + metadata[key] = val + } + } + + //objsvc + obj, err := h.objsvc.CopyObject(ctx, ack, srcBucket, srcObject, dstBucket, dstObject, metadata) + if err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + responses.WriteCopyObjectResponse(w, r, obj) +} + +// DeleteObjectHandler - delete an object +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + bucname, objname, rerr := requests.ParseBucketAndObject(r) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) + return + } + + err = s3utils.CheckDelObjArgs(ctx, bucname, objname) + if err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + //objsvc + err = h.objsvc.DeleteObject(ctx, ack, bucname, objname) + if err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + responses.WriteDeleteObjectResponse(w, r, nil) +} + +// DeleteObjectsHandler - delete objects +// 
https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html +//func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) { // ctx := r.Context() // ack := cctx.GetAccessKey(r) // var err error @@ -407,12 +236,12 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { // responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) // return // } -// if err = s3utils.CheckGetObjArgs(ctx, bucname, objname); err != nil { +// if err := s3utils.CheckDelObjArgs(ctx, bucname, objname); err != nil { // responses.WriteErrorResponse(w, r, err) // return // } // -// err = h.bucsvc.CheckACL(ack, bucname, action.GetObjectAction) +// err = h.bucsvc.CheckACL(ack, bucname, action.DeleteObjectAction) // if errors.Is(err, object.ErrBucketNotFound) { // responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) // return @@ -429,126 +258,128 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { // } // defer runlock() // -// // rlock object -// runlockObj, err := h.rlock(ctx, bucname+"/"+objname, w, r) +// // lock object +// unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) // if err != nil { // return // } -// defer runlockObj() +// defer unlock() // // //objsvc -// obj, reader, err := h.objsvc.GetObject(ctx, bucname, objname) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// //w.Header().Set(consts.AmzServerSideEncryption, consts.AmzEncryptionAES) -// -// responses.SetObjectHeaders(w, r, obj) -// w.Header().Set(consts.ContentLength, strconv.FormatInt(obj.Size, 10)) -// responses.SetHeadGetRespHeaders(w, r.Form) -// _, err = io.Copy(w, reader) -// if err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInternalError) -// return -// } -//} -// -//// GetObjectACLHandler - GET Object ACL -//func (h *Handlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) { -// ctx := r.Context() -// ack := cctx.GetAccessKey(r) -// var err 
error -// defer func() { -// cctx.SetHandleInf(r, h.name(), err) -// }() -// -// bucname, _, err := requests.ParseBucketAndObject(r) -// if err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) -// return -// } -// -// err = h.bucsvc.CheckACL(ack, bucname, action.GetBucketAclAction) -// if errors.Is(err, object.ErrBucketNotFound) { -// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) -// return -// } -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// // rlock bucket -// runlock, err := h.rlock(ctx, bucname, w, r) -// if err != nil { -// return -// } -// defer runlock() -// -// acl, err := h.bucsvc.GetBucketACL(ctx, bucname) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// responses.WriteGetBucketAclResponse(w, r, ack, acl) -//} -// -//func (h *Handlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) { -// ctx := r.Context() -// ack := cctx.GetAccessKey(r) -// var err error -// defer func() { -// cctx.SetHandleInf(r, h.name(), err) -// }() -// -// bucname, _, err := requests.ParseBucketAndObject(r) -// if err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) -// return -// } -// -// // Extract all the litsObjectsV1 query params to their native values. 
-// prefix, marker, delimiter, maxKeys, encodingType, s3Error := getListObjectsV1Args(r.Form) -// if s3Error != nil { -// responses.WriteErrorResponse(w, r, s3Error) -// return -// } -// -// if err := s3utils.CheckListObjsArgs(ctx, bucname, prefix, marker); err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// // rlock bucket -// runlock, err := h.rlock(ctx, bucname, w, r) -// if err != nil { -// return -// } -// defer runlock() -// -// err = h.bucsvc.CheckACL(ack, bucname, action.ListObjectsAction) -// if errors.Is(err, object.ErrBucketNotFound) { -// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) -// return -// } +// obj, err := h.objsvc.GetObjectInfo(ctx, bucname, objname) // if err != nil { // responses.WriteErrorResponse(w, r, err) // return // } -// // //objsvc -// objs, err := h.objsvc.ListObjects(ctx, bucname, prefix, marker, delimiter, maxKeys) +// err = h.objsvc.DeleteObject(ctx, bucname, objname) // if err != nil { // responses.WriteErrorResponse(w, r, err) // return // } -// resp := responses.GenerateListObjectsV1Response(bucname, prefix, marker, delimiter, encodingType, maxKeys, objs) -// // Write success response. 
-// responses.WriteSuccessResponseXML(w, r, resp) +// setPutObjHeaders(w, obj, true) +// responses.WriteSuccessNoContent(w) //} -// + +// GetObjectHandler - GET Object +// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + bucname, objname, rerr := requests.ParseBucketAndObject(r) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) + return + } + + if err = s3utils.CheckGetObjArgs(ctx, bucname, objname); err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + obj, body, err := h.objsvc.GetObject(ctx, ack, bucname, objname, true) + if err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + responses.WriteGetObjectResponse(w, r, obj, body) +} + +// GetObjectACLHandler - GET Object ACL +func (h *Handlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + bucname, _, rerr := requests.ParseBucketAndObject(r) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) + return + } + + acl, err := h.objsvc.GetBucketACL(ctx, ack, bucname) + if err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + responses.WriteGetObjectACLResponse(w, r, ack, acl) +} + +func (h *Handlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + bucname, rerr := requests.ParseBucket(r) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) + return + } + + // Extract all the litsObjectsV1 query params to their 
native values. + prefix, marker, delimiter, maxKeys, encodingType, err := getListObjectsV1Args(r.Form) + if err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + err = s3utils.CheckListObjsArgs(ctx, bucname, prefix, marker) + if err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + list, err := h.objsvc.ListObjects(ctx, ack, bucname, prefix, marker, delimiter, maxKeys) + if err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + responses.WriteListObjectsResponse(w, r, ack, bucname, prefix, marker, delimiter, encodingType, maxKeys, list) +} + //func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { // ctx := r.Context() // ack := cctx.GetAccessKey(r) @@ -649,117 +480,116 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { // // do something // } //} -// -//func pathToBucketAndObject(path string) (bucket, object string) { -// path = strings.TrimPrefix(path, consts.SlashSeparator) -// idx := strings.Index(path, consts.SlashSeparator) -// if idx < 0 { -// return path, "" -// } -// return path[:idx], path[idx+len(consts.SlashSeparator):] -//} -// -//func isReplace(r *http.Request) bool { -// return r.Header.Get("X-Amz-Metadata-Directive") == "REPLACE" -//} -// -//// Parse bucket url queries -//func getListObjectsV1Args(values url.Values) ( -// prefix, marker, delimiter string, maxkeys int, encodingType string, errCode error) { -// -// if values.Get("max-keys") != "" { -// var err error -// if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil { -// errCode = responses.ErrInvalidMaxKeys -// return -// } -// } else { -// maxkeys = consts.MaxObjectList -// } -// -// prefix = trimLeadingSlash(values.Get("prefix")) -// marker = trimLeadingSlash(values.Get("marker")) -// delimiter = values.Get("delimiter") -// encodingType = values.Get("encoding-type") -// return -//} -// -//// 
Parse bucket url queries for ListObjects V2. -//func getListObjectsV2Args(values url.Values) ( -// prefix, token, startAfter, delimiter string, -// fetchOwner bool, maxkeys int, encodingType string, errCode error) { -// -// // The continuation-token cannot be empty. -// if val, ok := values["continuation-token"]; ok { -// if len(val[0]) == 0 { -// errCode = responses.ErrInvalidToken -// return -// } -// } -// -// if values.Get("max-keys") != "" { -// var err error -// if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil { -// errCode = responses.ErrInvalidMaxKeys -// return -// } -// // Over flowing count - reset to maxObjectList. -// if maxkeys > consts.MaxObjectList { -// maxkeys = consts.MaxObjectList -// } -// } else { -// maxkeys = consts.MaxObjectList -// } -// -// prefix = trimLeadingSlash(values.Get("prefix")) -// startAfter = trimLeadingSlash(values.Get("start-after")) -// delimiter = values.Get("delimiter") -// fetchOwner = values.Get("fetch-owner") == "true" -// encodingType = values.Get("encoding-type") -// -// if token = values.Get("continuation-token"); token != "" { -// decodedToken, err := base64.StdEncoding.DecodeString(token) -// if err != nil { -// errCode = responses.ErrIncorrectContinuationToken -// return -// } -// token = string(decodedToken) -// } -// return -//} -// -//func trimLeadingSlash(ep string) string { -// if len(ep) > 0 && ep[0] == '/' { -// // Path ends with '/' preserve it -// if ep[len(ep)-1] == '/' && len(ep) > 1 { -// ep = path.Clean(ep) -// ep += "/" -// } else { -// ep = path.Clean(ep) -// } -// ep = ep[1:] -// } -// return ep -//} -// -//// Validate all the ListObjects query arguments, returns an APIErrorCode -//// if one of the args do not meet the required conditions. -//// - delimiter if set should be equal to '/', otherwise the request is rejected. -//// - marker if set should have a common prefix with 'prefix' param, otherwise -//// the request is rejected. 
-//func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int) error { -// // Max keys cannot be negative. -// if maxKeys < 0 { -// return responses.ErrInvalidMaxKeys -// } -// -// if encodingType != "" { -// // AWS S3 spec only supports 'url' encoding type -// if !strings.EqualFold(encodingType, "url") { -// return responses.ErrInvalidEncodingMethod -// } -// } -// -// return nil -//} -// + +func pathToBucketAndObject(path string) (bucket, object string) { + path = strings.TrimPrefix(path, consts.SlashSeparator) + idx := strings.Index(path, consts.SlashSeparator) + if idx < 0 { + return path, "" + } + return path[:idx], path[idx+len(consts.SlashSeparator):] +} + +func isReplace(r *http.Request) bool { + return r.Header.Get("X-Amz-Metadata-Directive") == "REPLACE" +} + +// Parse bucket url queries +func getListObjectsV1Args(values url.Values) ( + prefix, marker, delimiter string, maxkeys int64, encodingType string, errCode error) { + + if values.Get("max-keys") != "" { + var err error + if maxkeys, err = strconv.ParseInt(values.Get("max-keys"), 10, 64); err != nil { + errCode = responses.ErrInvalidMaxKeys + return + } + } else { + maxkeys = consts.MaxObjectList + } + + prefix = trimLeadingSlash(values.Get("prefix")) + marker = trimLeadingSlash(values.Get("marker")) + delimiter = values.Get("delimiter") + encodingType = values.Get("encoding-type") + return +} + +// Parse bucket url queries for ListObjects V2. +func getListObjectsV2Args(values url.Values) ( + prefix, token, startAfter, delimiter string, + fetchOwner bool, maxkeys int, encodingType string, errCode error) { + + // The continuation-token cannot be empty. 
+ if val, ok := values["continuation-token"]; ok { + if len(val[0]) == 0 { + errCode = responses.ErrInvalidToken + return + } + } + + if values.Get("max-keys") != "" { + var err error + if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil { + errCode = responses.ErrInvalidMaxKeys + return + } + // Over flowing count - reset to maxObjectList. + if maxkeys > consts.MaxObjectList { + maxkeys = consts.MaxObjectList + } + } else { + maxkeys = consts.MaxObjectList + } + + prefix = trimLeadingSlash(values.Get("prefix")) + startAfter = trimLeadingSlash(values.Get("start-after")) + delimiter = values.Get("delimiter") + fetchOwner = values.Get("fetch-owner") == "true" + encodingType = values.Get("encoding-type") + + if token = values.Get("continuation-token"); token != "" { + decodedToken, err := base64.StdEncoding.DecodeString(token) + if err != nil { + errCode = responses.ErrIncorrectContinuationToken + return + } + token = string(decodedToken) + } + return +} + +func trimLeadingSlash(ep string) string { + if len(ep) > 0 && ep[0] == '/' { + // Path ends with '/' preserve it + if ep[len(ep)-1] == '/' && len(ep) > 1 { + ep = path.Clean(ep) + ep += "/" + } else { + ep = path.Clean(ep) + } + ep = ep[1:] + } + return ep +} + +// Validate all the ListObjects query arguments, returns an APIErrorCode +// if one of the args do not meet the required conditions. +// - delimiter if set should be equal to '/', otherwise the request is rejected. +// - marker if set should have a common prefix with 'prefix' param, otherwise +// the request is rejected. +func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int) error { + // Max keys cannot be negative. 
+ if maxKeys < 0 { + return responses.ErrInvalidMaxKeys + } + + if encodingType != "" { + // AWS S3 spec only supports 'url' encoding type + if !strings.EqualFold(encodingType, "url") { + return responses.ErrInvalidEncodingMethod + } + } + + return nil +} diff --git a/s3/handlers/proto.go b/s3/handlers/proto.go index 812a7d3dc..9430d33fb 100644 --- a/s3/handlers/proto.go +++ b/s3/handlers/proto.go @@ -23,12 +23,12 @@ type Handlerser interface { // Object PutObjectHandler(w http.ResponseWriter, r *http.Request) - //HeadObjectHandler(w http.ResponseWriter, r *http.Request) - //CopyObjectHandler(w http.ResponseWriter, r *http.Request) - //DeleteObjectHandler(w http.ResponseWriter, r *http.Request) - //GetObjectHandler(w http.ResponseWriter, r *http.Request) - //GetObjectACLHandler(w http.ResponseWriter, r *http.Request) - //ListObjectsHandler(w http.ResponseWriter, r *http.Request) + HeadObjectHandler(w http.ResponseWriter, r *http.Request) + CopyObjectHandler(w http.ResponseWriter, r *http.Request) + DeleteObjectHandler(w http.ResponseWriter, r *http.Request) + GetObjectHandler(w http.ResponseWriter, r *http.Request) + GetObjectACLHandler(w http.ResponseWriter, r *http.Request) + ListObjectsHandler(w http.ResponseWriter, r *http.Request) //ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) //// Multipart diff --git a/s3/requests/parsers.go b/s3/requests/parsers.go index d53e4af6a..ff4a0f774 100644 --- a/s3/requests/parsers.go +++ b/s3/requests/parsers.go @@ -1,6 +1,8 @@ package requests import ( + "fmt" + "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/responses" "net/http" @@ -96,3 +98,14 @@ func ParsePutBucketAclRequest(r *http.Request) (req *PutBucketACLRequest, rerr * req.ACL, rerr = ParseBucketACL(r) return } + +func ParsePutObjectRequest(r *http.Request) (req *s3.PutObjectInput, rerr *responses.Error) { + err := responses.ParseRequest(r, &req) + if err != nil { + rerr = 
responses.ErrInvalidRequestParameter + return + } + + fmt.Printf("%+v", *req) + return +} diff --git a/s3/requests/parsers_common.go b/s3/requests/parsers_common.go index f4f2cac0e..ad42c684e 100644 --- a/s3/requests/parsers_common.go +++ b/s3/requests/parsers_common.go @@ -13,6 +13,15 @@ import ( "path" ) +func ParseBucketAndObject(r *http.Request) (bucket string, object string, rerr *responses.Error) { + bucket, rerr = ParseBucket(r) + if rerr != nil { + return + } + object, rerr = ParseObject(r) + return +} + func ParseBucket(r *http.Request) (bucket string, rerr *responses.Error) { bucket = mux.Vars(r)["bucket"] err := s3utils.CheckValidBucketNameStrict(bucket) diff --git a/s3/responses/object_header.go b/s3/responses/object_header.go new file mode 100644 index 000000000..401957956 --- /dev/null +++ b/s3/responses/object_header.go @@ -0,0 +1,61 @@ +package responses + +import ( + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/services/object" + "net/http" + "net/url" + "strconv" + "strings" +) + +// SetObjectHeaders Write object header +func SetObjectHeaders(w http.ResponseWriter, r *http.Request, objInfo *object.Object) { + // Set last modified time. + lastModified := objInfo.ModTime.UTC().Format(http.TimeFormat) + w.Header().Set(consts.LastModified, lastModified) + + // Set Etag if available. + if objInfo.ETag != "" { + w.Header()[consts.ETag] = []string{"\"" + objInfo.ETag + "\""} + } + + if objInfo.ContentType != "" { + w.Header().Set(consts.ContentType, objInfo.ContentType) + } + + if objInfo.ContentEncoding != "" { + w.Header().Set(consts.ContentEncoding, objInfo.ContentEncoding) + } + + if !objInfo.Expires.IsZero() { + w.Header().Set(consts.Expires, objInfo.Expires.UTC().Format(http.TimeFormat)) + } + + // Set content length + w.Header().Set(consts.ContentLength, strconv.FormatInt(objInfo.Size, 10)) + + // Set the relevant version ID as part of the response header. 
+ if objInfo.VersionID != "" { + w.Header()[consts.AmzVersionID] = []string{objInfo.VersionID} + } + +} + +// SetHeadGetRespHeaders - set any requested parameters as response headers. +func SetHeadGetRespHeaders(w http.ResponseWriter, reqParams url.Values) { + for k, v := range reqParams { + if header, ok := supportedHeadGetReqParams[strings.ToLower(k)]; ok { + w.Header()[header] = v + } + } +} + +// supportedHeadGetReqParams - supported request parameters for GET and HEAD presigned request. +var supportedHeadGetReqParams = map[string]string{ + "response-expires": consts.Expires, + "response-content-type": consts.ContentType, + "response-content-encoding": consts.ContentEncoding, + "response-content-language": consts.ContentLanguage, + "response-content-disposition": consts.ContentDisposition, +} diff --git a/s3/responses/request.go b/s3/responses/request.go new file mode 100644 index 000000000..e4f0393d6 --- /dev/null +++ b/s3/responses/request.go @@ -0,0 +1,415 @@ +package responses + +import ( + "encoding/base64" + "encoding/xml" + "errors" + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/gorilla/mux" + "io" + "math" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +var byteSliceType = reflect.TypeOf([]byte{}) + +func ParseRequest(r *http.Request, input interface{}) (err error) { + inv, err := getInputValue(input) + if err != nil { + return + } + + err = parseLocation(r, inv) + if err != nil { + return + } + + ptyp, pftp, pfvl := getPayload(inv) + if ptyp == noPayload { + return + } + + if ptyp == "structure" || ptyp == "" { + err = parseXMLBody(r, inv) + } else { + err = parseBody(r, pftp, pfvl) + } + + return + +} + +func parseXMLBody(r *http.Request, inv reflect.Value) (err error) { + defer r.Body.Close() + decoder := xml.NewDecoder(r.Body) + err = xmlutil.UnmarshalXML(inv.Addr().Interface(), decoder, "") + return +} + 
+func parseBody(r *http.Request, pftp reflect.Type, pfvl reflect.Value) (err error) { + var b []byte + switch pfvl.Interface().(type) { + case []byte: + defer r.Body.Close() + b, err = io.ReadAll(r.Body) + if err != nil { + return + } + pfvl.Set(reflect.ValueOf(b)) + case *string: + defer r.Body.Close() + b, err = io.ReadAll(r.Body) + if err != nil { + return + } + val := string(b) + pfvl.Set(reflect.ValueOf(&val)) + default: + switch pftp.String() { + case "io.ReadSeeker": + // keep the request body + default: + err = errValueNotSet + } + } + return +} + +func getInputValue(input interface{}) (inv reflect.Value, err error) { + typErr := fmt.Errorf("input <%T> must be non nil or ", input) + + if input == nil { + err = typErr + return + } + + t := reflect.TypeOf(input) + k := t.Kind() + + if k != reflect.Pointer { + err = typErr + return + } + + inv = reflect.ValueOf(input).Elem() + if !inv.IsValid() { + err = typErr + return + } + + t = t.Elem() + k = t.Kind() + + if k == reflect.Struct { + return + } + + if k != reflect.Pointer { + err = typErr + return + } + + t = t.Elem() + k = t.Kind() + if k != reflect.Struct { + err = typErr + return + } + + if inv.Elem().IsValid() { + inv = inv.Elem() + return + } + + inv.Set(reflect.New(inv.Type().Elem())) + inv = inv.Elem() + + return +} + +func parseLocation(r *http.Request, inv reflect.Value) (err error) { + query := r.URL.Query() + + for i := 0; i < inv.NumField(); i++ { + fv := inv.Field(i) + ft := inv.Type().Field(i) + if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { + continue + } + + if ft.Tag.Get("ignore") != "" { + continue + } + + name := ifemp(ft.Tag.Get("locationName"), ft.Name) + + if ft.Tag.Get("marshal-as") == "blob" { + if fv.Kind() == reflect.Pointer { + fv.Set(reflect.New(fv.Type().Elem())) + fv = fv.Elem() + } + fv = fv.Convert(byteSliceType) + } + + switch ft.Tag.Get("location") { + case "headers": + prefix := ft.Tag.Get("locationName") + err = parseHeaderMap(r.Header, fv, prefix) + case "header": + 
locVal := r.Header.Get(name) + err = parseLocationValue(locVal, fv, ft.Tag) + case "uri": + locVal := mux.Vars(r)[name] + err = parseLocationValue(locVal, fv, ft.Tag) + case "querystring": + err = parseQueryString(query, fv, name, ft.Tag) + } + } + + return +} + +func parseQueryString(query url.Values, fv reflect.Value, name string, tag reflect.StructTag) (err error) { + switch value := fv.Interface().(type) { + case []*string: + vals := make([]*string, len(query[name])) + for i, oval := range query[name] { + val := oval + vals[i] = &val + } + if len(vals) > 0 { + fv.Set(reflect.ValueOf(vals)) + } + case map[string]*string: + vals := make(map[string]*string, len(query)) + for key := range query { + val := query.Get(key) + vals[key] = &val + } + if len(vals) > 0 { + fv.Set(reflect.ValueOf(vals)) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + vals := make(map[string][]*string, len(query)) + for key := range query { + vals[key] = make([]*string, len(query[key])) + for i := range query[key] { + vals[key][i] = &(query[key][i]) + } + } + if len(vals) > 0 { + fv.Set(reflect.ValueOf(vals)) + } + default: + locVal := query.Get(name) + err = parseLocationValue(locVal, fv, tag) + if err != nil { + return + } + } + + return +} + +func parseHeaderMap(headers http.Header, fv reflect.Value, prefix string) (err error) { + if len(headers) == 0 { + return + } + switch fv.Interface().(type) { + case map[string]*string: + vals := map[string]*string{} + for key := range headers { + if !hasPrefixFold(key, prefix) { + continue + } + key = strings.ToLower(key) + val := headers.Get(key) + vals[key[len(prefix):]] = &val + } + if len(vals) != 0 { + fv.Set(reflect.ValueOf(vals)) + } + default: + err = errValueNotSet + } + return +} + +func parseLocationValue(locVal string, v reflect.Value, tag reflect.StructTag) (err error) { + switch tag.Get("type") { + case "jsonvalue": + if len(locVal) == 0 { + return + } 
+ case "blob": + if len(locVal) == 0 { + return + } + default: + if !v.IsValid() || (locVal == "" && (v.Kind() != reflect.Pointer || v.Elem().Kind() != reflect.String)) { + return + } + } + + switch v.Interface().(type) { + case *string: + if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" { + var b []byte + b, err = base64.StdEncoding.DecodeString(locVal) + if err != nil { + return + } + locVal = string(b) + } + v.Set(reflect.ValueOf(&locVal)) + case []*string: + if tag.Get("location") != "header" || tag.Get("enum") == "" { + return fmt.Errorf("%T is only supported with location header and enum shapes", v) + } + var vals []*string + vals, err = splitHeaderVal(locVal) + if err != nil { + return + } + if len(vals) > 0 { + v.Set(reflect.ValueOf(vals)) + } + case []byte: + var b []byte + b, err = base64.StdEncoding.DecodeString(locVal) + if err != nil { + return + } + v.Set(reflect.ValueOf(b)) + case *bool: + var b bool + b, err = strconv.ParseBool(locVal) + if err != nil { + return + } + v.Set(reflect.ValueOf(&b)) + case *int64: + var i int64 + i, err = strconv.ParseInt(locVal, 10, 64) + if err != nil { + return + } + v.Set(reflect.ValueOf(&i)) + case *float64: + var f float64 + switch { + case strings.EqualFold(locVal, floatNaN): + f = math.NaN() + case strings.EqualFold(locVal, floatInf): + f = math.Inf(1) + case strings.EqualFold(locVal, floatNegInf): + f = math.Inf(-1) + default: + f, err = strconv.ParseFloat(locVal, 64) + if err != nil { + return + } + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + var t time.Time + t, err = protocol.ParseTime(format, locVal) + if err != nil { + return + } + v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = 
protocol.Base64Escape + } + var m aws.JSONValue + m, err = protocol.DecodeJSONValue(locVal, escaping) + if err != nil { + return + } + v.Set(reflect.ValueOf(m)) + default: + err = fmt.Errorf("unsupported value for input %v (%s)", v.Interface(), v.Type()) + return + } + + return +} + +func hasPrefixFold(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) +} + +func splitHeaderVal(header string) (vals []*string, err error) { + pv := ' ' + start := 0 + quote := false + for i, v := range header { + opv := pv + pv = v + if quote { + if v == '"' && opv != '\\' { + quote = false + val := header[start : i+1] + val, err = strconv.Unquote(val) + if err != nil { + return + } + vals = append(vals, &val) + start = i + 1 + } + continue + } + + if v == '"' && opv != '\\' { + quote = true + continue + } + + if v == ',' && opv == '"' { + start += 1 + continue + } + + if v == ',' { + val := header[start:i] + vals = append(vals, &val) + start = i + 1 + } + + continue + } + + if quote { + err = errors.New("unquote part") + return + } + + if start < len(header) || pv == ',' { + val := header[start:] + vals = append(vals, &val) + } + + return +} diff --git a/s3/responses/responses.go b/s3/responses/responses.go index bc10ce995..f24eb01be 100644 --- a/s3/responses/responses.go +++ b/s3/responses/responses.go @@ -84,9 +84,9 @@ func WriteResponse(w http.ResponseWriter, statusCode int, output interface{}, lo return } -func wrapOutput(v interface{}, locationName string) (wrapper interface{}) { +func wrapOutput(output interface{}, locationName string) (wrapper interface{}) { if locationName == "" { - wrapper = v + wrapper = output return } @@ -100,19 +100,19 @@ func wrapOutput(v interface{}, locationName string) (wrapper interface{}) { }, { Name: "Output", - Type: reflect.TypeOf(v), + Type: reflect.TypeOf(output), Tag: reflect.StructTag(outputTag), }, } - wrapperTyp := reflect.StructOf(fields) - wrapperVal := reflect.New(wrapperTyp) - 
wrapperVal.Elem().Field(1).Set(reflect.ValueOf(v)) - wrapper = wrapperVal.Interface() + wrtyp := reflect.StructOf(fields) + wrval := reflect.New(wrtyp) + wrval.Elem().FieldByName("Output").Set(reflect.ValueOf(output)) + wrapper = wrval.Interface() return } func extractBody(v reflect.Value) (body io.ReadCloser, clen int, ctyp string, err error) { - ptyp, plod := getPayload(v) + ptyp, _, pfvl := getPayload(v) if ptyp == noPayload { return } @@ -130,11 +130,11 @@ func extractBody(v reflect.Value) (body io.ReadCloser, clen int, ctyp string, er return } - if plod.Interface() == nil { + if pfvl.Interface() == nil { return } - switch pifc := plod.Interface().(type) { + switch pifc := pfvl.Interface().(type) { case io.ReadCloser: body = pifc clen = -1 @@ -156,7 +156,7 @@ func extractBody(v reflect.Value) (body io.ReadCloser, clen int, ctyp string, er default: err = fmt.Errorf( "unknown payload type %s", - plod.Type(), + pfvl.Type(), ) } @@ -184,17 +184,19 @@ func setFieldRequestID(headers http.Header, outv reflect.Value) (err error) { } func setCommonHeaders(headers http.Header) { - reqId := getRequestID() headers.Set(consts.ServerInfo, consts.DefaultServerInfo) headers.Set(consts.AcceptRanges, "bytes") - headers.Set(consts.AmzRequestID, reqId) + headers.Set(consts.AmzRequestID, getRequestID()) +} + +func getRequestID() string { + return fmt.Sprintf("%d", time.Now().UnixNano()) } func setLocationHeaders(header http.Header, v reflect.Value) (err error) { for i := 0; i < v.NumField(); i++ { - fv := v.Field(i) + fv := reflect.Indirect(v.Field(i)) ft := v.Type().Field(i) - fk := fv.Kind() if n := ft.Name; n[0:1] == strings.ToLower(n[0:1]) { continue @@ -204,16 +206,8 @@ func setLocationHeaders(header http.Header, v reflect.Value) (err error) { continue } - if fk == reflect.Ptr { - fv = fv.Elem() - fk = fv.Kind() - if !fv.IsValid() { - continue - } - } else if fk == reflect.Interface { - if !fv.Elem().IsValid() { - continue - } + if fv.Kind() == reflect.Interface && 
!fv.Elem().IsValid() { + continue } switch ft.Tag.Get("location") { @@ -234,10 +228,6 @@ func setLocationHeaders(header http.Header, v reflect.Value) (err error) { func setHeaders(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) (err error) { str, err := convertType(v, tag) - if errors.Is(err, errValueNotSet) { - err = nil - return - } if err != nil { return } @@ -266,7 +256,7 @@ func setHeadersMap(header *http.Header, v reflect.Value, tag reflect.StructTag) return } -func getPayload(v reflect.Value) (ptyp string, plod reflect.Value) { +func getPayload(v reflect.Value) (ptyp string, pftp reflect.Type, pfvl reflect.Value) { ptyp = noPayload field, ok := v.Type().FieldByName("_") @@ -284,13 +274,14 @@ func getPayload(v reflect.Value) (ptyp string, plod reflect.Value) { return } - member, ok := v.Type().FieldByName(payloadName) + pfld, ok := v.Type().FieldByName(payloadName) if !ok { return } - ptyp = member.Tag.Get("type") - plod = reflect.Indirect(v.FieldByName(payloadName)) + ptyp = pfld.Tag.Get("type") + pftp = pfld.Type + pfvl = reflect.Indirect(v.FieldByName(payloadName)) return } diff --git a/s3/responses/responses_bucket.go b/s3/responses/responses_bucket.go index 8430c284e..33da3ff6b 100644 --- a/s3/responses/responses_bucket.go +++ b/s3/responses/responses_bucket.go @@ -18,7 +18,6 @@ func WriteHeadBucketResponse(w http.ResponseWriter, r *http.Request) { return } - func WriteDeleteBucketResponse(w http.ResponseWriter) { output := new(s3.DeleteBucketOutput) _ = WriteResponse(w, http.StatusOK, output, "") diff --git a/s3/responses/responses_common.go b/s3/responses/responses_common.go index 0178f44d3..a93945dd3 100644 --- a/s3/responses/responses_common.go +++ b/s3/responses/responses_common.go @@ -1,12 +1,10 @@ package responses import ( - "fmt" "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/consts" "net/http" "path" - "time" ) func owner(accessKey string) *s3.Owner { @@ -22,11 +20,6 @@ var ( 
allUsersWriteGrant = new(s3.Grant).SetGrantee(new(s3.Grantee).SetType(s3.TypeGroup).SetURI(consts.AllUsersURI)).SetPermission(s3.PermissionWrite) ) -func getRequestID() string { - return fmt.Sprintf("%d", time.Now().UnixNano()) -} - - type ErrorOutput struct { _ struct{} `type:"structure"` Code string `locationName:"Code" type:"string"` diff --git a/s3/responses/responses_object.go b/s3/responses/responses_object.go index 860b7a397..12cb10a83 100644 --- a/s3/responses/responses_object.go +++ b/s3/responses/responses_object.go @@ -4,6 +4,7 @@ import ( "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/services/object" + "io" "net/http" ) @@ -13,3 +14,88 @@ func WritePutObjectResponse(w http.ResponseWriter, r *http.Request, obj *object. w.Header().Set(consts.CID, obj.CID) WriteSuccessResponse(w, output, "") } + +func WriteHeadObjectResponse(w http.ResponseWriter, r *http.Request, obj *object.Object) { + output := new(s3.HeadObjectOutput) + w.Header().Set(consts.CID, obj.CID) + SetObjectHeaders(w, r, obj) + SetHeadGetRespHeaders(w, r.Form) + WriteSuccessResponse(w, output, "") +} + +func WriteCopyObjectResponse(w http.ResponseWriter, r *http.Request, obj *object.Object) { + output := new(s3.CopyObjectResult) + output.SetETag(`"` + obj.ETag + `"`) + output.SetLastModified(obj.ModTime) + w.Header().Set(consts.CID, obj.CID) + WriteSuccessResponse(w, output, "") +} + +func WriteDeleteObjectResponse(w http.ResponseWriter, r *http.Request, obj *object.Object) { + output := new(s3.DeleteObjectOutput) + output.SetDeleteMarker(true) + WriteSuccessResponse(w, output, "") +} + +func WriteGetObjectResponse(w http.ResponseWriter, r *http.Request, obj *object.Object, body io.ReadCloser) { + output := new(s3.GetObjectOutput) + output.SetContentLength(obj.Size) + output.SetBody(body) + output.SetMetadata(map[string]*string{ + consts.CID: &obj.CID, + }) + w.Header().Set(consts.CID, obj.CID) + SetObjectHeaders(w, r, obj) + 
SetHeadGetRespHeaders(w, r.Form) + WriteSuccessResponse(w, output, "") +} + +func WriteGetObjectACLResponse(w http.ResponseWriter, r *http.Request, accessKey, acl string) { + output := new(s3.GetObjectAclOutput) + output.SetOwner(owner(accessKey)) + grants := make([]*s3.Grant, 0) + grants = append(grants, ownerFullControlGrant(accessKey)) + switch acl { + case s3.BucketCannedACLPrivate: + case s3.BucketCannedACLPublicRead: + grants = append(grants, allUsersReadGrant) + case s3.BucketCannedACLPublicReadWrite: + grants = append(grants, allUsersReadGrant, allUsersWriteGrant) + default: + panic("unknown acl") + } + output.SetGrants(grants) + WriteSuccessResponse(w, output, "AccessControlPolicy") + return +} + +func WriteListObjectsResponse(w http.ResponseWriter, r *http.Request, accessKey, bucname, prefix, marker, delimiter, encodingType string, maxKeys int64, list *object.ObjectsList) { + out := new(s3.ListObjectsOutput) + out.SetName(bucname) + out.SetPrefix(prefix) + out.SetMarker(marker) + out.SetDelimiter(delimiter) + out.SetEncodingType(encodingType) + out.SetMaxKeys(maxKeys) + s3Objs := make([]*s3.Object, len(list.Objects)) + for i, obj := range list.Objects { + s3Obj := new(s3.Object) + s3Obj.SetETag(`"` + obj.ETag + `"`) + s3Obj.SetOwner(owner(accessKey)) + s3Obj.SetLastModified(obj.ModTime) + s3Obj.SetKey(obj.Name) + s3Obj.SetSize(obj.Size) + s3Objs[i] = s3Obj + } + out.SetContents(s3Objs) + s3CommPrefixes := make([]*s3.CommonPrefix, len(list.Prefixes)) + for i, cpf := range list.Prefixes { + pfx := new(s3.CommonPrefix) + pfx.SetPrefix(cpf) + s3CommPrefixes[i] = pfx + } + out.SetCommonPrefixes(s3CommPrefixes) + out.SetIsTruncated(list.IsTruncated) + out.SetNextMarker(list.NextMarker) + WriteSuccessResponse(w, out, "") +} diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 6d62e216e..4defffe55 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -45,22 +45,22 @@ func (routers *Routers) Register() http.Handler { ////object... 
//// ListObjectsV2 //bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsV2Handler).Queries("list-type", "2") - //// ListObjects - //bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsHandler) - //// HeadObject - //bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(hs.HeadObjectHandler) + // HeadObject + bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(hs.HeadObjectHandler) // PutObject bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(hs.PutObjectHandler) - //// CopyObject - //bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(hs.CopyObjectHandler) - //// DeleteObject - //bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.DeleteObjectHandler) + // CopyObject + bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(hs.CopyObjectHandler) + // DeleteObject + bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.DeleteObjectHandler) ////todo DeleteObjects new ? //bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.DeleteObjectHandler) - //// GetObject - //bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectHandler) - //// GetObjectACL - //bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectACLHandler).Queries("acl", "") + // GetObject + bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectHandler) + // GetObjectACL + bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectACLHandler).Queries("acl", "") + // ListObjects + bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsHandler) //bucket... 
// GetBucketACL diff --git a/s3/s3utils/request_test.go b/s3/s3utils/request_test.go new file mode 100644 index 000000000..fd84d329a --- /dev/null +++ b/s3/s3utils/request_test.go @@ -0,0 +1,27 @@ +package s3utils + +import ( + "fmt" + "github.com/aws/aws-sdk-go/service/s3" + "io" + "reflect" + "testing" +) + +type req struct { + _ struct{} `embed:"PutObjectInput"` + s3.PutObjectInput `location:"embed"` + Body io.ReadCloser `type:"blob"` +} + +func TestParseRequest(t *testing.T) { + var r req + v := reflect.ValueOf(r) + p := v.Type() + n := v.NumField() + for i := 0; i < n; i++ { + ft := p.Field(i) + fmt.Println(ft.Name) + } + +} diff --git a/s3/services/accesskey/service.go b/s3/services/accesskey/service.go index 5a7f5c3ee..e4e16a027 100644 --- a/s3/services/accesskey/service.go +++ b/s3/services/accesskey/service.go @@ -21,7 +21,6 @@ type service struct { waitLockTimeout time.Duration } - func NewService(providers providers.Providerser, options ...Option) Service { svc := &service{ providers: providers, diff --git a/s3/services/object/proto.go b/s3/services/object/proto.go index 529d2c437..fc0f6e1f3 100644 --- a/s3/services/object/proto.go +++ b/s3/services/object/proto.go @@ -9,11 +9,14 @@ import ( ) var ( - ErrBucketNotFound = errors.New("bucket not found") - ErrObjectNotFound = errors.New("object not found") - ErrUploadNotFound = errors.New("upload not found") - ErrNotAllowed = errors.New("not allowed") - ErrBucketAlreadyExists = errors.New("bucket already exists") + ErrBucketNotFound = errors.New("bucket not found") + ErrObjectNotFound = errors.New("object not found") + ErrUploadNotFound = errors.New("upload not found") + ErrNotAllowed = errors.New("not allowed") + ErrBucketAlreadyExists = errors.New("bucket already exists") + ErrOperationTimeout = errors.New("operation timeout") + ErrContentSHA256Mismatch = errors.New("sha256 mismatch") + ErrBadDigest = errors.New("bad digest") ) type Service interface { @@ -27,10 +30,10 @@ type Service interface { 
PutObject(ctx context.Context, user, bucname, objname string, body *hash.Reader, size int64, meta map[string]string) (object *Object, err error) CopyObject(ctx context.Context, user, srcBucname, srcObjname, dstBucname, dstObjname string, meta map[string]string) (dstObject *Object, err error) - GetObject(ctx context.Context, user, bucname, objname string) (object *Object, body io.ReadCloser, err error) + GetObject(ctx context.Context, user, bucname, objname string, withBody bool) (object *Object, body io.ReadCloser, err error) DeleteObject(ctx context.Context, user, bucname, objname string) (err error) // todo: DeleteObjects - ListObjects(ctx context.Context, user, bucname, prefix, delimiter, marker string, max int) (list *ObjectsList, err error) + ListObjects(ctx context.Context, user, bucname, prefix, delimiter, marker string, max int64) (list *ObjectsList, err error) CreateMultipartUpload(ctx context.Context, user, bucname, objname string, meta map[string]string) (multipart *Multipart, err error) UploadPart(ctx context.Context, user, bucname, objname, uplid string, partId int, reader *hash.Reader, size int64, meta map[string]string) (part *Part, err error) diff --git a/s3/services/object/service_object.go b/s3/services/object/service_object.go index 9b9f996e0..79c8f6a7a 100644 --- a/s3/services/object/service_object.go +++ b/s3/services/object/service_object.go @@ -228,15 +228,27 @@ func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, VersionID: "", IsLatest: true, DeleteMarker: false, - ContentType: meta[strings.ToLower(consts.ContentType)], - ContentEncoding: meta[strings.ToLower(consts.ContentEncoding)], + ContentType: srcObject.ContentType, + ContentEncoding: srcObject.ContentEncoding, SuccessorModTime: now.UTC(), + Expires: srcObject.Expires, } - // Set destination object expires - exp, er := time.Parse(http.TimeFormat, strings.ToLower(consts.Expires)) - if er != nil { - dstObject.Expires = exp.UTC() + // Set destination object 
metadata + val, ok := meta[consts.ContentType] + if ok { + dstObject.ContentType = val + } + val, ok = meta[consts.ContentEncoding] + if ok { + dstObject.ContentEncoding = val + } + val, ok = meta[strings.ToLower(consts.Expires)] + if ok { + exp, er := time.Parse(http.TimeFormat, val) + if er != nil { + dstObject.Expires = exp.UTC() + } } // Put destination object @@ -246,7 +258,7 @@ func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, } // GetObject get a user specified object -func (s *service) GetObject(ctx context.Context, user, bucname, objname string) (object *Object, body io.ReadCloser, err error) { +func (s *service) GetObject(ctx context.Context, user, bucname, objname string, withBody bool) (object *Object, body io.ReadCloser, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() @@ -308,6 +320,11 @@ func (s *service) GetObject(ctx context.Context, user, bucname, objname string) return } + // no need body + if !withBody { + return + } + // Get object body body, err = s.providers.FileStore().Cat(object.CID) if err != nil { @@ -396,7 +413,7 @@ func (s *service) DeleteObject(ctx context.Context, user, bucname, objname strin } // ListObjects list user specified objects -func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimiter, marker string, max int) (list *ObjectsList, err error) { +func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimiter, marker string, max int64) (list *ObjectsList, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() @@ -428,6 +445,8 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi return } + list = &ObjectsList{} + // All bucket objects key prefix allObjectsKeyPrefix := s.getAllObjectsKeyPrefix(bucname) @@ -435,7 +454,7 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi listObjectsKeyPrefix := allObjectsKeyPrefix + prefix // Accumulate count - 
count := 0 + count := int64(0) // Flag mark if begin collect, it initialized to true if // marker is "" @@ -496,7 +515,7 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi } else { // object without common prefix var object *Object - er = s.providers.StateStore().Get(objkey, object) + er = s.providers.StateStore().Get(objkey, &object) if er != nil { return } diff --git a/s3/set/match.go b/s3/set/match.go index aa57c4f4c..a5cb98015 100644 --- a/s3/set/match.go +++ b/s3/set/match.go @@ -35,7 +35,7 @@ func deepMatchRune(str, pattern []rune, simple bool) bool { return len(str) == 0 && len(pattern) == 0 } -//Match regular match +// Match regular match func Match(pattern, name string) (matched bool) { if pattern == "" { return name == pattern diff --git a/s3/utils/encode.go b/s3/utils/encode.go index d8fc042cd..5b80319a5 100644 --- a/s3/utils/encode.go +++ b/s3/utils/encode.go @@ -18,8 +18,8 @@ func S3EncodeName(name string, encodingType string) (result string) { // s3URLEncode is based on Golang's url.QueryEscape() code, // while considering some S3 exceptions: -// - Avoid encoding '/' and '*' -// - Force encoding of '~' +// - Avoid encoding '/' and '*' +// - Force encoding of '~' func s3URLEncode(s string) string { spaceCount, hexCount := 0, 0 for i := 0; i < len(s); i++ { From 00c72270a88ac5215960973206c162371cb9b2f2 Mon Sep 17 00:00:00 2001 From: Steve Date: Wed, 6 Sep 2023 19:05:17 +0800 Subject: [PATCH 090/139] refractor: objects --- s3/consts/consts.go | 3 +- s3/handlers/handlers_object.go | 179 ++++++++++----------------- s3/handlers/options.go | 4 +- s3/handlers/proto.go | 2 +- s3/responses/responses_common.go | 2 +- s3/responses/responses_object.go | 63 ++++++++-- s3/routers/routers.go | 4 +- s3/services/object/proto.go | 9 ++ s3/services/object/service_object.go | 54 +++++--- s3/utils/encode.go | 1 + 10 files changed, 174 insertions(+), 147 deletions(-) diff --git a/s3/consts/consts.go b/s3/consts/consts.go index 
53c163e49..ed60adb0f 100644 --- a/s3/consts/consts.go +++ b/s3/consts/consts.go @@ -162,7 +162,8 @@ const ( XRequestWith = "X-Requested-With" Range = "Range" UserAgent = "User-Agent" - CID = "CID" + Cid = "Cid" + CidList = "Cid-List" ) // Standard HTTP cors headers diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index c7a31300f..08dc64000 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -355,14 +355,58 @@ func (h *Handlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) { return } - // Extract all the litsObjectsV1 query params to their native values. - prefix, marker, delimiter, maxKeys, encodingType, err := getListObjectsV1Args(r.Form) + // Extract all the listsObjectsV1 query params to their native values. + prefix, marker, delimiter, maxKeys, encodingType, rerr := getListObjectsV1Args(r.Form) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) + return + } + + err = s3utils.CheckListObjsArgs(ctx, bucname, prefix, marker) + if err != nil { + rerr = h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + list, err := h.objsvc.ListObjects(ctx, ack, bucname, prefix, delimiter, marker, maxKeys) if err != nil { rerr = h.respErr(err) responses.WriteErrorResponse(w, r, rerr) return } + responses.WriteListObjectsResponse(w, r, ack, bucname, prefix, marker, delimiter, encodingType, maxKeys, list) +} + +func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + bucname, rerr := requests.ParseBucket(r) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) + return + } + + urlValues := r.Form + // Extract all the listObjectsV2 query params to their native values. 
+ prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, rerr := getListObjectsV2Args(urlValues) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) + return + } + + marker := token + if marker == "" { + marker = startAfter + } err = s3utils.CheckListObjsArgs(ctx, bucname, prefix, marker) if err != nil { rerr = h.respErr(err) @@ -370,116 +414,27 @@ func (h *Handlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) { return } - list, err := h.objsvc.ListObjects(ctx, ack, bucname, prefix, marker, delimiter, maxKeys) + // Validate the query params before beginning to serve the request. + // fetch-owner is not validated since it is a boolean + rerr = validateListObjectsArgs(token, delimiter, encodingType, maxKeys) + if rerr != nil { + err = rerr + responses.WriteErrorResponse(w, r, rerr) + return + } + + list, err := h.objsvc.ListObjectsV2(ctx, ack, bucname, prefix, token, delimiter, + maxKeys, fetchOwner, startAfter) if err != nil { rerr = h.respErr(err) responses.WriteErrorResponse(w, r, rerr) return } - responses.WriteListObjectsResponse(w, r, ack, bucname, prefix, marker, delimiter, encodingType, maxKeys, list) + responses.WriteListObjectsV2Response(w, r, ack, bucname, prefix, token, startAfter, + delimiter, encodingType, maxKeys, list) } -//func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { -// ctx := r.Context() -// ack := cctx.GetAccessKey(r) -// var err error -// defer func() { -// cctx.SetHandleInf(r, h.name(), err) -// }() -// -// bucname, _, err := requests.ParseBucketAndObject(r) -// if err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) -// return -// } -// -// err = h.bucsvc.CheckACL(ack, bucname, action.ListObjectsAction) -// if errors.Is(err, object.ErrBucketNotFound) { -// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) -// return -// } -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// 
-// urlValues := r.Form -// // Extract all the listObjectsV2 query params to their native values. -// prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(urlValues) -// if errCode != nil { -// responses.WriteErrorResponse(w, r, errCode) -// return -// } -// -// marker := token -// if marker == "" { -// marker = startAfter -// } -// if err := s3utils.CheckListObjsArgs(ctx, bucname, prefix, marker); err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// // Validate the query params before beginning to serve the request. -// // fetch-owner is not validated since it is a boolean -// s3Error := validateListObjectsArgs(token, delimiter, encodingType, maxKeys) -// if s3Error != nil { -// responses.WriteErrorResponse(w, r, s3Error) -// return -// } -// -// // rlock bucket -// runlock, err := h.rlock(ctx, bucname, w, r) -// if err != nil { -// return -// } -// defer runlock() -// -// // Initiate a list objects operation based on the input params. -// // On success would return back ListObjectsInfo object to be -// // marshaled into S3 compatible XML header. -// //objsvc -// listObjectsV2Info, err := h.objsvc.ListObjectsV2(ctx, bucname, prefix, token, delimiter, -// maxKeys, fetchOwner, startAfter) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// resp := responses.GenerateListObjectsV2Response( -// bucname, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter, -// delimiter, encodingType, listObjectsV2Info.IsTruncated, -// maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes) -// -// // Write success response. 
-// responses.WriteSuccessResponseXML(w, r, resp) -//} -// -//// setPutObjHeaders sets all the necessary headers returned back -//// upon a success Put/Copy/CompleteMultipart/Delete requests -//// to activate delete only headers set delete as true -//func setPutObjHeaders(w http.ResponseWriter, obj object.Object, delete bool) { -// // We must not use the http.Header().Set method here because some (broken) -// // clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive). -// // Therefore, we have to set the ETag directly as map entry. -// if obj.ETag != "" && !delete { -// w.Header()[consts.ETag] = []string{`"` + obj.ETag + `"`} -// } -// -// // Set the relevant version ID as part of the response header. -// if obj.VersionID != "" { -// w.Header()[consts.AmzVersionID] = []string{obj.VersionID} -// // If version is a deleted marker, set this header as well -// if obj.DeleteMarker && delete { // only returned during delete object -// w.Header()[consts.AmzDeleteMarker] = []string{strconv.FormatBool(obj.DeleteMarker)} -// } -// } -// -// if obj.Bucket != "" && obj.Name != "" { -// // do something -// } -//} func pathToBucketAndObject(path string) (bucket, object string) { path = strings.TrimPrefix(path, consts.SlashSeparator) @@ -496,12 +451,12 @@ func isReplace(r *http.Request) bool { // Parse bucket url queries func getListObjectsV1Args(values url.Values) ( - prefix, marker, delimiter string, maxkeys int64, encodingType string, errCode error) { + prefix, marker, delimiter string, maxkeys int64, encodingType string, rerr *responses.Error) { if values.Get("max-keys") != "" { var err error if maxkeys, err = strconv.ParseInt(values.Get("max-keys"), 10, 64); err != nil { - errCode = responses.ErrInvalidMaxKeys + rerr = responses.ErrInvalidMaxKeys return } } else { @@ -518,20 +473,20 @@ func getListObjectsV1Args(values url.Values) ( // Parse bucket url queries for ListObjects V2. 
func getListObjectsV2Args(values url.Values) ( prefix, token, startAfter, delimiter string, - fetchOwner bool, maxkeys int, encodingType string, errCode error) { + fetchOwner bool, maxkeys int64, encodingType string, rerr *responses.Error) { // The continuation-token cannot be empty. if val, ok := values["continuation-token"]; ok { if len(val[0]) == 0 { - errCode = responses.ErrInvalidToken + rerr = responses.ErrInvalidToken return } } if values.Get("max-keys") != "" { var err error - if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil { - errCode = responses.ErrInvalidMaxKeys + if maxkeys, err = strconv.ParseInt(values.Get("max-keys"), 10, 64); err != nil { + rerr = responses.ErrInvalidMaxKeys return } // Over flowing count - reset to maxObjectList. @@ -551,7 +506,7 @@ func getListObjectsV2Args(values url.Values) ( if token = values.Get("continuation-token"); token != "" { decodedToken, err := base64.StdEncoding.DecodeString(token) if err != nil { - errCode = responses.ErrIncorrectContinuationToken + rerr = responses.ErrIncorrectContinuationToken return } token = string(decodedToken) @@ -578,7 +533,7 @@ func trimLeadingSlash(ep string) string { // - delimiter if set should be equal to '/', otherwise the request is rejected. // - marker if set should have a common prefix with 'prefix' param, otherwise // the request is rejected. -func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int) error { +func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int64) (rerr *responses.Error) { // Max keys cannot be negative. 
if maxKeys < 0 { return responses.ErrInvalidMaxKeys diff --git a/s3/handlers/options.go b/s3/handlers/options.go index 1e9bdcbb4..d4026c493 100644 --- a/s3/handlers/options.go +++ b/s3/handlers/options.go @@ -16,7 +16,6 @@ var defaultCorsMethods = []string{ } var defaultCorsHeaders = []string{ - consts.CID, consts.Date, consts.ETag, consts.ServerInfo, @@ -31,6 +30,7 @@ var defaultCorsHeaders = []string{ consts.LastModified, consts.ContentLanguage, consts.CacheControl, + consts.Location, consts.RetryAfter, consts.AmzBucketRegion, consts.Expires, @@ -42,6 +42,8 @@ var defaultCorsHeaders = []string{ "X-Amz*", "x-amz*", "*", + consts.Cid, + consts.CidList, } var defaultHeaders = map[string][]string{ diff --git a/s3/handlers/proto.go b/s3/handlers/proto.go index 9430d33fb..8f1066111 100644 --- a/s3/handlers/proto.go +++ b/s3/handlers/proto.go @@ -29,7 +29,7 @@ type Handlerser interface { GetObjectHandler(w http.ResponseWriter, r *http.Request) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) ListObjectsHandler(w http.ResponseWriter, r *http.Request) - //ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) + ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) //// Multipart diff --git a/s3/responses/responses_common.go b/s3/responses/responses_common.go index a93945dd3..5d4a785c3 100644 --- a/s3/responses/responses_common.go +++ b/s3/responses/responses_common.go @@ -51,7 +51,7 @@ func setPutObjHeaders(w http.ResponseWriter, etag, cid string, delete bool) { w.Header()[consts.ETag] = []string{`"` + etag + `"`} } if cid != "" { - w.Header()[consts.CID] = []string{cid} + w.Header()[consts.Cid] = []string{cid} } } diff --git a/s3/responses/responses_object.go b/s3/responses/responses_object.go index 12cb10a83..3eeaf1976 100644 --- a/s3/responses/responses_object.go +++ b/s3/responses/responses_object.go @@ -1,9 +1,11 @@ package responses import ( + "encoding/base64" "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/consts" 
"github.com/bittorrent/go-btfs/s3/services/object" + "github.com/bittorrent/go-btfs/s3/utils" "io" "net/http" ) @@ -11,13 +13,13 @@ import ( func WritePutObjectResponse(w http.ResponseWriter, r *http.Request, obj *object.Object) { output := new(s3.PutObjectOutput) output.SetETag(`"` + obj.ETag + `"`) - w.Header().Set(consts.CID, obj.CID) + w.Header().Set(consts.Cid, obj.CID) WriteSuccessResponse(w, output, "") } func WriteHeadObjectResponse(w http.ResponseWriter, r *http.Request, obj *object.Object) { output := new(s3.HeadObjectOutput) - w.Header().Set(consts.CID, obj.CID) + w.Header().Set(consts.Cid, obj.CID) SetObjectHeaders(w, r, obj) SetHeadGetRespHeaders(w, r.Form) WriteSuccessResponse(w, output, "") @@ -27,7 +29,7 @@ func WriteCopyObjectResponse(w http.ResponseWriter, r *http.Request, obj *object output := new(s3.CopyObjectResult) output.SetETag(`"` + obj.ETag + `"`) output.SetLastModified(obj.ModTime) - w.Header().Set(consts.CID, obj.CID) + w.Header().Set(consts.Cid, obj.CID) WriteSuccessResponse(w, output, "") } @@ -42,9 +44,9 @@ func WriteGetObjectResponse(w http.ResponseWriter, r *http.Request, obj *object. 
output.SetContentLength(obj.Size) output.SetBody(body) output.SetMetadata(map[string]*string{ - consts.CID: &obj.CID, + consts.Cid: &obj.CID, }) - w.Header().Set(consts.CID, obj.CID) + w.Header().Set(consts.Cid, obj.CID) SetObjectHeaders(w, r, obj) SetHeadGetRespHeaders(w, r.Form) WriteSuccessResponse(w, output, "") @@ -72,30 +74,67 @@ func WriteGetObjectACLResponse(w http.ResponseWriter, r *http.Request, accessKey func WriteListObjectsResponse(w http.ResponseWriter, r *http.Request, accessKey, bucname, prefix, marker, delimiter, encodingType string, maxKeys int64, list *object.ObjectsList) { out := new(s3.ListObjectsOutput) out.SetName(bucname) - out.SetPrefix(prefix) - out.SetMarker(marker) - out.SetDelimiter(delimiter) out.SetEncodingType(encodingType) + out.SetPrefix(utils.S3EncodeName(prefix, encodingType)) + out.SetMarker(utils.S3EncodeName(marker, encodingType)) + out.SetDelimiter(utils.S3EncodeName(delimiter, encodingType)) out.SetMaxKeys(maxKeys) + out.SetNextMarker(list.NextMarker) + out.SetIsTruncated(list.IsTruncated) s3Objs := make([]*s3.Object, len(list.Objects)) for i, obj := range list.Objects { s3Obj := new(s3.Object) s3Obj.SetETag(`"` + obj.ETag + `"`) s3Obj.SetOwner(owner(accessKey)) s3Obj.SetLastModified(obj.ModTime) - s3Obj.SetKey(obj.Name) + s3Obj.SetKey(utils.S3EncodeName(obj.Name, encodingType)) s3Obj.SetSize(obj.Size) + s3Obj.SetStorageClass("") s3Objs[i] = s3Obj + w.Header().Add(consts.CidList, obj.CID) } out.SetContents(s3Objs) s3CommPrefixes := make([]*s3.CommonPrefix, len(list.Prefixes)) for i, cpf := range list.Prefixes { pfx := new(s3.CommonPrefix) - pfx.SetPrefix(cpf) + pfx.SetPrefix(utils.S3EncodeName(cpf, encodingType)) s3CommPrefixes[i] = pfx } out.SetCommonPrefixes(s3CommPrefixes) + WriteSuccessResponse(w, out, "ListBucketResult") +} + +func WriteListObjectsV2Response(w http.ResponseWriter, r *http.Request, accessKey, bucname, prefix, token, startAfter, delimiter, encodingType string, maxKeys int64, list *object.ObjectsListV2) { 
+ out := new(s3.ListObjectsV2Output) + out.SetName(bucname) + out.SetEncodingType(encodingType) + out.SetStartAfter(utils.S3EncodeName(startAfter, encodingType)) + out.SetDelimiter(utils.S3EncodeName(delimiter, encodingType)) + out.SetPrefix(utils.S3EncodeName(prefix, encodingType)) + out.SetMaxKeys(maxKeys) + out.SetContinuationToken(base64.StdEncoding.EncodeToString([]byte(token))) + out.SetNextContinuationToken(base64.StdEncoding.EncodeToString([]byte(list.NextContinuationToken))) out.SetIsTruncated(list.IsTruncated) - out.SetNextMarker(list.NextMarker) - WriteSuccessResponse(w, out, "") + s3Objs := make([]*s3.Object, len(list.Objects)) + for i, obj := range list.Objects { + s3Obj := new(s3.Object) + s3Obj.SetETag(`"` + obj.ETag + `"`) + s3Obj.SetOwner(owner(accessKey)) + s3Obj.SetLastModified(obj.ModTime) + s3Obj.SetKey(utils.S3EncodeName(obj.Name, encodingType)) + s3Obj.SetSize(obj.Size) + s3Obj.SetStorageClass("") + s3Objs[i] = s3Obj + w.Header().Add(consts.CidList, obj.CID) + } + out.SetContents(s3Objs) + s3CommPrefixes := make([]*s3.CommonPrefix, len(list.Prefixes)) + for i, cpf := range list.Prefixes { + pfx := new(s3.CommonPrefix) + pfx.SetPrefix(utils.S3EncodeName(cpf, encodingType)) + s3CommPrefixes[i] = pfx + } + out.SetCommonPrefixes(s3CommPrefixes) + out.SetKeyCount(int64(len(list.Objects) + len(list.Prefixes))) + WriteSuccessResponse(w, out, "ListBucketResult") } diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 4defffe55..a34aafa5f 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -43,8 +43,8 @@ func (routers *Routers) Register() http.Handler { //bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") ////object... 
- //// ListObjectsV2 - //bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsV2Handler).Queries("list-type", "2") + // ListObjectsV2 + bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsV2Handler).Queries("list-type", "2") // HeadObject bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(hs.HeadObjectHandler) // PutObject diff --git a/s3/services/object/proto.go b/s3/services/object/proto.go index fc0f6e1f3..cf8a31c3b 100644 --- a/s3/services/object/proto.go +++ b/s3/services/object/proto.go @@ -34,6 +34,7 @@ type Service interface { DeleteObject(ctx context.Context, user, bucname, objname string) (err error) // todo: DeleteObjects ListObjects(ctx context.Context, user, bucname, prefix, delimiter, marker string, max int64) (list *ObjectsList, err error) + ListObjectsV2(ctx context.Context, user string, bucket string, prefix string, token, delimiter string, max int64, owner bool, after string) (list *ObjectsListV2, err error) CreateMultipartUpload(ctx context.Context, user, bucname, objname string, meta map[string]string) (multipart *Multipart, err error) UploadPart(ctx context.Context, user, bucname, objname, uplid string, partId int, reader *hash.Reader, size int64, meta map[string]string) (part *Part, err error) @@ -93,6 +94,14 @@ type ObjectsList struct { Prefixes []string } +type ObjectsListV2 struct { + IsTruncated bool + ContinuationToken string + NextContinuationToken string + Objects []*Object + Prefixes []string +} + type CompletePart struct { PartNumber int ETag string diff --git a/s3/services/object/service_object.go b/s3/services/object/service_object.go index 79c8f6a7a..5957d81e0 100644 --- a/s3/services/object/service_object.go +++ b/s3/services/object/service_object.go @@ -3,6 +3,7 @@ package object import ( "context" "errors" + "fmt" "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/providers" @@ -473,8 +474,8 @@ func (s *service) ListObjects(ctx 
context.Context, user, bucname, prefix, delimi // Common prefix: if the part of object name without prefix include delimiter // it is the string truncated object name after the delimiter, else - // it is the bucket name itself - commonPrefix := objname + // it is empty string + commonPrefix := "" if delimiter != "" { dl := len(delimiter) pl := len(prefix) @@ -484,31 +485,30 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi } } - // If collect not begin, check the marker, if it is matched - // with the common prefix, then begin collection from next iterate turn - // and mark this common prefix as seen - // note: common prefix also can be object name, so when marker is - // an object name, the check will be also done correctly - if !begin && marker == commonPrefix { - begin = true - seen[commonPrefix] = true - return - } + fmt.Printf("%-18s | %10s\n", objname, commonPrefix) - // Not begin, jump the item + // If collect not begin, check the marker, if it is matched + // with the common prefix or object name, then begin collection from next iterate + // and if common prefix matched, mark this common prefix as seen if !begin { + if commonPrefix != "" && marker == commonPrefix { + seen[commonPrefix] = true + begin = true + } else if marker == objname { + begin = true + } return } // Objects with same common prefix will be grouped into one // note: the objects without common prefix will present only once, so // it is not necessary to add these objects names in the seen map - if seen[commonPrefix] { - return - } // Objects with common prefix grouped int one - if commonPrefix != objname { + if commonPrefix != "" { + if seen[commonPrefix] { + return + } list.Prefixes = append(list.Prefixes, commonPrefix) list.NextMarker = commonPrefix seen[commonPrefix] = true @@ -540,6 +540,26 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi return } +func (s *service) ListObjectsV2(ctx context.Context, user string, bucket 
string, prefix string, token, delimiter string, max int64, owner bool, after string) (list *ObjectsListV2, err error) { + marker := token + if marker == "" { + marker = after + } + loi, err := s.ListObjects(ctx, user, bucket, prefix, delimiter, marker, max) + if err != nil { + return + } + + list = &ObjectsListV2{ + IsTruncated: loi.IsTruncated, + ContinuationToken: token, + NextContinuationToken: loi.NextMarker, + Objects: loi.Objects, + Prefixes: loi.Prefixes, + } + return +} + func (s *service) getObject(objkey string) (object *Object, err error) { err = s.providers.StateStore().Get(objkey, &object) if errors.Is(err, providers.ErrStateStoreNotFound) { diff --git a/s3/utils/encode.go b/s3/utils/encode.go index 5b80319a5..b85ad7bf8 100644 --- a/s3/utils/encode.go +++ b/s3/utils/encode.go @@ -75,6 +75,7 @@ func s3URLEncode(s string) string { } return string(t) } + func shouldEscape(c byte) bool { if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { return false From c2b877ef462cf3aaa02d9fd9bc59931037a2ed62 Mon Sep 17 00:00:00 2001 From: Steve Date: Wed, 6 Sep 2023 23:17:29 +0800 Subject: [PATCH 091/139] refactor: btfs api add timeout & add cid refs to ensure a referred cid cannot be deleted --- cmd/btfs/daemon.go | 5 +- s3/handlers/handlers_object.go | 2 +- s3/providers/btfs_api.go | 72 ++++++++++- s3/providers/btfs_api_options.go | 29 +++++ s3/responses/responses_object.go | 5 +- s3/routers/routers.go | 17 +-- s3/s3.go | 14 ++- s3/services/object/options.go | 7 ++ s3/services/object/service.go | 20 +++- s3/services/object/service_bucket.go | 4 +- s3/services/object/service_multipart.go | 28 +++-- s3/services/object/service_object.go | 151 ++++++++++++++++++++++-- 12 files changed, 311 insertions(+), 43 deletions(-) create mode 100644 s3/providers/btfs_api_options.go diff --git a/cmd/btfs/daemon.go b/cmd/btfs/daemon.go index a220315a3..b60347431 100644 --- a/cmd/btfs/daemon.go +++ b/cmd/btfs/daemon.go @@ -718,7 +718,10 @@ If the user need to
start multiple nodes on the same machine, the configuration } // init s3 providers - s3.InitProviders(statestore) + err = s3.InitProviders(statestore) + if err != nil { + return err + } // access-key init accesskey.InitService(s3.GetProviders()) diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index 08dc64000..a1e1ba7ae 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -156,7 +156,7 @@ func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { return } if srcBucket == dstBucket && srcObject == dstObject { - err = responses.ErrInvalidCopySource + err = responses.ErrInvalidCopyDest responses.WriteErrorResponse(w, r, responses.ErrInvalidCopyDest) return } diff --git a/s3/providers/btfs_api.go b/s3/providers/btfs_api.go index c29d9217c..4e97a9cbd 100644 --- a/s3/providers/btfs_api.go +++ b/s3/providers/btfs_api.go @@ -3,22 +3,54 @@ package providers import ( "errors" shell "github.com/bittorrent/go-btfs-api" + "github.com/mitchellh/go-homedir" "io" + "net/http" + "os" + "path" + "strings" + "time" ) var _ FileStorer = (*BtfsAPI)(nil) type BtfsAPI struct { shell *shell.Shell + headerTimout time.Duration + timeout time.Duration + endpointUrl string } -func NewBtfsAPI(endpointUrl string) (api *BtfsAPI) { - api = &BtfsAPI{} - if endpointUrl == "" { - api.shell = shell.NewLocalShell() - } else { - api.shell = shell.NewShell(endpointUrl) +func NewBtfsAPI(options ...BtfsAPIOption) (api *BtfsAPI, err error) { + api = &BtfsAPI{ + headerTimout: defaultBtfsAPIResponseHeaderTimeout, + timeout: defaultBtfsAPITimeout, + endpointUrl: defaultBtfsAPIEndpointUrl, } + for _, option := range options { + option(api) + } + + if api.endpointUrl == "" { + api.endpointUrl, err = api.getLocalUrl() + if err != nil { + return + } + } + + client := &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DisableKeepAlives: true, + ResponseHeaderTimeout: api.headerTimout, + }, + Timeout: 
api.timeout, + } + + api.shell = shell.NewShellWithClient( + api.endpointUrl, client, + ) + return } @@ -39,3 +71,31 @@ func (api *BtfsAPI) Cat(id string) (rc io.ReadCloser, err error) { rc, err = api.shell.Cat(id) return } + +func (api *BtfsAPI) getLocalUrl() (url string, err error) { + baseDir := os.Getenv(shell.EnvDir) + if baseDir == "" { + baseDir = shell.DefaultPathRoot + } + + baseDir, err = homedir.Expand(baseDir) + if err != nil { + return + } + + apiFile := path.Join(baseDir, shell.DefaultApiFile) + + _, err = os.Stat(apiFile) + if err != nil { + return + } + + bs, err := os.ReadFile(apiFile) + if err != nil { + return + } + + url = strings.TrimSpace(string(bs)) + return + +} diff --git a/s3/providers/btfs_api_options.go b/s3/providers/btfs_api_options.go new file mode 100644 index 000000000..f016134de --- /dev/null +++ b/s3/providers/btfs_api_options.go @@ -0,0 +1,29 @@ +package providers + +import "time" + +type BtfsAPIOption func(api *BtfsAPI) + +const( + defaultBtfsAPIEndpointUrl = "" + defaultBtfsAPITimeout = 20 * time.Minute + defaultBtfsAPIResponseHeaderTimeout = 1 * time.Minute +) + +func BtfsAPIWithTimeout(timeout time.Duration) BtfsAPIOption { + return func(api *BtfsAPI) { + api.timeout = timeout + } +} + +func BtfsAPIWithBtfsAPIHeaderTimeout(timeout time.Duration) BtfsAPIOption { + return func(api *BtfsAPI) { + api.headerTimout = timeout + } +} + +func BtfsAPIWithEndpointUrl(url string) BtfsAPIOption { + return func(api *BtfsAPI) { + api.endpointUrl = url + } +} diff --git a/s3/responses/responses_object.go b/s3/responses/responses_object.go index 3eeaf1976..317995af3 100644 --- a/s3/responses/responses_object.go +++ b/s3/responses/responses_object.go @@ -20,6 +20,9 @@ func WritePutObjectResponse(w http.ResponseWriter, r *http.Request, obj *object. 
func WriteHeadObjectResponse(w http.ResponseWriter, r *http.Request, obj *object.Object) { output := new(s3.HeadObjectOutput) w.Header().Set(consts.Cid, obj.CID) + output.SetMetadata(map[string]*string{ + consts.Cid: &obj.CID, + }) SetObjectHeaders(w, r, obj) SetHeadGetRespHeaders(w, r.Form) WriteSuccessResponse(w, output, "") @@ -30,7 +33,7 @@ func WriteCopyObjectResponse(w http.ResponseWriter, r *http.Request, obj *object output.SetETag(`"` + obj.ETag + `"`) output.SetLastModified(obj.ModTime) w.Header().Set(consts.Cid, obj.CID) - WriteSuccessResponse(w, output, "") + WriteSuccessResponse(w, output, "CopyObjectResult") } func WriteDeleteObjectResponse(w http.ResponseWriter, r *http.Request, obj *object.Object) { diff --git a/s3/routers/routers.go b/s3/routers/routers.go index a34aafa5f..3c31cfb7b 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -43,25 +43,26 @@ func (routers *Routers) Register() http.Handler { //bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") ////object... - // ListObjectsV2 - bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsV2Handler).Queries("list-type", "2") // HeadObject bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(hs.HeadObjectHandler) - // PutObject - bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(hs.PutObjectHandler) // CopyObject bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(hs.CopyObjectHandler) + // PutObject + bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(hs.PutObjectHandler) // DeleteObject bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.DeleteObjectHandler) - ////todo DeleteObjects new ? 
- //bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.DeleteObjectHandler) - // GetObject - bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectHandler) // GetObjectACL bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectACLHandler).Queries("acl", "") + // GetObject + bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectHandler) + // ListObjectsV2 + bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsV2Handler).Queries("list-type", "2") // ListObjects bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsHandler) + // todo DeleteObjects new + // bucket.Methods(http.MethodDelete).HandlerFunc(hs.DeleteObjectsHandler).Queries("delete", "") + //bucket... // GetBucketACL bucket.Methods(http.MethodGet).HandlerFunc(hs.GetBucketAclHandler).Queries("acl", "") diff --git a/s3/s3.go b/s3/s3.go index 6ab69c45e..722bd3260 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -19,12 +19,20 @@ var ( once sync.Once ) -func InitProviders(stateStore storage.StateStorer) { +func InitProviders(stateStore storage.StateStorer) (err error) { once.Do(func() { - sstore := providers.NewStorageStateStoreProxy(stateStore) - fstore := providers.NewBtfsAPI("") + var ( + sstore providers.StateStorer + fstore providers.FileStorer + ) + sstore = providers.NewStorageStateStoreProxy(stateStore) + fstore, err = providers.NewBtfsAPI() + if err != nil { + return + } ps = providers.NewProviders(sstore, fstore) }) + return } func GetProviders() *providers.Providers { diff --git a/s3/services/object/options.go b/s3/services/object/options.go index 7e6b80f58..a913b7ef2 100644 --- a/s3/services/object/options.go +++ b/s3/services/object/options.go @@ -10,6 +10,7 @@ const ( defaultBucketSpace = "s3:bkt" defaultObjectSpace = "s3:obj" defaultUploadSpace = "s3:upl" + defaultCidrefSpace = "s3:cid" defaultOperationTimeout = 5 * time.Minute defaultCloseBodyTimeout = 10 * time.Minute ) @@ -42,6 +43,12 @@ func WithUploadSpace(space 
string) Option { } } +func WithCidrefSpace(space string) Option { + return func(svc *service) { + svc.cidrefSpace = space + } +} + func WithOperationTimeout(timeout time.Duration) Option { return func(svc *service) { svc.operationTimeout = timeout diff --git a/s3/services/object/service.go b/s3/services/object/service.go index 6d44114c0..4cb3b321f 100644 --- a/s3/services/object/service.go +++ b/s3/services/object/service.go @@ -2,6 +2,7 @@ package object import ( "context" + "fmt" "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/ctxmu" "github.com/bittorrent/go-btfs/s3/policy" @@ -21,6 +22,7 @@ type service struct { bucketSpace string objectSpace string uploadSpace string + cidrefSpace string operationTimeout time.Duration closeBodyTimeout time.Duration } @@ -33,6 +35,7 @@ func NewService(providers providers.Providerser, options ...Option) Service { bucketSpace: defaultBucketSpace, objectSpace: defaultObjectSpace, uploadSpace: defaultUploadSpace, + cidrefSpace: defaultCidrefSpace, operationTimeout: defaultOperationTimeout, closeBodyTimeout: defaultCloseBodyTimeout, } @@ -70,7 +73,22 @@ func (s *service) getAllUploadsKeyPrefix(bucname string) (prefix string) { } func (s *service) getUploadKey(bucname, objname, uploadid string) (key string) { - key = strings.Join([]string{s.getAllUploadsKeyPrefix(bucname), objname, uploadid}, s.keySeparator) + key = s.getAllUploadsKeyPrefix(bucname) + strings.Join([]string{objname, uploadid}, s.keySeparator) + return +} + +func (s *service) getUploadPartKey(uplkey string, idx int) (key string) { + key = fmt.Sprintf("%s_%d", uplkey, idx) + return +} + +func (s *service) getAllCidrefsKeyPrefix(cid string) (prefix string) { + prefix = strings.Join([]string{s.cidrefSpace, cid, ""}, s.keySeparator) + return +} + +func (s *service) getCidrefKey(cid, to string) (key string) { + key = s.getAllCidrefsKeyPrefix(cid) + to return } diff --git a/s3/services/object/service_bucket.go 
b/s3/services/object/service_bucket.go index 1f3f7ad6d..078e4b6f8 100644 --- a/s3/services/object/service_bucket.go +++ b/s3/services/object/service_bucket.go @@ -136,13 +136,13 @@ func (s *service) DeleteBucket(ctx context.Context, user, bucname string) (err e objectsPrefix := s.getAllObjectsKeyPrefix(bucname) // Try to delete all bucket objects - _ = s.deleteObjectsByPrefix(objectsPrefix) + _ = s.deleteObjectsByPrefix(ctx, objectsPrefix) // All bucket uploads prefix uploadsPrefix := s.getAllUploadsKeyPrefix(bucname) // Try to delete all bucket uploads - _ = s.deleteUploadsByPrefix(uploadsPrefix) + _ = s.deleteUploadsByPrefix(ctx, uploadsPrefix) return } diff --git a/s3/services/object/service_multipart.go b/s3/services/object/service_multipart.go index 4ae5f27a1..f7d4ddbe4 100644 --- a/s3/services/object/service_multipart.go +++ b/s3/services/object/service_multipart.go @@ -133,8 +133,11 @@ func (s *service) UploadPart(ctx context.Context, user, bucname, objname, uplid return } + // Upload part key + prtkey := s.getUploadPartKey(uplkey, len(multipart.Parts)) + // Store part body - cid, err := s.providers.FileStore().Store(body) + cid, err := s.storeBody(ctx, body, prtkey) if err != nil { return } @@ -146,7 +149,7 @@ func (s *service) UploadPart(ctx context.Context, user, bucname, objname, uplid // Try to remove the part body defer func() { if removePartBody { - _ = s.providers.FileStore().Remove(cid) + _ = s.removeBody(ctx, cid, prtkey) } }() @@ -234,8 +237,9 @@ func (s *service) AbortMultipartUpload(ctx context.Context, user, bucname, objna } // Try to remove all parts body - for _, part := range multipart.Parts { - _ = s.providers.FileStore().Remove(part.CID) + for i, part := range multipart.Parts { + prtkey := s.getUploadPartKey(uplkey, i) + _ = s.removeBody(ctx, part.CID, prtkey) } return @@ -385,7 +389,7 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob body := io.MultiReader(readers...) 
// Store object body - cid, err := s.providers.FileStore().Store(body) + cid, err := s.storeBody(ctx, body, objkey) if err != nil { return } @@ -435,7 +439,7 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob // Try to remove old object body if exists, because it has been covered by new one if objectOld != nil { - _ = s.providers.FileStore().Remove(objectOld.CID) + _ = s.removeBody(ctx, objectOld.CID, objkey) } // Remove multipart upload @@ -445,8 +449,9 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob } // Try to remove all parts body, because they are no longer be referenced - for _, part := range multipart.Parts { - _ = s.providers.FileStore().Remove(part.CID) + for i, part := range multipart.Parts { + prtkey := s.getUploadPartKey(uplkey, i) + _ = s.removeBody(ctx, part.CID, prtkey) } return @@ -491,7 +496,7 @@ func (s *service) computeMultipartMD5(parts []*CompletePart) (md5 string) { } // deleteUploadsByPrefix try to delete all multipart uploads with the specified common prefix -func (s *service) deleteUploadsByPrefix(uploadsPrefix string) (err error) { +func (s *service) deleteUploadsByPrefix(ctx context.Context, uploadsPrefix string) (err error) { err = s.providers.StateStore().Iterate(uploadsPrefix, func(key, _ []byte) (stop bool, er error) { uplkey := string(key) var multipart *Multipart @@ -503,8 +508,9 @@ func (s *service) deleteUploadsByPrefix(uploadsPrefix string) (err error) { if er != nil { return } - for _, part := range multipart.Parts { - _ = s.providers.FileStore().Remove(part.CID) + for i, part := range multipart.Parts { + prtkey := s.getUploadPartKey(uplkey, i) + _ = s.removeBody(ctx, part.CID, prtkey) } return }) diff --git a/s3/services/object/service_object.go b/s3/services/object/service_object.go index 5957d81e0..3cf40bab4 100644 --- a/s3/services/object/service_object.go +++ b/s3/services/object/service_object.go @@ -64,7 +64,7 @@ func (s *service) PutObject(ctx 
context.Context, user, bucname, objname string, } // Store object body - cid, err := s.providers.FileStore().Store(body) + cid, err := s.storeBody(ctx, body, objkey) if err != nil { return } @@ -76,7 +76,7 @@ func (s *service) PutObject(ctx context.Context, user, bucname, objname string, // Try to remove stored body if put object failed defer func() { if removeObjectBody { - _ = s.providers.FileStore().Remove(cid) + _ = s.removeBody(ctx, cid, objkey) } }() @@ -108,7 +108,7 @@ func (s *service) PutObject(ctx context.Context, user, bucname, objname string, } // put object - err = s.providers.StateStore().Put(objkey, object) + err = s.putObject(objkey, object) if err != nil { return } @@ -118,7 +118,7 @@ func (s *service) PutObject(ctx context.Context, user, bucname, objname string, // Try to remove old object body if exists, because it has been covered by new one if objectOld != nil { - _ = s.providers.FileStore().Remove(objectOld.CID) + _ = s.removeBody(ctx, objectOld.CID, objkey) } return @@ -214,6 +214,28 @@ func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, } defer s.lock.Unlock(dstObjkey) + // Add body Refer + err = s.addBodyRef(ctx, srcObject.CID, dstObjkey) + if err != nil { + return + } + + // Mark if delete the cid ref + deleteRef := true + + // If put new object failed, try to delete it's reference + defer func() { + if deleteRef { + _ = s.removeBodyRef(ctx, srcObject.CID, dstObjkey) + } + }() + + // Old desert object + oldDstObject, err := s.getObject(dstObjkey) + if err != nil { + return + } + // now now := time.Now() @@ -252,8 +274,20 @@ func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, } } + // Put destination object - err = s.providers.StateStore().Put(dstObjkey, dstObject) + err = s.putObject(dstObjkey, dstObject) + if err != nil { + return + } + + // Mark the delete ref to false + deleteRef = false + + // Try to remove the old object body + if oldDstObject != nil { + _ = s.removeBody(ctx, 
oldDstObject.CID, dstObjkey) + } return } @@ -402,13 +436,13 @@ func (s *service) DeleteObject(ctx context.Context, user, bucname, objname strin } // Delete object - err = s.providers.StateStore().Delete(objkey) + err = s.deleteObject(objkey) if err != nil { return } // Try to delete object body - _ = s.providers.FileStore().Remove(object.CID) + _ = s.removeBodyRef(ctx, object.CID, objkey) return } @@ -560,6 +594,16 @@ func (s *service) ListObjectsV2(ctx context.Context, user string, bucket string, return } +func (s *service) deleteObject(objkey string) (err error) { + err = s.providers.StateStore().Delete(objkey) + return +} + +func (s *service) putObject(objkey string, object *Object) (err error) { + err = s.providers.StateStore().Put(objkey, object) + return +} + func (s *service) getObject(objkey string) (object *Object, err error) { err = s.providers.StateStore().Get(objkey, &object) if errors.Is(err, providers.ErrStateStoreNotFound) { @@ -569,7 +613,7 @@ func (s *service) getObject(objkey string) (object *Object, err error) { } // deleteObjectsByPrefix try to delete all objects with the specified common prefix -func (s *service) deleteObjectsByPrefix(objectsPrefix string) (err error) { +func (s *service) deleteObjectsByPrefix(ctx context.Context, objectsPrefix string) (err error) { err = s.providers.StateStore().Iterate(objectsPrefix, func(key, _ []byte) (stop bool, er error) { objkey := string(key) var object *Object @@ -581,9 +625,98 @@ func (s *service) deleteObjectsByPrefix(objectsPrefix string) (err error) { if er != nil { return } - _ = s.providers.FileStore().Remove(object.CID) + _ = s.removeBody(ctx, object.CID, objkey) return }) return } + +func (s *service) addBodyRef(ctx context.Context, cid, toKey string) (err error) { + // Cid reference key + crfKey := s.getCidrefKey(cid, toKey) + + // Add cid reference + err = s.providers.StateStore().Put(crfKey, nil) + + return +} + +func (s *service) removeBodyRef(ctx context.Context, cid, toKey string) (err 
error) { + // This object cid reference key + crfKey := s.getCidrefKey(cid, toKey) + + // Delete cid ref of this object + err = s.providers.StateStore().Delete(crfKey) + + return +} + +func (s *service) storeBody(ctx context.Context, body io.Reader, toKey string) (cid string, err error) { + // RLock all cid refs to enable no cid will be deleted + err = s.lock.RLock(ctx, s.cidrefSpace) + if err != nil { + return + } + defer s.lock.RUnlock(s.cidrefSpace) + + // Store body and get the cid + cid, err = s.providers.FileStore().Store(body) + if err != nil { + return + } + + // Cid reference key + crfKey := s.getCidrefKey(cid, toKey) + + // Add cid reference + err = s.providers.StateStore().Put(crfKey, nil) + + return +} + +func (s *service) removeBody(ctx context.Context, cid, toKey string) (err error) { + // Lock all cid refs to enable new cid reference can not be added when + // remove is executing + err = s.lock.Lock(ctx, s.cidrefSpace) + if err != nil { + return + } + defer s.lock.Unlock(s.cidrefSpace) + + // This object cid reference key + crfKey := s.getCidrefKey(cid, toKey) + + // Delete cid ref of this object + err = s.providers.StateStore().Delete(crfKey) + if err != nil { + return + } + + // All this cid references prefix + allRefsPrefix := s.getAllCidrefsKeyPrefix(cid) + + // Flag to mark cid be referenced by other object + otherRef := false + + // Iterate all this cid refs, if exists other object's ref, set + // the otherRef mark to true + err = s.providers.StateStore().Iterate(allRefsPrefix, func(key, _ []byte) (stop bool, err error) { + otherRef = true + stop = true + return + }) + if err != nil { + return + } + + // Exists other refs, cid body can not be removed + if otherRef { + return + } + + // No other refs to this cid, remove it + err = s.providers.FileStore().Remove(cid) + + return +} From 0e8707d5cf9a45644062bed88f578aaaa4f7fccb Mon Sep 17 00:00:00 2001 From: Steve Date: Thu, 7 Sep 2023 00:02:57 +0800 Subject: [PATCH 092/139] ref: fix delete object 
remove body --- s3/handlers/handlers_object.go | 2 -- s3/services/object/service_object.go | 9 ++++----- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index a1e1ba7ae..5850b6261 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -210,7 +210,6 @@ func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { return } - //objsvc err = h.objsvc.DeleteObject(ctx, ack, bucname, objname) if err != nil { rerr = h.respErr(err) @@ -435,7 +434,6 @@ func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) delimiter, encodingType, maxKeys, list) } - func pathToBucketAndObject(path string) (bucket, object string) { path = strings.TrimPrefix(path, consts.SlashSeparator) idx := strings.Index(path, consts.SlashSeparator) diff --git a/s3/services/object/service_object.go b/s3/services/object/service_object.go index 3cf40bab4..0286e713d 100644 --- a/s3/services/object/service_object.go +++ b/s3/services/object/service_object.go @@ -251,10 +251,10 @@ func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, VersionID: "", IsLatest: true, DeleteMarker: false, - ContentType: srcObject.ContentType, - ContentEncoding: srcObject.ContentEncoding, + ContentType: srcObject.ContentType, + ContentEncoding: srcObject.ContentEncoding, SuccessorModTime: now.UTC(), - Expires: srcObject.Expires, + Expires: srcObject.Expires, } // Set destination object metadata @@ -274,7 +274,6 @@ func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, } } - // Put destination object err = s.putObject(dstObjkey, dstObject) if err != nil { @@ -442,7 +441,7 @@ func (s *service) DeleteObject(ctx context.Context, user, bucname, objname strin } // Try to delete object body - _ = s.removeBodyRef(ctx, object.CID, objkey) + _ = s.removeBody(ctx, object.CID, objkey) return } From 102db6bf1459e893cd2f69760ae2f454e86a618e Mon Sep 
17 00:00:00 2001 From: Steve Date: Thu, 7 Sep 2023 00:18:52 +0800 Subject: [PATCH 093/139] ref: format code --- s3/providers/btfs_api.go | 6 +++--- s3/providers/btfs_api_options.go | 4 ++-- s3/services/object/service.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/s3/providers/btfs_api.go b/s3/providers/btfs_api.go index 4e97a9cbd..51885c865 100644 --- a/s3/providers/btfs_api.go +++ b/s3/providers/btfs_api.go @@ -15,10 +15,10 @@ import ( var _ FileStorer = (*BtfsAPI)(nil) type BtfsAPI struct { - shell *shell.Shell + shell *shell.Shell headerTimout time.Duration - timeout time.Duration - endpointUrl string + timeout time.Duration + endpointUrl string } func NewBtfsAPI(options ...BtfsAPIOption) (api *BtfsAPI, err error) { diff --git a/s3/providers/btfs_api_options.go b/s3/providers/btfs_api_options.go index f016134de..448689342 100644 --- a/s3/providers/btfs_api_options.go +++ b/s3/providers/btfs_api_options.go @@ -4,8 +4,8 @@ import "time" type BtfsAPIOption func(api *BtfsAPI) -const( - defaultBtfsAPIEndpointUrl = "" +const ( + defaultBtfsAPIEndpointUrl = "" defaultBtfsAPITimeout = 20 * time.Minute defaultBtfsAPIResponseHeaderTimeout = 1 * time.Minute ) diff --git a/s3/services/object/service.go b/s3/services/object/service.go index 4cb3b321f..4c503a67c 100644 --- a/s3/services/object/service.go +++ b/s3/services/object/service.go @@ -35,7 +35,7 @@ func NewService(providers providers.Providerser, options ...Option) Service { bucketSpace: defaultBucketSpace, objectSpace: defaultObjectSpace, uploadSpace: defaultUploadSpace, - cidrefSpace: defaultCidrefSpace, + cidrefSpace: defaultCidrefSpace, operationTimeout: defaultOperationTimeout, closeBodyTimeout: defaultCloseBodyTimeout, } From f5153f3414bb742f055dc7327e8fdb89ecc59fa9 Mon Sep 17 00:00:00 2001 From: Steve Date: Thu, 7 Sep 2023 15:46:42 +0800 Subject: [PATCH 094/139] fix: routers --- s3/routers/routers.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/s3/routers/routers.go b/s3/routers/routers.go index 3c31cfb7b..7dec74522 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -55,10 +55,6 @@ func (routers *Routers) Register() http.Handler { bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectACLHandler).Queries("acl", "") // GetObject bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectHandler) - // ListObjectsV2 - bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsV2Handler).Queries("list-type", "2") - // ListObjects - bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsHandler) // todo DeleteObjects new // bucket.Methods(http.MethodDelete).HandlerFunc(hs.DeleteObjectsHandler).Queries("delete", "") @@ -66,6 +62,10 @@ func (routers *Routers) Register() http.Handler { //bucket... // GetBucketACL bucket.Methods(http.MethodGet).HandlerFunc(hs.GetBucketAclHandler).Queries("acl", "") + // ListObjectsV2 + bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsV2Handler).Queries("list-type", "2") + // ListObjects + bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsHandler) // PutBucketACL bucket.Methods(http.MethodPut).HandlerFunc(hs.PutBucketAclHandler).Queries("acl", "") // CreateBucket From f086009380395d7124b480d95f96a4961ea31936 Mon Sep 17 00:00:00 2001 From: Steve Date: Thu, 7 Sep 2023 17:09:15 +0800 Subject: [PATCH 095/139] fix: add cors header --- s3/handlers/options.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/s3/handlers/options.go b/s3/handlers/options.go index d4026c493..091930878 100644 --- a/s3/handlers/options.go +++ b/s3/handlers/options.go @@ -39,6 +39,8 @@ var defaultCorsHeaders = []string{ consts.XRequestWith, consts.Range, consts.UserAgent, + "Amz-Sdk-Request", + "Amz-Sdk-Invocation-Id", "X-Amz*", "x-amz*", "*", From 23b3caf7a07a7cbbc3523529203695416f0e07d5 Mon Sep 17 00:00:00 2001 From: Steve Date: Thu, 7 Sep 2023 20:58:43 +0800 Subject: [PATCH 096/139] fix: router option --- s3/routers/routers.go | 5 +++++ 
1 file changed, 5 insertions(+) diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 7dec74522..8801210c3 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -24,12 +24,17 @@ func (routers *Routers) Register() http.Handler { hs := routers.handlers root := mux.NewRouter() + root.Use( hs.Cors, hs.Log, hs.Sign, ) + root.Methods(http.MethodOptions).HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + bucket := root.PathPrefix("/{bucket}").Subrouter() // multipart object... From ac5145c22b8b1659b6de0bf182a734d747e5aeb7 Mon Sep 17 00:00:00 2001 From: Steve Date: Fri, 8 Sep 2023 02:13:19 +0800 Subject: [PATCH 097/139] feat: add delete objects handler --- cmd/btfs/daemon.go | 20 +++-- s3/handlers/handlers_middlewares.go | 8 +- s3/handlers/handlers_object.go | 128 +++++++++++++++------------ s3/handlers/proto.go | 1 + s3/requests/parsers.go | 15 +--- s3/requests/parsers_common.go | 4 +- s3/responses/responses_object.go | 1 - s3/routers/routers.go | 46 ++++++---- s3/server/server.go | 7 +- s3/services/object/service_object.go | 3 - 10 files changed, 127 insertions(+), 106 deletions(-) diff --git a/cmd/btfs/daemon.go b/cmd/btfs/daemon.go index b60347431..0fb94782e 100644 --- a/cmd/btfs/daemon.go +++ b/cmd/btfs/daemon.go @@ -717,21 +717,31 @@ If the user need to start multiple nodes on the same machine, the configuration functest(cfg.Services.OnlineServerDomain, cfg.Identity.PeerID, hValue) } - // init s3 providers + // Init s3 providers err = s3.InitProviders(statestore) if err != nil { return err } - // access-key init + // Init access-key accesskey.InitService(s3.GetProviders()) - // start s3-compatible-api server + // Start s3-compatible-api server s3OptEnable, s3Opt := req.Options[enableS3CompatibleAPIKwd].(bool) if s3OptEnable || (!s3Opt && cfg.S3CompatibleAPI.Enable) { s3Server := s3.NewServer(cfg.S3CompatibleAPI) - _ = s3Server.Start() - defer s3Server.Stop() + err = s3Server.Start() + if err != nil 
{ + fmt.Printf("S3-Compatible-API server: %v\n", err) + return + } + fmt.Printf("S3-Compatible-API server started, endpoint-url: http://%s\n", cfg.S3CompatibleAPI.Address) + defer func() { + err = s3Server.Stop() + if err != nil { + fmt.Printf("S3-Compatible-API server: %v\n", err) + } + }() } if SimpleMode == false { diff --git a/s3/handlers/handlers_middlewares.go b/s3/handlers/handlers_middlewares.go index f5c8da14f..1a7261019 100644 --- a/s3/handlers/handlers_middlewares.go +++ b/s3/handlers/handlers_middlewares.go @@ -9,6 +9,7 @@ import ( "github.com/bittorrent/go-btfs/s3/services/accesskey" rscors "github.com/rs/cors" "net/http" + "time" ) func (h *Handlers) Cors(handler http.Handler) http.Handler { @@ -34,10 +35,13 @@ func (h *Handlers) Cors(handler http.Handler) http.Handler { func (h *Handlers) Log(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Printf("[REQ] <%4s> | %s\n", r.Method, r.URL) + start := time.Now() + fmt.Printf("s3-api: [I] %s | <%-4s> | %s\n", start.Format(time.RFC3339), r.Method, r.URL) handler.ServeHTTP(w, r) hname, herr := cctx.GetHandleInf(r) - fmt.Printf("[RSP] <%4s> | %s | %s | %v\n", r.Method, r.URL, hname, herr) + end := time.Now() + ela := end.Sub(start) + fmt.Printf("s3-api: [O] %s | <%-4s> | %s | %s | %v | %s \n", end.Format(time.RFC3339), r.Method, r.URL, hname, herr, ela) }) } diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index 5850b6261..12697b447 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -3,6 +3,7 @@ package handlers import ( "encoding/base64" "errors" + "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/requests" @@ -222,63 +223,76 @@ func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { // DeleteObjectsHandler - delete objects // 
https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html -//func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) { -// ctx := r.Context() -// ack := cctx.GetAccessKey(r) -// var err error -// defer func() { -// cctx.SetHandleInf(r, h.name(), err) -// }() -// -// bucname, objname, err := requests.ParseBucketAndObject(r) -// if err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) -// return -// } -// if err := s3utils.CheckDelObjArgs(ctx, bucname, objname); err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// err = h.bucsvc.CheckACL(ack, bucname, action.DeleteObjectAction) -// if errors.Is(err, object.ErrBucketNotFound) { -// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) -// return -// } -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// // rlock bucket -// runlock, err := h.rlock(ctx, bucname, w, r) -// if err != nil { -// return -// } -// defer runlock() -// -// // lock object -// unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) -// if err != nil { -// return -// } -// defer unlock() -// -// //objsvc -// obj, err := h.objsvc.GetObjectInfo(ctx, bucname, objname) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// //objsvc -// err = h.objsvc.DeleteObject(ctx, bucname, objname) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// setPutObjHeaders(w, obj, true) -// responses.WriteSuccessNoContent(w) -//} +func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + var input s3.DeleteObjectsInput + + err = responses.ParseRequest(r, &input) + if err != nil { + rerr := h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + if input.Delete == nil || + 
len(input.Delete.Objects) == 0 || + len(input.Delete.Objects) > consts.MaxObjectList { + rerr := responses.ErrMalformedXML + err = rerr + responses.WriteErrorResponse(w, r, rerr) + return + } + + bucname := *input.Bucket + + _, err = h.objsvc.GetBucket(ctx, ack, bucname) + if err != nil { + rerr := h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + output := new(s3.DeleteObjectsOutput) + delObjs := make([]*s3.DeletedObject, 0) + delErrs := make([]*s3.Error, 0) + for _, obj := range input.Delete.Objects { + objname := *obj.Key + er := s3utils.CheckDelObjArgs(ctx, bucname, objname) + if er != nil { + rerr := h.respErr(er) + derr := new(s3.Error) + derr.SetCode(rerr.Code()) + derr.SetMessage(rerr.Description()) + derr.SetKey(objname) + delErrs = append(delErrs, derr) + continue + } + er = h.objsvc.DeleteObject(ctx, ack, bucname, objname) + if er != nil { + rerr := h.respErr(er) + derr := new(s3.Error) + derr.SetCode(rerr.Code()) + derr.SetMessage(rerr.Description()) + derr.SetKey(objname) + delErrs = append(delErrs, derr) + } else { + dobj := new(s3.DeletedObject) + dobj.SetKey(objname) + delObjs = append(delObjs, dobj) + } + } + + output.SetDeleted(delObjs) + output.SetErrors(delErrs) + + responses.WriteSuccessResponse(w, output, "DeleteResult") +} // GetObjectHandler - GET Object // https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html diff --git a/s3/handlers/proto.go b/s3/handlers/proto.go index 8f1066111..2fb6d94a1 100644 --- a/s3/handlers/proto.go +++ b/s3/handlers/proto.go @@ -26,6 +26,7 @@ type Handlerser interface { HeadObjectHandler(w http.ResponseWriter, r *http.Request) CopyObjectHandler(w http.ResponseWriter, r *http.Request) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) + DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) GetObjectHandler(w http.ResponseWriter, r *http.Request) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) ListObjectsHandler(w http.ResponseWriter, r *http.Request) 
diff --git a/s3/requests/parsers.go b/s3/requests/parsers.go index ff4a0f774..47b0f415c 100644 --- a/s3/requests/parsers.go +++ b/s3/requests/parsers.go @@ -1,8 +1,6 @@ package requests import ( - "fmt" - "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/responses" "net/http" @@ -97,15 +95,4 @@ func ParsePutBucketAclRequest(r *http.Request) (req *PutBucketACLRequest, rerr * } req.ACL, rerr = ParseBucketACL(r) return -} - -func ParsePutObjectRequest(r *http.Request) (req *s3.PutObjectInput, rerr *responses.Error) { - err := responses.ParseRequest(r, &req) - if err != nil { - rerr = responses.ErrInvalidRequestParameter - return - } - - fmt.Printf("%+v", *req) - return -} +} \ No newline at end of file diff --git a/s3/requests/parsers_common.go b/s3/requests/parsers_common.go index ad42c684e..345d48a38 100644 --- a/s3/requests/parsers_common.go +++ b/s3/requests/parsers_common.go @@ -23,7 +23,7 @@ func ParseBucketAndObject(r *http.Request) (bucket string, object string, rerr * } func ParseBucket(r *http.Request) (bucket string, rerr *responses.Error) { - bucket = mux.Vars(r)["bucket"] + bucket = mux.Vars(r)["Bucket"] err := s3utils.CheckValidBucketNameStrict(bucket) if err != nil { rerr = responses.ErrInvalidBucketName @@ -32,7 +32,7 @@ func ParseBucket(r *http.Request) (bucket string, rerr *responses.Error) { } func ParseObject(r *http.Request) (object string, rerr *responses.Error) { - object, err := unescapePath(mux.Vars(r)["object"]) + object, err := unescapePath(mux.Vars(r)["Object"]) if err != nil { rerr = responses.ErrInvalidRequestParameter } diff --git a/s3/responses/responses_object.go b/s3/responses/responses_object.go index 317995af3..83a21f770 100644 --- a/s3/responses/responses_object.go +++ b/s3/responses/responses_object.go @@ -38,7 +38,6 @@ func WriteCopyObjectResponse(w http.ResponseWriter, r *http.Request, obj *object func WriteDeleteObjectResponse(w http.ResponseWriter, r *http.Request, 
obj *object.Object) { output := new(s3.DeleteObjectOutput) - output.SetDeleteMarker(true) WriteSuccessResponse(w, output, "") } diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 8801210c3..c47eb952a 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -21,9 +21,10 @@ func NewRouters(handlers handlers.Handlerser, options ...Option) (routers *Route } func (routers *Routers) Register() http.Handler { + root := mux.NewRouter() + hs := routers.handlers - root := mux.NewRouter() root.Use( hs.Cors, @@ -31,11 +32,7 @@ func (routers *Routers) Register() http.Handler { hs.Sign, ) - root.Methods(http.MethodOptions).HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - }) - - bucket := root.PathPrefix("/{bucket}").Subrouter() + bucket := root.PathPrefix("/{Bucket}").Subrouter() // multipart object... // CreateMultipart @@ -47,40 +44,55 @@ func (routers *Routers) Register() http.Handler { //// AbortMultipart //bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") - ////object... 
// HeadObject - bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(hs.HeadObjectHandler) + bucket.Methods(http.MethodHead).Path("/{Object:.+}").HandlerFunc(hs.HeadObjectHandler) + // CopyObject - bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(hs.CopyObjectHandler) + bucket.Methods(http.MethodPut).Path("/{Object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(hs.CopyObjectHandler) + // PutObject - bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(hs.PutObjectHandler) + bucket.Methods(http.MethodPut).Path("/{Object:.+}").HandlerFunc(hs.PutObjectHandler) + // DeleteObject - bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.DeleteObjectHandler) + bucket.Methods(http.MethodDelete).Path("/{Object:.+}").HandlerFunc(hs.DeleteObjectHandler) + // GetObjectACL - bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectACLHandler).Queries("acl", "") - // GetObject - bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(hs.GetObjectHandler) + bucket.Methods(http.MethodGet).Path("/{Object:.+}").HandlerFunc(hs.GetObjectACLHandler).Queries("acl", "") - // todo DeleteObjects new - // bucket.Methods(http.MethodDelete).HandlerFunc(hs.DeleteObjectsHandler).Queries("delete", "") + // GetObject + bucket.Methods(http.MethodGet).Path("/{Object:.+}").HandlerFunc(hs.GetObjectHandler) - //bucket... 
// GetBucketACL bucket.Methods(http.MethodGet).HandlerFunc(hs.GetBucketAclHandler).Queries("acl", "") + // ListObjectsV2 bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsV2Handler).Queries("list-type", "2") + // ListObjects bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsHandler) + // PutBucketACL bucket.Methods(http.MethodPut).HandlerFunc(hs.PutBucketAclHandler).Queries("acl", "") + // CreateBucket bucket.Methods(http.MethodPut).HandlerFunc(hs.CreateBucketHandler) + // HeadBucket bucket.Methods(http.MethodHead).HandlerFunc(hs.HeadBucketHandler) + + // DeleteObjects + bucket.Methods(http.MethodPost).HandlerFunc(hs.DeleteObjectsHandler).Queries("delete", "") + // DeleteBucket bucket.Methods(http.MethodDelete).HandlerFunc(hs.DeleteBucketHandler) + // ListBuckets root.Methods(http.MethodGet).Path("/").HandlerFunc(hs.ListBucketsHandler) + // Options + root.Methods(http.MethodOptions).HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + return root } diff --git a/s3/server/server.go b/s3/server/server.go index 876a882a7..b4b4b69a9 100644 --- a/s3/server/server.go +++ b/s3/server/server.go @@ -3,7 +3,6 @@ package server import ( "context" "errors" - "fmt" "github.com/bittorrent/go-btfs/s3/routers" "net/http" "sync" @@ -54,9 +53,7 @@ func (s *Server) Start() (err error) { } go func() { - fmt.Printf("Start s3-compatible-api server, endpoint-url: http://%s\n", httpSvr.Addr) - lErr := httpSvr.ListenAndServe() - fmt.Printf("Stop s3-compatible-api server: %v\n", lErr) + _ = httpSvr.ListenAndServe() }() return @@ -66,7 +63,7 @@ func (s *Server) Stop() (err error) { s.mutex.Lock() defer s.mutex.Unlock() if s.shutdown == nil { - err = ErrServerStarted + err = ErrServerNotStarted return } err = s.shutdown() diff --git a/s3/services/object/service_object.go b/s3/services/object/service_object.go index 0286e713d..951672ddc 100644 --- a/s3/services/object/service_object.go +++ b/s3/services/object/service_object.go @@ -3,7 
+3,6 @@ package object import ( "context" "errors" - "fmt" "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/providers" @@ -518,8 +517,6 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi } } - fmt.Printf("%-18s | %10s\n", objname, commonPrefix) - // If collect not begin, check the marker, if it is matched // with the common prefix or object name, then begin collection from next iterate // and if common prefix matched, mark this common prefix as seen From 0410494f44c074959d71f4a5d9038112656ac192 Mon Sep 17 00:00:00 2001 From: Steve Date: Fri, 8 Sep 2023 17:24:39 +0800 Subject: [PATCH 098/139] refractor: multipart --- s3/handlers/handlers_multipart.go | 554 ++++++++++-------------- s3/handlers/handlers_object.go | 3 +- s3/handlers/proto.go | 10 +- s3/{responses => protocol}/request.go | 14 +- s3/{responses => protocol}/responses.go | 2 +- s3/responses/responses_bucket.go | 3 +- s3/responses/responses_common.go | 5 +- s3/routers/routers.go | 32 +- s3/services/object/proto.go | 10 +- s3/services/object/service_multipart.go | 38 +- s3/utils/if.go | 17 + 11 files changed, 332 insertions(+), 356 deletions(-) rename s3/{responses => protocol}/request.go (97%) rename s3/{responses => protocol}/responses.go (99%) create mode 100644 s3/utils/if.go diff --git a/s3/handlers/handlers_multipart.go b/s3/handlers/handlers_multipart.go index ecc3870de..168f767ee 100644 --- a/s3/handlers/handlers_multipart.go +++ b/s3/handlers/handlers_multipart.go @@ -1,318 +1,240 @@ package handlers -//func (h *Handlers) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { -// ctx := r.Context() -// ack := cctx.GetAccessKey(r) -// var err error -// defer func() { -// cctx.SetHandleInf(r, h.name(), err) -// }() -// -// bucname, objname, err := requests.ParseBucketAndObject(r) -// if err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) -// return 
-// } -// -// err = s3utils.CheckNewMultipartArgs(ctx, bucname, objname) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// meta, err := extractMetadata(ctx, r) -// if err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequest) -// return -// } -// -// // rlock bucket -// runlock, err := h.rlock(ctx, bucname, w, r) -// if err != nil { -// return -// } -// defer runlock() -// -// // lock object -// unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) -// if err != nil { -// return -// } -// defer unlock() -// -// err = h.bucsvc.CheckACL(ack, bucname, action.CreateMultipartUploadAction) -// if errors.Is(err, object.ErrBucketNotFound) { -// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) -// return -// } -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// mtp, err := h.objsvc.CreateMultipartUpload(ctx, bucname, objname, meta) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// responses.WriteCreateMultipartUploadResponse(w, r, bucname, objname, mtp.UploadID) -// -// return -//} -// -//func (h *Handlers) UploadPartHandler(w http.ResponseWriter, r *http.Request) { -// ctx := r.Context() -// ack := cctx.GetAccessKey(r) -// var err error -// defer func() { -// cctx.SetHandleInf(r, h.name(), err) -// }() -// -// // X-Amz-Copy-Source shouldn't be set for this call. 
-// if _, ok := r.Header[consts.AmzCopySource]; ok { -// err = errors.New("shouldn't be copy") -// responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) -// return -// } -// -// bucname, objname, err := requests.ParseBucketAndObject(r) -// if err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) -// return -// } -// -// err = s3utils.CheckPutObjectPartArgs(ctx, bucname, objname) -// if err != nil { // todo: convert error -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// uploadID := r.Form.Get(consts.UploadID) -// partIDString := r.Form.Get(consts.PartNumber) -// partID, err := strconv.Atoi(partIDString) -// if err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidPart) -// return -// } -// if partID > consts.MaxPartID { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidMaxParts) -// return -// } -// -// if r.ContentLength == 0 { -// responses.WriteErrorResponse(w, r, responses.ErrEntityTooSmall) -// return -// } -// -// if r.ContentLength > consts.MaxPartSize { -// responses.WriteErrorResponse(w, r, responses.ErrEntityTooLarge) -// return -// } -// -// hrdr, ok := r.Body.(*hash.Reader) -// if !ok { -// responses.WriteErrorResponse(w, r, responses.ErrInternalError) -// return -// } -// -// mtp, err := h.objsvc.GetMultipart(ctx, bucname, objname, uploadID) -// if errors.Is(err, object.ErrUploadNotFound) { -// responses.WriteErrorResponse(w, r, responses.ErrNoSuchUpload) -// return -// } -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// // rlock bucket -// runlock, err := h.rlock(ctx, bucname, w, r) -// if err != nil { -// return -// } -// defer runlock() -// -// // lock object -// unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) -// if err != nil { -// return -// } -// defer unlock() -// -// err = h.bucsvc.CheckACL(ack, bucname, action.PutObjectAction) -// if errors.Is(err, object.ErrBucketNotFound) { -// 
responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) -// return -// } -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// part, err := h.objsvc.UploadPart(ctx, bucname, objname, uploadID, partID, hrdr, r.ContentLength, mtp.MetaData) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// responses.WriteUploadPartResponse(w, r, part) -// -// return -//} -// -//func (h *Handlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { -// ctx := r.Context() -// ack := cctx.GetAccessKey(r) -// var err error -// defer func() { -// cctx.SetHandleInf(r, h.name(), err) -// }() -// -// bucname, objname, err := requests.ParseBucketAndObject(r) -// if err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) -// return -// } -// -// err = s3utils.CheckAbortMultipartArgs(ctx, bucname, objname) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// uploadID, _, _, _, rerr := h.getObjectResources(r.Form) -// if rerr != nil { -// err = rerr -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// // rlock bucket -// runlock, err := h.rlock(ctx, bucname, w, r) -// if err != nil { -// return -// } -// defer runlock() -// -// // rlock object -// unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) -// if err != nil { -// return -// } -// defer unlock() -// -// err = h.bucsvc.CheckACL(ack, bucname, action.AbortMultipartUploadAction) -// if errors.Is(err, object.ErrBucketNotFound) { -// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) -// return -// } -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// err = h.objsvc.AbortMultipartUpload(ctx, bucname, objname, uploadID) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// responses.WriteAbortMultipartUploadResponse(w, r) -// -// return -//} -// -//func (h *Handlers) 
CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { -// ctx := r.Context() -// ack := cctx.GetAccessKey(r) -// var err error -// defer func() { -// cctx.SetHandleInf(r, h.name(), err) -// }() -// -// bucname, objname, err := requests.ParseBucketAndObject(r) -// if err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidRequestParameter) -// return -// } -// -// err = s3utils.CheckCompleteMultipartArgs(ctx, bucname, objname) -// if err != nil { // todo: convert error -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// // Content-Length is required and should be non-zero -// if r.ContentLength <= 0 { -// responses.WriteErrorResponse(w, r, responses.ErrMissingContentLength) -// return -// } -// -// // Get upload id. -// uploadID, _, _, _, rerr := h.getObjectResources(r.Form) -// if rerr != nil { -// err = rerr -// responses.WriteErrorResponse(w, r, rerr) -// return -// } -// -// complMultipartUpload := &object.CompleteMultipartUpload{} -// if err = utils.XmlDecoder(r.Body, complMultipartUpload, r.ContentLength); err != nil { -// responses.WriteErrorResponse(w, r, responses.ErrMalformedXML) -// return -// } -// if len(complMultipartUpload.Parts) == 0 { -// responses.WriteErrorResponse(w, r, responses.ErrMalformedXML) -// return -// } -// if !sort.IsSorted(object.CompletedParts(complMultipartUpload.Parts)) { -// responses.WriteErrorResponse(w, r, responses.ErrInvalidPartOrder) -// return -// } -// -// // rlock bucket -// runlock, err := h.rlock(ctx, bucname, w, r) -// if err != nil { -// return -// } -// defer runlock() -// -// // rlock object -// unlock, err := h.lock(ctx, bucname+"/"+objname, w, r) -// if err != nil { -// return -// } -// defer unlock() -// -// err = h.bucsvc.CheckACL(ack, bucname, action.CompleteMultipartUploadAction) -// if errors.Is(err, object.ErrBucketNotFound) { -// responses.WriteErrorResponse(w, r, responses.ErrNoSuchBucket) -// return -// } -// if err != nil { -// 
responses.WriteErrorResponse(w, r, err) -// return -// } -// -// obj, err := h.objsvc.CompleteMultiPartUpload(ctx, bucname, objname, uploadID, complMultipartUpload.Parts) -// if errors.Is(err, object.ErrUploadNotFound) { -// rerr = responses.ErrNoSuchUpload -// return -// } -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// buc, err := h.bucsvc.GetBucketMeta(ctx, bucname) -// if err != nil { -// responses.WriteErrorResponse(w, r, err) -// return -// } -// -// responses.WriteCompleteMultipartUploadResponse(w, r, bucname, objname, buc.Region, obj) -// -// return -//} -// +import ( + "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/cctx" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/protocol" + "github.com/bittorrent/go-btfs/s3/responses" + "github.com/bittorrent/go-btfs/s3/s3utils" + "github.com/bittorrent/go-btfs/s3/services/object" + "github.com/bittorrent/go-btfs/s3/utils/hash" + "net/http" + "sort" +) + +func (h *Handlers) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + var input s3.CreateMultipartUploadInput + + err = protocol.ParseRequest(r, &input) + if err != nil { + rerr := responses.ErrBadRequest + responses.WriteErrorResponse(w, r, rerr) + return + } + + bucname, objname := *input.Bucket, *input.Key + + err = s3utils.CheckNewMultipartArgs(ctx, bucname, objname) + if err != nil { + rerr := h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + meta := input.Metadata + + mtp, err := h.objsvc.CreateMultipartUpload(ctx, ack, bucname, objname, meta) + if err != nil { + rerr := h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + output := new(s3.CreateMultipartUploadOutput) + output.SetBucket(bucname) + output.SetKey(objname) + output.SetUploadId(mtp.UploadID) + + 
responses.WriteSuccessResponse(w, output, "InitiateMultipartUploadResult") + + return +} + +func (h *Handlers) UploadPartHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + var input s3.UploadPartInput + + err = protocol.ParseRequest(r, &input) + if err != nil { + rerr := responses.ErrBadRequest + responses.WriteErrorResponse(w, r, rerr) + return + } + + bucname, objname := *input.Bucket, *input.Key + + err = s3utils.CheckPutObjectPartArgs(ctx, bucname, objname) + if err != nil { + rerr := h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + uploadId, partId := *input.UploadId, int(*input.PartNumber) + if partId > consts.MaxPartID { + responses.WriteErrorResponse(w, r, responses.ErrInvalidMaxParts) + return + } + + size := r.ContentLength + + if size == 0 { + responses.WriteErrorResponse(w, r, responses.ErrEntityTooSmall) + return + } + + if size > consts.MaxPartSize { + responses.WriteErrorResponse(w, r, responses.ErrEntityTooLarge) + return + } + + hrdr, ok := r.Body.(*hash.Reader) + if !ok { + responses.WriteErrorResponse(w, r, responses.ErrInternalError) + return + } + + part, err := h.objsvc.UploadPart(ctx, ack, bucname, objname, uploadId, partId, hrdr, size) + if err != nil { + rerr := h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + output := new(s3.UploadPartOutput) + output.SetETag(`"` + part.ETag + `"`) + w.Header().Set(consts.Cid, part.CID) + + responses.WriteSuccessResponse(w, output, "") + + return +} + +func (h *Handlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + var input s3.AbortMultipartUploadInput + + err = protocol.ParseRequest(r, &input) + if err != nil { + rerr := responses.ErrBadRequest + 
responses.WriteErrorResponse(w, r, rerr) + return + } + + bucname, objname := *input.Bucket, *input.Key + + err = s3utils.CheckAbortMultipartArgs(ctx, bucname, objname) + if err != nil { + rerr := h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + uploadId := *input.UploadId + + err = h.objsvc.AbortMultipartUpload(ctx, ack, bucname, objname, uploadId) + if err != nil { + rerr := h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + output := new(s3.AbortMultipartUploadOutput) + + responses.WriteSuccessResponse(w, output, "") + + return +} + +func (h *Handlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ack := cctx.GetAccessKey(r) + var err error + defer func() { + cctx.SetHandleInf(r, h.name(), err) + }() + + var input s3.CompleteMultipartUploadInput + + err = protocol.ParseRequest(r, &input) + if err != nil { + rerr := responses.ErrBadRequest + responses.WriteErrorResponse(w, r, rerr) + return + } + + bucname, objname := *input.Bucket, *input.Key + + err = s3utils.CheckCompleteMultipartArgs(ctx, bucname, objname) + if err != nil { + rerr := h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + // Content-Length is required and should be non-zero + if r.ContentLength <= 0 { + responses.WriteErrorResponse(w, r, responses.ErrMissingContentLength) + return + } + + if len(input.MultipartUpload.Parts) == 0 { + rerr := responses.ErrMalformedXML + err = rerr + responses.WriteErrorResponse(w, r, rerr) + return + } + + complUpload := new(object.CompleteMultipartUpload) + + for _, part := range input.MultipartUpload.Parts { + complUpload.Parts = append(complUpload.Parts, &object.CompletePart{ + PartNumber: int(*part.PartNumber), + ETag: *part.ETag, + }) + + } + + if !sort.IsSorted(object.CompletedParts(complUpload.Parts)) { + responses.WriteErrorResponse(w, r, responses.ErrInvalidPartOrder) + return + } + + uploadId := *input.UploadId + + obj, err := 
h.objsvc.CompleteMultiPartUpload(ctx, ack, bucname, objname, uploadId, complUpload.Parts) + if err != nil { + rerr := h.respErr(err) + responses.WriteErrorResponse(w, r, rerr) + return + } + + output := new(s3.CompleteMultipartUploadOutput) + output.SetBucket(bucname) + output.SetKey(objname) + output.SetETag(`"` + obj.ETag + `"`) + w.Header().Set(consts.Cid, obj.CID) + + responses.WriteSuccessResponse(w, output, "") +} + diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index 12697b447..3f28c65f6 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -6,6 +6,7 @@ import ( "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/protocol" "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/s3utils" @@ -233,7 +234,7 @@ func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) var input s3.DeleteObjectsInput - err = responses.ParseRequest(r, &input) + err = protocol.ParseRequest(r, &input) if err != nil { rerr := h.respErr(err) responses.WriteErrorResponse(w, r, rerr) diff --git a/s3/handlers/proto.go b/s3/handlers/proto.go index 2fb6d94a1..ba14be994 100644 --- a/s3/handlers/proto.go +++ b/s3/handlers/proto.go @@ -32,10 +32,10 @@ type Handlerser interface { ListObjectsHandler(w http.ResponseWriter, r *http.Request) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) - //// Multipart + // Multipart - //CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) - //UploadPartHandler(w http.ResponseWriter, r *http.Request) - //AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) - //CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) + CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) + UploadPartHandler(w http.ResponseWriter, r *http.Request) + 
AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) + CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) } diff --git a/s3/responses/request.go b/s3/protocol/request.go similarity index 97% rename from s3/responses/request.go rename to s3/protocol/request.go index e4f0393d6..fa0b2726b 100644 --- a/s3/responses/request.go +++ b/s3/protocol/request.go @@ -1,4 +1,4 @@ -package responses +package protocol import ( "encoding/base64" @@ -159,6 +159,7 @@ func parseLocation(r *http.Request, inv reflect.Value) (err error) { fv = fv.Convert(byteSliceType) } + switch ft.Tag.Get("location") { case "headers": prefix := ft.Tag.Get("locationName") @@ -172,6 +173,17 @@ func parseLocation(r *http.Request, inv reflect.Value) (err error) { case "querystring": err = parseQueryString(query, fv, name, ft.Tag) } + + if err != nil { + return + } + + required := ft.Tag.Get("required") == "true" + + if required && !reflect.Indirect(fv).IsValid() { + err = fmt.Errorf("field %s is required", ft.Name) + return + } } return diff --git a/s3/responses/responses.go b/s3/protocol/responses.go similarity index 99% rename from s3/responses/responses.go rename to s3/protocol/responses.go index f24eb01be..d30de8c48 100644 --- a/s3/responses/responses.go +++ b/s3/protocol/responses.go @@ -1,4 +1,4 @@ -package responses +package protocol import ( "bytes" diff --git a/s3/responses/responses_bucket.go b/s3/responses/responses_bucket.go index 33da3ff6b..58bf32503 100644 --- a/s3/responses/responses_bucket.go +++ b/s3/responses/responses_bucket.go @@ -2,6 +2,7 @@ package responses import ( "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/protocol" "github.com/bittorrent/go-btfs/s3/services/object" "net/http" ) @@ -20,7 +21,7 @@ func WriteHeadBucketResponse(w http.ResponseWriter, r *http.Request) { func WriteDeleteBucketResponse(w http.ResponseWriter) { output := new(s3.DeleteBucketOutput) - _ = WriteResponse(w, http.StatusOK, output, "") + _ = 
protocol.WriteResponse(w, http.StatusOK, output, "") return } diff --git a/s3/responses/responses_common.go b/s3/responses/responses_common.go index 5d4a785c3..f978db3fd 100644 --- a/s3/responses/responses_common.go +++ b/s3/responses/responses_common.go @@ -3,6 +3,7 @@ package responses import ( "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/protocol" "net/http" "path" ) @@ -39,11 +40,11 @@ func NewErrOutput(r *http.Request, rerr *Error) *ErrorOutput { func WriteErrorResponse(w http.ResponseWriter, r *http.Request, rerr *Error) { output := NewErrOutput(r, rerr) - _ = WriteResponse(w, rerr.HTTPStatusCode(), output, "Error") + _ = protocol.WriteResponse(w, rerr.HTTPStatusCode(), output, "Error") } func WriteSuccessResponse(w http.ResponseWriter, output interface{}, locationName string) { - _ = WriteResponse(w, http.StatusOK, output, locationName) + _ = protocol.WriteResponse(w, http.StatusOK, output, locationName) } func setPutObjHeaders(w http.ResponseWriter, etag, cid string, delete bool) { diff --git a/s3/routers/routers.go b/s3/routers/routers.go index c47eb952a..51f3382b7 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -34,33 +34,35 @@ func (routers *Routers) Register() http.Handler { bucket := root.PathPrefix("/{Bucket}").Subrouter() - // multipart object... 
+ // HeadObject + bucket.Methods(http.MethodHead).Path("/{Key:.+}").HandlerFunc(hs.HeadObjectHandler) + // CreateMultipart - //bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(hs.CreateMultipartUploadHandler).Queries("uploads", "") - //// UploadPart - //bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(hs.UploadPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") - //// CompleteMultipartUpload - //bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(hs.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") - //// AbortMultipart - //bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(hs.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + bucket.Methods(http.MethodPost).Path("/{Key:.+}").HandlerFunc(hs.CreateMultipartUploadHandler).Queries("uploads", "") - // HeadObject - bucket.Methods(http.MethodHead).Path("/{Object:.+}").HandlerFunc(hs.HeadObjectHandler) + // CompleteMultipartUpload + bucket.Methods(http.MethodPost).Path("/{Key:.+}").HandlerFunc(hs.CompleteMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") + + // UploadPart + bucket.Methods(http.MethodPut).Path("/{Key:.+}").HandlerFunc(hs.UploadPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // CopyObject - bucket.Methods(http.MethodPut).Path("/{Object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(hs.CopyObjectHandler) + bucket.Methods(http.MethodPut).Path("/{Key:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(hs.CopyObjectHandler) // PutObject - bucket.Methods(http.MethodPut).Path("/{Object:.+}").HandlerFunc(hs.PutObjectHandler) + bucket.Methods(http.MethodPut).Path("/{Key:.+}").HandlerFunc(hs.PutObjectHandler) + + // AbortMultipart + bucket.Methods(http.MethodDelete).Path("/{Key:.+}").HandlerFunc(hs.AbortMultipartUploadHandler).Queries("uploadId", "{uploadId:.*}") // DeleteObject - 
bucket.Methods(http.MethodDelete).Path("/{Object:.+}").HandlerFunc(hs.DeleteObjectHandler) + bucket.Methods(http.MethodDelete).Path("/{Key:.+}").HandlerFunc(hs.DeleteObjectHandler) // GetObjectACL - bucket.Methods(http.MethodGet).Path("/{Object:.+}").HandlerFunc(hs.GetObjectACLHandler).Queries("acl", "") + bucket.Methods(http.MethodGet).Path("/{Key:.+}").HandlerFunc(hs.GetObjectACLHandler).Queries("acl", "") // GetObject - bucket.Methods(http.MethodGet).Path("/{Object:.+}").HandlerFunc(hs.GetObjectHandler) + bucket.Methods(http.MethodGet).Path("/{Key:.+}").HandlerFunc(hs.GetObjectHandler) // GetBucketACL bucket.Methods(http.MethodGet).HandlerFunc(hs.GetBucketAclHandler).Queries("acl", "") diff --git a/s3/services/object/proto.go b/s3/services/object/proto.go index cf8a31c3b..6eeddf2c9 100644 --- a/s3/services/object/proto.go +++ b/s3/services/object/proto.go @@ -36,8 +36,8 @@ type Service interface { ListObjects(ctx context.Context, user, bucname, prefix, delimiter, marker string, max int64) (list *ObjectsList, err error) ListObjectsV2(ctx context.Context, user string, bucket string, prefix string, token, delimiter string, max int64, owner bool, after string) (list *ObjectsListV2, err error) - CreateMultipartUpload(ctx context.Context, user, bucname, objname string, meta map[string]string) (multipart *Multipart, err error) - UploadPart(ctx context.Context, user, bucname, objname, uplid string, partId int, reader *hash.Reader, size int64, meta map[string]string) (part *Part, err error) + CreateMultipartUpload(ctx context.Context, user, bucname, objname string, meta map[string]*string) (multipart *Multipart, err error) + UploadPart(ctx context.Context, user, bucname, objname, uplid string, partId int, reader *hash.Reader, size int64) (part *Part, err error) AbortMultipartUpload(ctx context.Context, user, bucname, objname, uplid string) (err error) CompleteMultiPartUpload(ctx context.Context, user, bucname, objname, uplid string, parts []*CompletePart) (object 
*Object, err error) } @@ -75,7 +75,7 @@ type Multipart struct { Object string UploadID string Initiated time.Time - MetaData map[string]string + MetaData map[string]*string Parts []*Part } @@ -111,12 +111,12 @@ type CompletePart struct { ChecksumSHA256 string } -type CompletedParts []CompletePart +type CompletedParts []*CompletePart func (a CompletedParts) Len() int { return len(a) } func (a CompletedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a CompletedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } type CompleteMultipartUpload struct { - Parts []CompletePart `xml:"Part"` + Parts []*CompletePart `xml:"Part"` } diff --git a/s3/services/object/service_multipart.go b/s3/services/object/service_multipart.go index f7d4ddbe4..851bc1cca 100644 --- a/s3/services/object/service_multipart.go +++ b/s3/services/object/service_multipart.go @@ -20,7 +20,7 @@ import ( ) // CreateMultipartUpload create user specified multipart upload -func (s *service) CreateMultipartUpload(ctx context.Context, user, bucname, objname string, meta map[string]string) (multipart *Multipart, err error) { +func (s *service) CreateMultipartUpload(ctx context.Context, user, bucname, objname string, meta map[string]*string) (multipart *Multipart, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() @@ -81,7 +81,7 @@ func (s *service) CreateMultipartUpload(ctx context.Context, user, bucname, objn } // UploadPart upload user specified multipart part -func (s *service) UploadPart(ctx context.Context, user, bucname, objname, uplid string, partId int, body *hash.Reader, size int64, meta map[string]string) (part *Part, err error) { +func (s *service) UploadPart(ctx context.Context, user, bucname, objname, uplid string, partId int, body *hash.Reader, size int64) (part *Part, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() @@ -405,27 +405,47 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob 
} }() + now := time.Now().UTC() + // Object object = &Object{ Bucket: bucname, Name: objname, - ModTime: time.Now().UTC(), + ModTime: now, Size: size, IsDir: false, ETag: s.computeMultipartMD5(parts), CID: cid, + ACL: "", VersionID: "", IsLatest: true, DeleteMarker: false, - ContentType: multipart.MetaData[strings.ToLower(consts.ContentType)], - ContentEncoding: multipart.MetaData[strings.ToLower(consts.ContentEncoding)], - SuccessorModTime: time.Now().UTC(), + ContentType: "", + ContentEncoding: "", + Expires: time.Time{}, + AccTime: time.Time{}, + SuccessorModTime: now, + } + + // Set object content type + ctyp := multipart.MetaData[strings.ToLower(consts.ContentType)] + if ctyp != nil { + object.ContentType = *ctyp + } + + // Set object content encoding + cecd := multipart.MetaData[strings.ToLower(consts.ContentEncoding)] + if cecd != nil { + object.ContentEncoding = *cecd } // Set object expires - exp, e := time.Parse(http.TimeFormat, multipart.MetaData[strings.ToLower(consts.Expires)]) - if e == nil { - object.Expires = exp.UTC() + cexp := multipart.MetaData[strings.ToLower(consts.Expires)] + if cexp != nil { + exp, e := time.Parse(http.TimeFormat, *cexp) + if e != nil { + object.Expires = exp.UTC() + } } // Put object diff --git a/s3/utils/if.go b/s3/utils/if.go new file mode 100644 index 000000000..3021d1346 --- /dev/null +++ b/s3/utils/if.go @@ -0,0 +1,17 @@ +package utils + +func IfEmpty(a, b string) (c string) { + c = a + if a == "" { + c = b + } + return +} + +func IfZero(a, b int) (c int) { + c = a + if a == 0 { + c = b + } + return +} From 4d3d6b7fbd3db1ef141a1d5620c10854754bb030 Mon Sep 17 00:00:00 2001 From: Steve Date: Sat, 9 Sep 2023 01:48:48 +0800 Subject: [PATCH 099/139] ref: multipart --- s3/handlers/handlers_multipart.go | 30 ++++++++++++++++--------- s3/providers/btfs_api.go | 6 +---- s3/requests/parsers_common.go | 2 +- s3/routers/routers.go | 2 +- s3/services/object/proto.go | 1 - s3/services/object/service_multipart.go | 5 ++--- 
s3/services/object/service_object.go | 15 +++++++++---- 7 files changed, 36 insertions(+), 25 deletions(-) diff --git a/s3/handlers/handlers_multipart.go b/s3/handlers/handlers_multipart.go index 168f767ee..a3c67bd93 100644 --- a/s3/handlers/handlers_multipart.go +++ b/s3/handlers/handlers_multipart.go @@ -86,25 +86,33 @@ func (h *Handlers) UploadPartHandler(w http.ResponseWriter, r *http.Request) { uploadId, partId := *input.UploadId, int(*input.PartNumber) if partId > consts.MaxPartID { - responses.WriteErrorResponse(w, r, responses.ErrInvalidMaxParts) + rerr := responses.ErrInvalidMaxParts + err = rerr + responses.WriteErrorResponse(w, r, rerr) return } size := r.ContentLength - if size == 0 { - responses.WriteErrorResponse(w, r, responses.ErrEntityTooSmall) + if size <= 0 { + rerr := responses.ErrEntityTooSmall + err = rerr + responses.WriteErrorResponse(w, r, rerr) return } if size > consts.MaxPartSize { - responses.WriteErrorResponse(w, r, responses.ErrEntityTooLarge) + rerr := responses.ErrEntityTooLarge + err = rerr + responses.WriteErrorResponse(w, r, rerr) return } hrdr, ok := r.Body.(*hash.Reader) if !ok { - responses.WriteErrorResponse(w, r, responses.ErrInternalError) + rerr := responses.ErrInternalError + err = rerr + responses.WriteErrorResponse(w, r, rerr) return } @@ -192,9 +200,10 @@ func (h *Handlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http return } - // Content-Length is required and should be non-zero if r.ContentLength <= 0 { - responses.WriteErrorResponse(w, r, responses.ErrMissingContentLength) + rerr := responses.ErrMissingContentLength + err = rerr + responses.WriteErrorResponse(w, r, rerr) return } @@ -212,11 +221,12 @@ func (h *Handlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http PartNumber: int(*part.PartNumber), ETag: *part.ETag, }) - } if !sort.IsSorted(object.CompletedParts(complUpload.Parts)) { - responses.WriteErrorResponse(w, r, responses.ErrInvalidPartOrder) + rerr := 
responses.ErrInvalidPartOrder + err = rerr + responses.WriteErrorResponse(w, r, rerr) return } @@ -235,6 +245,6 @@ func (h *Handlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http output.SetETag(`"` + obj.ETag + `"`) w.Header().Set(consts.Cid, obj.CID) - responses.WriteSuccessResponse(w, output, "") + responses.WriteSuccessResponse(w, output, "CompleteMultipartUploadResult") } diff --git a/s3/providers/btfs_api.go b/s3/providers/btfs_api.go index 51885c865..66f8d55ee 100644 --- a/s3/providers/btfs_api.go +++ b/s3/providers/btfs_api.go @@ -1,7 +1,6 @@ package providers import ( - "errors" shell "github.com/bittorrent/go-btfs-api" "github.com/mitchellh/go-homedir" "io" @@ -60,10 +59,7 @@ func (api *BtfsAPI) Store(r io.Reader) (id string, err error) { } func (api *BtfsAPI) Remove(id string) (err error) { - ok := api.shell.Remove(id) - if !ok { - err = errors.New("not removed") - } + err = api.shell.Unpin(id) return } diff --git a/s3/requests/parsers_common.go b/s3/requests/parsers_common.go index 345d48a38..89ca3655b 100644 --- a/s3/requests/parsers_common.go +++ b/s3/requests/parsers_common.go @@ -32,7 +32,7 @@ func ParseBucket(r *http.Request) (bucket string, rerr *responses.Error) { } func ParseObject(r *http.Request) (object string, rerr *responses.Error) { - object, err := unescapePath(mux.Vars(r)["Object"]) + object, err := unescapePath(mux.Vars(r)["Key"]) if err != nil { rerr = responses.ErrInvalidRequestParameter } diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 51f3382b7..94f4687ca 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -89,7 +89,7 @@ func (routers *Routers) Register() http.Handler { bucket.Methods(http.MethodDelete).HandlerFunc(hs.DeleteBucketHandler) // ListBuckets - root.Methods(http.MethodGet).Path("/").HandlerFunc(hs.ListBucketsHandler) + root.Methods(http.MethodGet).HandlerFunc(hs.ListBucketsHandler) // Options root.Methods(http.MethodOptions).HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { diff --git a/s3/services/object/proto.go b/s3/services/object/proto.go index 6eeddf2c9..949b50369 100644 --- a/s3/services/object/proto.go +++ b/s3/services/object/proto.go @@ -32,7 +32,6 @@ type Service interface { CopyObject(ctx context.Context, user, srcBucname, srcObjname, dstBucname, dstObjname string, meta map[string]string) (dstObject *Object, err error) GetObject(ctx context.Context, user, bucname, objname string, withBody bool) (object *Object, body io.ReadCloser, err error) DeleteObject(ctx context.Context, user, bucname, objname string) (err error) - // todo: DeleteObjects ListObjects(ctx context.Context, user, bucname, prefix, delimiter, marker string, max int64) (list *ObjectsList, err error) ListObjectsV2(ctx context.Context, user string, bucket string, prefix string, token, delimiter string, max int64, owner bool, after string) (list *ObjectsListV2, err error) diff --git a/s3/services/object/service_multipart.go b/s3/services/object/service_multipart.go index 851bc1cca..1ee203baf 100644 --- a/s3/services/object/service_multipart.go +++ b/s3/services/object/service_multipart.go @@ -317,8 +317,7 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob // All parts body readers var readers []io.Reader - // Try to close all parts body readers, because some or all of - // these body may not be used + // Try to close all parts body readers defer func() { for _, rdr := range readers { _ = rdr.(io.ReadCloser).Close() @@ -362,7 +361,7 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob } // All parts except the last part has to be at least 5MB. 
- if (i < len(parts)-1) && !(gotPart.Size >= consts.MinPartSize) { + if (i < len(parts)-1) && !(gotPart.Size >= 0) { err = s3utils.PartTooSmall{ PartNumber: part.PartNumber, PartSize: gotPart.Size, diff --git a/s3/services/object/service_object.go b/s3/services/object/service_object.go index 951672ddc..a7ff85ef1 100644 --- a/s3/services/object/service_object.go +++ b/s3/services/object/service_object.go @@ -3,6 +3,7 @@ package object import ( "context" "errors" + "fmt" "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/providers" @@ -671,7 +672,15 @@ func (s *service) storeBody(ctx context.Context, body io.Reader, toKey string) ( return } -func (s *service) removeBody(ctx context.Context, cid, toKey string) (err error) { +func (s *service) removeBody(ctx context.Context, cid, tokey string) (err error) { + // Flag to mark cid be referenced by other object + otherRef := false + + // Log removing + defer func() { + fmt.Printf("remove <%s>, ref <%s>, refered - %v, err: %v\n", cid, tokey, otherRef, err) + }() + // Lock all cid refs to enable new cid reference can not be added when // remove is executing err = s.lock.Lock(ctx, s.cidrefSpace) @@ -681,7 +690,7 @@ func (s *service) removeBody(ctx context.Context, cid, toKey string) (err error) defer s.lock.Unlock(s.cidrefSpace) // This object cid reference key - crfKey := s.getCidrefKey(cid, toKey) + crfKey := s.getCidrefKey(cid, tokey) // Delete cid ref of this object err = s.providers.StateStore().Delete(crfKey) @@ -692,8 +701,6 @@ func (s *service) removeBody(ctx context.Context, cid, toKey string) (err error) // All this cid references prefix allRefsPrefix := s.getAllCidrefsKeyPrefix(cid) - // Flag to mark cid be referenced by other object - otherRef := false // Iterate all this cid refs, if exists other object's ref, set // the otherRef mark to true From df7cb3d0d7e49454352398756b11fcbb899b6a83 Mon Sep 17 00:00:00 2001 From: Steve Date: Sat, 9 Sep 2023 
03:13:19 +0800 Subject: [PATCH 100/139] fix: multipart etag calculation --- s3/handlers/handlers_bucket.go | 2 + s3/handlers/handlers_multipart.go | 1 - s3/handlers/handlers_object.go | 2 +- s3/protocol/request.go | 1 - s3/requests/parsers.go | 2 +- s3/routers/routers.go | 1 - s3/services/object/proto.go | 15 ++- s3/services/object/service.go | 90 ++++++++++++++++- s3/services/object/service_bucket.go | 53 ++-------- s3/services/object/service_multipart.go | 48 +++------ s3/services/object/service_object.go | 123 ++---------------------- 11 files changed, 130 insertions(+), 208 deletions(-) diff --git a/s3/handlers/handlers_bucket.go b/s3/handlers/handlers_bucket.go index 8f26b417e..cdb3e2bad 100644 --- a/s3/handlers/handlers_bucket.go +++ b/s3/handlers/handlers_bucket.go @@ -16,6 +16,8 @@ func (h *Handlers) respErr(err error) (rerr *responses.Error) { switch err { case object.ErrBucketNotFound: rerr = responses.ErrNoSuchBucket + case object.ErrBucketeNotEmpty: + rerr = responses.ErrBucketNotEmpty case object.ErrObjectNotFound: rerr = responses.ErrNoSuchKey case object.ErrUploadNotFound: diff --git a/s3/handlers/handlers_multipart.go b/s3/handlers/handlers_multipart.go index a3c67bd93..efafc1299 100644 --- a/s3/handlers/handlers_multipart.go +++ b/s3/handlers/handlers_multipart.go @@ -247,4 +247,3 @@ func (h *Handlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http responses.WriteSuccessResponse(w, output, "CompleteMultipartUploadResult") } - diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index 3f28c65f6..f3ad3d3d5 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -243,7 +243,7 @@ func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) if input.Delete == nil || len(input.Delete.Objects) == 0 || - len(input.Delete.Objects) > consts.MaxObjectList { + len(input.Delete.Objects) > consts.MaxDeleteList { rerr := responses.ErrMalformedXML err = rerr 
responses.WriteErrorResponse(w, r, rerr) diff --git a/s3/protocol/request.go b/s3/protocol/request.go index fa0b2726b..031ef9a0b 100644 --- a/s3/protocol/request.go +++ b/s3/protocol/request.go @@ -159,7 +159,6 @@ func parseLocation(r *http.Request, inv reflect.Value) (err error) { fv = fv.Convert(byteSliceType) } - switch ft.Tag.Get("location") { case "headers": prefix := ft.Tag.Get("locationName") diff --git a/s3/requests/parsers.go b/s3/requests/parsers.go index 47b0f415c..d53e4af6a 100644 --- a/s3/requests/parsers.go +++ b/s3/requests/parsers.go @@ -95,4 +95,4 @@ func ParsePutBucketAclRequest(r *http.Request) (req *PutBucketACLRequest, rerr * } req.ACL, rerr = ParseBucketACL(r) return -} \ No newline at end of file +} diff --git a/s3/routers/routers.go b/s3/routers/routers.go index 94f4687ca..98ed85f41 100644 --- a/s3/routers/routers.go +++ b/s3/routers/routers.go @@ -25,7 +25,6 @@ func (routers *Routers) Register() http.Handler { hs := routers.handlers - root.Use( hs.Cors, hs.Log, diff --git a/s3/services/object/proto.go b/s3/services/object/proto.go index 949b50369..0d8ff5eaf 100644 --- a/s3/services/object/proto.go +++ b/s3/services/object/proto.go @@ -9,14 +9,12 @@ import ( ) var ( - ErrBucketNotFound = errors.New("bucket not found") - ErrObjectNotFound = errors.New("object not found") - ErrUploadNotFound = errors.New("upload not found") - ErrNotAllowed = errors.New("not allowed") - ErrBucketAlreadyExists = errors.New("bucket already exists") - ErrOperationTimeout = errors.New("operation timeout") - ErrContentSHA256Mismatch = errors.New("sha256 mismatch") - ErrBadDigest = errors.New("bad digest") + ErrBucketNotFound = errors.New("bucket not found") + ErrBucketeNotEmpty = errors.New("bucket not empty") + ErrObjectNotFound = errors.New("object not found") + ErrUploadNotFound = errors.New("upload not found") + ErrNotAllowed = errors.New("not allowed") + ErrBucketAlreadyExists = errors.New("bucket already exists") ) type Service interface { @@ -26,7 +24,6 @@ 
type Service interface { GetAllBuckets(ctx context.Context, user string) (list []*Bucket, err error) PutBucketACL(ctx context.Context, user, bucname, acl string) (err error) GetBucketACL(ctx context.Context, user, bucname string) (acl string, err error) - EmptyBucket(ctx context.Context, user, bucname string) (empty bool, err error) PutObject(ctx context.Context, user, bucname, objname string, body *hash.Reader, size int64, meta map[string]string) (object *Object, err error) CopyObject(ctx context.Context, user, srcBucname, srcObjname, dstBucname, dstObjname string, meta map[string]string) (dstObject *Object, err error) diff --git a/s3/services/object/service.go b/s3/services/object/service.go index 4c503a67c..be7b38c36 100644 --- a/s3/services/object/service.go +++ b/s3/services/object/service.go @@ -6,6 +6,7 @@ import ( "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/ctxmu" "github.com/bittorrent/go-btfs/s3/policy" + "io" "strings" "time" @@ -14,7 +15,6 @@ import ( var _ Service = (*service)(nil) -// service captures all bucket metadata for a given cluster. 
type service struct { providers providers.Providerser lock ctxmu.MultiCtxRWLocker @@ -102,3 +102,91 @@ func (s *service) checkACL(owner, acl, user string, act action.Action) (allow bo allow = policy.IsAllowed(own, acl, act) return } + +func (s *service) addBodyRef(ctx context.Context, cid, tokey string) (err error) { + // Cid reference key + crfkey := s.getCidrefKey(cid, tokey) + + // Add cid reference + err = s.providers.StateStore().Put(crfkey, nil) + + return +} + +func (s *service) removeBodyRef(ctx context.Context, cid, tokey string) (err error) { + // This object cid reference key + crfkey := s.getCidrefKey(cid, tokey) + + // Delete cid ref of this object + err = s.providers.StateStore().Delete(crfkey) + + return +} + +func (s *service) storeBody(ctx context.Context, body io.Reader, tokey string) (cid string, err error) { + // RLock all cid refs to enable no cid will be deleted + err = s.lock.RLock(ctx, s.cidrefSpace) + if err != nil { + return + } + defer s.lock.RUnlock(s.cidrefSpace) + + // Store body and get the cid + cid, err = s.providers.FileStore().Store(body) + if err != nil { + return + } + + // Add cid reference + err = s.addBodyRef(ctx, cid, tokey) + + return +} + +func (s *service) removeBody(ctx context.Context, cid, tokey string) (err error) { + // Flag to mark cid be referenced by other object + otherRef := false + + // Log removing + defer func() { + fmt.Printf("s3-api: remove <%s>, ref <%s>, other-ref - %v, err: %v\n", cid, tokey, otherRef, err) + }() + + // Lock all cid refs to enable new cid reference can not be added when + // remove is executing + err = s.lock.Lock(ctx, s.cidrefSpace) + if err != nil { + return + } + defer s.lock.Unlock(s.cidrefSpace) + + // Remove cid ref of this object + err = s.removeBodyRef(ctx, cid, tokey) + if err != nil { + return + } + + // All this cid references prefix + allRefsPrefix := s.getAllCidrefsKeyPrefix(cid) + + // Iterate all this cid refs, if exists other object's ref, set + // the otherRef mark to 
true + err = s.providers.StateStore().Iterate(allRefsPrefix, func(key, _ []byte) (stop bool, err error) { + otherRef = true + stop = true + return + }) + if err != nil { + return + } + + // Exists other refs, cid body can not be removed + if otherRef { + return + } + + // No other refs to this cid, remove it + err = s.providers.FileStore().Remove(cid) + + return +} diff --git a/s3/services/object/service_bucket.go b/s3/services/object/service_bucket.go index 078e4b6f8..ff347d258 100644 --- a/s3/services/object/service_bucket.go +++ b/s3/services/object/service_bucket.go @@ -126,23 +126,18 @@ func (s *service) DeleteBucket(ctx context.Context, user, bucname string) (err e return } - // Delete bucket - err = s.providers.StateStore().Delete(buckey) + // Check if bucket is empty + empty, err := s.isBucketEmpty(bucname) if err != nil { return } + if !empty { + err = ErrBucketeNotEmpty + return + } - // All bucket objects prefix - objectsPrefix := s.getAllObjectsKeyPrefix(bucname) - - // Try to delete all bucket objects - _ = s.deleteObjectsByPrefix(ctx, objectsPrefix) - - // All bucket uploads prefix - uploadsPrefix := s.getAllUploadsKeyPrefix(bucname) - - // Try to delete all bucket uploads - _ = s.deleteUploadsByPrefix(ctx, uploadsPrefix) + // Delete bucket + err = s.providers.StateStore().Delete(buckey) return } @@ -278,37 +273,7 @@ func (s *service) GetBucketACL(ctx context.Context, user, bucname string) (acl s } // EmptyBucket check if the user specified bucked is empty -func (s *service) EmptyBucket(ctx context.Context, user, bucname string) (empty bool, err error) { - ctx, cancel := s.opctx(ctx) - defer cancel() - - // Bucket key - buckey := s.getBucketKey(bucname) - - // RLock bucket - err = s.lock.RLock(ctx, buckey) - if err != nil { - return - } - defer s.lock.RUnlock(buckey) - - // Get bucket - bucket, err := s.getBucket(buckey) - if err != nil { - return - } - if bucket == nil { - err = ErrBucketNotFound - return - } - - // Check action ACL - allow := 
s.checkACL(bucket.Owner, bucket.ACL, user, action.ListObjectsAction) - if !allow { - err = ErrNotAllowed - return - } - +func (s *service) isBucketEmpty(bucname string) (empty bool, err error) { // All bucket objects prefix objectsPrefix := s.getAllObjectsKeyPrefix(bucname) diff --git a/s3/services/object/service_multipart.go b/s3/services/object/service_multipart.go index 1ee203baf..3e0533558 100644 --- a/s3/services/object/service_multipart.go +++ b/s3/services/object/service_multipart.go @@ -2,9 +2,7 @@ package object import ( "context" - "encoding/hex" "errors" - "fmt" "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/etag" @@ -404,6 +402,13 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob } }() + // Calculate multipart etag + multiEtag, err := s.calcMultiETag(parts) + if err != nil { + return + } + + // Current time now := time.Now().UTC() // Object @@ -413,7 +418,7 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob ModTime: now, Size: size, IsDir: false, - ETag: s.computeMultipartMD5(parts), + ETag: multiEtag.String(), CID: cid, ACL: "", VersionID: "", @@ -500,39 +505,16 @@ func (s *service) canonicalizeETag(etag string) string { return etagRegex.ReplaceAllString(etag, "$1") } -func (s *service) computeMultipartMD5(parts []*CompletePart) (md5 string) { - var finalMD5Bytes []byte +func (s *service) calcMultiETag(parts []*CompletePart) (multiEtag etag.ETag, err error) { + var completeETags []etag.ETag for _, part := range parts { - md5Bytes, err := hex.DecodeString(s.canonicalizeETag(part.ETag)) + var etg etag.ETag + etg, err = etag.Parse(part.ETag) if err != nil { - finalMD5Bytes = append(finalMD5Bytes, []byte(part.ETag)...) - } else { - finalMD5Bytes = append(finalMD5Bytes, md5Bytes...) 
- } - } - md5 = fmt.Sprintf("%s-%d", etag.Multipart(finalMD5Bytes), len(parts)) - return -} - -// deleteUploadsByPrefix try to delete all multipart uploads with the specified common prefix -func (s *service) deleteUploadsByPrefix(ctx context.Context, uploadsPrefix string) (err error) { - err = s.providers.StateStore().Iterate(uploadsPrefix, func(key, _ []byte) (stop bool, er error) { - uplkey := string(key) - var multipart *Multipart - er = s.providers.StateStore().Get(uplkey, multipart) - if er != nil { - return - } - er = s.providers.StateStore().Delete(uplkey) - if er != nil { return } - for i, part := range multipart.Parts { - prtkey := s.getUploadPartKey(uplkey, i) - _ = s.removeBody(ctx, part.CID, prtkey) - } - return - }) - + completeETags = append(completeETags, etg) + } + multiEtag = etag.Multipart(completeETags...) return } diff --git a/s3/services/object/service_object.go b/s3/services/object/service_object.go index a7ff85ef1..2218a7ca4 100644 --- a/s3/services/object/service_object.go +++ b/s3/services/object/service_object.go @@ -3,7 +3,6 @@ package object import ( "context" "errors" - "fmt" "github.com/bittorrent/go-btfs/s3/action" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/providers" @@ -479,8 +478,15 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi return } + // Object list list = &ObjectsList{} + // MaxKeys is zero + if max == 0 { + list.IsTruncated = true + return + } + // All bucket objects key prefix allObjectsKeyPrefix := s.getAllObjectsKeyPrefix(bucname) @@ -608,118 +614,3 @@ func (s *service) getObject(objkey string) (object *Object, err error) { } return } - -// deleteObjectsByPrefix try to delete all objects with the specified common prefix -func (s *service) deleteObjectsByPrefix(ctx context.Context, objectsPrefix string) (err error) { - err = s.providers.StateStore().Iterate(objectsPrefix, func(key, _ []byte) (stop bool, er error) { - objkey := string(key) - var object 
*Object - er = s.providers.StateStore().Get(objkey, object) - if er != nil { - return - } - er = s.providers.StateStore().Delete(objkey) - if er != nil { - return - } - _ = s.removeBody(ctx, object.CID, objkey) - return - }) - - return -} - -func (s *service) addBodyRef(ctx context.Context, cid, toKey string) (err error) { - // Cid reference key - crfKey := s.getCidrefKey(cid, toKey) - - // Add cid reference - err = s.providers.StateStore().Put(crfKey, nil) - - return -} - -func (s *service) removeBodyRef(ctx context.Context, cid, toKey string) (err error) { - // This object cid reference key - crfKey := s.getCidrefKey(cid, toKey) - - // Delete cid ref of this object - err = s.providers.StateStore().Delete(crfKey) - - return -} - -func (s *service) storeBody(ctx context.Context, body io.Reader, toKey string) (cid string, err error) { - // RLock all cid refs to enable no cid will be deleted - err = s.lock.RLock(ctx, s.cidrefSpace) - if err != nil { - return - } - defer s.lock.RUnlock(s.cidrefSpace) - - // Store body and get the cid - cid, err = s.providers.FileStore().Store(body) - if err != nil { - return - } - - // Cid reference key - crfKey := s.getCidrefKey(cid, toKey) - - // Add cid reference - err = s.providers.StateStore().Put(crfKey, nil) - - return -} - -func (s *service) removeBody(ctx context.Context, cid, tokey string) (err error) { - // Flag to mark cid be referenced by other object - otherRef := false - - // Log removing - defer func() { - fmt.Printf("remove <%s>, ref <%s>, refered - %v, err: %v\n", cid, tokey, otherRef, err) - }() - - // Lock all cid refs to enable new cid reference can not be added when - // remove is executing - err = s.lock.Lock(ctx, s.cidrefSpace) - if err != nil { - return - } - defer s.lock.Unlock(s.cidrefSpace) - - // This object cid reference key - crfKey := s.getCidrefKey(cid, tokey) - - // Delete cid ref of this object - err = s.providers.StateStore().Delete(crfKey) - if err != nil { - return - } - - // All this cid 
references prefix - allRefsPrefix := s.getAllCidrefsKeyPrefix(cid) - - - // Iterate all this cid refs, if exists other object's ref, set - // the otherRef mark to true - err = s.providers.StateStore().Iterate(allRefsPrefix, func(key, _ []byte) (stop bool, err error) { - otherRef = true - stop = true - return - }) - if err != nil { - return - } - - // Exists other refs, cid body can not be removed - if otherRef { - return - } - - // No other refs to this cid, remove it - err = s.providers.FileStore().Remove(cid) - - return -} From 568e64be7079418efdfb49edc44b9db47ba26fa0 Mon Sep 17 00:00:00 2001 From: Steve Date: Sat, 9 Sep 2023 03:16:27 +0800 Subject: [PATCH 101/139] chore: add min part size todo --- s3/services/object/service_multipart.go | 1 + 1 file changed, 1 insertion(+) diff --git a/s3/services/object/service_multipart.go b/s3/services/object/service_multipart.go index 3e0533558..67d196a53 100644 --- a/s3/services/object/service_multipart.go +++ b/s3/services/object/service_multipart.go @@ -359,6 +359,7 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob } // All parts except the last part has to be at least 5MB. 
+ // todo: change to '''!(gotPart.Size >= consts.MinPartSize)''' if (i < len(parts)-1) && !(gotPart.Size >= 0) { err = s3utils.PartTooSmall{ PartNumber: part.PartNumber, From 8dbe3c7bcb068dc9823b556d6794a1c113d2b026 Mon Sep 17 00:00:00 2001 From: Steve Date: Sun, 10 Sep 2023 00:19:59 +0800 Subject: [PATCH 102/139] chore: upgrade 'github.com/anacrolix/torrent' from v1.47.0 to v1.52.5 --- go.mod | 23 ++++++++++++----------- go.sum | 55 ++++++++++++++++++++++++++++++++----------------------- 2 files changed, 44 insertions(+), 34 deletions(-) diff --git a/go.mod b/go.mod index 8bb429e1a..fbc52759f 100644 --- a/go.mod +++ b/go.mod @@ -135,20 +135,20 @@ require ( ) require ( - crawshaw.io/sqlite v0.3.3-0.20210127221821-98b1f83c5508 // indirect + crawshaw.io/sqlite v0.3.3-0.20220618202545-d1964889ea3c // indirect github.com/BurntSushi/toml v1.2.0 // indirect - github.com/RoaringBitmap/roaring v1.2.1 // indirect + github.com/RoaringBitmap/roaring v1.2.3 // indirect github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 // indirect github.com/alecthomas/atomic v0.1.0-alpha2 // indirect github.com/anacrolix/chansync v0.3.0 // indirect - github.com/anacrolix/dht/v2 v2.19.0 // indirect + github.com/anacrolix/dht/v2 v2.19.2-0.20221121215055-066ad8494444 // indirect github.com/anacrolix/envpprof v1.2.1 // indirect - github.com/anacrolix/generics v0.0.0-20220618083756-f99e35403a60 // indirect - github.com/anacrolix/go-libutp v1.2.0 // indirect - github.com/anacrolix/log v0.13.2-0.20220711050817-613cb738ef30 // indirect + github.com/anacrolix/generics v0.0.0-20230428105757-683593396d68 // indirect + github.com/anacrolix/go-libutp v1.3.1 // indirect + github.com/anacrolix/log v0.14.0 // indirect github.com/anacrolix/missinggo v1.3.0 // indirect github.com/anacrolix/missinggo/perf v1.0.0 // indirect - github.com/anacrolix/missinggo/v2 v2.7.0 // indirect + github.com/anacrolix/missinggo/v2 v2.7.2-0.20230527121029-a582b4f397b9 // indirect github.com/anacrolix/mmsg v1.0.0 // 
indirect github.com/anacrolix/multiless v0.3.0 // indirect github.com/anacrolix/stm v0.4.0 // indirect @@ -198,7 +198,7 @@ require ( github.com/opencontainers/runtime-spec v1.0.2 // indirect github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect github.com/pion/datachannel v1.5.2 // indirect - github.com/pion/dtls/v2 v2.1.5 // indirect + github.com/pion/dtls/v2 v2.2.4 // indirect github.com/pion/ice/v2 v2.2.6 // indirect github.com/pion/interceptor v0.1.11 // indirect github.com/pion/logging v0.2.2 // indirect @@ -211,14 +211,15 @@ require ( github.com/pion/srtp/v2 v2.0.9 // indirect github.com/pion/stun v0.3.5 // indirect github.com/pion/transport v0.13.1 // indirect + github.com/pion/transport/v2 v2.0.0 // indirect github.com/pion/turn/v2 v2.0.8 // indirect - github.com/pion/udp v0.1.1 // indirect + github.com/pion/udp v0.1.4 // indirect github.com/pion/webrtc/v3 v3.1.42 // indirect github.com/prometheus/statsd_exporter v0.22.7 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 // indirect github.com/segmentio/asm v1.2.0 // indirect - github.com/tidwall/btree v1.3.1 // indirect + github.com/tidwall/btree v1.6.0 // indirect github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb // indirect github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect go.etcd.io/bbolt v1.3.6 // indirect @@ -236,7 +237,7 @@ require ( github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect github.com/Stebalien/go-bitfield v0.0.1 // indirect github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 // indirect - github.com/anacrolix/torrent v1.47.0 + github.com/anacrolix/torrent v1.52.5 github.com/andybalholm/brotli v1.0.4 // indirect github.com/benbjohnson/clock v1.3.0 github.com/beorn7/perks v1.0.1 // indirect diff --git a/go.sum b/go.sum index ba79092d7..1229c7e8e 100644 --- a/go.sum +++ b/go.sum @@ -40,8 +40,8 @@ 
contrib.go.opencensus.io/exporter/prometheus v0.4.2/go.mod h1:dvEHbiKmgvbr5pjaF9 crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797 h1:yDf7ARQc637HoxDho7xjqdvO5ZA2Yb+xzv/fOnnvZzw= crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797/go.mod h1:sXBiorCo8c46JlQV3oXPKINnZ8mcqnye1EkVkqsectk= crawshaw.io/sqlite v0.3.2/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4= -crawshaw.io/sqlite v0.3.3-0.20210127221821-98b1f83c5508 h1:fILCBBFnjnrQ0whVJlGhfv1E/QiaFDNtGFBObEVRnYg= -crawshaw.io/sqlite v0.3.3-0.20210127221821-98b1f83c5508/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4= +crawshaw.io/sqlite v0.3.3-0.20220618202545-d1964889ea3c h1:wvzox0eLO6CKQAMcOqz7oH3UFqMpMmK7kwmwV+22HIs= +crawshaw.io/sqlite v0.3.3-0.20220618202545-d1964889ea3c/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -83,8 +83,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w= github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/RoaringBitmap/roaring v1.2.1 h1:58/LJlg/81wfEHd5L9qsHduznOIhyv4qb1yWcSvVq9A= -github.com/RoaringBitmap/roaring v1.2.1/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA= +github.com/RoaringBitmap/roaring v1.2.3 h1:yqreLINqIrX22ErkKI0vY47/ivtJr6n+kMhVOVmhWBY= +github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE= github.com/Shopify/sarama v1.19.0/go.mod 
h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= @@ -114,23 +114,23 @@ github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 h1:iW0a5 github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5/go.mod h1:Y2QMoi1vgtOIfc+6DhrMOGkLoGzqSV2rKp4Sm+opsyA= github.com/anacrolix/chansync v0.3.0 h1:lRu9tbeuw3wl+PhMu/r+JJCRu5ArFXIluOgdF0ao6/U= github.com/anacrolix/chansync v0.3.0/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k= -github.com/anacrolix/dht/v2 v2.19.0 h1:A9oMHWRGbLmCyx1JlYzg79bDrur8V60+0ts8ZwEVYt4= -github.com/anacrolix/dht/v2 v2.19.0/go.mod h1:0h83KnnAQ2AUYhpQ/CkoZP45K41pjDAlPR9zGHgFjQE= +github.com/anacrolix/dht/v2 v2.19.2-0.20221121215055-066ad8494444 h1:8V0K09lrGoeT2KRJNOtspA7q+OMxGwQqK/Ug0IiaaRE= +github.com/anacrolix/dht/v2 v2.19.2-0.20221121215055-066ad8494444/go.mod h1:MctKM1HS5YYDb3F30NGJxLE+QPuqWoT5ReW/4jt8xew= github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c= github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c= github.com/anacrolix/envpprof v1.1.0/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4= github.com/anacrolix/envpprof v1.2.1 h1:25TJe6t/i0AfzzldiGFKCpD+s+dk8lONBcacJZB2rdE= github.com/anacrolix/envpprof v1.2.1/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4= -github.com/anacrolix/generics v0.0.0-20220618083756-f99e35403a60 h1:k4/h2B1gGF+PJGyGHxs8nmHHt1pzWXZWBj6jn4OBlRc= -github.com/anacrolix/generics v0.0.0-20220618083756-f99e35403a60/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8= -github.com/anacrolix/go-libutp v1.2.0 h1:sjxoB+/ARiKUR7IK/6wLWyADIBqGmu1fm0xo+8Yy7u0= -github.com/anacrolix/go-libutp v1.2.0/go.mod h1:RrJ3KcaDcf9Jqp33YL5V/5CBEc6xMc7aJL8wXfuWL50= 
+github.com/anacrolix/generics v0.0.0-20230428105757-683593396d68 h1:fyXlBfnlFzZSFckJ8QLb2lfmWfY++4RiUnae7ZMuv0A= +github.com/anacrolix/generics v0.0.0-20230428105757-683593396d68/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8= +github.com/anacrolix/go-libutp v1.3.1 h1:idJzreNLl+hNjGC3ZnUOjujEaryeOGgkwHLqSGoige0= +github.com/anacrolix/go-libutp v1.3.1/go.mod h1:heF41EC8kN0qCLMokLBVkB8NXiLwx3t8R8810MTNI5o= github.com/anacrolix/log v0.3.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= github.com/anacrolix/log v0.6.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= -github.com/anacrolix/log v0.10.0/go.mod h1:s5yBP/j046fm9odtUTbHOfDUq/zh1W8OkPpJtnX0oQI= github.com/anacrolix/log v0.10.1-0.20220123034749-3920702c17f8/go.mod h1:GmnE2c0nvz8pOIPUSC9Rawgefy1sDXqposC2wgtBZE4= -github.com/anacrolix/log v0.13.2-0.20220711050817-613cb738ef30 h1:bAgFzUxN1K3U8KwOzqCOhiygOr5NqYO3kNlV9tvp2Rc= -github.com/anacrolix/log v0.13.2-0.20220711050817-613cb738ef30/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68= +github.com/anacrolix/log v0.13.1/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68= +github.com/anacrolix/log v0.14.0 h1:mYhTSemILe/Z8tIxbGdTIWWpPspI8W/fhZHpoFbDaL0= +github.com/anacrolix/log v0.14.0/go.mod h1:1OmJESOtxQGNMlUO5rcv96Vpp9mfMqXXbe2RdinFLdY= github.com/anacrolix/lsan v0.0.0-20211126052245-807000409a62 h1:P04VG6Td13FHMgS5ZBcJX23NPC/fiC4cp9bXwYujdYM= github.com/anacrolix/lsan v0.0.0-20211126052245-807000409a62/go.mod h1:66cFKPCO7Sl4vbFnAaSq7e4OXtdMhRSBagJGWgmpJbM= github.com/anacrolix/missinggo v0.0.0-20180725070939-60ef2fbf63df/go.mod h1:kwGiTUTZ0+p4vAz3VbAI5a30t2YbvemcmspjKwrAz5s= @@ -144,8 +144,8 @@ github.com/anacrolix/missinggo/perf v1.0.0/go.mod h1:ljAFWkBuzkO12MQclXzZrosP5ur github.com/anacrolix/missinggo/v2 v2.2.0/go.mod h1:o0jgJoYOyaoYQ4E2ZMISVa9c88BbUBVQQW4QeRkNCGY= github.com/anacrolix/missinggo/v2 v2.5.1/go.mod h1:WEjqh2rmKECd0t1VhQkLGTdIWXO6f6NLjp5GlMZ+6FA= github.com/anacrolix/missinggo/v2 v2.5.2/go.mod 
h1:yNvsLrtZYRYCOI+KRH/JM8TodHjtIE/bjOGhQaLOWIE= -github.com/anacrolix/missinggo/v2 v2.7.0 h1:4fzOAAn/VCvfWGviLmh64MPMttrlYew81JdPO7nSHvI= -github.com/anacrolix/missinggo/v2 v2.7.0/go.mod h1:2IZIvmRTizALNYFYXsPR7ofXPzJgyBpKZ4kMqMEICkI= +github.com/anacrolix/missinggo/v2 v2.7.2-0.20230527121029-a582b4f397b9 h1:W/oGeHhYwxueeiDjQfmK9G+X9M2xJgfTtow62v0TWAs= +github.com/anacrolix/missinggo/v2 v2.7.2-0.20230527121029-a582b4f397b9/go.mod h1:mIEtp9pgaXqt8VQ3NQxFOod/eQ1H0D1XsZzKUQfwtac= github.com/anacrolix/mmsg v0.0.0-20180515031531-a4a3ba1fc8bb/go.mod h1:x2/ErsYUmT77kezS63+wzZp8E3byYB0gzirM/WMBLfw= github.com/anacrolix/mmsg v1.0.0 h1:btC7YLjOn29aTUAExJiVUhQOuf/8rhm+/nWCMAnL3Hg= github.com/anacrolix/mmsg v1.0.0/go.mod h1:x8kRaJY/dCrY9Al0PEcj1mb/uFHwP6GCJ9fLl4thEPc= @@ -161,8 +161,8 @@ github.com/anacrolix/sync v0.4.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DC github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= -github.com/anacrolix/torrent v1.47.0 h1:aDUnhQZ8+kfStLICHiXOGGYVFgDENK+kz4q96linyRg= -github.com/anacrolix/torrent v1.47.0/go.mod h1:SYPxEUjMwqhDr3kWGzyQLkFMuAb1bgJ57JRMpuD3ZzE= +github.com/anacrolix/torrent v1.52.5 h1:jWowdx+EU6zFVfBwmnL0d3H4J6vTFEGOrHI35YdfIT8= +github.com/anacrolix/torrent v1.52.5/go.mod h1:CcM8oPMYye5J42cSqJrmUpqwRFgSsJQ1jCEHwygqnqQ= github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 h1:QAVZ3pN/J4/UziniAhJR2OZ9Ox5kOY2053tBbbqUPYA= github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96/go.mod h1:Wa6n8cYIdaG35x15aH3Zy6d03f7P728QfdcDeD/IEOs= github.com/anacrolix/utp v0.1.0 h1:FOpQOmIwYsnENnz7tAGohA+r6iXpRjrq8ssKSre2Cp4= @@ -407,7 +407,7 @@ github.com/frankban/quicktest v1.9.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9 github.com/frankban/quicktest v1.11.3/go.mod 
h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= @@ -1414,8 +1414,9 @@ github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pion/datachannel v1.5.2 h1:piB93s8LGmbECrpO84DnkIVWasRMk3IimbcXkTQLE6E= github.com/pion/datachannel v1.5.2/go.mod h1:FTGQWaHrdCwIJ1rw6xBIfZVkslikjShim5yr05XFuCQ= github.com/pion/dtls/v2 v2.1.3/go.mod h1:o6+WvyLDAlXF7YiPB/RlskRoeK+/JtuaZa5emwQcWus= -github.com/pion/dtls/v2 v2.1.5 h1:jlh2vtIyUBShchoTDqpCCqiYCyRFJ/lvf/gQ8TALs+c= github.com/pion/dtls/v2 v2.1.5/go.mod h1:BqCE7xPZbPSubGasRoDFJeTsyJtdD1FanJYL0JGheqY= +github.com/pion/dtls/v2 v2.2.4 h1:YSfYwDQgrxMYXLBc/m7PFY5BVtWlNm/DN4qoU2CbcWg= +github.com/pion/dtls/v2 v2.2.4/go.mod h1:WGKfxqhrddne4Kg3p11FUMJrynkOY4lb25zHNO49wuw= github.com/pion/ice/v2 v2.2.6 h1:R/vaLlI1J2gCx141L5PEwtuGAGcyS6e7E0hDeJFq5Ig= github.com/pion/ice/v2 v2.2.6/go.mod h1:SWuHiOGP17lGromHTFadUe1EuPgFh/oCU6FCMZHooVE= github.com/pion/interceptor v0.1.11 h1:00U6OlqxA3FFB50HSg25J/8cWi7P6FbSzw4eFn24Bvs= @@ -1444,10 +1445,13 @@ github.com/pion/transport v0.12.3/go.mod h1:OViWW9SP2peE/HbwBvARicmAVnesphkNkCVZ github.com/pion/transport v0.13.0/go.mod h1:yxm9uXpK9bpBBWkITk13cLo1y5/ur5VQpG22ny6EP7g= github.com/pion/transport v0.13.1 h1:/UH5yLeQtwm2VZIPjxwnNFxjS4DFhyLfS4GlfuKUzfA= github.com/pion/transport v0.13.1/go.mod h1:EBxbqzyv+ZrmDb82XswEE0BjfQFtuw1Nu6sjnjWCsGg= 
+github.com/pion/transport/v2 v2.0.0 h1:bsMYyqHCbkvHwj+eNCFBuxtlKndKfyGI2vaQmM3fIE4= +github.com/pion/transport/v2 v2.0.0/go.mod h1:HS2MEBJTwD+1ZI2eSXSvHJx/HnzQqRy2/LXxt6eVMHc= github.com/pion/turn/v2 v2.0.8 h1:KEstL92OUN3k5k8qxsXHpr7WWfrdp7iJZHx99ud8muw= github.com/pion/turn/v2 v2.0.8/go.mod h1:+y7xl719J8bAEVpSXBXvTxStjJv3hbz9YFflvkpcGPw= -github.com/pion/udp v0.1.1 h1:8UAPvyqmsxK8oOjloDk4wUt63TzFe9WEJkg5lChlj7o= github.com/pion/udp v0.1.1/go.mod h1:6AFo+CMdKQm7UiA0eUPA8/eVCTx8jBIITLZHc9DWX5M= +github.com/pion/udp v0.1.4 h1:OowsTmu1Od3sD6i3fQUJxJn2fEvJO6L1TidgadtbTI8= +github.com/pion/udp v0.1.4/go.mod h1:G8LDo56HsFwC24LIcnT4YIDU5qcB6NepqqjP0keL2us= github.com/pion/webrtc/v3 v3.1.42 h1:wJEQFIXVanptnQcHOLTuIo4AtGB2+mG2x4OhIhnITOA= github.com/pion/webrtc/v3 v3.1.42/go.mod h1:ffD9DulDrPxyWvDPUIPAOSAWx9GUlOExiJPf7cCcMLA= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -1648,8 +1652,8 @@ github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45 github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e h1:T5PdfK/M1xyrHwynxMIVMWLS7f/qHwfslZphxtGnw7s= github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e/go.mod h1:XDKHRm5ThF8YJjx001LtgelzsoaEcvnA7lVWz9EeX3g= -github.com/tidwall/btree v1.3.1 h1:636+tdVDs8Hjcf35Di260W2xCW4KuoXOKyk9QWOvCpA= -github.com/tidwall/btree v1.3.1/go.mod h1:LGm8L/DZjPLmeWGjv5kFrY8dL4uVhMmzmmLYmsObdKE= +github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= +github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.2/go.mod 
h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= @@ -1846,6 +1850,7 @@ golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220516162934-403b01795ae8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1892,7 +1897,6 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1965,6 +1969,7 @@ golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220920183852-bf014ff85ad5/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.1.0/go.mod 
h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -2095,7 +2100,9 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2103,6 +2110,7 @@ golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXR golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2114,6 +2122,7 @@ 
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From bcc55bf3a8096f4d49b72af5d9684ae8b764a480 Mon Sep 17 00:00:00 2001 From: Steve Date: Sun, 10 Sep 2023 01:41:04 +0800 Subject: [PATCH 103/139] opt: comment and amz header --- s3/handlers/handlers.go | 2 +- s3/handlers/options.go | 8 ++++---- s3/s3.go | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index bd82b4349..c01e6bd58 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -1,4 +1,4 @@ -// Package handlers is an implementation of s3.Handlerser +// Package handlers is an implementation of Handlerser package handlers import ( diff --git a/s3/handlers/options.go b/s3/handlers/options.go index 091930878..09b69fa1a 100644 --- a/s3/handlers/options.go +++ b/s3/handlers/options.go @@ -39,13 +39,13 @@ var defaultCorsHeaders = []string{ consts.XRequestWith, consts.Range, consts.UserAgent, - "Amz-Sdk-Request", - "Amz-Sdk-Invocation-Id", + consts.Cid, + consts.CidList, + "Amz-*", + "amz-*", "X-Amz*", "x-amz*", "*", - consts.Cid, - consts.CidList, } var defaultHeaders = map[string][]string{ diff --git a/s3/s3.go b/s3/s3.go index 722bd3260..c7ff7bc07 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -40,7 +40,7 @@ func GetProviders() *providers.Providers { } func NewServer(cfg config.S3CompatibleAPI) *server.Server { - // lock global multiple 
keys read write lock + // global multiple keys read write lock lock := ctxmu.NewDefaultMultiCtxRWMutex() // services From 510e7fed71375dd0b9c0ed01c863419d2cfe4c6e Mon Sep 17 00:00:00 2001 From: Steve Date: Mon, 11 Sep 2023 02:12:56 +0800 Subject: [PATCH 104/139] opt: code --- s3/handlers/handlers.go | 74 +++++++++++++++++++++-------- s3/handlers/handlers_bucket.go | 60 ----------------------- s3/handlers/options.go | 4 +- s3/responses/responses_common.go | 9 ---- s3/responses/responses_multipart.go | 21 -------- 5 files changed, 55 insertions(+), 113 deletions(-) delete mode 100644 s3/responses/responses_multipart.go diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index c01e6bd58..2e9032d75 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -2,14 +2,15 @@ package handlers import ( - "github.com/bittorrent/go-btfs/s3/consts" + "context" "github.com/bittorrent/go-btfs/s3/responses" + "github.com/bittorrent/go-btfs/s3/s3utils" "github.com/bittorrent/go-btfs/s3/services/accesskey" "github.com/bittorrent/go-btfs/s3/services/object" "github.com/bittorrent/go-btfs/s3/services/sign" + "github.com/bittorrent/go-btfs/s3/utils/hash" "net/url" "runtime" - "strconv" ) var _ Handlerser = (*Handlers)(nil) @@ -41,26 +42,57 @@ func (h *Handlers) name() string { return f.Name() } -// Parse object url queries -func (h *Handlers) getObjectResources(values url.Values) (uploadId string, partNumberMarker, maxParts int, encodingType string, rerr *responses.Error) { - var err error - if values.Get("max-parts") != "" { - if maxParts, err = strconv.Atoi(values.Get("max-parts")); err != nil { - rerr = responses.ErrInvalidMaxParts - return +func (h *Handlers) respErr(err error) (rerr *responses.Error) { + switch err { + case object.ErrBucketNotFound: + rerr = responses.ErrNoSuchBucket + case object.ErrBucketeNotEmpty: + rerr = responses.ErrBucketNotEmpty + case object.ErrObjectNotFound: + rerr = responses.ErrNoSuchKey + case object.ErrUploadNotFound: + rerr = 
responses.ErrNoSuchUpload + case object.ErrBucketAlreadyExists: + rerr = responses.ErrBucketAlreadyExists + case object.ErrNotAllowed: + rerr = responses.ErrAccessDenied + case context.Canceled: + rerr = responses.ErrClientDisconnected + case context.DeadlineExceeded: + rerr = responses.ErrOperationTimedOut + default: + switch err.(type) { + case hash.SHA256Mismatch: + rerr = responses.ErrContentSHA256Mismatch + case hash.BadDigest: + rerr = responses.ErrBadDigest + case s3utils.BucketNameInvalid: + rerr = responses.ErrInvalidBucketName + case s3utils.ObjectNameInvalid: + rerr = responses.ErrInvalidObjectName + case s3utils.ObjectNameTooLong: + rerr = responses.ErrKeyTooLongError + case s3utils.ObjectNamePrefixAsSlash: + rerr = responses.ErrInvalidObjectNamePrefixSlash + case s3utils.InvalidUploadIDKeyCombination: + rerr = responses.ErrNotImplemented + case s3utils.InvalidMarkerPrefixCombination: + rerr = responses.ErrNotImplemented + case s3utils.MalformedUploadID: + rerr = responses.ErrNoSuchUpload + case s3utils.InvalidUploadID: + rerr = responses.ErrNoSuchUpload + case s3utils.InvalidPart: + rerr = responses.ErrInvalidPart + case s3utils.PartTooSmall: + rerr = responses.ErrEntityTooSmall + case s3utils.PartTooBig: + rerr = responses.ErrEntityTooLarge + case url.EscapeError: + rerr = responses.ErrInvalidObjectName + default: + rerr = responses.ErrInternalError } - } else { - maxParts = consts.MaxPartsList } - - if values.Get("part-number-marker") != "" { - if partNumberMarker, err = strconv.Atoi(values.Get("part-number-marker")); err != nil { - rerr = responses.ErrInvalidPartNumberMarker - return - } - } - - uploadId = values.Get("uploadId") - encodingType = values.Get("encoding-type") return } diff --git a/s3/handlers/handlers_bucket.go b/s3/handlers/handlers_bucket.go index cdb3e2bad..7b52fc94e 100644 --- a/s3/handlers/handlers_bucket.go +++ b/s3/handlers/handlers_bucket.go @@ -1,72 +1,12 @@ package handlers import ( - "context" 
"github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/s3utils" - "github.com/bittorrent/go-btfs/s3/services/object" - "github.com/bittorrent/go-btfs/s3/utils/hash" "net/http" - "net/url" ) -func (h *Handlers) respErr(err error) (rerr *responses.Error) { - switch err { - case object.ErrBucketNotFound: - rerr = responses.ErrNoSuchBucket - case object.ErrBucketeNotEmpty: - rerr = responses.ErrBucketNotEmpty - case object.ErrObjectNotFound: - rerr = responses.ErrNoSuchKey - case object.ErrUploadNotFound: - rerr = responses.ErrNoSuchUpload - case object.ErrBucketAlreadyExists: - rerr = responses.ErrBucketAlreadyExists - case object.ErrNotAllowed: - rerr = responses.ErrAccessDenied - case context.Canceled: - rerr = responses.ErrClientDisconnected - case context.DeadlineExceeded: - rerr = responses.ErrOperationTimedOut - default: - switch err.(type) { - case hash.SHA256Mismatch: - rerr = responses.ErrContentSHA256Mismatch - case hash.BadDigest: - rerr = responses.ErrBadDigest - case s3utils.BucketNameInvalid: - rerr = responses.ErrInvalidBucketName - case s3utils.ObjectNameInvalid: - rerr = responses.ErrInvalidObjectName - case s3utils.ObjectNameTooLong: - rerr = responses.ErrKeyTooLongError - case s3utils.ObjectNamePrefixAsSlash: - rerr = responses.ErrInvalidObjectNamePrefixSlash - case s3utils.InvalidUploadIDKeyCombination: - rerr = responses.ErrNotImplemented - case s3utils.InvalidMarkerPrefixCombination: - rerr = responses.ErrNotImplemented - case s3utils.MalformedUploadID: - rerr = responses.ErrNoSuchUpload - case s3utils.InvalidUploadID: - rerr = responses.ErrNoSuchUpload - case s3utils.InvalidPart: - rerr = responses.ErrInvalidPart - case s3utils.PartTooSmall: - rerr = responses.ErrEntityTooSmall - case s3utils.PartTooBig: - rerr = responses.ErrEntityTooLarge - case url.EscapeError: - rerr = responses.ErrInvalidObjectName - default: - rerr = 
responses.ErrInternalError - } - } - return -} - func (h *Handlers) CreateBucketHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { diff --git a/s3/handlers/options.go b/s3/handlers/options.go index 09b69fa1a..e7fca5c31 100644 --- a/s3/handlers/options.go +++ b/s3/handlers/options.go @@ -49,11 +49,11 @@ var defaultCorsHeaders = []string{ } var defaultHeaders = map[string][]string{ - consts.AccessControlAllowOrigin: []string{"*"}, + consts.AccessControlAllowOrigin: {"*"}, consts.AccessControlAllowMethods: defaultCorsMethods, consts.AccessControlAllowHeaders: defaultCorsHeaders, consts.AccessControlExposeHeaders: defaultCorsHeaders, - consts.AccessControlAllowCredentials: []string{"true"}, + consts.AccessControlAllowCredentials: {"true"}, } type Option func(handlers *Handlers) diff --git a/s3/responses/responses_common.go b/s3/responses/responses_common.go index f978db3fd..d1098cdf2 100644 --- a/s3/responses/responses_common.go +++ b/s3/responses/responses_common.go @@ -47,15 +47,6 @@ func WriteSuccessResponse(w http.ResponseWriter, output interface{}, locationNam _ = protocol.WriteResponse(w, http.StatusOK, output, locationName) } -func setPutObjHeaders(w http.ResponseWriter, etag, cid string, delete bool) { - if etag != "" && !delete { - w.Header()[consts.ETag] = []string{`"` + etag + `"`} - } - if cid != "" { - w.Header()[consts.Cid] = []string{cid} - } -} - func pathClean(p string) string { cp := path.Clean(p) if cp == "." 
{ diff --git a/s3/responses/responses_multipart.go b/s3/responses/responses_multipart.go deleted file mode 100644 index fd7aff5d2..000000000 --- a/s3/responses/responses_multipart.go +++ /dev/null @@ -1,21 +0,0 @@ -package responses - -//func WriteCreateMultipartUploadResponse(w http.ResponseWriter, r *http.Request, bucname, objname, uploadID string) { -// resp := GenerateInitiateMultipartUploadResponse(bucname, objname, uploadID) -// WriteSuccessResponse(w, resp, "") -//} -// -//func WriteAbortMultipartUploadResponse(w http.ResponseWriter, r *http.Request) { -// WriteSuccessResponse(w, nil, "") -//} -// -//func WriteUploadPartResponse(w http.ResponseWriter, r *http.Request, part object.Part) { -// setPutObjHeaders(w, part.ETag, part.CID, false) -// WriteSuccessResponse(w, nil, "") -//} -// -//func WriteCompleteMultipartUploadResponse(w http.ResponseWriter, r *http.Request, bucname, objname, region string, obj object.Object) { -// resp := GenerateCompleteMultipartUploadResponse(bucname, objname, region, obj) -// setPutObjHeaders(w, obj.ETag, obj.CID, false) -// WriteSuccessResponse(w, resp, "") -//} From b5f5e5ffd513f489adf74bf00c221ddd5eafda6f Mon Sep 17 00:00:00 2001 From: Steve Date: Mon, 11 Sep 2023 14:26:39 +0800 Subject: [PATCH 105/139] opt: preflight cache max age --- s3/consts/consts.go | 1 + s3/handlers/handlers.go | 3 ++- s3/handlers/handlers_middlewares.go | 14 ++++++++------ s3/handlers/options.go | 3 +++ 4 files changed, 14 insertions(+), 7 deletions(-) diff --git a/s3/consts/consts.go b/s3/consts/consts.go index ed60adb0f..827b0516b 100644 --- a/s3/consts/consts.go +++ b/s3/consts/consts.go @@ -173,6 +173,7 @@ const ( AccessControlAllowHeaders = "Access-Control-Allow-Headers" AccessControlExposeHeaders = "Access-Control-Expose-Headers" AccessControlAllowCredentials = "Access-Control-Allow-Credentials" + AccessControlMaxAge = "Access-Control-Max-Age" ) // object const diff --git a/s3/handlers/handlers.go b/s3/handlers/handlers.go index 
2e9032d75..10239c119 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -9,6 +9,7 @@ import ( "github.com/bittorrent/go-btfs/s3/services/object" "github.com/bittorrent/go-btfs/s3/services/sign" "github.com/bittorrent/go-btfs/s3/utils/hash" + "net/http" "net/url" "runtime" ) @@ -16,7 +17,7 @@ import ( var _ Handlerser = (*Handlers)(nil) type Handlers struct { - headers map[string][]string + headers http.Header acksvc accesskey.Service sigsvc sign.Service objsvc object.Service diff --git a/s3/handlers/handlers_middlewares.go b/s3/handlers/handlers_middlewares.go index 1a7261019..5e15162b4 100644 --- a/s3/handlers/handlers_middlewares.go +++ b/s3/handlers/handlers_middlewares.go @@ -9,18 +9,20 @@ import ( "github.com/bittorrent/go-btfs/s3/services/accesskey" rscors "github.com/rs/cors" "net/http" + "strconv" "time" ) func (h *Handlers) Cors(handler http.Handler) http.Handler { headers := h.headers - cred := len(headers[consts.AccessControlAllowCredentials]) > 0 && - headers[consts.AccessControlAllowCredentials][0] == "true" + cred := headers.Get(consts.AccessControlAllowCredentials) == "true" + maxAge, _ := strconv.Atoi(headers.Get(consts.AccessControlMaxAge)) ch := rscors.New(rscors.Options{ - AllowedOrigins: headers[consts.AccessControlAllowOrigin], - AllowedMethods: headers[consts.AccessControlAllowMethods], - AllowedHeaders: headers[consts.AccessControlExposeHeaders], - ExposedHeaders: headers[consts.AccessControlAllowHeaders], + AllowedOrigins: headers.Values(consts.AccessControlAllowOrigin), + AllowedMethods: headers.Values(consts.AccessControlAllowMethods), + AllowedHeaders: headers.Values(consts.AccessControlAllowHeaders), + ExposedHeaders: headers.Values(consts.AccessControlExposeHeaders), + MaxAge: maxAge, AllowCredentials: cred, }) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/s3/handlers/options.go b/s3/handlers/options.go index e7fca5c31..281b83eb2 100644 --- a/s3/handlers/options.go +++ 
b/s3/handlers/options.go @@ -48,12 +48,15 @@ var defaultCorsHeaders = []string{ "*", } +const defaultCorsMaxAge = "36000" + var defaultHeaders = map[string][]string{ consts.AccessControlAllowOrigin: {"*"}, consts.AccessControlAllowMethods: defaultCorsMethods, consts.AccessControlAllowHeaders: defaultCorsHeaders, consts.AccessControlExposeHeaders: defaultCorsHeaders, consts.AccessControlAllowCredentials: {"true"}, + consts.AccessControlMaxAge: {defaultCorsMaxAge}, } type Option func(handlers *Handlers) From 6c71fb547ee0fe81faf9ab854f26dd48a3c4726e Mon Sep 17 00:00:00 2001 From: Steve Date: Mon, 11 Sep 2023 18:22:29 +0800 Subject: [PATCH 106/139] feat: bucket response add acl header --- s3/handlers/handlers_bucket.go | 8 ++++---- s3/responses/responses_bucket.go | 9 +++++++-- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/s3/handlers/handlers_bucket.go b/s3/handlers/handlers_bucket.go index 7b52fc94e..2efe1fe16 100644 --- a/s3/handlers/handlers_bucket.go +++ b/s3/handlers/handlers_bucket.go @@ -20,14 +20,14 @@ func (h *Handlers) CreateBucketHandler(w http.ResponseWriter, r *http.Request) { return } - _, err = h.objsvc.CreateBucket(r.Context(), req.AccessKey, req.Bucket, req.Region, req.ACL) + buc, err := h.objsvc.CreateBucket(r.Context(), req.AccessKey, req.Bucket, req.Region, req.ACL) if err != nil { rerr = h.respErr(err) responses.WriteErrorResponse(w, r, rerr) return } - responses.WriteCreateBucketResponse(w, r) + responses.WriteCreateBucketResponse(w, r, buc) return } @@ -45,14 +45,14 @@ func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { return } - _, err = h.objsvc.GetBucket(r.Context(), req.AccessKey, req.Bucket) + buc, err := h.objsvc.GetBucket(r.Context(), req.AccessKey, req.Bucket) if err != nil { rerr = h.respErr(err) responses.WriteErrorResponse(w, r, rerr) return } - responses.WriteHeadBucketResponse(w, r) + responses.WriteHeadBucketResponse(w, r, buc) return } diff --git a/s3/responses/responses_bucket.go 
b/s3/responses/responses_bucket.go index 58bf32503..4a513c229 100644 --- a/s3/responses/responses_bucket.go +++ b/s3/responses/responses_bucket.go @@ -2,19 +2,22 @@ package responses import ( "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/protocol" "github.com/bittorrent/go-btfs/s3/services/object" "net/http" ) -func WriteCreateBucketResponse(w http.ResponseWriter, r *http.Request) { +func WriteCreateBucketResponse(w http.ResponseWriter, r *http.Request, buc *object.Bucket) { output := new(s3.CreateBucketOutput).SetLocation(pathClean(r.URL.Path)) + w.Header().Add(consts.AmzACL, buc.ACL) WriteSuccessResponse(w, output, "") return } -func WriteHeadBucketResponse(w http.ResponseWriter, r *http.Request) { +func WriteHeadBucketResponse(w http.ResponseWriter, r *http.Request, buc *object.Bucket) { output := new(s3.HeadBucketOutput) + w.Header().Add(consts.AmzACL, buc.ACL) WriteSuccessResponse(w, output, "") return } @@ -32,6 +35,7 @@ func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, accessKey for _, buc := range buckets { s3Bucket := new(s3.Bucket).SetName(buc.Name).SetCreationDate(buc.Created) s3Buckets = append(s3Buckets, s3Bucket) + w.Header().Add(consts.AmzACL, buc.ACL) } output.SetBuckets(s3Buckets) WriteSuccessResponse(w, output, "ListAllMyBucketsResult") @@ -59,6 +63,7 @@ func WriteGetBucketACLResponse(w http.ResponseWriter, r *http.Request, accessKey panic("unknown acl") } output.SetGrants(grants) + w.Header().Add(consts.AmzACL, acl) WriteSuccessResponse(w, output, "AccessControlPolicy") return } From dbb474db33e3659cc16e21d2a5f767076ee0c585 Mon Sep 17 00:00:00 2001 From: Steve Date: Mon, 11 Sep 2023 18:25:49 +0800 Subject: [PATCH 107/139] opt: change cid-list header to cid --- s3/consts/consts.go | 1 - s3/handlers/options.go | 1 - s3/responses/responses_object.go | 4 ++-- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/s3/consts/consts.go b/s3/consts/consts.go 
index 827b0516b..f38a2c052 100644 --- a/s3/consts/consts.go +++ b/s3/consts/consts.go @@ -163,7 +163,6 @@ const ( Range = "Range" UserAgent = "User-Agent" Cid = "Cid" - CidList = "Cid-List" ) // Standard HTTP cors headers diff --git a/s3/handlers/options.go b/s3/handlers/options.go index 281b83eb2..0abda6c36 100644 --- a/s3/handlers/options.go +++ b/s3/handlers/options.go @@ -40,7 +40,6 @@ var defaultCorsHeaders = []string{ consts.Range, consts.UserAgent, consts.Cid, - consts.CidList, "Amz-*", "amz-*", "X-Amz*", diff --git a/s3/responses/responses_object.go b/s3/responses/responses_object.go index 83a21f770..fde197eee 100644 --- a/s3/responses/responses_object.go +++ b/s3/responses/responses_object.go @@ -93,7 +93,7 @@ func WriteListObjectsResponse(w http.ResponseWriter, r *http.Request, accessKey, s3Obj.SetSize(obj.Size) s3Obj.SetStorageClass("") s3Objs[i] = s3Obj - w.Header().Add(consts.CidList, obj.CID) + w.Header().Add(consts.Cid, obj.CID) } out.SetContents(s3Objs) s3CommPrefixes := make([]*s3.CommonPrefix, len(list.Prefixes)) @@ -127,7 +127,7 @@ func WriteListObjectsV2Response(w http.ResponseWriter, r *http.Request, accessKe s3Obj.SetSize(obj.Size) s3Obj.SetStorageClass("") s3Objs[i] = s3Obj - w.Header().Add(consts.CidList, obj.CID) + w.Header().Add(consts.Cid, obj.CID) } out.SetContents(s3Objs) s3CommPrefixes := make([]*s3.CommonPrefix, len(list.Prefixes)) From fe87cab8e2942178bca701f194b982371a954a25 Mon Sep 17 00:00:00 2001 From: Steve Date: Tue, 12 Sep 2023 17:35:28 +0800 Subject: [PATCH 108/139] fix: required check exlude unknow location --- s3/protocol/request.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/s3/protocol/request.go b/s3/protocol/request.go index 031ef9a0b..4d917a37e 100644 --- a/s3/protocol/request.go +++ b/s3/protocol/request.go @@ -138,6 +138,7 @@ func getInputValue(input interface{}) (inv reflect.Value, err error) { func parseLocation(r *http.Request, inv reflect.Value) (err error) { query := r.URL.Query() 
+loop: for i := 0; i < inv.NumField(); i++ { fv := inv.Field(i) ft := inv.Type().Field(i) @@ -171,15 +172,13 @@ func parseLocation(r *http.Request, inv reflect.Value) (err error) { err = parseLocationValue(locVal, fv, ft.Tag) case "querystring": err = parseQueryString(query, fv, name, ft.Tag) + default: + continue loop } - if err != nil { return } - - required := ft.Tag.Get("required") == "true" - - if required && !reflect.Indirect(fv).IsValid() { + if ft.Tag.Get("required") == "true" && !reflect.Indirect(fv).IsValid() { err = fmt.Errorf("field %s is required", ft.Name) return } From 6a2ac3a9594376681f454d6e419c88140e5ac883 Mon Sep 17 00:00:00 2001 From: Steve Date: Wed, 13 Sep 2023 16:23:25 +0800 Subject: [PATCH 109/139] fix: get object acl --- s3/handlers/handlers_object.go | 4 +- s3/services/object/proto.go | 1 + s3/services/object/service_object.go | 58 ++++++++++++++++++++++++++++ 3 files changed, 61 insertions(+), 2 deletions(-) diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index f3ad3d3d5..ba95b6442 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -337,14 +337,14 @@ func (h *Handlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) { cctx.SetHandleInf(r, h.name(), err) }() - bucname, _, rerr := requests.ParseBucketAndObject(r) + bucname, objname, rerr := requests.ParseBucketAndObject(r) if rerr != nil { err = rerr responses.WriteErrorResponse(w, r, rerr) return } - acl, err := h.objsvc.GetBucketACL(ctx, ack, bucname) + acl, err := h.objsvc.GetObjectACL(ctx, ack, bucname, objname) if err != nil { rerr = h.respErr(err) responses.WriteErrorResponse(w, r, rerr) diff --git a/s3/services/object/proto.go b/s3/services/object/proto.go index 0d8ff5eaf..4a1837f70 100644 --- a/s3/services/object/proto.go +++ b/s3/services/object/proto.go @@ -31,6 +31,7 @@ type Service interface { DeleteObject(ctx context.Context, user, bucname, objname string) (err error) ListObjects(ctx context.Context, 
user, bucname, prefix, delimiter, marker string, max int64) (list *ObjectsList, err error) ListObjectsV2(ctx context.Context, user string, bucket string, prefix string, token, delimiter string, max int64, owner bool, after string) (list *ObjectsListV2, err error) + GetObjectACL(ctx context.Context, user, bucname, objname string) (acl string, err error) CreateMultipartUpload(ctx context.Context, user, bucname, objname string, meta map[string]*string) (multipart *Multipart, err error) UploadPart(ctx context.Context, user, bucname, objname, uplid string, partId int, reader *hash.Reader, size int64) (part *Part, err error) diff --git a/s3/services/object/service_object.go b/s3/services/object/service_object.go index 2218a7ca4..b9056ebec 100644 --- a/s3/services/object/service_object.go +++ b/s3/services/object/service_object.go @@ -614,3 +614,61 @@ func (s *service) getObject(objkey string) (object *Object, err error) { } return } + +// GetObjectACL get user specified object ACL(bucket acl) +func (s *service) GetObjectACL(ctx context.Context, user, bucname, objname string) (acl string, err error) { + // Operation context + ctx, cancel := s.opctx(ctx) + defer cancel() + + // Bucket key + buckey := s.getBucketKey(bucname) + + // RLock bucket + err = s.lock.RLock(ctx, buckey) + if err != nil { + return + } + defer s.lock.RUnlock(buckey) + + // Get bucket + bucket, err := s.getBucket(buckey) + if err != nil { + return + } + if bucket == nil { + err = ErrBucketNotFound + return + } + + // Check action ACL + allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.GetBucketAclAction) + if !allow { + err = ErrNotAllowed + return + } + + // Object key + objkey := s.getObjectKey(bucname, objname) + + // RLock object + err = s.lock.RLock(ctx, objkey) + if err != nil { + return + } + defer s.lock.RUnlock(objkey) + + // Get object + object, err := s.getObject(objkey) + if err != nil { + return + } + if object == nil { + err = ErrObjectNotFound + } + + // Get ACL field value + 
acl = bucket.ACL + + return +} From 3e1def6a0428a0ccb5d3f833254927cd62a8b220 Mon Sep 17 00:00:00 2001 From: Steve Date: Mon, 18 Sep 2023 18:02:00 +0800 Subject: [PATCH 110/139] ref: requests --- core/commands/object/patch.go | 2 +- s3/consts/consts.go | 21 +- s3/handlers/handlers.go | 46 ++- s3/handlers/handlers_bucket.go | 87 +++-- s3/handlers/handlers_multipart.go | 26 +- s3/handlers/handlers_object.go | 353 +++++--------------- s3/handlers/proto.go | 8 +- s3/protocol/request.go | 425 ------------------------- s3/requests/input.go | 330 +++++++++++++++++++ s3/requests/input_errors.go | 65 ++++ s3/requests/parsers.go | 98 ------ s3/requests/parsers_bucket.go | 122 +++++++ s3/requests/parsers_common.go | 109 ------- s3/requests/parsers_object.go | 259 +++++++++++++++ s3/requests/validate.go | 305 ++++++++++++++++++ s3/requests/validate_errors.go | 22 ++ s3/responses/errors.go | 11 +- s3/responses/responses_bucket.go | 3 +- s3/responses/responses_object.go | 91 ++++-- s3/s3utils/request_test.go | 27 -- s3/s3utils/utils.go | 33 +- s3/services/object/proto.go | 138 ++++++-- s3/services/object/service_bucket.go | 58 ++-- s3/services/object/service_object.go | 239 +++++++++----- s3/services/sign/signature-v4-utils.go | 4 +- s3/utils/encode.go | 4 +- s3/utils/if.go | 20 +- s3/utils/signature.go | 2 +- 28 files changed, 1700 insertions(+), 1208 deletions(-) delete mode 100644 s3/protocol/request.go create mode 100644 s3/requests/input.go create mode 100644 s3/requests/input_errors.go delete mode 100644 s3/requests/parsers.go create mode 100644 s3/requests/parsers_bucket.go delete mode 100644 s3/requests/parsers_common.go create mode 100644 s3/requests/parsers_object.go create mode 100644 s3/requests/validate.go create mode 100644 s3/requests/validate_errors.go delete mode 100644 s3/s3utils/request_test.go diff --git a/core/commands/object/patch.go b/core/commands/object/patch.go index ff2c6933e..b196738b2 100644 --- a/core/commands/object/patch.go +++ 
b/core/commands/object/patch.go @@ -40,7 +40,7 @@ Example: $ echo "hello" | btfs object patch $HASH append-data NOTE: This does not append data to a file - it modifies the actual raw -data within an object. Objects have a max size of 1MB and objects larger than +data within an object. ToDeleteObjects have a max size of 1MB and objects larger than the limit will not be respected by the network. `, }, diff --git a/s3/consts/consts.go b/s3/consts/consts.go index f38a2c052..beea060bc 100644 --- a/s3/consts/consts.go +++ b/s3/consts/consts.go @@ -16,6 +16,7 @@ const ( // MaxLocationConstraintSize Limit of location constraint XML for unauthenticated PUT bucket operations. MaxLocationConstraintSize = 3 * humanize.MiByte EmptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + UnsignedSHA256 = "UNSIGNED-PAYLOAD" StsRequestBodyLimit = 10 * (1 << 20) // 10 MiB SlashSeparator = "/" @@ -28,15 +29,17 @@ const ( AssumeRole = "AssumeRole" SignV4Algorithm = "AWS4-HMAC-SHA256" - DefaultServerInfo = "BTFS" - DefaultLocation = "us-east-1" - DefaultBucketACL = s3.BucketCannedACLPublicRead - DefaultObjectACL = "" - AllUsersURI = "http://acs.amazonaws.com/groups/global/AllUsers" + StreamingContentEncoding = "aws-chunked" + DefaultEncodingType = "url" + DefaultContentType = "binary/octet-stream" + DefaultServerInfo = "BTFS" + DefaultBucketRegion = "us-east-1" + DefaultBucketACL = s3.BucketCannedACLPublicRead + AllUsersURI = "http://acs.amazonaws.com/groups/global/AllUsers" ) -var SupportedLocations = map[string]bool{ - DefaultLocation: true, +var SupportedBucketRegions = map[string]bool{ + DefaultBucketRegion: true, } var SupportedBucketACLs = map[string]bool{ @@ -45,10 +48,6 @@ var SupportedBucketACLs = map[string]bool{ s3.BucketCannedACLPublicReadWrite: true, } -var SupportedObjectACLs = map[string]bool{ - DefaultObjectACL: true, -} - // Standard S3 HTTP request constants const ( IfModifiedSince = "If-Modified-Since" diff --git a/s3/handlers/handlers.go 
b/s3/handlers/handlers.go index 10239c119..d8ddb4460 100644 --- a/s3/handlers/handlers.go +++ b/s3/handlers/handlers.go @@ -3,6 +3,7 @@ package handlers import ( "context" + "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/s3utils" "github.com/bittorrent/go-btfs/s3/services/accesskey" @@ -43,11 +44,43 @@ func (h *Handlers) name() string { return f.Name() } -func (h *Handlers) respErr(err error) (rerr *responses.Error) { +func (h *Handlers) toRespErr(err error) (rerr *responses.Error) { switch err { + + // requests errors + case requests.ErrBucketNameInvalid: + rerr = responses.ErrInvalidBucketName + case requests.ErrObjectNameInvalid: + rerr = responses.ErrInvalidObjectName + case requests.ErrObjectNameTooLong: + rerr = responses.ErrKeyTooLongError + case requests.ErrObjectNamePrefixSlash: + rerr = responses.ErrInvalidObjectNamePrefixSlash + case requests.ErrRegionUnsupported: + rerr = responses.ErrInvalidRegion + case requests.ErrACLUnsupported: + rerr = responses.ErrMalformedACLError + case requests.ErrInvalidContentMd5: + rerr = responses.ErrInvalidDigest + case requests.ErrInvalidChecksumSha256: + rerr = responses.ErrContentSHA256Mismatch + case requests.ErrContentLengthMissing: + rerr = responses.ErrMissingContentLength + case requests.ErrContentLengthTooLarge: + rerr = responses.ErrEntityTooLarge + case requests.ErrCopySrcInvalid: + rerr = responses.ErrInvalidCopySource + case requests.ErrCopyDestInvalid: + rerr = responses.ErrInvalidCopyDest + case requests.ErrMaxKeysInvalid: + rerr = responses.ErrInvalidMaxKeys + case requests.ErrMarkerPrefixCombinationInvalid: + rerr = responses.ErrInvalidRequest + + // object service errors case object.ErrBucketNotFound: rerr = responses.ErrNoSuchBucket - case object.ErrBucketeNotEmpty: + case object.ErrBucketNotEmpty: rerr = responses.ErrBucketNotEmpty case object.ErrObjectNotFound: rerr = responses.ErrNoSuchKey @@ -63,6 +96,15 @@ func (h 
*Handlers) respErr(err error) (rerr *responses.Error) { rerr = responses.ErrOperationTimedOut default: switch err.(type) { + case requests.ErrFailedParseValue: + rerr = responses.ErrInvalidRequest + case requests.ErrFailedDecodeXML: + rerr = responses.ErrMalformedXML + case requests.ErrMissingRequiredParam: + rerr = responses.ErrInvalidRequest + case requests.ErrWithUnsupportedParam: + rerr = responses.ErrNotImplemented + case hash.SHA256Mismatch: rerr = responses.ErrContentSHA256Mismatch case hash.BadDigest: diff --git a/s3/handlers/handlers_bucket.go b/s3/handlers/handlers_bucket.go index 2efe1fe16..4ee102e98 100644 --- a/s3/handlers/handlers_bucket.go +++ b/s3/handlers/handlers_bucket.go @@ -8,77 +8,71 @@ import ( ) func (h *Handlers) CreateBucketHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - req, rerr := requests.ParseCreateBucketRequest(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) + args, err := requests.ParseCreateBucketRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - buc, err := h.objsvc.CreateBucket(r.Context(), req.AccessKey, req.Bucket, req.Region, req.ACL) + buc, err := h.objsvc.CreateBucket(ctx, args) if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } responses.WriteCreateBucketResponse(w, r, buc) - return } func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - req, rerr := requests.ParseHeadBucketRequest(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) + args, err := requests.ParseHeadBucketRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - buc, err := h.objsvc.GetBucket(r.Context(), req.AccessKey, 
req.Bucket) + buc, err := h.objsvc.GetBucket(ctx, args) if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } responses.WriteHeadBucketResponse(w, r, buc) - return } func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - req, rerr := requests.ParseDeleteBucketRequest(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) + args, err := requests.ParseDeleteBucketRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - err = h.objsvc.DeleteBucket(r.Context(), req.AccessKey, req.Bucket) + err = h.objsvc.DeleteBucket(ctx, args) if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } responses.WriteDeleteBucketResponse(w) - return } @@ -88,67 +82,64 @@ func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { cctx.SetHandleInf(r, h.name(), err) }() - req, rerr := requests.ParseListBucketsRequest(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) + args, err := requests.ParseListBucketsRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - list, err := h.objsvc.GetAllBuckets(r.Context(), req.AccessKey) + list, err := h.objsvc.ListBuckets(r.Context(), args) if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - responses.WriteListBucketsResponse(w, r, req.AccessKey, list) - + responses.WriteListBucketsResponse(w, r, args.AccessKey, list) return } func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - req, rerr := 
requests.ParseGetBucketACLRequest(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) + args, err := requests.ParseGetBucketACLRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - acl, err := h.objsvc.GetBucketACL(r.Context(), req.AccessKey, req.Bucket) + acl, err := h.objsvc.GetBucketACL(ctx, args) if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - responses.WriteGetBucketACLResponse(w, r, req.AccessKey, acl) + responses.WriteGetBucketACLResponse(w, r, args.AccessKey, acl) + return } func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - req, rerr := requests.ParsePutBucketAclRequest(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) + args, err := requests.ParsePutBucketAclRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - err = h.objsvc.PutBucketACL(r.Context(), req.AccessKey, req.Bucket, req.ACL) + err = h.objsvc.PutBucketACL(ctx, args) if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } responses.WritePutBucketAclResponse(w, r) + return } diff --git a/s3/handlers/handlers_multipart.go b/s3/handlers/handlers_multipart.go index efafc1299..5c1bc0de6 100644 --- a/s3/handlers/handlers_multipart.go +++ b/s3/handlers/handlers_multipart.go @@ -4,7 +4,7 @@ import ( "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/protocol" + "github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/s3utils" "github.com/bittorrent/go-btfs/s3/services/object" @@ -23,7 +23,7 @@ 
func (h *Handlers) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.R var input s3.CreateMultipartUploadInput - err = protocol.ParseRequest(r, &input) + err = requests.ParseInput(r, &input) if err != nil { rerr := responses.ErrBadRequest responses.WriteErrorResponse(w, r, rerr) @@ -34,7 +34,7 @@ func (h *Handlers) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.R err = s3utils.CheckNewMultipartArgs(ctx, bucname, objname) if err != nil { - rerr := h.respErr(err) + rerr := h.toRespErr(err) responses.WriteErrorResponse(w, r, rerr) return } @@ -43,7 +43,7 @@ func (h *Handlers) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.R mtp, err := h.objsvc.CreateMultipartUpload(ctx, ack, bucname, objname, meta) if err != nil { - rerr := h.respErr(err) + rerr := h.toRespErr(err) responses.WriteErrorResponse(w, r, rerr) return } @@ -68,7 +68,7 @@ func (h *Handlers) UploadPartHandler(w http.ResponseWriter, r *http.Request) { var input s3.UploadPartInput - err = protocol.ParseRequest(r, &input) + err = requests.ParseInput(r, &input) if err != nil { rerr := responses.ErrBadRequest responses.WriteErrorResponse(w, r, rerr) @@ -79,7 +79,7 @@ func (h *Handlers) UploadPartHandler(w http.ResponseWriter, r *http.Request) { err = s3utils.CheckPutObjectPartArgs(ctx, bucname, objname) if err != nil { - rerr := h.respErr(err) + rerr := h.toRespErr(err) responses.WriteErrorResponse(w, r, rerr) return } @@ -118,7 +118,7 @@ func (h *Handlers) UploadPartHandler(w http.ResponseWriter, r *http.Request) { part, err := h.objsvc.UploadPart(ctx, ack, bucname, objname, uploadId, partId, hrdr, size) if err != nil { - rerr := h.respErr(err) + rerr := h.toRespErr(err) responses.WriteErrorResponse(w, r, rerr) return } @@ -142,7 +142,7 @@ func (h *Handlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Re var input s3.AbortMultipartUploadInput - err = protocol.ParseRequest(r, &input) + err = requests.ParseInput(r, &input) if err != nil { rerr := 
responses.ErrBadRequest responses.WriteErrorResponse(w, r, rerr) @@ -153,7 +153,7 @@ func (h *Handlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Re err = s3utils.CheckAbortMultipartArgs(ctx, bucname, objname) if err != nil { - rerr := h.respErr(err) + rerr := h.toRespErr(err) responses.WriteErrorResponse(w, r, rerr) return } @@ -162,7 +162,7 @@ func (h *Handlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Re err = h.objsvc.AbortMultipartUpload(ctx, ack, bucname, objname, uploadId) if err != nil { - rerr := h.respErr(err) + rerr := h.toRespErr(err) responses.WriteErrorResponse(w, r, rerr) return } @@ -184,7 +184,7 @@ func (h *Handlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http var input s3.CompleteMultipartUploadInput - err = protocol.ParseRequest(r, &input) + err = requests.ParseInput(r, &input) if err != nil { rerr := responses.ErrBadRequest responses.WriteErrorResponse(w, r, rerr) @@ -195,7 +195,7 @@ func (h *Handlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http err = s3utils.CheckCompleteMultipartArgs(ctx, bucname, objname) if err != nil { - rerr := h.respErr(err) + rerr := h.toRespErr(err) responses.WriteErrorResponse(w, r, rerr) return } @@ -234,7 +234,7 @@ func (h *Handlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http obj, err := h.objsvc.CompleteMultiPartUpload(ctx, ack, bucname, objname, uploadId, complUpload.Parts) if err != nil { - rerr := h.respErr(err) + rerr := h.toRespErr(err) responses.WriteErrorResponse(w, r, rerr) return } diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go index ba95b6442..507b56a44 100644 --- a/s3/handlers/handlers_object.go +++ b/s3/handlers/handlers_object.go @@ -2,15 +2,11 @@ package handlers import ( "encoding/base64" - "errors" - "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/cctx" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/protocol" 
"github.com/bittorrent/go-btfs/s3/requests" "github.com/bittorrent/go-btfs/s3/responses" "github.com/bittorrent/go-btfs/s3/s3utils" - "github.com/bittorrent/go-btfs/s3/utils/hash" "net/http" "net/url" "path" @@ -18,342 +14,150 @@ import ( "strings" ) +// PutObjectHandler . func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - ack := cctx.GetAccessKey(r) var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - if _, ok := r.Header[consts.AmzCopySource]; ok { - err = errors.New("shouldn't be copy") - responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) - return - } - - bucname, objname, rerr := requests.ParseBucketAndObject(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - _, rerr = requests.ParseObjectACL(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - err = s3utils.CheckPutObjectArgs(ctx, bucname, objname) - if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - - meta, err := extractMetadata(ctx, r) + args, err := requests.ParsePutObjectRequest(r) if err != nil { - responses.WriteErrorResponse(w, r, responses.ErrInvalidRequest) - return - } - - if r.ContentLength == 0 { - responses.WriteErrorResponse(w, r, responses.ErrEntityTooSmall) - return - } - - body, ok := r.Body.(*hash.Reader) - if !ok { - responses.WriteErrorResponse(w, r, responses.ErrInternalError) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - obj, err := h.objsvc.PutObject(ctx, ack, bucname, objname, body, r.ContentLength, meta) + obj, err := h.objsvc.PutObject(ctx, args) if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } responses.WritePutObjectResponse(w, r, obj) - return } -func (h *Handlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := 
cctx.GetAccessKey(r) - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - bucname, objname, rerr := requests.ParseBucketAndObject(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - err = s3utils.CheckGetObjArgs(ctx, bucname, objname) - if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - - //objsvc - obj, _, err := h.objsvc.GetObject(ctx, ack, bucname, objname, false) - if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - - responses.WriteHeadObjectResponse(w, r, obj) -} - +// CopyObjectHandler . func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - ack := cctx.GetAccessKey(r) var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - dstBucket, dstObject, rerr := requests.ParseBucketAndObject(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - err = s3utils.CheckPutObjectArgs(ctx, dstBucket, dstObject) + args, err := requests.ParseCopyObjectRequest(r) if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - // Copy source path. - cpSrcPath, err := url.QueryUnescape(r.Header.Get(consts.AmzCopySource)) + obj, err := h.objsvc.CopyObject(ctx, args) if err != nil { - // Save unescaped string as is. - cpSrcPath = r.Header.Get(consts.AmzCopySource) - err = nil - } - - srcBucket, srcObject := pathToBucketAndObject(cpSrcPath) - // If source object is empty or bucket is empty, reply back invalid copy source. 
- if srcObject == "" || srcBucket == "" { - err = responses.ErrInvalidCopySource - responses.WriteErrorResponse(w, r, responses.ErrInvalidCopySource) - return - } - if err = s3utils.CheckGetObjArgs(ctx, srcBucket, srcObject); err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - if srcBucket == dstBucket && srcObject == dstObject { - err = responses.ErrInvalidCopyDest - responses.WriteErrorResponse(w, r, responses.ErrInvalidCopyDest) - return - } - - metadata := make(map[string]string) - if isReplace(r) { - var inputMeta map[string]string - inputMeta, err = extractMetadata(ctx, r) - if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - for key, val := range inputMeta { - metadata[key] = val - } - } - - //objsvc - obj, err := h.objsvc.CopyObject(ctx, ack, srcBucket, srcObject, dstBucket, dstObject, metadata) - if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } responses.WriteCopyObjectResponse(w, r, obj) + return } -// DeleteObjectHandler - delete an object -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html -func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { +// HeadObjectHandler . 
+func (h *Handlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - ack := cctx.GetAccessKey(r) var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - bucname, objname, rerr := requests.ParseBucketAndObject(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - err = s3utils.CheckDelObjArgs(ctx, bucname, objname) + args, err := requests.ParseHeadObjectRequest(r) if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - err = h.objsvc.DeleteObject(ctx, ack, bucname, objname) + obj, _, err := h.objsvc.GetObject(ctx, args) if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - responses.WriteDeleteObjectResponse(w, r, nil) + responses.WriteHeadObjectResponse(w, r, obj) + return } -// DeleteObjectsHandler - delete objects -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html -func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) { +// GetObjectHandler . 
+func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - ack := cctx.GetAccessKey(r) var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - var input s3.DeleteObjectsInput - - err = protocol.ParseRequest(r, &input) + args, err := requests.ParseGetObjectRequest(r) if err != nil { - rerr := h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - - if input.Delete == nil || - len(input.Delete.Objects) == 0 || - len(input.Delete.Objects) > consts.MaxDeleteList { - rerr := responses.ErrMalformedXML - err = rerr - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - bucname := *input.Bucket - - _, err = h.objsvc.GetBucket(ctx, ack, bucname) + obj, body, err := h.objsvc.GetObject(ctx, args) if err != nil { - rerr := h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - output := new(s3.DeleteObjectsOutput) - delObjs := make([]*s3.DeletedObject, 0) - delErrs := make([]*s3.Error, 0) - for _, obj := range input.Delete.Objects { - objname := *obj.Key - er := s3utils.CheckDelObjArgs(ctx, bucname, objname) - if er != nil { - rerr := h.respErr(er) - derr := new(s3.Error) - derr.SetCode(rerr.Code()) - derr.SetMessage(rerr.Description()) - derr.SetKey(objname) - delErrs = append(delErrs, derr) - continue - } - er = h.objsvc.DeleteObject(ctx, ack, bucname, objname) - if er != nil { - rerr := h.respErr(er) - derr := new(s3.Error) - derr.SetCode(rerr.Code()) - derr.SetMessage(rerr.Description()) - derr.SetKey(objname) - delErrs = append(delErrs, derr) - } else { - dobj := new(s3.DeletedObject) - dobj.SetKey(objname) - delObjs = append(delObjs, dobj) - } - } - - output.SetDeleted(delObjs) - output.SetErrors(delErrs) - - responses.WriteSuccessResponse(w, output, "DeleteResult") + responses.WriteGetObjectResponse(w, r, obj, body) + return } -// GetObjectHandler - GET Object -// 
https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html -func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { +// DeleteObjectHandler . +func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - ack := cctx.GetAccessKey(r) var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - bucname, objname, rerr := requests.ParseBucketAndObject(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - if err = s3utils.CheckGetObjArgs(ctx, bucname, objname); err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + args, err := requests.ParseDeleteObjectRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - - obj, body, err := h.objsvc.GetObject(ctx, ack, bucname, objname, true) + err = h.objsvc.DeleteObject(ctx, args) if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - responses.WriteGetObjectResponse(w, r, obj, body) + responses.WriteDeleteObjectResponse(w, r, nil) + return } -// GetObjectACLHandler - GET Object ACL -func (h *Handlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) { +// DeleteObjectsHandler . 
+func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - ack := cctx.GetAccessKey(r) var err error defer func() { cctx.SetHandleInf(r, h.name(), err) }() - bucname, objname, rerr := requests.ParseBucketAndObject(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) + args, err := requests.ParseDeleteObjectsRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - acl, err := h.objsvc.GetObjectACL(ctx, ack, bucname, objname) + deletedObjects, err := h.objsvc.DeleteObjects(ctx, args) if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - responses.WriteGetObjectACLResponse(w, r, ack, acl) + responses.WriteDeleteObjectsResponse(w, r, h.toRespErr, deletedObjects) + return } +// ListObjectsHandler . func (h *Handlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() ack := cctx.GetAccessKey(r) @@ -362,35 +166,20 @@ func (h *Handlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) { cctx.SetHandleInf(r, h.name(), err) }() - bucname, rerr := requests.ParseBucket(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - // Extract all the listsObjectsV1 query params to their native values. 
- prefix, marker, delimiter, maxKeys, encodingType, rerr := getListObjectsV1Args(r.Form) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - err = s3utils.CheckListObjsArgs(ctx, bucname, prefix, marker) + args, err := requests.ParseListObjectsRequest(r) if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - list, err := h.objsvc.ListObjects(ctx, ack, bucname, prefix, delimiter, marker, maxKeys) + + list, err := h.objsvc.ListObjects(ctx, args) if err != nil { - rerr = h.respErr(err) - responses.WriteErrorResponse(w, r, rerr) + responses.WriteErrorResponse(w, r, h.toRespErr(err)) return } - responses.WriteListObjectsResponse(w, r, ack, bucname, prefix, marker, delimiter, encodingType, maxKeys, list) + responses.WriteListObjectsResponse(w, r, ack, list) + return } func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { @@ -423,7 +212,7 @@ func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) } err = s3utils.CheckListObjsArgs(ctx, bucname, prefix, marker) if err != nil { - rerr = h.respErr(err) + rerr = h.toRespErr(err) responses.WriteErrorResponse(w, r, rerr) return } @@ -440,7 +229,7 @@ func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) list, err := h.objsvc.ListObjectsV2(ctx, ack, bucname, prefix, token, delimiter, maxKeys, fetchOwner, startAfter) if err != nil { - rerr = h.respErr(err) + rerr = h.toRespErr(err) responses.WriteErrorResponse(w, r, rerr) return } @@ -449,17 +238,31 @@ func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) delimiter, encodingType, maxKeys, list) } -func pathToBucketAndObject(path string) (bucket, object string) { - path = strings.TrimPrefix(path, consts.SlashSeparator) - idx := strings.Index(path, consts.SlashSeparator) - if idx < 0 { - return path, "" +// GetObjectACLHandler - GET Object ACL +func 
(h *Handlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+	var err error
+	defer func() {
+		cctx.SetHandleInf(r, h.name(), err)
+	}()
+
+	ack := cctx.GetAccessKey(r)
+
+	bucname, objname, rerr := requests.ParseBucketAndObject(r)
+	if rerr != nil {
+		err = rerr
+		responses.WriteErrorResponse(w, r, rerr)
+		return
 	}
-	return path[:idx], path[idx+len(consts.SlashSeparator):]
-}
 
-func isReplace(r *http.Request) bool {
-	return r.Header.Get("X-Amz-Metadata-Directive") == "REPLACE"
+	acl, err := h.objsvc.GetObjectACL(ctx, ack, bucname, objname)
+	if err != nil {
+		rerr = h.toRespErr(err)
+		responses.WriteErrorResponse(w, r, rerr)
+		return
+	}
+
+	responses.WriteGetObjectACLResponse(w, r, ack, acl)
 }
 
 // Parse bucket url queries
diff --git a/s3/handlers/proto.go b/s3/handlers/proto.go
index ba14be994..ab6853100 100644
--- a/s3/handlers/proto.go
+++ b/s3/handlers/proto.go
@@ -17,20 +17,20 @@ type Handlerser interface {
 	HeadBucketHandler(w http.ResponseWriter, r *http.Request)
 	DeleteBucketHandler(w http.ResponseWriter, r *http.Request)
 	ListBucketsHandler(w http.ResponseWriter, r *http.Request)
-	GetBucketAclHandler(w http.ResponseWriter, r *http.Request)
 	PutBucketAclHandler(w http.ResponseWriter, r *http.Request)
+	GetBucketAclHandler(w http.ResponseWriter, r *http.Request)
 
 	// Object
 	PutObjectHandler(w http.ResponseWriter, r *http.Request)
-	HeadObjectHandler(w http.ResponseWriter, r *http.Request)
 	CopyObjectHandler(w http.ResponseWriter, r *http.Request)
+	HeadObjectHandler(w http.ResponseWriter, r *http.Request)
+	GetObjectHandler(w http.ResponseWriter, r *http.Request)
 	DeleteObjectHandler(w http.ResponseWriter, r *http.Request)
 	DeleteObjectsHandler(w http.ResponseWriter, r *http.Request)
-	GetObjectHandler(w http.ResponseWriter, r *http.Request)
-	GetObjectACLHandler(w http.ResponseWriter, r *http.Request)
 	ListObjectsHandler(w http.ResponseWriter, r 
*http.Request) + GetObjectACLHandler(w http.ResponseWriter, r *http.Request) // Multipart diff --git a/s3/protocol/request.go b/s3/protocol/request.go deleted file mode 100644 index 4d917a37e..000000000 --- a/s3/protocol/request.go +++ /dev/null @@ -1,425 +0,0 @@ -package protocol - -import ( - "encoding/base64" - "encoding/xml" - "errors" - "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" - "github.com/gorilla/mux" - "io" - "math" - "net/http" - "net/url" - "reflect" - "strconv" - "strings" - "time" -) - -var byteSliceType = reflect.TypeOf([]byte{}) - -func ParseRequest(r *http.Request, input interface{}) (err error) { - inv, err := getInputValue(input) - if err != nil { - return - } - - err = parseLocation(r, inv) - if err != nil { - return - } - - ptyp, pftp, pfvl := getPayload(inv) - if ptyp == noPayload { - return - } - - if ptyp == "structure" || ptyp == "" { - err = parseXMLBody(r, inv) - } else { - err = parseBody(r, pftp, pfvl) - } - - return - -} - -func parseXMLBody(r *http.Request, inv reflect.Value) (err error) { - defer r.Body.Close() - decoder := xml.NewDecoder(r.Body) - err = xmlutil.UnmarshalXML(inv.Addr().Interface(), decoder, "") - return -} - -func parseBody(r *http.Request, pftp reflect.Type, pfvl reflect.Value) (err error) { - var b []byte - switch pfvl.Interface().(type) { - case []byte: - defer r.Body.Close() - b, err = io.ReadAll(r.Body) - if err != nil { - return - } - pfvl.Set(reflect.ValueOf(b)) - case *string: - defer r.Body.Close() - b, err = io.ReadAll(r.Body) - if err != nil { - return - } - val := string(b) - pfvl.Set(reflect.ValueOf(&val)) - default: - switch pftp.String() { - case "io.ReadSeeker": - // keep the request body - default: - err = errValueNotSet - } - } - return -} - -func getInputValue(input interface{}) (inv reflect.Value, err error) { - typErr := fmt.Errorf("input <%T> must be non nil or ", input) - - if input == nil { - 
err = typErr - return - } - - t := reflect.TypeOf(input) - k := t.Kind() - - if k != reflect.Pointer { - err = typErr - return - } - - inv = reflect.ValueOf(input).Elem() - if !inv.IsValid() { - err = typErr - return - } - - t = t.Elem() - k = t.Kind() - - if k == reflect.Struct { - return - } - - if k != reflect.Pointer { - err = typErr - return - } - - t = t.Elem() - k = t.Kind() - if k != reflect.Struct { - err = typErr - return - } - - if inv.Elem().IsValid() { - inv = inv.Elem() - return - } - - inv.Set(reflect.New(inv.Type().Elem())) - inv = inv.Elem() - - return -} - -func parseLocation(r *http.Request, inv reflect.Value) (err error) { - query := r.URL.Query() - -loop: - for i := 0; i < inv.NumField(); i++ { - fv := inv.Field(i) - ft := inv.Type().Field(i) - if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { - continue - } - - if ft.Tag.Get("ignore") != "" { - continue - } - - name := ifemp(ft.Tag.Get("locationName"), ft.Name) - - if ft.Tag.Get("marshal-as") == "blob" { - if fv.Kind() == reflect.Pointer { - fv.Set(reflect.New(fv.Type().Elem())) - fv = fv.Elem() - } - fv = fv.Convert(byteSliceType) - } - - switch ft.Tag.Get("location") { - case "headers": - prefix := ft.Tag.Get("locationName") - err = parseHeaderMap(r.Header, fv, prefix) - case "header": - locVal := r.Header.Get(name) - err = parseLocationValue(locVal, fv, ft.Tag) - case "uri": - locVal := mux.Vars(r)[name] - err = parseLocationValue(locVal, fv, ft.Tag) - case "querystring": - err = parseQueryString(query, fv, name, ft.Tag) - default: - continue loop - } - if err != nil { - return - } - if ft.Tag.Get("required") == "true" && !reflect.Indirect(fv).IsValid() { - err = fmt.Errorf("field %s is required", ft.Name) - return - } - } - - return -} - -func parseQueryString(query url.Values, fv reflect.Value, name string, tag reflect.StructTag) (err error) { - switch value := fv.Interface().(type) { - case []*string: - vals := make([]*string, len(query[name])) - for i, oval := range query[name] { - val 
:= oval - vals[i] = &val - } - if len(vals) > 0 { - fv.Set(reflect.ValueOf(vals)) - } - case map[string]*string: - vals := make(map[string]*string, len(query)) - for key := range query { - val := query.Get(key) - vals[key] = &val - } - if len(vals) > 0 { - fv.Set(reflect.ValueOf(vals)) - } - case map[string][]*string: - for key, items := range value { - for _, item := range items { - query.Add(key, *item) - } - } - vals := make(map[string][]*string, len(query)) - for key := range query { - vals[key] = make([]*string, len(query[key])) - for i := range query[key] { - vals[key][i] = &(query[key][i]) - } - } - if len(vals) > 0 { - fv.Set(reflect.ValueOf(vals)) - } - default: - locVal := query.Get(name) - err = parseLocationValue(locVal, fv, tag) - if err != nil { - return - } - } - - return -} - -func parseHeaderMap(headers http.Header, fv reflect.Value, prefix string) (err error) { - if len(headers) == 0 { - return - } - switch fv.Interface().(type) { - case map[string]*string: - vals := map[string]*string{} - for key := range headers { - if !hasPrefixFold(key, prefix) { - continue - } - key = strings.ToLower(key) - val := headers.Get(key) - vals[key[len(prefix):]] = &val - } - if len(vals) != 0 { - fv.Set(reflect.ValueOf(vals)) - } - default: - err = errValueNotSet - } - return -} - -func parseLocationValue(locVal string, v reflect.Value, tag reflect.StructTag) (err error) { - switch tag.Get("type") { - case "jsonvalue": - if len(locVal) == 0 { - return - } - case "blob": - if len(locVal) == 0 { - return - } - default: - if !v.IsValid() || (locVal == "" && (v.Kind() != reflect.Pointer || v.Elem().Kind() != reflect.String)) { - return - } - } - - switch v.Interface().(type) { - case *string: - if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" { - var b []byte - b, err = base64.StdEncoding.DecodeString(locVal) - if err != nil { - return - } - locVal = string(b) - } - v.Set(reflect.ValueOf(&locVal)) - case []*string: - if tag.Get("location") 
!= "header" || tag.Get("enum") == "" { - return fmt.Errorf("%T is only supported with location header and enum shapes", v) - } - var vals []*string - vals, err = splitHeaderVal(locVal) - if err != nil { - return - } - if len(vals) > 0 { - v.Set(reflect.ValueOf(vals)) - } - case []byte: - var b []byte - b, err = base64.StdEncoding.DecodeString(locVal) - if err != nil { - return - } - v.Set(reflect.ValueOf(b)) - case *bool: - var b bool - b, err = strconv.ParseBool(locVal) - if err != nil { - return - } - v.Set(reflect.ValueOf(&b)) - case *int64: - var i int64 - i, err = strconv.ParseInt(locVal, 10, 64) - if err != nil { - return - } - v.Set(reflect.ValueOf(&i)) - case *float64: - var f float64 - switch { - case strings.EqualFold(locVal, floatNaN): - f = math.NaN() - case strings.EqualFold(locVal, floatInf): - f = math.Inf(1) - case strings.EqualFold(locVal, floatNegInf): - f = math.Inf(-1) - default: - f, err = strconv.ParseFloat(locVal, 64) - if err != nil { - return - } - } - v.Set(reflect.ValueOf(&f)) - case *time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.RFC822TimeFormatName - if tag.Get("location") == "querystring" { - format = protocol.ISO8601TimeFormatName - } - } - var t time.Time - t, err = protocol.ParseTime(format, locVal) - if err != nil { - return - } - v.Set(reflect.ValueOf(&t)) - case aws.JSONValue: - escaping := protocol.NoEscape - if tag.Get("location") == "header" { - escaping = protocol.Base64Escape - } - var m aws.JSONValue - m, err = protocol.DecodeJSONValue(locVal, escaping) - if err != nil { - return - } - v.Set(reflect.ValueOf(m)) - default: - err = fmt.Errorf("unsupported value for input %v (%s)", v.Interface(), v.Type()) - return - } - - return -} - -func hasPrefixFold(s, prefix string) bool { - return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) -} - -func splitHeaderVal(header string) (vals []*string, err error) { - pv := ' ' - start := 0 - quote := false - for i, v := 
range header { - opv := pv - pv = v - if quote { - if v == '"' && opv != '\\' { - quote = false - val := header[start : i+1] - val, err = strconv.Unquote(val) - if err != nil { - return - } - vals = append(vals, &val) - start = i + 1 - } - continue - } - - if v == '"' && opv != '\\' { - quote = true - continue - } - - if v == ',' && opv == '"' { - start += 1 - continue - } - - if v == ',' { - val := header[start:i] - vals = append(vals, &val) - start = i + 1 - } - - continue - } - - if quote { - err = errors.New("unquote part") - return - } - - if start < len(header) || pv == ',' { - val := header[start:] - vals = append(vals, &val) - } - - return -} diff --git a/s3/requests/input.go b/s3/requests/input.go new file mode 100644 index 000000000..09d71dba7 --- /dev/null +++ b/s3/requests/input.go @@ -0,0 +1,330 @@ +package requests + +import ( + "encoding/base64" + "encoding/xml" + "errors" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" + "github.com/gorilla/mux" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +type fields map[string]bool + +func ParseInput(r *http.Request, input interface{}, supports fields) (err error) { + inv, err := valueOf(input) + if err != nil { + return + } + err = parseLocation(r, inv, supports) + if err != nil { + return + } + err = parseBody(r, inv, supports) + return +} + +func valueOf(input interface{}) (inv reflect.Value, err error) { + defer func() { + if err != nil { + err = ErrInvalidInputValue{err} + } + }() + if input == nil { + err = errors.New("input is nil") + return + } + t := reflect.TypeOf(input) + k := t.Kind() + if k != reflect.Pointer { + err = errors.New("input is non pointer") + return + } + inv = reflect.ValueOf(input).Elem() + if !inv.IsValid() { + err = errors.New("input is nil pointer") + return + } + t = t.Elem() + k = t.Kind() + if k == reflect.Struct { + return + } + if k != reflect.Pointer { + err = errors.New("the type input 
point to is neither struct nor pointer") + return + } + t = t.Elem() + k = t.Kind() + if k != reflect.Struct { + err = errors.New("the pointer input point to is not point to struct") + return + } + if inv.Elem().IsValid() { + inv = inv.Elem() + return + } + inv.Set(reflect.New(inv.Type().Elem())) + inv = inv.Elem() + return +} + +func parseLocation(r *http.Request, inv reflect.Value, supports fields) (err error) { + vars := mux.Vars(r) + headers := r.Header + query := r.URL.Query() + for i := 0; i < inv.NumField(); i++ { + fv := inv.Field(i) + ft := inv.Type().Field(i) + err = parseLocationField(vars, query, headers, fv, ft, supports) + if err != nil { + return + } + } + return +} + +func parseLocationField(vars map[string]string, query url.Values, headers http.Header, fv reflect.Value, + ft reflect.StructField, supports fields) (err error) { + if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { + return + } + ftag := ft.Tag + loca := ftag.Get("location") + name := ftag.Get("locationName") + requ := ftag.Get("required") == "true" + supp := supports[ft.Name] + var ( + vals map[string]*string + isVals bool + val string + has bool + ) + switch loca { + case "querystring": + has = query.Has(name) + val = query.Get(name) + case "uri": + val, has = vars[name] + case "header": + _, has = headers[name] + val = headers.Get(name) + case "headers": + vals, has = getHeaderValues(headers, name) + isVals = true + default: + return + } + if !supp && has { + err = ErrWithUnsupportedParam{name} + return + } + if requ && !has { + err = ErrMissingRequiredParam{name} + return + } + if isVals { + err = parseValues(vals, fv) + } else { + err = parseValue(val, fv, ftag) + if err != nil && !errors.As(err, new(ErrTypeNotSet)) { + err = ErrFailedParseValue{name, err} + } + } + return +} + +func getPayloadField(inv reflect.Value) (ft reflect.StructField, ok bool) { + mt, ok := inv.Type().FieldByName("_") + if !ok { + return + } + if mt.Tag.Get("nopayload") != "" { + return + } + pname := 
mt.Tag.Get("payload") + if pname == "" { + return + } + ft, ok = inv.Type().FieldByName(pname) + return +} + +func parseBody(r *http.Request, inv reflect.Value, supports fields) (err error) { + pft, ok := getPayloadField(inv) + if !ok { + return + } + name := pft.Name + supp := supports[name] + requ := pft.Tag.Get("required") == "true" + ptyp := pft.Tag.Get("type") + if ptyp != "structure" { + return + } + if !supp && r.ContentLength > 0 { + err = ErrWithUnsupportedParam{name} + return + } + if requ && r.ContentLength < 1 { + err = ErrMissingRequiredParam{name} + return + } + if r.ContentLength < 1 { + return + } + decoder := xml.NewDecoder(r.Body) + err = xmlutil.UnmarshalXML(inv.Addr().Interface(), decoder, "") + if err != nil { + err = ErrFailedDecodeXML{err} + } + return +} + +func getHeaderValues(header http.Header, prefix string) (vals map[string]*string, has bool) { + defer func() { + has = len(vals) > 0 + }() + vals = make(map[string]*string) + if len(header) == 0 { + return + } + for key := range header { + if len(key) >= len(prefix) && strings.EqualFold(key[:len(prefix)], prefix) { + val := header.Get(key) + k := strings.ToLower(key[len(prefix):]) + vals[k] = &val + } + } + return +} + +func parseValues(values map[string]*string, fv reflect.Value) (err error) { + _, ok := fv.Interface().(map[string]*string) + if !ok { + err = ErrTypeNotSet{fv.Type()} + return + } + fv.Set(reflect.ValueOf(values)) + return +} + +func parseValue(value string, rv reflect.Value, tag reflect.StructTag) (err error) { + switch rv.Interface().(type) { + case *string: + rv.Set(reflect.ValueOf(&value)) + return + case []*string: + var val []*string + val, err = split(value) + if err != nil { + return + } + rv.Set(reflect.ValueOf(&val)) + return + case []byte: + var val []byte + val, err = base64.StdEncoding.DecodeString(value) + if err != nil { + return + } + rv.Set(reflect.ValueOf(val)) + return + case *bool: + var val bool + val, err = strconv.ParseBool(value) + if err != nil { + 
return + } + rv.Set(reflect.ValueOf(&val)) + return + case *int64: + var val int64 + val, err = strconv.ParseInt(value, 10, 64) + if err != nil { + return + } + rv.Set(reflect.ValueOf(&val)) + return + case *time.Time: + var val time.Time + format := getTimeFormat(tag) + val, err = protocol.ParseTime(format, value) + if err != nil { + return + } + rv.Set(reflect.ValueOf(&val)) + return + default: + err = ErrTypeNotSet{rv.Type()} + return + } +} + +func getTimeFormat(tag reflect.StructTag) (format string) { + format = tag.Get("timestampFormat") + if format != "" { + return + } + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + return + } + format = protocol.RFC822TimeFormatName + return +} + +func split(value string) (vals []*string, err error) { + pv := ' ' + start := 0 + quote := false + for i, v := range value { + opv := pv + pv = v + if quote { + if v == '"' && opv != '\\' { + quote = false + val := value[start : i+1] + val, err = strconv.Unquote(val) + if err != nil { + return + } + val = strings.TrimSpace(val) + vals = append(vals, &val) + start = i + 1 + } + continue + } + if v == '"' && opv != '\\' { + quote = true + continue + } + if v == ',' && opv == '"' { + start += 1 + continue + } + if v == ',' { + val := value[start:i] + val = strings.TrimSpace(val) + vals = append(vals, &val) + start = i + 1 + } + continue + } + if quote { + err = errors.New("unquote part") + return + } + if start < len(value) || pv == ',' { + val := value[start:] + val = strings.TrimSpace(val) + vals = append(vals, &val) + } + return +} diff --git a/s3/requests/input_errors.go b/s3/requests/input_errors.go new file mode 100644 index 000000000..b2da8113a --- /dev/null +++ b/s3/requests/input_errors.go @@ -0,0 +1,65 @@ +package requests + +import ( + "fmt" + "reflect" +) + +// ErrInvalidInputValue . 
// ErrInvalidInputValue wraps a lower-level error encountered while reading
// a request input value.
type ErrInvalidInputValue struct {
	er error
}

func (err ErrInvalidInputValue) Error() string {
	return fmt.Sprintf("invalid input value: %v", err.er)
}

// ErrTypeNotSet reports an input struct field whose type is not supported
// by the value parser.
type ErrTypeNotSet struct {
	typ reflect.Type
}

func (err ErrTypeNotSet) Error() string {
	return fmt.Sprintf("type <%s> not set", err.typ.String())
}

// ErrFailedDecodeXML wraps an error produced while decoding the XML
// request body.
type ErrFailedDecodeXML struct {
	err error
}

func (err ErrFailedDecodeXML) Error() string {
	return fmt.Sprintf("decode xml: %v", err.err)
}

// ErrWithUnsupportedParam reports a request parameter that this server
// does not support.
type ErrWithUnsupportedParam struct {
	param string
}

func (err ErrWithUnsupportedParam) Error() string {
	// Fix: the message previously read "unsported".
	return fmt.Sprintf("param %s is unsupported", err.param)
}

// ErrFailedParseValue reports a named parameter whose raw value could not
// be parsed into the target type.
type ErrFailedParseValue struct {
	name string
	err  error
}

// Name returns the name of the parameter that failed to parse.
func (err ErrFailedParseValue) Name() string {
	return err.name
}

func (err ErrFailedParseValue) Error() string {
	return fmt.Sprintf("parse <%s> value: %v", err.name, err.err)
}

// ErrMissingRequiredParam reports a required request parameter that was
// absent.
type ErrMissingRequiredParam struct {
	param string
}

func (err ErrMissingRequiredParam) Error() string {
	return fmt.Sprintf("missing required param <%s>", err.param)
}
-type CreateBucketRequest struct { - AccessKey string - Bucket string - ACL string - Region string -} - -func ParseCreateBucketRequest(r *http.Request) (req *CreateBucketRequest, rerr *responses.Error) { - req = &CreateBucketRequest{} - req.AccessKey = cctx.GetAccessKey(r) - req.Bucket, rerr = ParseBucket(r) - if rerr != nil { - return - } - req.ACL, rerr = ParseBucketACL(r) - if rerr != nil { - return - } - req.Region, rerr = ParseLocation(r) - return -} - -// DeleteBucketRequest . -type DeleteBucketRequest struct { - AccessKey string - Bucket string -} - -func ParseDeleteBucketRequest(r *http.Request) (req *DeleteBucketRequest, rerr *responses.Error) { - req = &DeleteBucketRequest{} - req.AccessKey = cctx.GetAccessKey(r) - req.Bucket, rerr = ParseBucket(r) - return -} - -// HeadBucketRequest . -type HeadBucketRequest struct { - AccessKey string - Bucket string -} - -func ParseHeadBucketRequest(r *http.Request) (req *HeadBucketRequest, rerr *responses.Error) { - req = &HeadBucketRequest{} - req.AccessKey = cctx.GetAccessKey(r) - req.Bucket, rerr = ParseBucket(r) - return -} - -// ListBucketsRequest . -type ListBucketsRequest struct { - AccessKey string -} - -func ParseListBucketsRequest(r *http.Request) (req *ListBucketsRequest, rerr *responses.Error) { - req = &ListBucketsRequest{} - req.AccessKey = cctx.GetAccessKey(r) - return -} - -// GetBucketACLRequest . -type GetBucketACLRequest struct { - AccessKey string - Bucket string -} - -func ParseGetBucketACLRequest(r *http.Request) (req *GetBucketACLRequest, rerr *responses.Error) { - req = &GetBucketACLRequest{} - req.AccessKey = cctx.GetAccessKey(r) - req.Bucket, rerr = ParseBucket(r) - return -} - -// PutBucketACLRequest . 
-type PutBucketACLRequest struct { - AccessKey string - Bucket string - ACL string -} - -func ParsePutBucketAclRequest(r *http.Request) (req *PutBucketACLRequest, rerr *responses.Error) { - req = &PutBucketACLRequest{} - req.AccessKey = cctx.GetAccessKey(r) - req.Bucket, rerr = ParseBucket(r) - if rerr != nil { - return - } - req.ACL, rerr = ParseBucketACL(r) - return -} diff --git a/s3/requests/parsers_bucket.go b/s3/requests/parsers_bucket.go new file mode 100644 index 000000000..6df265a9a --- /dev/null +++ b/s3/requests/parsers_bucket.go @@ -0,0 +1,122 @@ +package requests + +import ( + "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/cctx" + "github.com/bittorrent/go-btfs/s3/services/object" + "net/http" +) + +var createBucketSupports = fields{ + "ACL": true, + "Bucket": true, + "CreateBucketConfiguration": true, +} + +func ParseCreateBucketRequest(r *http.Request) (args *object.CreateBucketArgs, err error) { + var input s3.CreateBucketInput + err = ParseInput(r, &input, createBucketSupports) + if err != nil { + return + } + args = &object.CreateBucketArgs{ + AccessKey: cctx.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + if err != nil { + return + } + args.ACL, err = ValidateBucketACL(input.ACL) + if err != nil { + return + } + args.Region, err = ValidateCreateBucketConfiguration(input.CreateBucketConfiguration) + return +} + +var headBucketSupports = fields{ + "Bucket": true, +} + +func ParseHeadBucketRequest(r *http.Request) (args *object.GetBucketArgs, err error) { + var input s3.HeadBucketInput + err = ParseInput(r, &input, headBucketSupports) + if err != nil { + return + } + args = &object.GetBucketArgs{ + AccessKey: cctx.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + return +} + +var deleteBucketSupports = fields{ + "Bucket": true, +} + +func ParseDeleteBucketRequest(r *http.Request) (args *object.DeleteBucketArgs, err error) { + var input s3.DeleteBucketInput + err = 
ParseInput(r, &input, deleteBucketSupports) + if err != nil { + return + } + args = &object.DeleteBucketArgs{ + AccessKey: cctx.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + return +} + +var listBucketsSupports = fields{} + +func ParseListBucketsRequest(r *http.Request) (args *object.ListBucketsArgs, err error) { + var input s3.ListBucketsInput + err = ParseInput(r, input, listBucketsSupports) + if err != nil { + return + } + args = &object.ListBucketsArgs{ + AccessKey: cctx.GetAccessKey(r), + } + return +} + +var putBucketACLSupports = fields{ + "ACL": true, + "Bucket": true, +} + +func ParsePutBucketAclRequest(r *http.Request) (args *object.PutBucketACLArgs, err error) { + var input s3.PutBucketAclInput + err = ParseInput(r, &input, putBucketACLSupports) + if err != nil { + return + } + args = &object.PutBucketACLArgs{ + AccessKey: cctx.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + if err != nil { + return + } + args.Bucket, err = ValidateBucketACL(input.ACL) + return +} + +var getBucketACLSupports = fields{ + "Bucket": true, +} + +func ParseGetBucketACLRequest(r *http.Request) (args *object.GetBucketACLArgs, err error) { + var input s3.GetBucketAclInput + err = ParseInput(r, &input, getBucketACLSupports) + if err != nil { + return + } + args = &object.GetBucketACLArgs{ + AccessKey: cctx.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + return +} diff --git a/s3/requests/parsers_common.go b/s3/requests/parsers_common.go deleted file mode 100644 index 89ca3655b..000000000 --- a/s3/requests/parsers_common.go +++ /dev/null @@ -1,109 +0,0 @@ -package requests - -import ( - "encoding/xml" - "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/s3utils" - "github.com/gorilla/mux" - "net/http" - "net/url" - "path" 
-) - -func ParseBucketAndObject(r *http.Request) (bucket string, object string, rerr *responses.Error) { - bucket, rerr = ParseBucket(r) - if rerr != nil { - return - } - object, rerr = ParseObject(r) - return -} - -func ParseBucket(r *http.Request) (bucket string, rerr *responses.Error) { - bucket = mux.Vars(r)["Bucket"] - err := s3utils.CheckValidBucketNameStrict(bucket) - if err != nil { - rerr = responses.ErrInvalidBucketName - } - return -} - -func ParseObject(r *http.Request) (object string, rerr *responses.Error) { - object, err := unescapePath(mux.Vars(r)["Key"]) - if err != nil { - rerr = responses.ErrInvalidRequestParameter - } - return -} - -func ParseLocation(r *http.Request) (location string, rerr *responses.Error) { - if r.ContentLength != 0 { - locationCfg := s3.CreateBucketConfiguration{} - decoder := xml.NewDecoder(r.Body) - err := xmlutil.UnmarshalXML(&locationCfg, decoder, "") - if err != nil { - rerr = responses.ErrMalformedXML - return - } - location = *locationCfg.LocationConstraint - } - if len(location) == 0 { - location = consts.DefaultLocation - } - if !consts.SupportedLocations[location] { - rerr = responses.ErrNotImplemented - } - - return -} - -func ParseBucketACL(r *http.Request) (acl string, rerr *responses.Error) { - acl = r.Header.Get(consts.AmzACL) - if len(acl) == 0 { - acl = consts.DefaultBucketACL - } - if !consts.SupportedBucketACLs[acl] { - rerr = responses.ErrNotImplemented - } - return -} - -func ParseObjectACL(r *http.Request) (acl string, rerr *responses.Error) { - acl = r.Header.Get(consts.AmzACL) - if len(acl) == 0 { - acl = consts.DefaultObjectACL - } - if !consts.SupportedObjectACLs[acl] { - rerr = responses.ErrNotImplemented - } - return -} - -// unescapePath is similar to url.PathUnescape or url.QueryUnescape -// depending on input, additionally also handles situations such as -// `//` are normalized as `/`, also removes any `/` prefix before -// returning. 
-func unescapePath(p string) (string, error) { - ep, err := url.PathUnescape(p) - if err != nil { - return "", err - } - return trimLeadingSlash(ep), nil -} - -func trimLeadingSlash(ep string) string { - if len(ep) > 0 && ep[0] == '/' { - // Path ends with '/' preserve it - if ep[len(ep)-1] == '/' && len(ep) > 1 { - ep = path.Clean(ep) - ep += "/" - } else { - ep = path.Clean(ep) - } - ep = ep[1:] - } - return ep -} diff --git a/s3/requests/parsers_object.go b/s3/requests/parsers_object.go new file mode 100644 index 000000000..75e4eada6 --- /dev/null +++ b/s3/requests/parsers_object.go @@ -0,0 +1,259 @@ +package requests + +import ( + "errors" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/cctx" + "github.com/bittorrent/go-btfs/s3/services/object" + "github.com/bittorrent/go-btfs/s3/utils/hash" + "net/http" +) + +var putObjectSupports = fields{ + "Body": true, + "Bucket": true, + "Key": true, + "ContentLength": true, + "ContentEncoding": true, + "ContentType": true, + "Expires": true, + "ContentMD5": true, + "ChecksumSHA256": true, +} + +func ParsePutObjectRequest(r *http.Request) (args *object.PutObjectArgs, err error) { + var input s3.PutObjectInput + err = ParseInput(r, &input, putObjectSupports) + if err != nil { + return + } + args = &object.PutObjectArgs{ + AccessKey: cctx.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + if err != nil { + return + } + args.Object, err = ValidateObjectName(input.Key) + if err != nil { + return + } + args.ContentLength, err = ValidateContentLength(input.ContentLength) + if err != nil { + return + } + args.ContentType, err = ValidateContentType(input.ContentType) + if err != nil { + return + } + args.ContentEncoding, err = ValidateContentEncoding(input.ContentEncoding) + if err != nil { + return + } + args.Expires, err = ValidateExpires(input.Expires) + if err != nil { + return + } + contentMD5, err := ValidateContentMD5(input.ContentMD5) + if err != nil { + return + } + 
checksumSHA256, err := ValidateCheckSum(input.ChecksumSHA256) + if err != nil { + return + } + args.Body, err = hash.NewReader( + r.Body, args.ContentLength, contentMD5, + checksumSHA256, args.ContentLength, + ) + return +} + +var copyObjectSupports = fields{ + "Bucket": true, + "Key": true, + "CopySource": true, + "ContentEncoding": true, + "ContentType": true, + "Expires": true, + "MetadataDirective": true, +} + +func ParseCopyObjectRequest(r *http.Request) (args *object.CopyObjectArgs, err error) { + var input s3.CopyObjectInput + err = ParseInput(r, &input, copyObjectSupports) + if err != nil { + return + } + args = &object.CopyObjectArgs{ + AccessKey: cctx.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + if err != nil { + return + } + args.Object, err = ValidateObjectName(input.Key) + if err != nil { + return + } + args.SrcBucket, args.SrcObject, err = ValidateCopySource(input.CopySource) + if err != nil { + return + } + args.ReplaceMeta, err = ValidateMetadataDirective(input.MetadataDirective) + if err != nil { + return + } + if args.Bucket == args.SrcBucket && args.Object == args.SrcObject { + err = ErrCopyDestInvalid + return + } + args.ContentType, err = ValidateContentType(input.ContentType) + if err != nil { + return + } + args.ContentEncoding, err = ValidateContentEncoding(input.ContentEncoding) + if err != nil { + return + } + args.Expires, err = ValidateExpires(input.Expires) + return +} + +var headObjectSupports = fields{ + "Bucket": true, + "Key": true, +} + +func ParseHeadObjectRequest(r *http.Request) (args *object.GetObjectArgs, err error) { + var input s3.HeadObjectInput + err = ParseInput(r, &input, headObjectSupports) + if err != nil { + return + } + args = &object.GetObjectArgs{ + AccessKey: cctx.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + if err != nil { + return + } + args.Object, err = ValidateObjectName(input.Key) + if err != nil { + return + } + args.WithBody = false + return 
+} + +var getObjectSupports = fields{ + "Bucket": true, + "Key": true, +} + +func ParseGetObjectRequest(r *http.Request) (args *object.GetObjectArgs, err error) { + var input s3.GetObjectInput + err = ParseInput(r, &input, getObjectSupports) + if err != nil { + return + } + args = &object.GetObjectArgs{ + AccessKey: cctx.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + if err != nil { + return + } + args.Object, err = ValidateObjectName(input.Key) + if err != nil { + return + } + args.WithBody = true + return +} + +var deleteObjectSupports = fields{ + "Bucket": true, + "Key": true, +} + +func ParseDeleteObjectRequest(r *http.Request) (args *object.DeleteObjectArgs, err error) { + var input s3.DeleteObjectInput + err = ParseInput(r, &input, deleteObjectSupports) + if err != nil { + return + } + args = &object.DeleteObjectArgs{ + AccessKey: cctx.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + if err != nil { + return + } + args.Object, err = ValidateObjectName(input.Key) + return +} + +var deleteObjectsSupports = fields{ + "Bucket": true, + "Delete": true, +} + +func ParseDeleteObjectsRequest(r *http.Request) (args *object.DeleteObjectsArgs, err error) { + var input s3.DeleteObjectsInput + err = ParseInput(r, &input, deleteObjectsSupports) + if err != nil { + return + } + args = &object.DeleteObjectsArgs{ + AccessKey: cctx.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + if err != nil { + return + } + args.ToDeleteObjects, args.Quite, err = ValidateObjectsDelete(input.Delete) + return +} + +var listObjectsSupports = fields{ + "Bucket": true, + "MaxKeys": true, + "Prefix": true, + "Marker": true, + "Delimiter": true, + "EncodingType": true, +} + +func ParseListObjectsRequest(r *http.Request) (args *object.ListObjectsArgs, err error) { + var input s3.ListObjectsInput + err = ParseInput(r, &input, listObjectsSupports) + if err != nil { + var er ErrFailedParseValue + if errors.As(err, 
&er) && er.Name() == "max-keys" { + err = ErrMaxKeysInvalid + } + return + } + args = &object.ListObjectsArgs{ + AccessKey: cctx.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + if err != nil { + return + } + args.MaxKeys, err = ValidateMaxKeys(input.MaxKeys) + if err != nil { + return + } + args.Marker, args.Prefix, err = ValidateMarkerAndPrefix(input.Marker, input.Prefix) + if err != nil { + return + } + args.Delimiter, err = ValidateDelimiter(input.Delimiter) + if err != nil { + return + } + args.EncodingType, err = ValidateEncodingType(input.EncodingType) + return +} diff --git a/s3/requests/validate.go b/s3/requests/validate.go new file mode 100644 index 000000000..445e80810 --- /dev/null +++ b/s3/requests/validate.go @@ -0,0 +1,305 @@ +package requests + +import ( + "crypto/md5" + "encoding/base64" + "encoding/hex" + "errors" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/etag" + "github.com/bittorrent/go-btfs/s3/services/object" + "net/url" + "path" + "regexp" + "strings" + "time" + "unicode/utf8" +) + +func ValidateBucketACL(acl *string) (val string, err error) { + if acl == nil || *acl == "" { + val = consts.DefaultBucketACL + } else { + val = *acl + } + if !consts.SupportedBucketACLs[val] { + err = ErrACLUnsupported + return + } + return +} + +var ( + validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-]{1,61}[A-Za-z0-9]$`) + ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) +) + +func ValidateBucketName(bucketName *string) (val string, err error) { + if bucketName == nil || *bucketName == "" { + return + } + val = *bucketName + if ipAddress.MatchString(val) || + !validBucketName.MatchString(val) || + strings.Contains(val, "..") || + strings.Contains(val, ".-") || + strings.Contains(val, "-.") { + err = ErrBucketNameInvalid + } + return +} + +func ValidateCreateBucketConfiguration(configuration *s3.CreateBucketConfiguration) (val string, err 
error) { + if configuration == nil || configuration.LocationConstraint == nil || *configuration.LocationConstraint == "" { + val = consts.DefaultBucketRegion + } + if !consts.SupportedBucketRegions[val] { + err = ErrRegionUnsupported + } + return +} + +func ValidateObjectName(objectName *string) (val string, err error) { + if objectName == nil || *objectName == "" { + return + } + val, err = url.PathUnescape(*objectName) + if err != nil { + err = ErrObjectNameInvalid + return + } + if len(val) > 1024 { + err = ErrObjectNameTooLong + return + } + if strings.HasPrefix(val, "/") { + err = ErrObjectNamePrefixSlash + return + } + if !utf8.ValidString(val) || strings.Contains(val, `//`) { + err = ErrObjectNameInvalid + } + for _, p := range strings.Split(val, "/") { + switch strings.TrimSpace(p) { + case "..", ".": + err = ErrObjectNameInvalid + return + } + } + return +} + +func ValidateContentMD5(contentMD5 *string) (val string, err error) { + if contentMD5 == nil { + return + } + if *contentMD5 == "" { + err = ErrInvalidContentMd5 + return + } + b, err := base64.StdEncoding.Strict().DecodeString(*contentMD5) + if err != nil || len(b) != md5.Size { + err = ErrInvalidContentMd5 + return + } + val = etag.ETag(b).String() + return +} + +func ValidateCheckSum(checksumSHA256 *string) (val string, err error) { + if checksumSHA256 == nil || *checksumSHA256 == "" { + return + } + if *checksumSHA256 == consts.UnsignedSHA256 { + return + } + b, err := hex.DecodeString(*checksumSHA256) + if err != nil || len(b) == 0 { + err = ErrInvalidChecksumSha256 + return + } + val = hex.EncodeToString(b) + return +} + +func ValidateContentLength(contentLength *int64) (val int64, err error) { + if contentLength == nil { + return + } + if *contentLength == -1 { + err = ErrContentLengthMissing + return + } + if *contentLength < 1 { + err = ErrContentLengthTooSmall + return + } + if *contentLength > consts.MaxObjectSize { + err = ErrContentLengthTooLarge + return + } + val = *contentLength + 
return +} + +func ValidateContentType(contentType *string) (val string, err error) { + if contentType == nil || *contentType == "" { + val = consts.DefaultContentType + return + } + val = *contentType + return +} + +func ValidateContentEncoding(contentEncoding *string) (val string, err error) { + if contentEncoding == nil || *contentEncoding == "" { + return + } + encs := make([]string, 0) + for _, enc := range strings.Split(*contentEncoding, ",") { + if enc != consts.StreamingContentEncoding { + encs = append(encs, enc) + } + } + val = strings.Join(encs, ",") + return +} + +func ValidateExpires(expires *time.Time) (val time.Time, err error) { + if expires == nil { + return + } + val = *expires + return +} + +func ValidateCopySource(copySource *string) (val1, val2 string, err error) { + if copySource == nil { + return + } + src, err := url.QueryUnescape(*copySource) + if err != nil { + src = *copySource + err = nil + } + src = strings.TrimPrefix(*copySource, consts.SlashSeparator) + idx := strings.Index(src, consts.SlashSeparator) + if idx < 0 { + err = ErrCopySrcInvalid + return + } + val1 = src[:idx] + val2 = src[idx+len(consts.SlashSeparator):] + if val1 == "" || val2 == "" { + err = ErrCopySrcInvalid + return + } + val1, err = ValidateBucketName(&val1) + if err != nil { + return + } + val2, err = ValidateObjectName(&val2) + return +} + +func ValidateMetadataDirective(metadataDirective *string) (val bool, err error) { + if metadataDirective == nil { + return + } + if *metadataDirective == "REPLACE" { + val = true + } + return +} + +func ValidateObjectsDelete(delete *s3.Delete) (vals []*object.ToDeleteObject, quite bool, err error) { + if delete == nil { + return + } + if len(delete.Objects) < 1 { + err = ErrFailedDecodeXML{errors.New("objects count is 0")} + return + } + if len(delete.Objects) > consts.MaxDeleteList { + err = ErrFailedDecodeXML{errors.New("objects count is too many")} + return + } + if delete.Quiet != nil && *delete.Quiet == true { + quite = 
true + } + for _, obj := range delete.Objects { + deleteObj := &object.ToDeleteObject{} + deleteObj.Object, deleteObj.ValidateErr = ValidateObjectName(obj.Key) + vals = append(vals, deleteObj) + } + return +} + +func ValidateMaxKeys(maxKeys *int64) (val int64, err error) { + if maxKeys == nil || *maxKeys > consts.MaxObjectList { + val = consts.MaxObjectList + return + } + if *maxKeys < 0 { + err = ErrMaxKeysInvalid + return + } + val = *maxKeys + return +} + +func ValidateMarkerAndPrefix(marker, prefix *string) (val1, val2 string, err error) { + if marker != nil { + val1 = trimLeadingSlash(*marker) + } + if prefix != nil { + val2 = trimLeadingSlash(*prefix) + } + val1, err = ValidateObjectName(&val1) + if err != nil { + return + } + val2, err = ValidateObjectName(&val2) + if err != nil { + return + } + if !strings.HasPrefix(val1, val2) { + err = ErrMarkerPrefixCombinationInvalid + } + return +} + +func ValidateDelimiter(delimiter *string) (val string, err error) { + if delimiter == nil { + return + } + val = *delimiter + return +} + +func ValidateEncodingType(encodingType *string) (val string, err error) { + if encodingType == nil || *encodingType == "" { + return + } + if !strings.EqualFold(*encodingType, consts.DefaultEncodingType) { + err = ErrEncodingTypeInvalid + return + } + val = consts.DefaultEncodingType + return +} + +func trimLeadingSlash(ep string) string { + if len(ep) > 0 && ep[0] == '/' { + // Path ends with '/' preserve it + if ep[len(ep)-1] == '/' && len(ep) > 1 { + ep = path.Clean(ep) + ep += "/" + } else { + ep = path.Clean(ep) + } + ep = ep[1:] + } + return ep +} diff --git a/s3/requests/validate_errors.go b/s3/requests/validate_errors.go new file mode 100644 index 000000000..839a6ed83 --- /dev/null +++ b/s3/requests/validate_errors.go @@ -0,0 +1,22 @@ +package requests + +import "errors" + +var ( + ErrBucketNameInvalid = errors.New("the bucket name is invalid") + ErrObjectNameInvalid = errors.New("the object name is invalid") + 
ErrObjectNameTooLong = errors.New("the object name cannot be longer than 1024 characters") + ErrObjectNamePrefixSlash = errors.New("the object name cannot start with slash") + ErrRegionUnsupported = errors.New("the location is not supported by this server") + ErrACLUnsupported = errors.New("the ACL is not supported by this server") + ErrInvalidContentMd5 = errors.New("the content md5 is invalid") + ErrInvalidChecksumSha256 = errors.New("the checksum-sha256 is invalid") + ErrContentLengthMissing = errors.New("the content-length is missing") + ErrContentLengthTooSmall = errors.New("the content-length is too small") + ErrContentLengthTooLarge = errors.New("the content-length is too large") + ErrCopySrcInvalid = errors.New("the copy-source is invalid") + ErrCopyDestInvalid = errors.New("the copy-destination is invalid") + ErrMaxKeysInvalid = errors.New("the max-keys is invalid") + ErrEncodingTypeInvalid = errors.New("the encoding-type is invalid") + ErrMarkerPrefixCombinationInvalid = errors.New("the marker-prefix combination is invalid") +) diff --git a/s3/responses/errors.go b/s3/responses/errors.go index 6e009096e..49dd9a22b 100644 --- a/s3/responses/errors.go +++ b/s3/responses/errors.go @@ -24,7 +24,7 @@ func (err *Error) HTTPStatusCode() int { } func (err *Error) Error() string { - return fmt.Sprintf("[%s]%s", err.code, err.description) + return fmt.Sprintf("<%s> %s", err.code, err.description) } // Errors http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html @@ -334,7 +334,6 @@ var ( description: "Signature header missing SignedHeaders field.", httpStatusCode: http.StatusBadRequest, } - ErrAuthHeaderEmpty = &Error{ code: "InvalidArgument", description: "Authorization header is invalid -- one and only one ' ' (space) required.", @@ -586,7 +585,7 @@ var ( // Generic Invalid-Request error. Should be used for response errors only for unlikely // corner case errors for which introducing new APIorcode is not worth it. 
LogIf() // should be used to log the error at the source of the error for debugging purposes. - ErrErrInvalidRequest = &Error{ + ErrInvalidRequest = &Error{ code: "InvalidRequest", description: "Invalid Request", httpStatusCode: http.StatusBadRequest, @@ -1032,9 +1031,9 @@ var ( description: "The JSON was not well-formed or did not validate against our published format.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidRequest = &Error{ - code: "InvalidRequest", - description: "InvalidRequest", + ErrMalformedACLError = &Error{ + code: "MalformedACLError", + description: "The ACL that you provided was not well formed or did not validate against our published schema.", httpStatusCode: http.StatusBadRequest, } ) diff --git a/s3/responses/responses_bucket.go b/s3/responses/responses_bucket.go index 4a513c229..7fb986326 100644 --- a/s3/responses/responses_bucket.go +++ b/s3/responses/responses_bucket.go @@ -3,7 +3,6 @@ package responses import ( "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/protocol" "github.com/bittorrent/go-btfs/s3/services/object" "net/http" ) @@ -24,7 +23,7 @@ func WriteHeadBucketResponse(w http.ResponseWriter, r *http.Request, buc *object func WriteDeleteBucketResponse(w http.ResponseWriter) { output := new(s3.DeleteBucketOutput) - _ = protocol.WriteResponse(w, http.StatusOK, output, "") + WriteSuccessResponse(w, output, "") return } diff --git a/s3/responses/responses_object.go b/s3/responses/responses_object.go index fde197eee..b4601d4cb 100644 --- a/s3/responses/responses_object.go +++ b/s3/responses/responses_object.go @@ -17,40 +17,77 @@ func WritePutObjectResponse(w http.ResponseWriter, r *http.Request, obj *object. 
WriteSuccessResponse(w, output, "") } +func WriteCopyObjectResponse(w http.ResponseWriter, r *http.Request, obj *object.Object) { + output := new(s3.CopyObjectResult) + output.SetETag(`"` + obj.ETag + `"`) + output.SetLastModified(obj.ModTime) + w.Header().Set(consts.Cid, obj.CID) + WriteSuccessResponse(w, output, "CopyObjectResult") +} + func WriteHeadObjectResponse(w http.ResponseWriter, r *http.Request, obj *object.Object) { output := new(s3.HeadObjectOutput) + output.SetETag(`"` + obj.ETag + `"`) + output.SetLastModified(obj.ModTime) + output.SetContentLength(obj.Size) + output.SetContentType(obj.ContentType) + output.SetContentEncoding(obj.ContentEncoding) + if !obj.Expires.IsZero() { + output.SetExpiration(obj.Expires.UTC().Format(http.TimeFormat)) + } w.Header().Set(consts.Cid, obj.CID) output.SetMetadata(map[string]*string{ consts.Cid: &obj.CID, }) - SetObjectHeaders(w, r, obj) - SetHeadGetRespHeaders(w, r.Form) WriteSuccessResponse(w, output, "") } -func WriteCopyObjectResponse(w http.ResponseWriter, r *http.Request, obj *object.Object) { - output := new(s3.CopyObjectResult) - output.SetETag(`"` + obj.ETag + `"`) - output.SetLastModified(obj.ModTime) - w.Header().Set(consts.Cid, obj.CID) - WriteSuccessResponse(w, output, "CopyObjectResult") -} - func WriteDeleteObjectResponse(w http.ResponseWriter, r *http.Request, obj *object.Object) { output := new(s3.DeleteObjectOutput) WriteSuccessResponse(w, output, "") } +func WriteDeleteObjectsResponse(w http.ResponseWriter, r *http.Request, toErr func(error) *Error, deletedObjects []*object.DeletedObject) { + output := new(s3.DeleteObjectsOutput) + objs := make([]*s3.DeletedObject, 0) + errs := make([]*s3.Error, 0) + for _, obj := range deletedObjects { + if obj.DeleteErr != nil { + rerr := toErr(obj.DeleteErr) + s3Err := new(s3.Error) + s3Err.SetCode(rerr.Code()) + s3Err.SetMessage(rerr.Description()) + s3Err.SetKey(obj.Object) + errs = append(errs, s3Err) + continue + } + s3Obj := new(s3.DeletedObject) + 
s3Obj.SetKey(obj.Object) + objs = append(objs, s3Obj) + } + if len(errs) > 0 { + output.SetErrors(errs) + } + if len(objs) > 0 { + output.SetDeleted(objs) + } + WriteSuccessResponse(w, output, "DeleteResult") +} + func WriteGetObjectResponse(w http.ResponseWriter, r *http.Request, obj *object.Object, body io.ReadCloser) { output := new(s3.GetObjectOutput) + output.SetLastModified(obj.ModTime) output.SetContentLength(obj.Size) + output.SetContentType(obj.ContentType) + output.SetContentEncoding(obj.ContentEncoding) output.SetBody(body) + if !obj.Expires.IsZero() { + output.SetExpiration(obj.Expires.UTC().Format(http.TimeFormat)) + } + w.Header().Set(consts.Cid, obj.CID) output.SetMetadata(map[string]*string{ consts.Cid: &obj.CID, }) - w.Header().Set(consts.Cid, obj.CID) - SetObjectHeaders(w, r, obj) - SetHeadGetRespHeaders(w, r.Form) WriteSuccessResponse(w, output, "") } @@ -73,14 +110,14 @@ func WriteGetObjectACLResponse(w http.ResponseWriter, r *http.Request, accessKey return } -func WriteListObjectsResponse(w http.ResponseWriter, r *http.Request, accessKey, bucname, prefix, marker, delimiter, encodingType string, maxKeys int64, list *object.ObjectsList) { +func WriteListObjectsResponse(w http.ResponseWriter, r *http.Request, accessKey string, list *object.ObjectsList) { out := new(s3.ListObjectsOutput) - out.SetName(bucname) - out.SetEncodingType(encodingType) - out.SetPrefix(utils.S3EncodeName(prefix, encodingType)) - out.SetMarker(utils.S3EncodeName(marker, encodingType)) - out.SetDelimiter(utils.S3EncodeName(delimiter, encodingType)) - out.SetMaxKeys(maxKeys) + out.SetName(list.Bucket) + out.SetEncodingType(list.EncodingType) + out.SetPrefix(utils.S3Encode(list.Prefix, list.EncodingType)) + out.SetMarker(utils.S3Encode(list.Marker, list.EncodingType)) + out.SetDelimiter(utils.S3Encode(list.Delimiter, list.EncodingType)) + out.SetMaxKeys(list.MaxKeys) out.SetNextMarker(list.NextMarker) out.SetIsTruncated(list.IsTruncated) s3Objs := make([]*s3.Object, 
len(list.Objects)) @@ -89,7 +126,7 @@ func WriteListObjectsResponse(w http.ResponseWriter, r *http.Request, accessKey, s3Obj.SetETag(`"` + obj.ETag + `"`) s3Obj.SetOwner(owner(accessKey)) s3Obj.SetLastModified(obj.ModTime) - s3Obj.SetKey(utils.S3EncodeName(obj.Name, encodingType)) + s3Obj.SetKey(utils.S3Encode(obj.Name, list.EncodingType)) s3Obj.SetSize(obj.Size) s3Obj.SetStorageClass("") s3Objs[i] = s3Obj @@ -99,7 +136,7 @@ func WriteListObjectsResponse(w http.ResponseWriter, r *http.Request, accessKey, s3CommPrefixes := make([]*s3.CommonPrefix, len(list.Prefixes)) for i, cpf := range list.Prefixes { pfx := new(s3.CommonPrefix) - pfx.SetPrefix(utils.S3EncodeName(cpf, encodingType)) + pfx.SetPrefix(utils.S3Encode(cpf, list.EncodingType)) s3CommPrefixes[i] = pfx } out.SetCommonPrefixes(s3CommPrefixes) @@ -110,9 +147,9 @@ func WriteListObjectsV2Response(w http.ResponseWriter, r *http.Request, accessKe out := new(s3.ListObjectsV2Output) out.SetName(bucname) out.SetEncodingType(encodingType) - out.SetStartAfter(utils.S3EncodeName(startAfter, encodingType)) - out.SetDelimiter(utils.S3EncodeName(delimiter, encodingType)) - out.SetPrefix(utils.S3EncodeName(prefix, encodingType)) + out.SetStartAfter(utils.S3Encode(startAfter, encodingType)) + out.SetDelimiter(utils.S3Encode(delimiter, encodingType)) + out.SetPrefix(utils.S3Encode(prefix, encodingType)) out.SetMaxKeys(maxKeys) out.SetContinuationToken(base64.StdEncoding.EncodeToString([]byte(token))) out.SetNextContinuationToken(base64.StdEncoding.EncodeToString([]byte(list.NextContinuationToken))) @@ -123,7 +160,7 @@ func WriteListObjectsV2Response(w http.ResponseWriter, r *http.Request, accessKe s3Obj.SetETag(`"` + obj.ETag + `"`) s3Obj.SetOwner(owner(accessKey)) s3Obj.SetLastModified(obj.ModTime) - s3Obj.SetKey(utils.S3EncodeName(obj.Name, encodingType)) + s3Obj.SetKey(utils.S3Encode(obj.Name, encodingType)) s3Obj.SetSize(obj.Size) s3Obj.SetStorageClass("") s3Objs[i] = s3Obj @@ -133,7 +170,7 @@ func 
WriteListObjectsV2Response(w http.ResponseWriter, r *http.Request, accessKe s3CommPrefixes := make([]*s3.CommonPrefix, len(list.Prefixes)) for i, cpf := range list.Prefixes { pfx := new(s3.CommonPrefix) - pfx.SetPrefix(utils.S3EncodeName(cpf, encodingType)) + pfx.SetPrefix(utils.S3Encode(cpf, encodingType)) s3CommPrefixes[i] = pfx } out.SetCommonPrefixes(s3CommPrefixes) diff --git a/s3/s3utils/request_test.go b/s3/s3utils/request_test.go deleted file mode 100644 index fd84d329a..000000000 --- a/s3/s3utils/request_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package s3utils - -import ( - "fmt" - "github.com/aws/aws-sdk-go/service/s3" - "io" - "reflect" - "testing" -) - -type req struct { - _ struct{} `embed:"PutObjectInput"` - s3.PutObjectInput `location:"embed"` - Body io.ReadCloser `type:"blob"` -} - -func TestParseRequest(t *testing.T) { - var r req - v := reflect.ValueOf(r) - p := v.Type() - n := v.NumField() - for i := 0; i < n; i++ { - ft := p.Field(i) - fmt.Println(ft.Name) - } - -} diff --git a/s3/s3utils/utils.go b/s3/s3utils/utils.go index 532954cca..f8d9b083a 100644 --- a/s3/s3utils/utils.go +++ b/s3/s3utils/utils.go @@ -12,20 +12,19 @@ import ( // GenericError - generic object layer error. type GenericError struct { - Bucket string - Object string - VersionID string - Err error + Bucket string + Object string + Err error } // Bucket related errors. -// BucketNameInvalid - bucketname provided is invalid. +// BucketNameInvalid - bucket name provided is invalid. type BucketNameInvalid GenericError // Error returns string an error formatted as the given text. func (e BucketNameInvalid) Error() string { - return "Bucket name invalid: " + e.Bucket + return "bucket name invalid: " + e.Bucket } // Object related errors. @@ -51,7 +50,7 @@ func (e ObjectNameTooLong) Error() string { // Error returns string an error formatted as the given text. 
func (e ObjectNamePrefixAsSlash) Error() string { - return "Object name contains forward slash as pefix: " + e.Bucket + "/" + e.Object + return "Object name contains forward slash as prefix: " + e.Bucket + "/" + e.Object } // InvalidUploadIDKeyCombination - invalid upload id and key marker combination. @@ -133,44 +132,44 @@ var ( ) // Common checker for both stricter and basic validation. -func checkBucketNameCommon(bucketName string, strict bool) (err error) { +func checkBucketName(bucketName string, strict bool) (err error) { if strings.TrimSpace(bucketName) == "" { - return errors.New("Bucket name cannot be empty") + return errors.New("bucket name cannot be empty") } if len(bucketName) < 3 { - return errors.New("Bucket name cannot be shorter than 3 characters") + return errors.New("bucket name cannot be shorter than 3 characters") } if len(bucketName) > 63 { - return errors.New("Bucket name cannot be longer than 63 characters") + return errors.New("bucket name cannot be longer than 63 characters") } if ipAddress.MatchString(bucketName) { - return errors.New("Bucket name cannot be an ip address") + return errors.New("bucket name cannot be an ip address") } if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") { - return errors.New("Bucket name contains invalid characters") + return errors.New("bucket name contains invalid characters") } if strict { if !validBucketNameStrict.MatchString(bucketName) { - err = errors.New("Bucket name contains invalid characters") + err = errors.New("bucket name contains invalid characters") } return err } if !validBucketName.MatchString(bucketName) { - err = errors.New("Bucket name contains invalid characters") + err = errors.New("bucket name contains invalid characters") } return err } // CheckValidBucketName - checks if we have a valid input bucket name. 
func CheckValidBucketName(bucketName string) (err error) { - return checkBucketNameCommon(bucketName, false) + return checkBucketName(bucketName, false) } // CheckValidBucketNameStrict - checks if we have a valid input bucket name. // This is a stricter version. // - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html func CheckValidBucketNameStrict(bucketName string) (err error) { - return checkBucketNameCommon(bucketName, true) + return checkBucketName(bucketName, true) } // Checks on GetObject arguments, bucket and object. diff --git a/s3/services/object/proto.go b/s3/services/object/proto.go index 4a1837f70..f3edf1413 100644 --- a/s3/services/object/proto.go +++ b/s3/services/object/proto.go @@ -10,7 +10,7 @@ import ( var ( ErrBucketNotFound = errors.New("bucket not found") - ErrBucketeNotEmpty = errors.New("bucket not empty") + ErrBucketNotEmpty = errors.New("bucket not empty") ErrObjectNotFound = errors.New("object not found") ErrUploadNotFound = errors.New("upload not found") ErrNotAllowed = errors.New("not allowed") @@ -18,19 +18,20 @@ var ( ) type Service interface { - CreateBucket(ctx context.Context, user, bucname, region, acl string) (bucket *Bucket, err error) - GetBucket(ctx context.Context, user, bucname string) (bucket *Bucket, err error) - DeleteBucket(ctx context.Context, user, bucname string) (err error) - GetAllBuckets(ctx context.Context, user string) (list []*Bucket, err error) - PutBucketACL(ctx context.Context, user, bucname, acl string) (err error) - GetBucketACL(ctx context.Context, user, bucname string) (acl string, err error) - - PutObject(ctx context.Context, user, bucname, objname string, body *hash.Reader, size int64, meta map[string]string) (object *Object, err error) - CopyObject(ctx context.Context, user, srcBucname, srcObjname, dstBucname, dstObjname string, meta map[string]string) (dstObject *Object, err error) - GetObject(ctx context.Context, user, bucname, objname string, withBody bool) (object *Object, body 
io.ReadCloser, err error) - DeleteObject(ctx context.Context, user, bucname, objname string) (err error) - ListObjects(ctx context.Context, user, bucname, prefix, delimiter, marker string, max int64) (list *ObjectsList, err error) - ListObjectsV2(ctx context.Context, user string, bucket string, prefix string, token, delimiter string, max int64, owner bool, after string) (list *ObjectsListV2, err error) + CreateBucket(ctx context.Context, args *CreateBucketArgs) (bucket *Bucket, err error) + GetBucket(ctx context.Context, args *GetBucketArgs) (bucket *Bucket, err error) + DeleteBucket(ctx context.Context, args *DeleteBucketArgs) (err error) + ListBuckets(ctx context.Context, args *ListBucketsArgs) (list []*Bucket, err error) + PutBucketACL(ctx context.Context, args *PutBucketACLArgs) (err error) + GetBucketACL(ctx context.Context, args *GetBucketACLArgs) (acl string, err error) + + PutObject(ctx context.Context, args *PutObjectArgs) (object *Object, err error) + CopyObject(ctx context.Context, args *CopyObjectArgs) (object *Object, err error) + GetObject(ctx context.Context, args *GetObjectArgs) (object *Object, body io.ReadCloser, err error) + DeleteObject(ctx context.Context, args *DeleteObjectArgs) (err error) + DeleteObjects(ctx context.Context, args *DeleteObjectsArgs) (deletedObjects []*DeletedObject, err error) + ListObjects(ctx context.Context, args *ListObjectsArgs) (list *ObjectsList, err error) + ListObjectsV2(ctx context.Context, user, bucket, prefix, token, delimiter string, max int64, owner bool, after string) (list *ObjectsListV2, err error) GetObjectACL(ctx context.Context, user, bucname, objname string) (acl string, err error) CreateMultipartUpload(ctx context.Context, user, bucname, objname string, meta map[string]*string) (multipart *Multipart, err error) @@ -39,6 +40,101 @@ type Service interface { CompleteMultiPartUpload(ctx context.Context, user, bucname, objname, uplid string, parts []*CompletePart) (object *Object, err error) } +type 
CreateBucketArgs struct { + AccessKey string + ACL string + Bucket string + Region string +} + +type GetBucketArgs struct { + AccessKey string + Bucket string +} + +type DeleteBucketArgs struct { + AccessKey string + Bucket string +} + +type ListBucketsArgs struct { + AccessKey string +} + +type GetBucketACLArgs struct { + AccessKey string + Bucket string +} + +type PutBucketACLArgs struct { + AccessKey string + ACL string + Bucket string +} + +type PutObjectArgs struct { + AccessKey string + Body *hash.Reader + Bucket string + Object string + ContentEncoding string + ContentLength int64 + ContentType string + Expires time.Time +} + +type CopyObjectArgs struct { + AccessKey string + Bucket string + Object string + SrcBucket string + SrcObject string + ContentEncoding string + ContentType string + Expires time.Time + ReplaceMeta bool +} + +type GetObjectArgs struct { + AccessKey string + Bucket string + Object string + WithBody bool +} + +type DeleteObjectArgs struct { + AccessKey string + Bucket string + Object string +} + +type DeleteObjectsArgs struct { + AccessKey string + Bucket string + ToDeleteObjects []*ToDeleteObject + Quite bool +} + +type ToDeleteObject struct { + Object string + ValidateErr error +} + +type ListObjectsArgs struct { + AccessKey string + Bucket string + MaxKeys int64 + Marker string + Prefix string + Delimiter string + EncodingType string +} + +type DeletedObject struct { + Object string + DeleteErr error +} + // Bucket contains bucket metadata. 
type Bucket struct { Name string @@ -85,10 +181,16 @@ type Part struct { } type ObjectsList struct { - IsTruncated bool - NextMarker string - Objects []*Object - Prefixes []string + Bucket string + MaxKeys int64 + Marker string + Prefix string + Delimiter string + EncodingType string + IsTruncated bool + NextMarker string + Objects []*Object + Prefixes []string } type ObjectsListV2 struct { diff --git a/s3/services/object/service_bucket.go b/s3/services/object/service_bucket.go index ff347d258..11726ffd1 100644 --- a/s3/services/object/service_bucket.go +++ b/s3/services/object/service_bucket.go @@ -11,13 +11,13 @@ import ( ) // CreateBucket create a new bucket for the specified user -func (s *service) CreateBucket(ctx context.Context, user, bucname, region, acl string) (bucket *Bucket, err error) { +func (s *service) CreateBucket(ctx context.Context, args *CreateBucketArgs) (bucket *Bucket, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // Bucket key - buckey := s.getBucketKey(bucname) + buckey := s.getBucketKey(args.Bucket) // Lock bucket err = s.lock.Lock(ctx, buckey) @@ -37,19 +37,22 @@ func (s *service) CreateBucket(ctx context.Context, user, bucname, region, acl s } // Check action ACL - allow := s.checkACL(user, policy.Private, user, action.CreateBucketAction) + allow := s.checkACL(args.AccessKey, policy.Private, args.AccessKey, action.CreateBucketAction) if !allow { err = ErrNotAllowed return } + // now + now := time.Now().UTC() + // Bucket bucket = &Bucket{ - Name: bucname, - Region: region, - Owner: user, - ACL: acl, - Created: time.Now().UTC(), + Name: args.Bucket, + Region: args.Region, + Owner: args.AccessKey, + ACL: args.ACL, + Created: now, } // Put bucket @@ -59,13 +62,13 @@ func (s *service) CreateBucket(ctx context.Context, user, bucname, region, acl s } // GetBucket get a user specified bucket -func (s *service) GetBucket(ctx context.Context, user, bucname string) (bucket *Bucket, err error) { +func (s *service) 
GetBucket(ctx context.Context, args *GetBucketArgs) (bucket *Bucket, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // Bucket key - buckey := s.getBucketKey(bucname) + buckey := s.getBucketKey(args.Bucket) // RLock bucket err = s.lock.RLock(ctx, buckey) @@ -85,7 +88,7 @@ func (s *service) GetBucket(ctx context.Context, user, bucname string) (bucket * } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.HeadBucketAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.HeadBucketAction) if !allow { err = ErrNotAllowed } @@ -94,13 +97,13 @@ func (s *service) GetBucket(ctx context.Context, user, bucname string) (bucket * } // DeleteBucket delete a user specified bucket and clear all bucket objects and uploads -func (s *service) DeleteBucket(ctx context.Context, user, bucname string) (err error) { +func (s *service) DeleteBucket(ctx context.Context, args *DeleteBucketArgs) (err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // Bucket key - buckey := s.getBucketKey(bucname) + buckey := s.getBucketKey(args.Bucket) // Lock bucket err = s.lock.Lock(ctx, buckey) @@ -120,19 +123,19 @@ func (s *service) DeleteBucket(ctx context.Context, user, bucname string) (err e } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.DeleteBucketAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.DeleteBucketAction) if !allow { err = ErrNotAllowed return } // Check if bucket is empty - empty, err := s.isBucketEmpty(bucname) + empty, err := s.isBucketEmpty(args.Bucket) if err != nil { return } if !empty { - err = ErrBucketeNotEmpty + err = ErrBucketNotEmpty return } @@ -142,18 +145,19 @@ func (s *service) DeleteBucket(ctx context.Context, user, bucname string) (err e return } -// GetAllBuckets get all buckets of the specified user -func (s *service) GetAllBuckets(ctx context.Context, user string) (list []*Bucket, err error) { +// 
ListBuckets list all buckets of the specified user +func (s *service) ListBuckets(ctx context.Context, args *ListBucketsArgs) (list []*Bucket, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // Check action ACL - allow := s.checkACL(user, policy.Private, user, action.ListBucketAction) + allow := s.checkACL(args.AccessKey, policy.Private, args.AccessKey, action.ListBucketAction) if !allow { err = ErrNotAllowed return } + // All buckets prefix bucketsPrefix := s.getAllBucketsKeyPrefix() @@ -181,7 +185,7 @@ func (s *service) GetAllBuckets(ctx context.Context, user string) (list []*Bucke } // Collect user's bucket - if bucket.Owner == user { + if bucket.Owner == args.AccessKey { list = append(list, bucket) } @@ -192,13 +196,13 @@ func (s *service) GetAllBuckets(ctx context.Context, user string) (list []*Bucke } // PutBucketACL update user specified bucket's ACL field value -func (s *service) PutBucketACL(ctx context.Context, user, bucname, acl string) (err error) { +func (s *service) PutBucketACL(ctx context.Context, args *PutBucketACLArgs) (err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // Bucket key - buckey := s.getBucketKey(bucname) + buckey := s.getBucketKey(args.Bucket) // Lock bucket err = s.lock.Lock(ctx, buckey) @@ -218,14 +222,14 @@ func (s *service) PutBucketACL(ctx context.Context, user, bucname, acl string) ( } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.PutBucketAclAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.PutBucketAclAction) if !allow { err = ErrNotAllowed return } // Update bucket ACL - bucket.ACL = acl + bucket.ACL = args.ACL // Put bucket err = s.providers.StateStore().Put(buckey, bucket) @@ -234,13 +238,13 @@ func (s *service) PutBucketACL(ctx context.Context, user, bucname, acl string) ( } // GetBucketACL get user specified bucket ACL field value -func (s *service) GetBucketACL(ctx context.Context, user, bucname 
string) (acl string, err error) { +func (s *service) GetBucketACL(ctx context.Context, args *GetBucketACLArgs) (acl string, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // Bucket key - buckey := s.getBucketKey(bucname) + buckey := s.getBucketKey(args.Bucket) // RLock bucket err = s.lock.RLock(ctx, buckey) @@ -260,7 +264,7 @@ func (s *service) GetBucketACL(ctx context.Context, user, bucname string) (acl s } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.GetBucketAclAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.GetBucketAclAction) if !allow { err = ErrNotAllowed return diff --git a/s3/services/object/service_object.go b/s3/services/object/service_object.go index b9056ebec..e2e499a56 100644 --- a/s3/services/object/service_object.go +++ b/s3/services/object/service_object.go @@ -4,23 +4,20 @@ import ( "context" "errors" "github.com/bittorrent/go-btfs/s3/action" - "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/providers" - "github.com/bittorrent/go-btfs/s3/utils/hash" "io" - "net/http" "strings" "time" ) // PutObject put a user specified object -func (s *service) PutObject(ctx context.Context, user, bucname, objname string, body *hash.Reader, size int64, meta map[string]string) (object *Object, err error) { +func (s *service) PutObject(ctx context.Context, args *PutObjectArgs) (object *Object, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // Bucket key - buckey := s.getBucketKey(bucname) + buckey := s.getBucketKey(args.Bucket) // RLock bucket err = s.lock.RLock(ctx, buckey) @@ -40,14 +37,14 @@ func (s *service) PutObject(ctx context.Context, user, bucname, objname string, } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.PutObjectAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.PutObjectAction) if !allow { err = ErrNotAllowed return } // Object key - 
objkey := s.getObjectKey(bucname, objname) + objkey := s.getObjectKey(args.Bucket, args.Object) // Lock object err = s.lock.Lock(ctx, objkey) @@ -63,7 +60,7 @@ func (s *service) PutObject(ctx context.Context, user, bucname, objname string, } // Store object body - cid, err := s.storeBody(ctx, body, objkey) + cid, err := s.storeBody(ctx, args.Body, objkey) if err != nil { return } @@ -80,30 +77,25 @@ func (s *service) PutObject(ctx context.Context, user, bucname, objname string, }() // now - now := time.Now() + now := time.Now().UTC() // new object object = &Object{ - Bucket: bucname, - Name: objname, - ModTime: now.UTC(), - Size: size, + Bucket: args.Bucket, + Name: args.Object, + ModTime: now, + Size: args.ContentLength, IsDir: false, - ETag: body.ETag().String(), + ETag: args.Body.ETag().String(), CID: cid, VersionID: "", IsLatest: true, DeleteMarker: false, - ACL: meta[consts.AmzACL], - ContentType: meta[strings.ToLower(consts.ContentType)], - ContentEncoding: meta[strings.ToLower(consts.ContentEncoding)], - SuccessorModTime: now.UTC(), - } - - // set object expires - exp, er := time.Parse(http.TimeFormat, meta[strings.ToLower(consts.Expires)]) - if er == nil { - object.Expires = exp.UTC() + ACL: "", + ContentType: args.ContentType, + ContentEncoding: args.ContentEncoding, + SuccessorModTime: now, + Expires: args.Expires, } // put object @@ -124,13 +116,13 @@ func (s *service) PutObject(ctx context.Context, user, bucname, objname string, } // CopyObject copy from a user specified source object to a desert object -func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, dstBucname, dstObjname string, meta map[string]string) (dstObject *Object, err error) { +func (s *service) CopyObject(ctx context.Context, args *CopyObjectArgs) (dstObject *Object, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // Source bucket key - srcBuckey := s.getBucketKey(srcBucname) + srcBuckey := s.getBucketKey(args.SrcBucket) // RLock 
source bucket err = s.lock.RLock(ctx, srcBuckey) @@ -150,14 +142,14 @@ func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, } // Check source action ACL - srcAllow := s.checkACL(srcBucket.Owner, srcBucket.ACL, user, action.GetObjectAction) + srcAllow := s.checkACL(srcBucket.Owner, srcBucket.ACL, args.AccessKey, action.GetObjectAction) if !srcAllow { err = ErrNotAllowed return } // Source object key - srcObjkey := s.getObjectKey(srcBucname, srcObjname) + srcObjkey := s.getObjectKey(args.SrcBucket, args.SrcObject) // RLock source object err = s.lock.RLock(ctx, srcObjkey) @@ -177,7 +169,7 @@ func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, } // Desert bucket key - dstBuckey := s.getBucketKey(dstBucname) + dstBuckey := s.getBucketKey(args.Bucket) // RLock destination bucket err = s.lock.RLock(ctx, dstBuckey) @@ -197,14 +189,14 @@ func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, } // Check destination action ACL - dstAllow := s.checkACL(dstBucket.Owner, dstBucket.ACL, user, action.PutObjectAction) + dstAllow := s.checkACL(dstBucket.Owner, dstBucket.ACL, args.AccessKey, action.PutObjectAction) if !dstAllow { err = ErrNotAllowed return } // Destination object key - dstObjkey := s.getObjectKey(dstBucname, dstObjname) + dstObjkey := s.getObjectKey(args.Bucket, args.Object) // Lock Destination object err = s.lock.Lock(ctx, dstObjkey) @@ -222,7 +214,7 @@ func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, // Mark if delete the cid ref deleteRef := true - // If put new object failed, try to delete it's reference + // If put new object failed, try to delete its reference defer func() { if deleteRef { _ = s.removeBodyRef(ctx, srcObject.CID, dstObjkey) @@ -236,13 +228,13 @@ func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, } // now - now := time.Now() + now := time.Now().UTC() // Destination object dstObject = &Object{ - Bucket: 
dstBucname, - Name: dstObjname, - ModTime: now.UTC(), + Bucket: args.Bucket, + Name: args.Object, + ModTime: now, Size: srcObject.Size, IsDir: false, ETag: srcObject.ETag, @@ -252,25 +244,14 @@ func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, DeleteMarker: false, ContentType: srcObject.ContentType, ContentEncoding: srcObject.ContentEncoding, - SuccessorModTime: now.UTC(), - Expires: srcObject.Expires, + SuccessorModTime: now, + Expires: args.Expires, } - // Set destination object metadata - val, ok := meta[consts.ContentType] - if ok { - dstObject.ContentType = val - } - val, ok = meta[consts.ContentEncoding] - if ok { - dstObject.ContentEncoding = val - } - val, ok = meta[strings.ToLower(consts.Expires)] - if ok { - exp, er := time.Parse(http.TimeFormat, val) - if er != nil { - dstObject.Expires = exp.UTC() - } + // Replace metadata + if args.ReplaceMeta { + dstObject.ContentType = args.ContentType + dstObject.ContentEncoding = args.ContentEncoding } // Put destination object @@ -291,13 +272,13 @@ func (s *service) CopyObject(ctx context.Context, user, srcBucname, srcObjname, } // GetObject get a user specified object -func (s *service) GetObject(ctx context.Context, user, bucname, objname string, withBody bool) (object *Object, body io.ReadCloser, err error) { +func (s *service) GetObject(ctx context.Context, args *GetObjectArgs) (object *Object, body io.ReadCloser, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // bucket key - buckey := s.getBucketKey(bucname) + buckey := s.getBucketKey(args.Bucket) // RLock bucket err = s.lock.RLock(ctx, buckey) @@ -322,14 +303,14 @@ func (s *service) GetObject(ctx context.Context, user, bucname, objname string, } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.GetObjectAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.GetObjectAction) if !allow { err = ErrNotAllowed return } // Object key - objkey := 
s.getObjectKey(bucname, objname) + objkey := s.getObjectKey(args.Bucket, args.Object) // RLock object err = s.lock.RLock(ctx, objkey) @@ -354,7 +335,7 @@ func (s *service) GetObject(ctx context.Context, user, bucname, objname string, } // no need body - if !withBody { + if !args.WithBody { return } @@ -381,13 +362,13 @@ func (s *service) GetObject(ctx context.Context, user, bucname, objname string, } // DeleteObject delete a user specified object -func (s *service) DeleteObject(ctx context.Context, user, bucname, objname string) (err error) { +func (s *service) DeleteObject(ctx context.Context, args *DeleteObjectArgs) (err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // Bucket key - buckey := s.getBucketKey(bucname) + buckey := s.getBucketKey(args.Bucket) // RLock bucket err = s.lock.RLock(ctx, buckey) @@ -407,14 +388,14 @@ func (s *service) DeleteObject(ctx context.Context, user, bucname, objname strin } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.DeleteObjectAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.DeleteObjectAction) if !allow { err = ErrNotAllowed return } // Object key - objkey := s.getObjectKey(bucname, objname) + objkey := s.getObjectKey(args.Bucket, args.Object) // Lock object err = s.lock.Lock(ctx, objkey) @@ -445,14 +426,14 @@ func (s *service) DeleteObject(ctx context.Context, user, bucname, objname strin return } -// ListObjects list user specified objects -func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimiter, marker string, max int64) (list *ObjectsList, err error) { +// DeleteObjects delete multiple user specified objects +func (s *service) DeleteObjects(ctx context.Context, args *DeleteObjectsArgs) (deletedObjects []*DeletedObject, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // Bucket key - buckey := s.getBucketKey(bucname) + buckey := s.getBucketKey(args.Bucket) // RLock bucket err = 
s.lock.RLock(ctx, buckey) @@ -472,37 +453,137 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.ListObjectsAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.DeleteObjectAction) if !allow { err = ErrNotAllowed return } + for _, deleteObj := range args.ToDeleteObjects { + func(deleteObj *ToDeleteObject) { + var er error + // Collection delete result + defer func() { + if er != nil || !args.Quite { + deletedObjects = append(deletedObjects, &DeletedObject{ + Object: deleteObj.Object, + DeleteErr: er, + }) + } + }() + + // Validate failed + er = deleteObj.ValidateErr + if er != nil { + return + } + + // Object key + objkey := s.getObjectKey(args.Bucket, deleteObj.Object) + + // Lock object + er = s.lock.Lock(ctx, objkey) + if er != nil { + return + } + defer s.lock.Unlock(objkey) + + // Get object + object, er := s.getObject(objkey) + if er != nil { + return + } + if object == nil { + err = ErrObjectNotFound + return + } + + // Delete object + er = s.deleteObject(objkey) + if er != nil { + return + } + + // Try to delete object body + _ = s.removeBody(ctx, object.CID, objkey) + + }(deleteObj) + } + + return +} + +// ListObjects list user specified objects +func (s *service) ListObjects(ctx context.Context, args *ListObjectsArgs) (list *ObjectsList, err error) { + // Operation context + ctx, cancel := s.opctx(ctx) + defer cancel() + // Object list - list = &ObjectsList{} + list = &ObjectsList{ + Bucket: args.Bucket, + MaxKeys: args.MaxKeys, + Marker: args.Marker, + Prefix: args.Prefix, + Delimiter: args.Delimiter, + EncodingType: args.EncodingType, + } + + // Bucket key + buckey := s.getBucketKey(args.Bucket) + + // RLock bucket + err = s.lock.RLock(ctx, buckey) + if err != nil { + return + } + defer s.lock.RUnlock(buckey) + + // Get bucket + bucket, err := s.getBucket(buckey) + if err != nil { + return + } + if bucket == nil { 
+ err = ErrBucketNotFound + return + } + + // Check action ACL + allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.ListObjectsAction) + if !allow { + err = ErrNotAllowed + return + } // MaxKeys is zero - if max == 0 { + if args.MaxKeys == 0 { list.IsTruncated = true return } // All bucket objects key prefix - allObjectsKeyPrefix := s.getAllObjectsKeyPrefix(bucname) + allObjectsKeyPrefix := s.getAllObjectsKeyPrefix(args.Bucket) // List objects key prefix - listObjectsKeyPrefix := allObjectsKeyPrefix + prefix + listObjectsKeyPrefix := allObjectsKeyPrefix + args.Prefix // Accumulate count count := int64(0) // Flag mark if begin collect, it initialized to true if // marker is "" - begin := marker == "" + begin := args.Marker == "" // Seen keys, used to group common keys seen := make(map[string]bool) + // Delimiter length + dl := len(args.Delimiter) + + // Prefix length + pl := len(args.Prefix) + // Iterate all objects with the specified prefix to collect and group specified range items err = s.providers.StateStore().Iterate(listObjectsKeyPrefix, func(key, _ []byte) (stop bool, er error) { // Object key @@ -515,10 +596,8 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi // it is the string truncated object name after the delimiter, else // it is empty string commonPrefix := "" - if delimiter != "" { - dl := len(delimiter) - pl := len(prefix) - di := strings.Index(objname[pl:], delimiter) + if dl > 0 { + di := strings.Index(objname[pl:], args.Delimiter) if di >= 0 { commonPrefix = objname[:(pl + di + dl)] } @@ -528,20 +607,20 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi // with the common prefix or object name, then begin collection from next iterate // and if common prefix matched, mark this common prefix as seen if !begin { - if commonPrefix != "" && marker == commonPrefix { + if commonPrefix != "" && args.Marker == commonPrefix { seen[commonPrefix] = true begin = true - } else 
if marker == objname { + } else if args.Marker == objname { begin = true } return } - // Objects with same common prefix will be grouped into one + // ToDeleteObjects with same common prefix will be grouped into one // note: the objects without common prefix will present only once, so // it is not necessary to add these objects names in the seen map - // Objects with common prefix grouped int one + // ToDeleteObjects with common prefix grouped int one if commonPrefix != "" { if seen[commonPrefix] { return @@ -566,7 +645,7 @@ func (s *service) ListObjects(ctx context.Context, user, bucname, prefix, delimi // Check the count, if it matched the max, means // the collect is complete, but the items may remain, so stop the // iteration, and mark the list was truncated - if count == max { + if count == args.MaxKeys { list.IsTruncated = true stop = true } diff --git a/s3/services/sign/signature-v4-utils.go b/s3/services/sign/signature-v4-utils.go index e615a9b73..e64b8d23d 100644 --- a/s3/services/sign/signature-v4-utils.go +++ b/s3/services/sign/signature-v4-utils.go @@ -126,12 +126,12 @@ func isValidRegion(reqRegion string, confRegion string) bool { return true } if confRegion == "US" { - confRegion = consts.DefaultLocation + confRegion = consts.DefaultBucketRegion } // Some older s3 clients set region as "US" instead of // globalDefaultRegion, handle it. if reqRegion == "US" { - reqRegion = consts.DefaultLocation + reqRegion = consts.DefaultBucketRegion } return reqRegion == confRegion } diff --git a/s3/utils/encode.go b/s3/utils/encode.go index b85ad7bf8..6db0367dc 100644 --- a/s3/utils/encode.go +++ b/s3/utils/encode.go @@ -2,8 +2,8 @@ package utils import "strings" -// S3EncodeName encodes string in response when encodingType is specified in AWS S3 requests. -func S3EncodeName(name string, encodingType string) (result string) { +// S3Encode encodes string in response when encodingType is specified in AWS S3 requests. 
+func S3Encode(name string, encodingType string) (result string) { // Quick path to exit if encodingType == "" { return name diff --git a/s3/utils/if.go b/s3/utils/if.go index 3021d1346..25d89ce81 100644 --- a/s3/utils/if.go +++ b/s3/utils/if.go @@ -1,17 +1,11 @@ package utils -func IfEmpty(a, b string) (c string) { - c = a - if a == "" { - c = b +// CoalesceStr return the first non-empty string in the list +func CoalesceStr(list ...string) string { + for _, str := range list { + if str != "" { + return str + } } - return -} - -func IfZero(a, b int) (c int) { - c = a - if a == 0 { - c = b - } - return + return "" } diff --git a/s3/utils/signature.go b/s3/utils/signature.go index 674b25505..002d3d36f 100644 --- a/s3/utils/signature.go +++ b/s3/utils/signature.go @@ -103,7 +103,7 @@ func SignRequestV4(req *http.Request, accessKey, secretKey string, st ServiceTyp //req.Form.Add(b,string(a)) //queryStr := req.Form.Encode() queryStr := req.URL.Query().Encode() - region := consts.DefaultLocation + region := consts.DefaultBucketRegion // Get scope. 
scope := strings.Join([]string{ currTime.Format(yyyymmdd), From c62f6517cd36f43ce08937d39c6bf500cdf49274 Mon Sep 17 00:00:00 2001 From: Steve Date: Tue, 19 Sep 2023 08:01:55 +0800 Subject: [PATCH 111/139] ref: complete refractor --- cmd/btfs/daemon.go | 2 +- core/commands/accesskey.go | 2 +- s3/action/action_test.go | 45 --- s3/{cctx/cctx.go => api/contexts/contexts.go} | 2 +- .../contexts/contexts_access_key.go} | 2 +- .../contexts/contexts_handle_inf.go} | 2 +- s3/{ => api}/handlers/handlers.go | 97 ++--- s3/{ => api}/handlers/handlers_bucket.go | 46 +-- s3/{ => api}/handlers/handlers_middlewares.go | 12 +- s3/api/handlers/handlers_multipart.go | 100 +++++ s3/api/handlers/handlers_object.go | 223 ++++++++++ s3/{ => api}/handlers/options.go | 0 s3/{ => api}/handlers/proto.go | 0 s3/{ => api}/providers/btfs_api.go | 0 s3/{ => api}/providers/btfs_api_options.go | 0 s3/{ => api}/providers/proto.go | 0 s3/{ => api}/providers/providers.go | 0 s3/{ => api}/providers/providers_options.go | 0 .../providers/storage_state_store_proxy.go | 0 s3/api/requests/errors.go | 103 +++++ .../input.go => api/requests/parsers.go} | 91 ++--- s3/{ => api}/requests/parsers_bucket.go | 28 +- s3/api/requests/parsers_multipart.go | 184 +++++++++ s3/{ => api}/requests/parsers_object.go | 145 ++++++- .../validate.go => api/requests/validates.go} | 183 ++++++--- s3/{ => api}/responses/errors.go | 4 +- .../responses.go => api/responses/writers.go} | 115 ++---- .../responses/writers_bucket.go} | 37 +- s3/api/responses/writers_multipart.go | 37 ++ .../responses/writers_object.go} | 88 ++-- s3/{ => api}/routers/options.go | 0 s3/{ => api}/routers/proto.go | 0 s3/{ => api}/routers/routers.go | 9 +- s3/{ => api}/server/options.go | 0 s3/{ => api}/server/server.go | 2 +- s3/{ => api}/services/accesskey/instance.go | 2 +- s3/{ => api}/services/accesskey/options.go | 0 s3/{ => api}/services/accesskey/proto.go | 0 s3/{ => api}/services/accesskey/service.go | 18 +- .../services/object/clean_read_closer.go 
| 0 s3/{ => api}/services/object/options.go | 0 s3/{ => api}/services/object/proto.go | 188 ++++++--- s3/{ => api}/services/object/service.go | 3 +- .../services/object/service_bucket.go | 38 +- .../services/object/service_multipart.go | 131 +++--- .../services/object/service_object.go | 76 ++-- s3/{ => api}/services/sign/options.go | 0 s3/{ => api}/services/sign/proto.go | 2 +- s3/{ => api}/services/sign/service.go | 8 +- .../services/sign/signature-type.go} | 0 .../services/sign/signature-v4-parser.go | 104 +---- .../services/sign/signature-v4-streaming.go} | 115 +----- s3/api/services/sign/signature-v4-utils.go | 276 +++++++++++++ s3/api/services/sign/signature-v4.go | 119 ++++++ s3/consts/consts.go | 152 +------ s3/handlers/handlers_multipart.go | 249 ------------ s3/handlers/handlers_object.go | 366 ----------------- s3/handlers/utils.go | 147 ------- s3/{utils => }/hash/errors.go | 0 s3/{utils => }/hash/reader.go | 0 s3/policy/policy.go | 31 +- s3/requests/input_errors.go | 65 --- s3/requests/validate_errors.go | 22 - s3/responses/object_header.go | 61 --- s3/responses/responses_common.go | 56 --- s3/s3.go | 14 +- s3/s3utils/utils.go | 380 ------------------ s3/services/sign/signature-v4-utils.go | 234 ----------- s3/services/sign/signature-v4.go | 272 ------------- s3/services/sign/signature.go | 98 ----- s3/utils/bgcontext.go | 35 -- s3/utils/{if.go => coalesce.go} | 0 s3/utils/levels.go | 15 - s3/utils/signature.go | 359 ----------------- s3/utils/utils.go | 9 - s3/utils/xml.go | 26 -- 76 files changed, 1859 insertions(+), 3371 deletions(-) delete mode 100644 s3/action/action_test.go rename s3/{cctx/cctx.go => api/contexts/contexts.go} (95%) rename s3/{cctx/access_key.go => api/contexts/contexts_access_key.go} (92%) rename s3/{cctx/handle_err.go => api/contexts/contexts_handle_inf.go} (95%) rename s3/{ => api}/handlers/handlers.go (57%) rename s3/{ => api}/handlers/handlers_bucket.go (62%) rename s3/{ => api}/handlers/handlers_middlewares.go (88%) create 
mode 100644 s3/api/handlers/handlers_multipart.go create mode 100644 s3/api/handlers/handlers_object.go rename s3/{ => api}/handlers/options.go (100%) rename s3/{ => api}/handlers/proto.go (100%) rename s3/{ => api}/providers/btfs_api.go (100%) rename s3/{ => api}/providers/btfs_api_options.go (100%) rename s3/{ => api}/providers/proto.go (100%) rename s3/{ => api}/providers/providers.go (100%) rename s3/{ => api}/providers/providers_options.go (100%) rename s3/{ => api}/providers/storage_state_store_proxy.go (100%) create mode 100644 s3/api/requests/errors.go rename s3/{requests/input.go => api/requests/parsers.go} (80%) rename s3/{ => api}/requests/parsers_bucket.go (77%) create mode 100644 s3/api/requests/parsers_multipart.go rename s3/{ => api}/requests/parsers_object.go (60%) rename s3/{requests/validate.go => api/requests/validates.go} (64%) rename s3/{ => api}/responses/errors.go (99%) rename s3/{protocol/responses.go => api/responses/writers.go} (73%) rename s3/{responses/responses_bucket.go => api/responses/writers_bucket.go} (58%) create mode 100644 s3/api/responses/writers_multipart.go rename s3/{responses/responses_object.go => api/responses/writers_object.go} (75%) rename s3/{ => api}/routers/options.go (100%) rename s3/{ => api}/routers/proto.go (100%) rename s3/{ => api}/routers/routers.go (96%) rename s3/{ => api}/server/options.go (100%) rename s3/{ => api}/server/server.go (95%) rename s3/{ => api}/services/accesskey/instance.go (94%) rename s3/{ => api}/services/accesskey/options.go (100%) rename s3/{ => api}/services/accesskey/proto.go (100%) rename s3/{ => api}/services/accesskey/service.go (89%) rename s3/{ => api}/services/object/clean_read_closer.go (100%) rename s3/{ => api}/services/object/options.go (100%) rename s3/{ => api}/services/object/proto.go (55%) rename s3/{ => api}/services/object/service.go (98%) rename s3/{ => api}/services/object/service_bucket.go (85%) rename s3/{ => api}/services/object/service_multipart.go (71%) rename 
s3/{ => api}/services/object/service_object.go (88%) rename s3/{ => api}/services/sign/options.go (100%) rename s3/{ => api}/services/sign/proto.go (81%) rename s3/{ => api}/services/sign/service.go (81%) rename s3/{services/sign/signature-auth-type.go => api/services/sign/signature-type.go} (100%) rename s3/{ => api}/services/sign/signature-v4-parser.go (62%) rename s3/{services/sign/streaming-signature-v4.go => api/services/sign/signature-v4-streaming.go} (77%) create mode 100644 s3/api/services/sign/signature-v4-utils.go create mode 100644 s3/api/services/sign/signature-v4.go delete mode 100644 s3/handlers/handlers_multipart.go delete mode 100644 s3/handlers/handlers_object.go delete mode 100644 s3/handlers/utils.go rename s3/{utils => }/hash/errors.go (100%) rename s3/{utils => }/hash/reader.go (100%) delete mode 100644 s3/requests/input_errors.go delete mode 100644 s3/requests/validate_errors.go delete mode 100644 s3/responses/object_header.go delete mode 100644 s3/responses/responses_common.go delete mode 100644 s3/s3utils/utils.go delete mode 100644 s3/services/sign/signature-v4-utils.go delete mode 100644 s3/services/sign/signature-v4.go delete mode 100644 s3/services/sign/signature.go delete mode 100644 s3/utils/bgcontext.go rename s3/utils/{if.go => coalesce.go} (100%) delete mode 100644 s3/utils/levels.go delete mode 100644 s3/utils/signature.go delete mode 100644 s3/utils/utils.go delete mode 100644 s3/utils/xml.go diff --git a/cmd/btfs/daemon.go b/cmd/btfs/daemon.go index 0fb94782e..91f14ce38 100644 --- a/cmd/btfs/daemon.go +++ b/cmd/btfs/daemon.go @@ -8,7 +8,7 @@ import ( _ "expvar" "fmt" "github.com/bittorrent/go-btfs/s3" - "github.com/bittorrent/go-btfs/s3/services/accesskey" + "github.com/bittorrent/go-btfs/s3/api/services/accesskey" "io/ioutil" "math/rand" "net" diff --git a/core/commands/accesskey.go b/core/commands/accesskey.go index 0b3a51928..013e0ca63 100644 --- a/core/commands/accesskey.go +++ b/core/commands/accesskey.go @@ -4,7 +4,7 @@ 
import ( "errors" cmds "github.com/bittorrent/go-btfs-cmds" "github.com/bittorrent/go-btfs/core/commands/cmdenv" - "github.com/bittorrent/go-btfs/s3/services/accesskey" + "github.com/bittorrent/go-btfs/s3/api/services/accesskey" ) var AccessKeyCmd = &cmds.Command{ diff --git a/s3/action/action_test.go b/s3/action/action_test.go deleted file mode 100644 index 696846a67..000000000 --- a/s3/action/action_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package action - -import "testing" - -func TestAction_IsValid(t *testing.T) { - testCases := []struct { - action Action - expectedResult bool - }{ - {Action("*"), true}, - {Action(PutObjectAction), true}, - {Action("abcd"), false}, - {Action(PutObjectAction + "*"), true}, - } - for _, testCase := range testCases { - if testCase.action.IsValid() != testCase.expectedResult { - t.Errorf("Test case failed: %s", testCase.action) - } - } -} -func TestAction_Match(t *testing.T) { - testCases := []struct { - name string - action Action - resource Action - expectedResult bool - }{ - {"test1", Action("*"), Action(""), true}, - {"test1", Action("*"), Action(PutObjectAction), true}, - {"test1", Action("*"), Action("abcd"), true}, - {"test2", Action(PutObjectAction), Action(""), false}, - {"test2", Action(PutObjectAction), Action(PutObjectAction), true}, - {"test2", Action(PutObjectAction), Action("abcd"), false}, - {"test3", Action(""), Action("*"), false}, - {"test3", Action(""), Action(PutObjectAction), false}, - {"test3", Action(""), Action("abcd"), false}, - } - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - if testCase.action.Match(testCase.resource) != testCase.expectedResult { - t.Errorf("Test case failed: %s", testCase.action) - } - }) - } -} diff --git a/s3/cctx/cctx.go b/s3/api/contexts/contexts.go similarity index 95% rename from s3/cctx/cctx.go rename to s3/api/contexts/contexts.go index 797043a1e..ca7211b75 100644 --- a/s3/cctx/cctx.go +++ b/s3/api/contexts/contexts.go @@ -1,4 +1,4 @@ 
-package cctx +package contexts import ( "context" diff --git a/s3/cctx/access_key.go b/s3/api/contexts/contexts_access_key.go similarity index 92% rename from s3/cctx/access_key.go rename to s3/api/contexts/contexts_access_key.go index ca4b1b5b8..cc4b6103f 100644 --- a/s3/cctx/access_key.go +++ b/s3/api/contexts/contexts_access_key.go @@ -1,4 +1,4 @@ -package cctx +package contexts import ( "net/http" diff --git a/s3/cctx/handle_err.go b/s3/api/contexts/contexts_handle_inf.go similarity index 95% rename from s3/cctx/handle_err.go rename to s3/api/contexts/contexts_handle_inf.go index f07b91f4c..c1a25ef6e 100644 --- a/s3/cctx/handle_err.go +++ b/s3/api/contexts/contexts_handle_inf.go @@ -1,4 +1,4 @@ -package cctx +package contexts import ( "net/http" diff --git a/s3/handlers/handlers.go b/s3/api/handlers/handlers.go similarity index 57% rename from s3/handlers/handlers.go rename to s3/api/handlers/handlers.go index d8ddb4460..d9ae7697f 100644 --- a/s3/handlers/handlers.go +++ b/s3/api/handlers/handlers.go @@ -2,16 +2,13 @@ package handlers import ( - "context" - "github.com/bittorrent/go-btfs/s3/requests" - "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/s3utils" - "github.com/bittorrent/go-btfs/s3/services/accesskey" - "github.com/bittorrent/go-btfs/s3/services/object" - "github.com/bittorrent/go-btfs/s3/services/sign" - "github.com/bittorrent/go-btfs/s3/utils/hash" + "github.com/bittorrent/go-btfs/s3/api/requests" + "github.com/bittorrent/go-btfs/s3/api/responses" + "github.com/bittorrent/go-btfs/s3/api/services/accesskey" + "github.com/bittorrent/go-btfs/s3/api/services/object" + "github.com/bittorrent/go-btfs/s3/api/services/sign" + "github.com/bittorrent/go-btfs/s3/hash" "net/http" - "net/url" "runtime" ) @@ -24,7 +21,9 @@ type Handlers struct { objsvc object.Service } -func NewHandlers(acksvc accesskey.Service, sigsvc sign.Service, objsvc object.Service, options ...Option) (handlers *Handlers) { +func NewHandlers( + acksvc 
accesskey.Service, sigsvc sign.Service, objsvc object.Service, + options ...Option) (handlers *Handlers) { handlers = &Handlers{ headers: defaultHeaders, acksvc: acksvc, @@ -37,6 +36,7 @@ func NewHandlers(acksvc accesskey.Service, sigsvc sign.Service, objsvc object.Se return } +// name returns name of the called handler func (h *Handlers) name() string { pc := make([]uintptr, 1) runtime.Callers(3, pc) @@ -44,10 +44,10 @@ func (h *Handlers) name() string { return f.Name() } -func (h *Handlers) toRespErr(err error) (rerr *responses.Error) { +// toResponseErr convert internal error to response error +func (h *Handlers) toResponseErr(err error) (rerr *responses.Error) { switch err { - - // requests errors + // Errors from requests case requests.ErrBucketNameInvalid: rerr = responses.ErrInvalidBucketName case requests.ErrObjectNameInvalid: @@ -60,24 +60,43 @@ func (h *Handlers) toRespErr(err error) (rerr *responses.Error) { rerr = responses.ErrInvalidRegion case requests.ErrACLUnsupported: rerr = responses.ErrMalformedACLError - case requests.ErrInvalidContentMd5: + case requests.ErrContentMd5Invalid: rerr = responses.ErrInvalidDigest - case requests.ErrInvalidChecksumSha256: + case requests.ErrChecksumSha256Invalid: rerr = responses.ErrContentSHA256Mismatch case requests.ErrContentLengthMissing: rerr = responses.ErrMissingContentLength + case requests.ErrContentLengthTooSmall: + rerr = responses.ErrEntityTooSmall case requests.ErrContentLengthTooLarge: rerr = responses.ErrEntityTooLarge case requests.ErrCopySrcInvalid: rerr = responses.ErrInvalidCopySource case requests.ErrCopyDestInvalid: rerr = responses.ErrInvalidCopyDest + case requests.ErrDeletesCountInvalid: + rerr = responses.ErrInvalidRequest case requests.ErrMaxKeysInvalid: rerr = responses.ErrInvalidMaxKeys + case requests.ErrPrefixInvalid: + rerr = responses.ErrInvalidRequest + case requests.ErrMarkerInvalid: + rerr = responses.ErrInvalidRequest case requests.ErrMarkerPrefixCombinationInvalid: rerr = 
responses.ErrInvalidRequest - - // object service errors + case requests.ErrContinuationTokenInvalid: + rerr = responses.ErrIncorrectContinuationToken + case requests.ErrStartAfterInvalid: + rerr = responses.ErrInvalidRequest + case requests.ErrPartNumberInvalid: + rerr = responses.ErrInvalidPartNumber + case requests.ErrPartsCountInvalid: + rerr = responses.ErrInvalidRequest + case requests.ErrPartInvalid: + rerr = responses.ErrInvalidPart + case requests.ErrPartOrderInvalid: + rerr = responses.ErrInvalidPartOrder + // Errors from Object service case object.ErrBucketNotFound: rerr = responses.ErrNoSuchBucket case object.ErrBucketNotEmpty: @@ -90,12 +109,19 @@ func (h *Handlers) toRespErr(err error) (rerr *responses.Error) { rerr = responses.ErrBucketAlreadyExists case object.ErrNotAllowed: rerr = responses.ErrAccessDenied - case context.Canceled: + case object.ErrPartNotExists: + rerr = responses.ErrInvalidPart + case object.ErrPartETagNotMatch: + rerr = responses.ErrInvalidPart + case object.ErrPartTooSmall: + rerr = responses.ErrEntityTooSmall + case object.ErrCanceled: rerr = responses.ErrClientDisconnected - case context.DeadlineExceeded: + case object.ErrTimout: rerr = responses.ErrOperationTimedOut + // Others default: - switch err.(type) { + switch nerr := err.(type) { case requests.ErrFailedParseValue: rerr = responses.ErrInvalidRequest case requests.ErrFailedDecodeXML: @@ -104,35 +130,16 @@ func (h *Handlers) toRespErr(err error) (rerr *responses.Error) { rerr = responses.ErrInvalidRequest case requests.ErrWithUnsupportedParam: rerr = responses.ErrNotImplemented - case hash.SHA256Mismatch: rerr = responses.ErrContentSHA256Mismatch case hash.BadDigest: rerr = responses.ErrBadDigest - case s3utils.BucketNameInvalid: - rerr = responses.ErrInvalidBucketName - case s3utils.ObjectNameInvalid: - rerr = responses.ErrInvalidObjectName - case s3utils.ObjectNameTooLong: - rerr = responses.ErrKeyTooLongError - case s3utils.ObjectNamePrefixAsSlash: - rerr = 
responses.ErrInvalidObjectNamePrefixSlash - case s3utils.InvalidUploadIDKeyCombination: - rerr = responses.ErrNotImplemented - case s3utils.InvalidMarkerPrefixCombination: - rerr = responses.ErrNotImplemented - case s3utils.MalformedUploadID: - rerr = responses.ErrNoSuchUpload - case s3utils.InvalidUploadID: - rerr = responses.ErrNoSuchUpload - case s3utils.InvalidPart: - rerr = responses.ErrInvalidPart - case s3utils.PartTooSmall: - rerr = responses.ErrEntityTooSmall - case s3utils.PartTooBig: - rerr = responses.ErrEntityTooLarge - case url.EscapeError: - rerr = responses.ErrInvalidObjectName + case hash.ErrSizeMismatch: + if nerr.Got < nerr.Want { + rerr = responses.ErrIncompleteBody + } else { + rerr = responses.ErrMissingContentLength + } default: rerr = responses.ErrInternalError } diff --git a/s3/handlers/handlers_bucket.go b/s3/api/handlers/handlers_bucket.go similarity index 62% rename from s3/handlers/handlers_bucket.go rename to s3/api/handlers/handlers_bucket.go index 4ee102e98..d1771b319 100644 --- a/s3/handlers/handlers_bucket.go +++ b/s3/api/handlers/handlers_bucket.go @@ -1,9 +1,9 @@ package handlers import ( - "github.com/bittorrent/go-btfs/s3/cctx" - "github.com/bittorrent/go-btfs/s3/requests" - "github.com/bittorrent/go-btfs/s3/responses" + "github.com/bittorrent/go-btfs/s3/api/contexts" + "github.com/bittorrent/go-btfs/s3/api/requests" + "github.com/bittorrent/go-btfs/s3/api/responses" "net/http" ) @@ -11,18 +11,18 @@ func (h *Handlers) CreateBucketHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() var err error defer func() { - cctx.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err) }() args, err := requests.ParseCreateBucketRequest(r) if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } buc, err := h.objsvc.CreateBucket(ctx, args) if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) + 
responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } @@ -34,18 +34,18 @@ func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() var err error defer func() { - cctx.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err) }() args, err := requests.ParseHeadBucketRequest(r) if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } buc, err := h.objsvc.GetBucket(ctx, args) if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } @@ -57,18 +57,18 @@ func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() var err error defer func() { - cctx.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err) }() args, err := requests.ParseDeleteBucketRequest(r) if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } err = h.objsvc.DeleteBucket(ctx, args) if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } @@ -79,22 +79,22 @@ func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { var err error defer func() { - cctx.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err) }() args, err := requests.ParseListBucketsRequest(r) if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } list, err := h.objsvc.ListBuckets(r.Context(), args) if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - responses.WriteListBucketsResponse(w, r, args.AccessKey, 
list) + responses.WriteListBucketsResponse(w, r, list) return } @@ -102,22 +102,22 @@ func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() var err error defer func() { - cctx.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err) }() args, err := requests.ParseGetBucketACLRequest(r) if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } acl, err := h.objsvc.GetBucketACL(ctx, args) if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - responses.WriteGetBucketACLResponse(w, r, args.AccessKey, acl) + responses.WriteGetBucketACLResponse(w, r, acl) return } @@ -125,18 +125,18 @@ func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() var err error defer func() { - cctx.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err) }() args, err := requests.ParsePutBucketAclRequest(r) if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } err = h.objsvc.PutBucketACL(ctx, args) if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } diff --git a/s3/handlers/handlers_middlewares.go b/s3/api/handlers/handlers_middlewares.go similarity index 88% rename from s3/handlers/handlers_middlewares.go rename to s3/api/handlers/handlers_middlewares.go index 5e15162b4..cf7181b6c 100644 --- a/s3/handlers/handlers_middlewares.go +++ b/s3/api/handlers/handlers_middlewares.go @@ -3,10 +3,10 @@ package handlers import ( "errors" "fmt" - "github.com/bittorrent/go-btfs/s3/cctx" + "github.com/bittorrent/go-btfs/s3/api/contexts" + "github.com/bittorrent/go-btfs/s3/api/responses" + "github.com/bittorrent/go-btfs/s3/api/services/accesskey" 
"github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/services/accesskey" rscors "github.com/rs/cors" "net/http" "strconv" @@ -40,7 +40,7 @@ func (h *Handlers) Log(handler http.Handler) http.Handler { start := time.Now() fmt.Printf("s3-api: [I] %s | <%-4s> | %s\n", start.Format(time.RFC3339), r.Method, r.URL) handler.ServeHTTP(w, r) - hname, herr := cctx.GetHandleInf(r) + hname, herr := contexts.GetHandleInf(r) end := time.Now() ela := end.Sub(start) fmt.Printf("s3-api: [O] %s | <%-4s> | %s | %s | %v | %s \n", end.Format(time.RFC3339), r.Method, r.URL, hname, herr, ela) @@ -69,7 +69,7 @@ func (h *Handlers) Sign(handler http.Handler) http.Handler { var err *responses.Error defer func() { if err != nil { - cctx.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err) } }() @@ -79,7 +79,7 @@ func (h *Handlers) Sign(handler http.Handler) http.Handler { return } - cctx.SetAccessKey(r, ack) + contexts.SetAccessKey(r, ack) handler.ServeHTTP(w, r) }) diff --git a/s3/api/handlers/handlers_multipart.go b/s3/api/handlers/handlers_multipart.go new file mode 100644 index 000000000..386c65fc2 --- /dev/null +++ b/s3/api/handlers/handlers_multipart.go @@ -0,0 +1,100 @@ +package handlers + +import ( + "github.com/bittorrent/go-btfs/s3/api/contexts" + "github.com/bittorrent/go-btfs/s3/api/requests" + "github.com/bittorrent/go-btfs/s3/api/responses" + "net/http" +) + +func (h *Handlers) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + var err error + defer func() { + contexts.SetHandleInf(r, h.name(), err) + }() + + args, err := requests.ParseCreateMultipartUploadRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + multipart, err := h.objsvc.CreateMultipartUpload(ctx, args) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + 
responses.WriteCreateMultipartUploadResponse(w, r, multipart) + return +} + +func (h *Handlers) UploadPartHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + var err error + defer func() { + contexts.SetHandleInf(r, h.name(), err) + }() + + args, err := requests.ParseUploadPartRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + part, err := h.objsvc.UploadPart(ctx, args) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + responses.WriteUploadPartResponse(w, r, part) + return +} + +func (h *Handlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + var err error + defer func() { + contexts.SetHandleInf(r, h.name(), err) + }() + + args, err := requests.ParseAbortMultipartUploadRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + err = h.objsvc.AbortMultipartUpload(ctx, args) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + responses.WriteAbortMultipartUploadResponse(w, r) + return +} + +func (h *Handlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + var err error + defer func() { + contexts.SetHandleInf(r, h.name(), err) + }() + + args, err := requests.ParseCompleteMultipartUploadRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + obj, err := h.objsvc.CompleteMultiPartUpload(ctx, args) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + responses.WriteCompleteMultipartUploadResponse(w, r, obj) + return +} diff --git a/s3/api/handlers/handlers_object.go b/s3/api/handlers/handlers_object.go new file mode 100644 index 000000000..edad88e95 --- /dev/null +++ b/s3/api/handlers/handlers_object.go @@ -0,0 +1,223 @@ +package handlers + +import ( + 
"github.com/bittorrent/go-btfs/s3/api/contexts" + "github.com/bittorrent/go-btfs/s3/api/requests" + "github.com/bittorrent/go-btfs/s3/api/responses" + "net/http" +) + +// PutObjectHandler . +func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + var err error + defer func() { + contexts.SetHandleInf(r, h.name(), err) + }() + + args, err := requests.ParsePutObjectRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + obj, err := h.objsvc.PutObject(ctx, args) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + responses.WritePutObjectResponse(w, r, obj) + return +} + +// CopyObjectHandler . +func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + var err error + defer func() { + contexts.SetHandleInf(r, h.name(), err) + }() + + args, err := requests.ParseCopyObjectRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + obj, err := h.objsvc.CopyObject(ctx, args) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + responses.WriteCopyObjectResponse(w, r, obj) + return +} + +// HeadObjectHandler . +func (h *Handlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + var err error + defer func() { + contexts.SetHandleInf(r, h.name(), err) + }() + + args, err := requests.ParseHeadObjectRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + obj, _, err := h.objsvc.GetObject(ctx, args) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + responses.WriteHeadObjectResponse(w, r, obj) + return +} + +// GetObjectHandler . 
+func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + var err error + defer func() { + contexts.SetHandleInf(r, h.name(), err) + }() + + args, err := requests.ParseGetObjectRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + obj, body, err := h.objsvc.GetObject(ctx, args) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + responses.WriteGetObjectResponse(w, r, obj, body) + return +} + +// DeleteObjectHandler . +func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + var err error + defer func() { + contexts.SetHandleInf(r, h.name(), err) + }() + + args, err := requests.ParseDeleteObjectRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + err = h.objsvc.DeleteObject(ctx, args) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + responses.WriteDeleteObjectResponse(w, r, nil) + return +} + +// DeleteObjectsHandler . +func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + var err error + defer func() { + contexts.SetHandleInf(r, h.name(), err) + }() + + args, err := requests.ParseDeleteObjectsRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + deletes, err := h.objsvc.DeleteObjects(ctx, args) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + responses.WriteDeleteObjectsResponse(w, r, h.toResponseErr, deletes) + return +} + +// ListObjectsHandler . 
+func (h *Handlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + var err error + defer func() { + contexts.SetHandleInf(r, h.name(), err) + }() + + args, err := requests.ParseListObjectsRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + list, err := h.objsvc.ListObjects(ctx, args) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + responses.WriteListObjectsResponse(w, r, list) + return +} + +// ListObjectsV2Handler . +func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + var err error + defer func() { + contexts.SetHandleInf(r, h.name(), err) + }() + + args, err := requests.ParseListObjectsV2Request(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + list, err := h.objsvc.ListObjectsV2(ctx, args) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + responses.WriteListObjectsV2Response(w, r, list) + return +} + +// GetObjectACLHandler - GET Object ACL +func (h *Handlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + var err error + defer func() { + contexts.SetHandleInf(r, h.name(), err) + }() + + args, err := requests.ParseGetObjectACLRequest(r) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + acl, err := h.objsvc.GetObjectACL(ctx, args) + if err != nil { + responses.WriteErrorResponse(w, r, h.toResponseErr(err)) + return + } + + responses.WriteGetObjectACLResponse(w, r, acl) + return +} diff --git a/s3/handlers/options.go b/s3/api/handlers/options.go similarity index 100% rename from s3/handlers/options.go rename to s3/api/handlers/options.go diff --git a/s3/handlers/proto.go b/s3/api/handlers/proto.go similarity index 100% rename from s3/handlers/proto.go rename to s3/api/handlers/proto.go diff --git 
a/s3/providers/btfs_api.go b/s3/api/providers/btfs_api.go similarity index 100% rename from s3/providers/btfs_api.go rename to s3/api/providers/btfs_api.go diff --git a/s3/providers/btfs_api_options.go b/s3/api/providers/btfs_api_options.go similarity index 100% rename from s3/providers/btfs_api_options.go rename to s3/api/providers/btfs_api_options.go diff --git a/s3/providers/proto.go b/s3/api/providers/proto.go similarity index 100% rename from s3/providers/proto.go rename to s3/api/providers/proto.go diff --git a/s3/providers/providers.go b/s3/api/providers/providers.go similarity index 100% rename from s3/providers/providers.go rename to s3/api/providers/providers.go diff --git a/s3/providers/providers_options.go b/s3/api/providers/providers_options.go similarity index 100% rename from s3/providers/providers_options.go rename to s3/api/providers/providers_options.go diff --git a/s3/providers/storage_state_store_proxy.go b/s3/api/providers/storage_state_store_proxy.go similarity index 100% rename from s3/providers/storage_state_store_proxy.go rename to s3/api/providers/storage_state_store_proxy.go diff --git a/s3/api/requests/errors.go b/s3/api/requests/errors.go new file mode 100644 index 000000000..c26b3be58 --- /dev/null +++ b/s3/api/requests/errors.go @@ -0,0 +1,103 @@ +package requests + +import ( + "errors" + "fmt" + "reflect" +) + +var ( + ErrBucketNameInvalid = errors.New("the bucket name is invalid") + ErrObjectNameInvalid = errors.New("the object name is invalid") + ErrObjectNameTooLong = errors.New("the object name cannot be longer than 1024 characters") + ErrObjectNamePrefixSlash = errors.New("the object name cannot start with slash") + ErrRegionUnsupported = errors.New("the location is not supported by this server") + ErrACLUnsupported = errors.New("the ACL is not supported by this server") + ErrContentMd5Invalid = errors.New("the content md5 is invalid") + ErrChecksumSha256Invalid = errors.New("the checksum-sha256 is invalid") + 
ErrContentLengthMissing = errors.New("the content-length is missing") + ErrContentLengthTooSmall = errors.New("the content-length is too small") + ErrContentLengthTooLarge = errors.New("the content-length is too large") + ErrCopySrcInvalid = errors.New("the copy-source is invalid") + ErrCopyDestInvalid = errors.New("the copy-destination is invalid") + ErrDeletesCountInvalid = errors.New("the deletes-count is invalid") + ErrMaxKeysInvalid = errors.New("the max-keys is invalid") + ErrEncodingTypeInvalid = errors.New("the encoding-type is invalid") + ErrPrefixInvalid = errors.New("the prefix is invalid") + ErrMarkerInvalid = errors.New("the marker is invalid") + ErrMarkerPrefixCombinationInvalid = errors.New("the marker-prefix combination is invalid") + ErrContinuationTokenInvalid = errors.New("the continuation-token is invalid") + ErrStartAfterInvalid = errors.New("the start-after is invalid") + ErrPartNumberInvalid = errors.New("the part-number is invalid") + ErrPartsCountInvalid = errors.New("the parts-count is invalid") + ErrPartInvalid = errors.New("the part is invalid") + ErrPartOrderInvalid = errors.New("the part-order is invalid") +) + +// ErrInvalidInputValue . +type ErrInvalidInputValue struct { + msg string +} + +func (err ErrInvalidInputValue) Error() string { + return fmt.Sprintf("invalid input value: %s", err.msg) +} + +// ErrTypeNotSet . +type ErrTypeNotSet struct { + typ reflect.Type +} + +func (err ErrTypeNotSet) Error() string { + return fmt.Sprintf("type <%s> not set", err.typ.String()) +} + +// ErrPayloadNotSet . +type ErrPayloadNotSet struct { + el string +} + +func (err ErrPayloadNotSet) Error() string { + return fmt.Sprintf("payload <%s> not set", err.el) +} + +// ErrFailedDecodeXML . +type ErrFailedDecodeXML struct { + err error +} + +func (err ErrFailedDecodeXML) Error() string { + return fmt.Sprintf("decode xml: %v", err.err) +} + +// ErrWithUnsupportedParam . 
+type ErrWithUnsupportedParam struct { + param string +} + +func (err ErrWithUnsupportedParam) Error() string { + return fmt.Sprintf("param %s is unsported", err.param) +} + +// ErrFailedParseValue . +type ErrFailedParseValue struct { + name string + err error +} + +func (err ErrFailedParseValue) Name() string { + return err.name +} + +func (err ErrFailedParseValue) Error() string { + return fmt.Sprintf("parse <%s> value: %v", err.name, err.err) +} + +// ErrMissingRequiredParam . +type ErrMissingRequiredParam struct { + param string +} + +func (err ErrMissingRequiredParam) Error() string { + return fmt.Sprintf("missing required param <%s>", err.param) +} diff --git a/s3/requests/input.go b/s3/api/requests/parsers.go similarity index 80% rename from s3/requests/input.go rename to s3/api/requests/parsers.go index 09d71dba7..4bc95c271 100644 --- a/s3/requests/input.go +++ b/s3/api/requests/parsers.go @@ -17,61 +17,47 @@ import ( type fields map[string]bool -func ParseInput(r *http.Request, input interface{}, supports fields) (err error) { +func ParseLocation(r *http.Request, input interface{}, supports fields) (err error) { inv, err := valueOf(input) if err != nil { return } err = parseLocation(r, inv, supports) - if err != nil { - return - } - err = parseBody(r, inv, supports) return } -func valueOf(input interface{}) (inv reflect.Value, err error) { - defer func() { - if err != nil { - err = ErrInvalidInputValue{err} - } - }() - if input == nil { - err = errors.New("input is nil") - return - } - t := reflect.TypeOf(input) - k := t.Kind() - if k != reflect.Pointer { - err = errors.New("input is non pointer") +func ParseXMLBody(r *http.Request, input interface{}) (err error) { + inv, err := valueOf(input) + if err != nil { return } - inv = reflect.ValueOf(input).Elem() - if !inv.IsValid() { - err = errors.New("input is nil pointer") + pft, ok := getPayloadField(inv) + if !ok { + err = ErrPayloadNotSet{"field"} return } - t = t.Elem() - k = t.Kind() - if k == 
reflect.Struct { + ptyp := pft.Tag.Get("type") + if ptyp != "structure" { + err = ErrPayloadNotSet{"structure"} return } - if k != reflect.Pointer { - err = errors.New("the type input point to is neither struct nor pointer") - return + decoder := xml.NewDecoder(r.Body) + err = xmlutil.UnmarshalXML(inv.Addr().Interface(), decoder, "") + if err != nil { + err = ErrFailedDecodeXML{err} } - t = t.Elem() - k = t.Kind() - if k != reflect.Struct { - err = errors.New("the pointer input point to is not point to struct") + return +} + +func valueOf(input interface{}) (inv reflect.Value, err error) { + inv = reflect.Indirect(reflect.ValueOf(input)) + if !inv.IsValid() { + err = ErrInvalidInputValue{"input is nil"} return } - if inv.Elem().IsValid() { - inv = inv.Elem() - return + if inv.Kind() != reflect.Struct { + err = ErrInvalidInputValue{"input is not point to struct"} } - inv.Set(reflect.New(inv.Type().Elem())) - inv = inv.Elem() return } @@ -156,37 +142,6 @@ func getPayloadField(inv reflect.Value) (ft reflect.StructField, ok bool) { return } -func parseBody(r *http.Request, inv reflect.Value, supports fields) (err error) { - pft, ok := getPayloadField(inv) - if !ok { - return - } - name := pft.Name - supp := supports[name] - requ := pft.Tag.Get("required") == "true" - ptyp := pft.Tag.Get("type") - if ptyp != "structure" { - return - } - if !supp && r.ContentLength > 0 { - err = ErrWithUnsupportedParam{name} - return - } - if requ && r.ContentLength < 1 { - err = ErrMissingRequiredParam{name} - return - } - if r.ContentLength < 1 { - return - } - decoder := xml.NewDecoder(r.Body) - err = xmlutil.UnmarshalXML(inv.Addr().Interface(), decoder, "") - if err != nil { - err = ErrFailedDecodeXML{err} - } - return -} - func getHeaderValues(header http.Header, prefix string) (vals map[string]*string, has bool) { defer func() { has = len(vals) > 0 diff --git a/s3/requests/parsers_bucket.go b/s3/api/requests/parsers_bucket.go similarity index 77% rename from 
s3/requests/parsers_bucket.go rename to s3/api/requests/parsers_bucket.go index 6df265a9a..479a37029 100644 --- a/s3/requests/parsers_bucket.go +++ b/s3/api/requests/parsers_bucket.go @@ -2,8 +2,8 @@ package requests import ( "github.com/aws/aws-sdk-go/service/s3" - "github.com/bittorrent/go-btfs/s3/cctx" - "github.com/bittorrent/go-btfs/s3/services/object" + "github.com/bittorrent/go-btfs/s3/api/contexts" + "github.com/bittorrent/go-btfs/s3/api/services/object" "net/http" ) @@ -15,12 +15,12 @@ var createBucketSupports = fields{ func ParseCreateBucketRequest(r *http.Request) (args *object.CreateBucketArgs, err error) { var input s3.CreateBucketInput - err = ParseInput(r, &input, createBucketSupports) + err = ParseLocation(r, &input, createBucketSupports) if err != nil { return } args = &object.CreateBucketArgs{ - AccessKey: cctx.GetAccessKey(r), + UserId: contexts.GetAccessKey(r), } args.Bucket, err = ValidateBucketName(input.Bucket) if err != nil { @@ -40,12 +40,12 @@ var headBucketSupports = fields{ func ParseHeadBucketRequest(r *http.Request) (args *object.GetBucketArgs, err error) { var input s3.HeadBucketInput - err = ParseInput(r, &input, headBucketSupports) + err = ParseLocation(r, &input, headBucketSupports) if err != nil { return } args = &object.GetBucketArgs{ - AccessKey: cctx.GetAccessKey(r), + UserId: contexts.GetAccessKey(r), } args.Bucket, err = ValidateBucketName(input.Bucket) return @@ -57,12 +57,12 @@ var deleteBucketSupports = fields{ func ParseDeleteBucketRequest(r *http.Request) (args *object.DeleteBucketArgs, err error) { var input s3.DeleteBucketInput - err = ParseInput(r, &input, deleteBucketSupports) + err = ParseLocation(r, &input, deleteBucketSupports) if err != nil { return } args = &object.DeleteBucketArgs{ - AccessKey: cctx.GetAccessKey(r), + UserId: contexts.GetAccessKey(r), } args.Bucket, err = ValidateBucketName(input.Bucket) return @@ -72,12 +72,12 @@ var listBucketsSupports = fields{} func ParseListBucketsRequest(r *http.Request) 
(args *object.ListBucketsArgs, err error) { var input s3.ListBucketsInput - err = ParseInput(r, input, listBucketsSupports) + err = ParseLocation(r, input, listBucketsSupports) if err != nil { return } args = &object.ListBucketsArgs{ - AccessKey: cctx.GetAccessKey(r), + UserId: contexts.GetAccessKey(r), } return } @@ -89,12 +89,12 @@ var putBucketACLSupports = fields{ func ParsePutBucketAclRequest(r *http.Request) (args *object.PutBucketACLArgs, err error) { var input s3.PutBucketAclInput - err = ParseInput(r, &input, putBucketACLSupports) + err = ParseLocation(r, &input, putBucketACLSupports) if err != nil { return } args = &object.PutBucketACLArgs{ - AccessKey: cctx.GetAccessKey(r), + UserId: contexts.GetAccessKey(r), } args.Bucket, err = ValidateBucketName(input.Bucket) if err != nil { @@ -110,12 +110,12 @@ var getBucketACLSupports = fields{ func ParseGetBucketACLRequest(r *http.Request) (args *object.GetBucketACLArgs, err error) { var input s3.GetBucketAclInput - err = ParseInput(r, &input, getBucketACLSupports) + err = ParseLocation(r, &input, getBucketACLSupports) if err != nil { return } args = &object.GetBucketACLArgs{ - AccessKey: cctx.GetAccessKey(r), + UserId: contexts.GetAccessKey(r), } args.Bucket, err = ValidateBucketName(input.Bucket) return diff --git a/s3/api/requests/parsers_multipart.go b/s3/api/requests/parsers_multipart.go new file mode 100644 index 000000000..fc99de44c --- /dev/null +++ b/s3/api/requests/parsers_multipart.go @@ -0,0 +1,184 @@ +package requests + +import ( + "errors" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/api/contexts" + "github.com/bittorrent/go-btfs/s3/api/services/object" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/hash" + "net/http" +) + +var createMultipartUploadSupports = fields{ + "Bucket": true, + "Key": true, + "ContentLength": true, + "ContentEncoding": true, + "ContentType": true, + "Expires": true, +} + +func 
ParseCreateMultipartUploadRequest(r *http.Request) (args *object.CreateMultipartUploadArgs, err error) { + var input s3.CreateMultipartUploadInput + err = ParseLocation(r, &input, createMultipartUploadSupports) + if err != nil { + return + } + args = &object.CreateMultipartUploadArgs{ + UserId: contexts.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + if err != nil { + return + } + args.Object, err = ValidateObjectName(input.Key) + if err != nil { + return + } + args.ContentType, err = ValidateContentType(input.ContentType) + if err != nil { + return + } + args.ContentEncoding, err = ValidateContentEncoding(input.ContentEncoding) + if err != nil { + return + } + args.Expires, err = ValidateExpires(input.Expires) + return +} + +var uploadPartSupports = fields{ + "Body": true, + "Bucket": true, + "Key": true, + "UploadId": true, + "PartNumber": true, + "ContentLength": true, + "ContentMD5": true, + "ChecksumSHA256": true, +} + +func ParseUploadPartRequest(r *http.Request) (args *object.UploadPartArgs, err error) { + var input s3.UploadPartInput + err = ParseLocation(r, &input, uploadPartSupports) + if err != nil { + var er ErrFailedParseValue + if errors.As(err, &er) && er.Name() == consts.PartNumber { + err = ErrPartNumberInvalid + } + return + } + args = &object.UploadPartArgs{ + UserId: contexts.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + if err != nil { + return + } + args.Object, err = ValidateObjectName(input.Key) + if err != nil { + return + } + args.UploadId, err = ValidateUploadId(input.UploadId) + if err != nil { + return + } + args.PartNumber, err = ValidatePartNumber(input.PartNumber) + if err != nil { + return + } + args.ContentLength, err = ValidateContentLength(input.ContentLength, consts.MaxPartSize) + if err != nil { + return + } + contentMD5, err := ValidateContentMD5(input.ContentMD5) + if err != nil { + return + } + checksumSHA256, err := ValidateChecksumSHA256(input.ChecksumSHA256) + if 
err != nil { + return + } + args.Body, err = hash.NewReader( + r.Body, args.ContentLength, contentMD5, + checksumSHA256, args.ContentLength, + ) + return +} + +var abortMultipartUploadSupports = fields{ + "Bucket": true, + "Key": true, + "UploadId": true, +} + +func ParseAbortMultipartUploadRequest(r *http.Request) (args *object.AbortMultipartUploadArgs, err error) { + var input s3.AbortMultipartUploadInput + err = ParseLocation(r, &input, abortMultipartUploadSupports) + if err != nil { + return + } + args = &object.AbortMultipartUploadArgs{ + UserId: contexts.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + if err != nil { + return + } + args.Object, err = ValidateObjectName(input.Key) + if err != nil { + return + } + args.UploadId, err = ValidateUploadId(input.UploadId) + return +} + +var completeMultipartUploadSupports = fields{ + "Bucket": true, + "Key": true, + "UploadId": true, + "MultipartUpload": true, + "ChecksumSHA256": true, +} + +func ParseCompleteMultipartUploadRequest(r *http.Request) (args *object.CompleteMultipartUploadArgs, err error) { + var input s3.CompleteMultipartUploadInput + err = ParseLocation(r, &input, completeMultipartUploadSupports) + if err != nil { + return + } + args = &object.CompleteMultipartUploadArgs{ + UserId: contexts.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + if err != nil { + return + } + args.Object, err = ValidateObjectName(input.Key) + if err != nil { + return + } + args.UploadId, err = ValidateUploadId(input.UploadId) + if err != nil { + return + } + size, err := ValidateContentLength(&r.ContentLength, consts.MaxXMLBodySize) + if err != nil { + return + } + checksumSHA256, err := ValidateChecksumSHA256(input.ChecksumSHA256) + if err != nil { + return + } + r.Body, err = hash.NewReader(r.Body, size, "", checksumSHA256, size) + if err != nil { + return + } + err = ParseXMLBody(r, &input) + if err != nil { + return + } + args.CompletedParts, err = 
ValidateCompletedMultipartUpload(input.MultipartUpload) + return +} diff --git a/s3/requests/parsers_object.go b/s3/api/requests/parsers_object.go similarity index 60% rename from s3/requests/parsers_object.go rename to s3/api/requests/parsers_object.go index 75e4eada6..67d8fe369 100644 --- a/s3/requests/parsers_object.go +++ b/s3/api/requests/parsers_object.go @@ -3,9 +3,10 @@ package requests import ( "errors" "github.com/aws/aws-sdk-go/service/s3" - "github.com/bittorrent/go-btfs/s3/cctx" - "github.com/bittorrent/go-btfs/s3/services/object" - "github.com/bittorrent/go-btfs/s3/utils/hash" + "github.com/bittorrent/go-btfs/s3/api/contexts" + "github.com/bittorrent/go-btfs/s3/api/services/object" + "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/hash" "net/http" ) @@ -23,12 +24,12 @@ var putObjectSupports = fields{ func ParsePutObjectRequest(r *http.Request) (args *object.PutObjectArgs, err error) { var input s3.PutObjectInput - err = ParseInput(r, &input, putObjectSupports) + err = ParseLocation(r, &input, putObjectSupports) if err != nil { return } args = &object.PutObjectArgs{ - AccessKey: cctx.GetAccessKey(r), + UserId: contexts.GetAccessKey(r), } args.Bucket, err = ValidateBucketName(input.Bucket) if err != nil { @@ -38,7 +39,7 @@ func ParsePutObjectRequest(r *http.Request) (args *object.PutObjectArgs, err err if err != nil { return } - args.ContentLength, err = ValidateContentLength(input.ContentLength) + args.ContentLength, err = ValidateContentLength(input.ContentLength, consts.MaxObjectSize) if err != nil { return } @@ -58,7 +59,7 @@ func ParsePutObjectRequest(r *http.Request) (args *object.PutObjectArgs, err err if err != nil { return } - checksumSHA256, err := ValidateCheckSum(input.ChecksumSHA256) + checksumSHA256, err := ValidateChecksumSHA256(input.ChecksumSHA256) if err != nil { return } @@ -81,12 +82,12 @@ var copyObjectSupports = fields{ func ParseCopyObjectRequest(r *http.Request) (args *object.CopyObjectArgs, err 
error) { var input s3.CopyObjectInput - err = ParseInput(r, &input, copyObjectSupports) + err = ParseLocation(r, &input, copyObjectSupports) if err != nil { return } args = &object.CopyObjectArgs{ - AccessKey: cctx.GetAccessKey(r), + UserId: contexts.GetAccessKey(r), } args.Bucket, err = ValidateBucketName(input.Bucket) if err != nil { @@ -127,12 +128,12 @@ var headObjectSupports = fields{ func ParseHeadObjectRequest(r *http.Request) (args *object.GetObjectArgs, err error) { var input s3.HeadObjectInput - err = ParseInput(r, &input, headObjectSupports) + err = ParseLocation(r, &input, headObjectSupports) if err != nil { return } args = &object.GetObjectArgs{ - AccessKey: cctx.GetAccessKey(r), + UserId: contexts.GetAccessKey(r), } args.Bucket, err = ValidateBucketName(input.Bucket) if err != nil { @@ -153,12 +154,12 @@ var getObjectSupports = fields{ func ParseGetObjectRequest(r *http.Request) (args *object.GetObjectArgs, err error) { var input s3.GetObjectInput - err = ParseInput(r, &input, getObjectSupports) + err = ParseLocation(r, &input, getObjectSupports) if err != nil { return } args = &object.GetObjectArgs{ - AccessKey: cctx.GetAccessKey(r), + UserId: contexts.GetAccessKey(r), } args.Bucket, err = ValidateBucketName(input.Bucket) if err != nil { @@ -179,12 +180,12 @@ var deleteObjectSupports = fields{ func ParseDeleteObjectRequest(r *http.Request) (args *object.DeleteObjectArgs, err error) { var input s3.DeleteObjectInput - err = ParseInput(r, &input, deleteObjectSupports) + err = ParseLocation(r, &input, deleteObjectSupports) if err != nil { return } args = &object.DeleteObjectArgs{ - AccessKey: cctx.GetAccessKey(r), + UserId: contexts.GetAccessKey(r), } args.Bucket, err = ValidateBucketName(input.Bucket) if err != nil { @@ -201,17 +202,29 @@ var deleteObjectsSupports = fields{ func ParseDeleteObjectsRequest(r *http.Request) (args *object.DeleteObjectsArgs, err error) { var input s3.DeleteObjectsInput - err = ParseInput(r, &input, deleteObjectsSupports) + 
err = ParseLocation(r, &input, deleteObjectsSupports) if err != nil { return } args = &object.DeleteObjectsArgs{ - AccessKey: cctx.GetAccessKey(r), + UserId: contexts.GetAccessKey(r), } args.Bucket, err = ValidateBucketName(input.Bucket) if err != nil { return } + size, err := ValidateContentLength(&r.ContentLength, consts.MaxXMLBodySize) + if err != nil { + return + } + r.Body, err = hash.NewReader(r.Body, size, "", "", size) + if err != nil { + return + } + err = ParseXMLBody(r, &input) + if err != nil { + return + } args.ToDeleteObjects, args.Quite, err = ValidateObjectsDelete(input.Delete) return } @@ -227,16 +240,68 @@ var listObjectsSupports = fields{ func ParseListObjectsRequest(r *http.Request) (args *object.ListObjectsArgs, err error) { var input s3.ListObjectsInput - err = ParseInput(r, &input, listObjectsSupports) + err = ParseLocation(r, &input, listObjectsSupports) if err != nil { var er ErrFailedParseValue - if errors.As(err, &er) && er.Name() == "max-keys" { + if errors.As(err, &er) && er.Name() == consts.MaxKeys { err = ErrMaxKeysInvalid } return } args = &object.ListObjectsArgs{ - AccessKey: cctx.GetAccessKey(r), + UserId: contexts.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + if err != nil { + return + } + args.MaxKeys, err = ValidateMaxKeys(input.MaxKeys) + if err != nil { + return + } + args.Marker, err = ValidateMarker(input.Marker) + if err != nil { + return + } + args.Prefix, err = ValidatePrefix(input.Prefix) + if err != nil { + return + } + err = ValidateMarkerAndPrefixCombination(args.Marker, args.Prefix) + if err != nil { + return + } + args.Delimiter, err = ValidateDelimiter(input.Delimiter) + if err != nil { + return + } + args.EncodingType, err = ValidateEncodingType(input.EncodingType) + return +} + +var listObjectsV2Supports = fields{ + "Bucket": true, + "MaxKeys": true, + "Prefix": true, + "ContinuationToken": true, + "StartAfter": true, + "Delimiter": true, + "EncodingType": true, + "FetchOwner": 
true, +} + +func ParseListObjectsV2Request(r *http.Request) (args *object.ListObjectsV2Args, err error) { + var input s3.ListObjectsV2Input + err = ParseLocation(r, &input, listObjectsV2Supports) + if err != nil { + var er ErrFailedParseValue + if errors.As(err, &er) && er.Name() == "max-keys" { + err = ErrMaxKeysInvalid + } + return + } + args = &object.ListObjectsV2Args{ + UserId: contexts.GetAccessKey(r), } args.Bucket, err = ValidateBucketName(input.Bucket) if err != nil { @@ -246,7 +311,19 @@ func ParseListObjectsRequest(r *http.Request) (args *object.ListObjectsArgs, err if err != nil { return } - args.Marker, args.Prefix, err = ValidateMarkerAndPrefix(input.Marker, input.Prefix) + args.Token, err = ValidateContinuationToken(input.ContinuationToken) + if err != nil { + return + } + args.After, err = ValidateStartAfter(input.StartAfter) + if err != nil { + return + } + err = ValidateMarkerAndPrefixCombination(args.Token, args.Prefix) + if err != nil { + return + } + err = ValidateMarkerAndPrefixCombination(args.After, args.Prefix) if err != nil { return } @@ -255,5 +332,31 @@ func ParseListObjectsRequest(r *http.Request) (args *object.ListObjectsArgs, err return } args.EncodingType, err = ValidateEncodingType(input.EncodingType) + if err != nil { + return + } + args.FetchOwner, err = ValidateFetchOwner(input.FetchOwner) + return +} + +var getObjectACLSupports = fields{ + "Bucket": true, + "Key": true, +} + +func ParseGetObjectACLRequest(r *http.Request) (args *object.GetObjectACLArgs, err error) { + var input s3.GetObjectAclInput + err = ParseLocation(r, &input, getObjectACLSupports) + if err != nil { + return + } + args = &object.GetObjectACLArgs{ + UserId: contexts.GetAccessKey(r), + } + args.Bucket, err = ValidateBucketName(input.Bucket) + if err != nil { + return + } + args.Object, err = ValidateObjectName(input.Key) return } diff --git a/s3/requests/validate.go b/s3/api/requests/validates.go similarity index 64% rename from s3/requests/validate.go rename 
to s3/api/requests/validates.go index 445e80810..839ba7415 100644 --- a/s3/requests/validate.go +++ b/s3/api/requests/validates.go @@ -6,37 +6,28 @@ import ( "encoding/hex" "errors" "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/api/services/object" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/etag" - "github.com/bittorrent/go-btfs/s3/services/object" "net/url" - "path" "regexp" + "sort" "strings" "time" "unicode/utf8" ) -func ValidateBucketACL(acl *string) (val string, err error) { - if acl == nil || *acl == "" { - val = consts.DefaultBucketACL - } else { - val = *acl - } - if !consts.SupportedBucketACLs[val] { - err = ErrACLUnsupported - return - } - return -} - var ( validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-]{1,61}[A-Za-z0-9]$`) ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) ) func ValidateBucketName(bucketName *string) (val string, err error) { - if bucketName == nil || *bucketName == "" { + if bucketName == nil { + return + } + if *bucketName == "" { + err = ErrBucketNameInvalid return } val = *bucketName @@ -50,6 +41,19 @@ func ValidateBucketName(bucketName *string) (val string, err error) { return } +func ValidateBucketACL(acl *string) (val string, err error) { + if acl == nil || *acl == "" { + val = consts.DefaultBucketACL + } else { + val = *acl + } + if !consts.SupportedBucketACLs[val] { + err = ErrACLUnsupported + return + } + return +} + func ValidateCreateBucketConfiguration(configuration *s3.CreateBucketConfiguration) (val string, err error) { if configuration == nil || configuration.LocationConstraint == nil || *configuration.LocationConstraint == "" { val = consts.DefaultBucketRegion @@ -61,7 +65,11 @@ func ValidateCreateBucketConfiguration(configuration *s3.CreateBucketConfigurati } func ValidateObjectName(objectName *string) (val string, err error) { - if objectName == nil || *objectName == "" { + if objectName == nil { + return + } + if *objectName == "" { + err 
= ErrObjectNameInvalid return } val, err = url.PathUnescape(*objectName) @@ -95,19 +103,19 @@ func ValidateContentMD5(contentMD5 *string) (val string, err error) { return } if *contentMD5 == "" { - err = ErrInvalidContentMd5 + err = ErrContentMd5Invalid return } b, err := base64.StdEncoding.Strict().DecodeString(*contentMD5) if err != nil || len(b) != md5.Size { - err = ErrInvalidContentMd5 + err = ErrContentMd5Invalid return } val = etag.ETag(b).String() return } -func ValidateCheckSum(checksumSHA256 *string) (val string, err error) { +func ValidateChecksumSHA256(checksumSHA256 *string) (val string, err error) { if checksumSHA256 == nil || *checksumSHA256 == "" { return } @@ -116,14 +124,14 @@ func ValidateCheckSum(checksumSHA256 *string) (val string, err error) { } b, err := hex.DecodeString(*checksumSHA256) if err != nil || len(b) == 0 { - err = ErrInvalidChecksumSha256 + err = ErrChecksumSha256Invalid return } val = hex.EncodeToString(b) return } -func ValidateContentLength(contentLength *int64) (val int64, err error) { +func ValidateContentLength(contentLength *int64, max int64) (val int64, err error) { if contentLength == nil { return } @@ -135,7 +143,7 @@ func ValidateContentLength(contentLength *int64) (val int64, err error) { err = ErrContentLengthTooSmall return } - if *contentLength > consts.MaxObjectSize { + if *contentLength > max { err = ErrContentLengthTooLarge return } @@ -215,14 +223,11 @@ func ValidateMetadataDirective(metadataDirective *string) (val bool, err error) func ValidateObjectsDelete(delete *s3.Delete) (vals []*object.ToDeleteObject, quite bool, err error) { if delete == nil { + err = ErrFailedDecodeXML{errors.New("delete is nil")} return } - if len(delete.Objects) < 1 { - err = ErrFailedDecodeXML{errors.New("objects count is 0")} - return - } - if len(delete.Objects) > consts.MaxDeleteList { - err = ErrFailedDecodeXML{errors.New("objects count is too many")} + if len(delete.Objects) < 1 || len(delete.Objects) > consts.MaxDeleteList { + 
err = ErrDeletesCountInvalid return } if delete.Quiet != nil && *delete.Quiet == true { @@ -249,22 +254,30 @@ func ValidateMaxKeys(maxKeys *int64) (val int64, err error) { return } -func ValidateMarkerAndPrefix(marker, prefix *string) (val1, val2 string, err error) { - if marker != nil { - val1 = trimLeadingSlash(*marker) - } - if prefix != nil { - val2 = trimLeadingSlash(*prefix) +func ValidateMarker(marker *string) (val string, err error) { + if marker == nil || *marker == "" { + return } - val1, err = ValidateObjectName(&val1) + val, err = ValidateObjectName(marker) if err != nil { + err = ErrMarkerInvalid + } + return +} + +func ValidatePrefix(prefix *string) (val string, err error) { + if prefix == nil || *prefix == "" { return } - val2, err = ValidateObjectName(&val2) + val, err = ValidateObjectName(prefix) if err != nil { - return + err = ErrPrefixInvalid } - if !strings.HasPrefix(val1, val2) { + return +} + +func ValidateMarkerAndPrefixCombination(marker, prefix string) (err error) { + if marker != "" && !strings.HasPrefix(marker, prefix) { err = ErrMarkerPrefixCombinationInvalid } return @@ -290,16 +303,88 @@ func ValidateEncodingType(encodingType *string) (val string, err error) { return } -func trimLeadingSlash(ep string) string { - if len(ep) > 0 && ep[0] == '/' { - // Path ends with '/' preserve it - if ep[len(ep)-1] == '/' && len(ep) > 1 { - ep = path.Clean(ep) - ep += "/" - } else { - ep = path.Clean(ep) +func ValidateContinuationToken(continuationToken *string) (val string, err error) { + if continuationToken == nil || *continuationToken == "" { + return + } + token, err := base64.StdEncoding.DecodeString(*continuationToken) + if err != nil { + err = ErrContinuationTokenInvalid + return + } + tokenStr := string(token) + val, err = ValidateObjectName(&tokenStr) + if err != nil { + err = ErrContinuationTokenInvalid + } + return +} + +func ValidateStartAfter(startAfter *string) (val string, err error) { + if startAfter == nil || *startAfter == "" { + 
return + } + val, err = ValidateObjectName(startAfter) + if err != nil { + err = ErrStartAfterInvalid + } + return +} + +func ValidateFetchOwner(fetchOwner *bool) (val bool, err error) { + if fetchOwner == nil { + return + } + val = *fetchOwner + return +} + +func ValidateUploadId(uploadId *string) (val string, err error) { + if uploadId == nil { + return + } + val = *uploadId + return +} + +func ValidatePartNumber(partNumber *int64) (val int64, err error) { + if partNumber == nil { + return + } + if *partNumber < consts.MinPartNumber || *partNumber > consts.MaxPartNumber { + err = ErrPartNumberInvalid + return + } + val = *partNumber + return +} + +func ValidateCompletedMultipartUpload(upload *s3.CompletedMultipartUpload) (val object.CompletedParts, err error) { + if upload == nil { + err = ErrFailedDecodeXML{errors.New("complete-upload is nil")} + return + } + if len(upload.Parts) < 1 || len(upload.Parts) > consts.MaxPartNumber { + err = ErrPartsCountInvalid + return + } + for _, part := range upload.Parts { + if part.PartNumber == nil || part.ETag == nil { + err = ErrPartInvalid + return } - ep = ep[1:] + + opart := &object.CompletePart{ + ETag: *part.ETag, + } + opart.PartNumber, err = ValidatePartNumber(part.PartNumber) + if err != nil { + return + } + val = append(val, opart) } - return ep + if !sort.IsSorted(val) { + err = ErrPartOrderInvalid + } + return } diff --git a/s3/responses/errors.go b/s3/api/responses/errors.go similarity index 99% rename from s3/responses/errors.go rename to s3/api/responses/errors.go index 49dd9a22b..24bdfdd13 100644 --- a/s3/responses/errors.go +++ b/s3/api/responses/errors.go @@ -49,7 +49,7 @@ var ( description: "Body shouldn't be set for this request.", httpStatusCode: http.StatusBadRequest, } - ErrInvalidMaxUploads = &Error{ + ErrInvalidUploads = &Error{ code: "InvalidArgument", description: "Argument max-uploads must be an integer between 0 and 2147483647", httpStatusCode: http.StatusBadRequest, @@ -64,7 +64,7 @@ var ( 
description: "Invalid Encoding Method specified in Request", httpStatusCode: http.StatusBadRequest, } - ErrInvalidMaxParts = &Error{ + ErrInvalidPartNumber = &Error{ code: "InvalidArgument", description: "Part number must be an integer between 1 and 10000, inclusive", httpStatusCode: http.StatusBadRequest, diff --git a/s3/protocol/responses.go b/s3/api/responses/writers.go similarity index 73% rename from s3/protocol/responses.go rename to s3/api/responses/writers.go index d30de8c48..ac1805b27 100644 --- a/s3/protocol/responses.go +++ b/s3/api/responses/writers.go @@ -1,4 +1,4 @@ -package protocol +package responses import ( "bytes" @@ -8,15 +8,14 @@ import ( "fmt" "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" "github.com/bittorrent/go-btfs/s3/consts" + "github.com/bittorrent/go-btfs/s3/utils" "io" - "math" "net/http" "reflect" "strconv" "strings" "time" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/private/protocol" ) @@ -25,14 +24,29 @@ const ( noPayload = "nopayload" ) -const ( - floatNaN = "NaN" - floatInf = "Infinity" - floatNegInf = "-Infinity" -) - var errValueNotSet = fmt.Errorf("value not set") +func WriteSuccessResponse(w http.ResponseWriter, output interface{}, locationName string) { + _ = WriteResponse(w, http.StatusOK, output, locationName) +} + +type ErrorOutput struct { + _ struct{} `type:"structure"` + Code string `locationName:"Code"` + Message string `locationName:"Message"` + Resource string `locationName:"Resource"` + RequestID string `locationName:"RequestID"` +} + +func WriteErrorResponse(w http.ResponseWriter, r *http.Request, rerr *Error) { + _ = WriteResponse(w, rerr.HTTPStatusCode(), &ErrorOutput{ + Code: rerr.Code(), + Message: rerr.Description(), + Resource: r.URL.Path, + RequestID: "", + }, "Error") +} + func WriteResponse(w http.ResponseWriter, statusCode int, output interface{}, locationName string) (err error) { setCommonHeaders(w.Header()) @@ -112,7 +126,7 @@ func wrapOutput(output interface{}, locationName 
string) (wrapper interface{}) { } func extractBody(v reflect.Value) (body io.ReadCloser, clen int, ctyp string, err error) { - ptyp, _, pfvl := getPayload(v) + ptyp, pfvl := getPayload(v) if ptyp == noPayload { return } @@ -134,30 +148,11 @@ func extractBody(v reflect.Value) (body io.ReadCloser, clen int, ctyp string, er return } - switch pifc := pfvl.Interface().(type) { - case io.ReadCloser: - body = pifc - clen = -1 - case io.ReadSeeker: - var bs []byte - bs, err = io.ReadAll(pifc) - if err != nil { - return - } - body = io.NopCloser(bytes.NewBuffer(bs)) - clen = len(bs) - ctyp = http.DetectContentType(bs) - case []byte: - body = io.NopCloser(bytes.NewBuffer(pifc)) - clen = len(pifc) - case string: - body = io.NopCloser(bytes.NewBufferString(pifc)) - clen = len(pifc) - default: - err = fmt.Errorf( - "unknown payload type %s", - pfvl.Type(), - ) + clen = -1 + + body, ok := pfvl.Interface().(io.ReadCloser) + if !ok { + err = fmt.Errorf("unsupported payload type <%s>", pfvl.Type()) } return @@ -212,7 +207,7 @@ func setLocationHeaders(header http.Header, v reflect.Value) (err error) { switch ft.Tag.Get("location") { case "header": - name := ifemp(ft.Tag.Get("locationName"), ft.Name) + name := utils.CoalesceStr(ft.Tag.Get("locationName"), ft.Name) err = setHeaders(&header, fv, name, ft.Tag) case "headers": err = setHeadersMap(&header, fv, ft.Tag) @@ -256,7 +251,7 @@ func setHeadersMap(header *http.Header, v reflect.Value, tag reflect.StructTag) return } -func getPayload(v reflect.Value) (ptyp string, pftp reflect.Type, pfvl reflect.Value) { +func getPayload(v reflect.Value) (ptyp string, pfvl reflect.Value) { ptyp = noPayload field, ok := v.Type().FieldByName("_") @@ -280,7 +275,6 @@ func getPayload(v reflect.Value) (ptyp string, pftp reflect.Type, pfvl reflect.V } ptyp = pfld.Tag.Get("type") - pftp = pfld.Type pfvl = reflect.Indirect(v.FieldByName(payloadName)) return @@ -289,15 +283,11 @@ func getPayload(v reflect.Value) (ptyp string, pftp reflect.Type, pfvl 
reflect.V func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { v = reflect.Indirect(v) if !v.IsValid() { - err = errValueNotSet return } switch value := v.Interface().(type) { case string: - if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" { - value = base64.StdEncoding.EncodeToString([]byte(value)) - } str = value case []byte: str = base64.StdEncoding.EncodeToString(value) @@ -305,45 +295,22 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) str = strconv.FormatBool(value) case int64: str = strconv.FormatInt(value, 10) - case float64: - switch { - case math.IsNaN(value): - str = floatNaN - case math.IsInf(value, 1): - str = floatInf - case math.IsInf(value, -1): - str = floatNegInf - default: - str = strconv.FormatFloat(value, 'f', -1, 64) - } case time.Time: - format := tag.Get("timestampFormat") - if len(format) == 0 { - format = protocol.RFC822TimeFormatName - if tag.Get("location") == "querystring" { - format = protocol.ISO8601TimeFormatName - } - } - str = protocol.FormatTime(format, value) - case aws.JSONValue: - if len(value) == 0 { - return "", errValueNotSet - } - escaping := protocol.NoEscape - if tag.Get("location") == "header" { - escaping = protocol.Base64Escape - } - str, err = protocol.EncodeJSONValue(value, escaping) + str = protocol.FormatTime(getTimeFormat(tag), value) default: - err = fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) + err = fmt.Errorf("unsupported value type <%s>", v.Type()) } return } -func ifemp(a, b string) string { - if a != "" { - return a +func getTimeFormat(tag reflect.StructTag) string { + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } } - return b + return format } diff --git a/s3/responses/responses_bucket.go b/s3/api/responses/writers_bucket.go similarity 
index 58% rename from s3/responses/responses_bucket.go rename to s3/api/responses/writers_bucket.go index 7fb986326..ef7548475 100644 --- a/s3/responses/responses_bucket.go +++ b/s3/api/responses/writers_bucket.go @@ -2,13 +2,26 @@ package responses import ( "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/api/services/object" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/services/object" "net/http" ) +func newS3Owner(userId string) *s3.Owner { + return new(s3.Owner).SetID(userId).SetDisplayName(userId) +} + +func newS3FullControlGrant(userId string) *s3.Grant { + return new(s3.Grant).SetGrantee(new(s3.Grantee).SetType(s3.TypeCanonicalUser).SetID(userId).SetDisplayName(userId)).SetPermission(s3.PermissionFullControl) +} + +var ( + s3AllUsersReadGrant = new(s3.Grant).SetGrantee(new(s3.Grantee).SetType(s3.TypeGroup).SetURI(consts.AllUsersURI)).SetPermission(s3.PermissionRead) + s3AllUsersWriteGrant = new(s3.Grant).SetGrantee(new(s3.Grantee).SetType(s3.TypeGroup).SetURI(consts.AllUsersURI)).SetPermission(s3.PermissionWrite) +) + func WriteCreateBucketResponse(w http.ResponseWriter, r *http.Request, buc *object.Bucket) { - output := new(s3.CreateBucketOutput).SetLocation(pathClean(r.URL.Path)) + output := new(s3.CreateBucketOutput).SetLocation(r.URL.Path) w.Header().Add(consts.AmzACL, buc.ACL) WriteSuccessResponse(w, output, "") return @@ -27,11 +40,11 @@ func WriteDeleteBucketResponse(w http.ResponseWriter) { return } -func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, accessKey string, buckets []*object.Bucket) { +func WriteListBucketsResponse(w http.ResponseWriter, r *http.Request, list *object.BucketList) { output := new(s3.ListBucketsOutput) - output.SetOwner(owner(accessKey)) + output.SetOwner(newS3Owner(list.Owner)) s3Buckets := make([]*s3.Bucket, 0) - for _, buc := range buckets { + for _, buc := range list.Buckets { s3Bucket := 
new(s3.Bucket).SetName(buc.Name).SetCreationDate(buc.Created) s3Buckets = append(s3Buckets, s3Bucket) w.Header().Add(consts.AmzACL, buc.ACL) @@ -47,22 +60,22 @@ func WritePutBucketAclResponse(w http.ResponseWriter, r *http.Request) { return } -func WriteGetBucketACLResponse(w http.ResponseWriter, r *http.Request, accessKey string, acl string) { +func WriteGetBucketACLResponse(w http.ResponseWriter, r *http.Request, acl *object.ACL) { output := new(s3.GetBucketAclOutput) - output.SetOwner(owner(accessKey)) + output.SetOwner(newS3Owner(acl.Owner)) grants := make([]*s3.Grant, 0) - grants = append(grants, ownerFullControlGrant(accessKey)) - switch acl { + grants = append(grants, newS3FullControlGrant(acl.Owner)) + switch acl.ACL { case s3.BucketCannedACLPrivate: case s3.BucketCannedACLPublicRead: - grants = append(grants, allUsersReadGrant) + grants = append(grants, s3AllUsersReadGrant) case s3.BucketCannedACLPublicReadWrite: - grants = append(grants, allUsersReadGrant, allUsersWriteGrant) + grants = append(grants, s3AllUsersReadGrant, s3AllUsersWriteGrant) default: panic("unknown acl") } output.SetGrants(grants) - w.Header().Add(consts.AmzACL, acl) + w.Header().Add(consts.AmzACL, acl.ACL) WriteSuccessResponse(w, output, "AccessControlPolicy") return } diff --git a/s3/api/responses/writers_multipart.go b/s3/api/responses/writers_multipart.go new file mode 100644 index 000000000..a50ceca1f --- /dev/null +++ b/s3/api/responses/writers_multipart.go @@ -0,0 +1,37 @@ +package responses + +import ( + "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/api/services/object" + "github.com/bittorrent/go-btfs/s3/consts" + "net/http" +) + +func WriteCreateMultipartUploadResponse(w http.ResponseWriter, r *http.Request, multipart *object.Multipart) { + output := new(s3.CreateMultipartUploadOutput) + output.SetBucket(multipart.Bucket) + output.SetKey(multipart.Object) + output.SetUploadId(multipart.UploadID) + WriteSuccessResponse(w, output, 
"InitiateMultipartUploadResult") +} + +func WriteUploadPartResponse(w http.ResponseWriter, r *http.Request, part *object.Part) { + output := new(s3.UploadPartOutput) + output.SetETag(`"` + part.ETag + `"`) + w.Header().Set(consts.Cid, part.CID) + WriteSuccessResponse(w, output, "") +} + +func WriteAbortMultipartUploadResponse(w http.ResponseWriter, r *http.Request) { + output := new(s3.AbortMultipartUploadOutput) + WriteSuccessResponse(w, output, "") +} + +func WriteCompleteMultipartUploadResponse(w http.ResponseWriter, r *http.Request, obj *object.Object) { + output := new(s3.CompleteMultipartUploadOutput) + output.SetBucket(obj.Bucket) + output.SetKey(obj.Name) + output.SetETag(`"` + obj.ETag + `"`) + w.Header().Set(consts.Cid, obj.CID) + WriteSuccessResponse(w, output, "CompleteMultipartUploadResult") +} diff --git a/s3/responses/responses_object.go b/s3/api/responses/writers_object.go similarity index 75% rename from s3/responses/responses_object.go rename to s3/api/responses/writers_object.go index b4601d4cb..6158a92cd 100644 --- a/s3/responses/responses_object.go +++ b/s3/api/responses/writers_object.go @@ -3,8 +3,8 @@ package responses import ( "encoding/base64" "github.com/aws/aws-sdk-go/service/s3" + "github.com/bittorrent/go-btfs/s3/api/services/object" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/services/object" "github.com/bittorrent/go-btfs/s3/utils" "io" "net/http" @@ -47,11 +47,11 @@ func WriteDeleteObjectResponse(w http.ResponseWriter, r *http.Request, obj *obje WriteSuccessResponse(w, output, "") } -func WriteDeleteObjectsResponse(w http.ResponseWriter, r *http.Request, toErr func(error) *Error, deletedObjects []*object.DeletedObject) { +func WriteDeleteObjectsResponse(w http.ResponseWriter, r *http.Request, toErr func(error) *Error, deletes []*object.DeletedObject) { output := new(s3.DeleteObjectsOutput) objs := make([]*s3.DeletedObject, 0) errs := make([]*s3.Error, 0) - for _, obj := range deletedObjects { + for 
_, obj := range deletes { if obj.DeleteErr != nil { rerr := toErr(obj.DeleteErr) s3Err := new(s3.Error) @@ -91,42 +91,23 @@ func WriteGetObjectResponse(w http.ResponseWriter, r *http.Request, obj *object. WriteSuccessResponse(w, output, "") } -func WriteGetObjectACLResponse(w http.ResponseWriter, r *http.Request, accessKey, acl string) { - output := new(s3.GetObjectAclOutput) - output.SetOwner(owner(accessKey)) - grants := make([]*s3.Grant, 0) - grants = append(grants, ownerFullControlGrant(accessKey)) - switch acl { - case s3.BucketCannedACLPrivate: - case s3.BucketCannedACLPublicRead: - grants = append(grants, allUsersReadGrant) - case s3.BucketCannedACLPublicReadWrite: - grants = append(grants, allUsersReadGrant, allUsersWriteGrant) - default: - panic("unknown acl") - } - output.SetGrants(grants) - WriteSuccessResponse(w, output, "AccessControlPolicy") - return -} - -func WriteListObjectsResponse(w http.ResponseWriter, r *http.Request, accessKey string, list *object.ObjectsList) { +func WriteListObjectsResponse(w http.ResponseWriter, r *http.Request, list *object.ObjectsList) { out := new(s3.ListObjectsOutput) - out.SetName(list.Bucket) - out.SetEncodingType(list.EncodingType) - out.SetPrefix(utils.S3Encode(list.Prefix, list.EncodingType)) - out.SetMarker(utils.S3Encode(list.Marker, list.EncodingType)) - out.SetDelimiter(utils.S3Encode(list.Delimiter, list.EncodingType)) - out.SetMaxKeys(list.MaxKeys) + out.SetName(list.Args.Bucket) + out.SetEncodingType(list.Args.EncodingType) + out.SetPrefix(utils.S3Encode(list.Args.Prefix, list.Args.EncodingType)) + out.SetMarker(utils.S3Encode(list.Args.Marker, list.Args.EncodingType)) + out.SetDelimiter(utils.S3Encode(list.Args.Delimiter, list.Args.EncodingType)) + out.SetMaxKeys(list.Args.MaxKeys) out.SetNextMarker(list.NextMarker) out.SetIsTruncated(list.IsTruncated) s3Objs := make([]*s3.Object, len(list.Objects)) for i, obj := range list.Objects { s3Obj := new(s3.Object) s3Obj.SetETag(`"` + obj.ETag + `"`) - 
s3Obj.SetOwner(owner(accessKey)) + s3Obj.SetOwner(newS3Owner(list.Owner)) s3Obj.SetLastModified(obj.ModTime) - s3Obj.SetKey(utils.S3Encode(obj.Name, list.EncodingType)) + s3Obj.SetKey(utils.S3Encode(obj.Name, list.Args.EncodingType)) s3Obj.SetSize(obj.Size) s3Obj.SetStorageClass("") s3Objs[i] = s3Obj @@ -136,31 +117,33 @@ func WriteListObjectsResponse(w http.ResponseWriter, r *http.Request, accessKey s3CommPrefixes := make([]*s3.CommonPrefix, len(list.Prefixes)) for i, cpf := range list.Prefixes { pfx := new(s3.CommonPrefix) - pfx.SetPrefix(utils.S3Encode(cpf, list.EncodingType)) + pfx.SetPrefix(utils.S3Encode(cpf, list.Args.EncodingType)) s3CommPrefixes[i] = pfx } out.SetCommonPrefixes(s3CommPrefixes) WriteSuccessResponse(w, out, "ListBucketResult") } -func WriteListObjectsV2Response(w http.ResponseWriter, r *http.Request, accessKey, bucname, prefix, token, startAfter, delimiter, encodingType string, maxKeys int64, list *object.ObjectsListV2) { +func WriteListObjectsV2Response(w http.ResponseWriter, r *http.Request, list *object.ObjectsListV2) { out := new(s3.ListObjectsV2Output) - out.SetName(bucname) - out.SetEncodingType(encodingType) - out.SetStartAfter(utils.S3Encode(startAfter, encodingType)) - out.SetDelimiter(utils.S3Encode(delimiter, encodingType)) - out.SetPrefix(utils.S3Encode(prefix, encodingType)) - out.SetMaxKeys(maxKeys) - out.SetContinuationToken(base64.StdEncoding.EncodeToString([]byte(token))) + out.SetName(list.Args.Bucket) + out.SetEncodingType(list.Args.EncodingType) + out.SetStartAfter(utils.S3Encode(list.Args.After, list.Args.EncodingType)) + out.SetDelimiter(utils.S3Encode(list.Args.Delimiter, list.Args.EncodingType)) + out.SetPrefix(utils.S3Encode(list.Args.Prefix, list.Args.EncodingType)) + out.SetMaxKeys(list.Args.MaxKeys) + out.SetContinuationToken(base64.StdEncoding.EncodeToString([]byte(list.Args.Token))) out.SetNextContinuationToken(base64.StdEncoding.EncodeToString([]byte(list.NextContinuationToken))) 
out.SetIsTruncated(list.IsTruncated) s3Objs := make([]*s3.Object, len(list.Objects)) for i, obj := range list.Objects { s3Obj := new(s3.Object) s3Obj.SetETag(`"` + obj.ETag + `"`) - s3Obj.SetOwner(owner(accessKey)) + if list.Args.FetchOwner { + s3Obj.SetOwner(newS3Owner(list.Owner)) + } s3Obj.SetLastModified(obj.ModTime) - s3Obj.SetKey(utils.S3Encode(obj.Name, encodingType)) + s3Obj.SetKey(utils.S3Encode(obj.Name, list.Args.EncodingType)) s3Obj.SetSize(obj.Size) s3Obj.SetStorageClass("") s3Objs[i] = s3Obj @@ -170,10 +153,29 @@ func WriteListObjectsV2Response(w http.ResponseWriter, r *http.Request, accessKe s3CommPrefixes := make([]*s3.CommonPrefix, len(list.Prefixes)) for i, cpf := range list.Prefixes { pfx := new(s3.CommonPrefix) - pfx.SetPrefix(utils.S3Encode(cpf, encodingType)) + pfx.SetPrefix(utils.S3Encode(cpf, list.Args.EncodingType)) s3CommPrefixes[i] = pfx } out.SetCommonPrefixes(s3CommPrefixes) out.SetKeyCount(int64(len(list.Objects) + len(list.Prefixes))) WriteSuccessResponse(w, out, "ListBucketResult") } + +func WriteGetObjectACLResponse(w http.ResponseWriter, r *http.Request, acl *object.ACL) { + output := new(s3.GetObjectAclOutput) + output.SetOwner(newS3Owner(acl.Owner)) + grants := make([]*s3.Grant, 0) + grants = append(grants, newS3FullControlGrant(acl.Owner)) + switch acl.Owner { + case s3.BucketCannedACLPrivate: + case s3.BucketCannedACLPublicRead: + grants = append(grants, s3AllUsersReadGrant) + case s3.BucketCannedACLPublicReadWrite: + grants = append(grants, s3AllUsersReadGrant, s3AllUsersWriteGrant) + default: + panic("unknown acl") + } + output.SetGrants(grants) + WriteSuccessResponse(w, output, "AccessControlPolicy") + return +} diff --git a/s3/routers/options.go b/s3/api/routers/options.go similarity index 100% rename from s3/routers/options.go rename to s3/api/routers/options.go diff --git a/s3/routers/proto.go b/s3/api/routers/proto.go similarity index 100% rename from s3/routers/proto.go rename to s3/api/routers/proto.go diff --git 
a/s3/routers/routers.go b/s3/api/routers/routers.go similarity index 96% rename from s3/routers/routers.go rename to s3/api/routers/routers.go index 98ed85f41..1bfab0270 100644 --- a/s3/routers/routers.go +++ b/s3/api/routers/routers.go @@ -1,7 +1,7 @@ package routers import ( - "github.com/bittorrent/go-btfs/s3/handlers" + "github.com/bittorrent/go-btfs/s3/api/handlers" "github.com/gorilla/mux" "net/http" ) @@ -25,11 +25,8 @@ func (routers *Routers) Register() http.Handler { hs := routers.handlers - root.Use( - hs.Cors, - hs.Log, - hs.Sign, - ) + // Middlewares + root.Use(hs.Cors, hs.Log, hs.Sign) bucket := root.PathPrefix("/{Bucket}").Subrouter() diff --git a/s3/server/options.go b/s3/api/server/options.go similarity index 100% rename from s3/server/options.go rename to s3/api/server/options.go diff --git a/s3/server/server.go b/s3/api/server/server.go similarity index 95% rename from s3/server/server.go rename to s3/api/server/server.go index b4b4b69a9..c38b38453 100644 --- a/s3/server/server.go +++ b/s3/api/server/server.go @@ -3,7 +3,7 @@ package server import ( "context" "errors" - "github.com/bittorrent/go-btfs/s3/routers" + "github.com/bittorrent/go-btfs/s3/api/routers" "net/http" "sync" ) diff --git a/s3/services/accesskey/instance.go b/s3/api/services/accesskey/instance.go similarity index 94% rename from s3/services/accesskey/instance.go rename to s3/api/services/accesskey/instance.go index 5499e9405..254abd174 100644 --- a/s3/services/accesskey/instance.go +++ b/s3/api/services/accesskey/instance.go @@ -1,7 +1,7 @@ package accesskey import ( - "github.com/bittorrent/go-btfs/s3/providers" + "github.com/bittorrent/go-btfs/s3/api/providers" "sync" ) diff --git a/s3/services/accesskey/options.go b/s3/api/services/accesskey/options.go similarity index 100% rename from s3/services/accesskey/options.go rename to s3/api/services/accesskey/options.go diff --git a/s3/services/accesskey/proto.go b/s3/api/services/accesskey/proto.go similarity index 100% rename 
from s3/services/accesskey/proto.go rename to s3/api/services/accesskey/proto.go diff --git a/s3/services/accesskey/service.go b/s3/api/services/accesskey/service.go similarity index 89% rename from s3/services/accesskey/service.go rename to s3/api/services/accesskey/service.go index e4e16a027..3deabe580 100644 --- a/s3/services/accesskey/service.go +++ b/s3/api/services/accesskey/service.go @@ -3,8 +3,8 @@ package accesskey import ( "context" "errors" + "github.com/bittorrent/go-btfs/s3/api/providers" "github.com/bittorrent/go-btfs/s3/ctxmu" - "github.com/bittorrent/go-btfs/s3/providers" "github.com/bittorrent/go-btfs/transaction/storage" "github.com/bittorrent/go-btfs/utils" "github.com/google/uuid" @@ -140,31 +140,31 @@ func (svc *service) update(key string, args *updateArgs) (err error) { } defer svc.lock.Unlock(key) - record := &AccessKey{} + ack := &AccessKey{} stk := svc.getStoreKey(key) - err = svc.providers.StateStore().Get(stk, record) + err = svc.providers.StateStore().Get(stk, ack) if err != nil && !errors.Is(err, storage.ErrNotFound) { return } - if errors.Is(err, storage.ErrNotFound) || record.IsDeleted { + if errors.Is(err, storage.ErrNotFound) || ack.IsDeleted { err = ErrNotFound return } if args.Enable != nil { - record.Enable = *args.Enable + ack.Enable = *args.Enable } if args.Secret != nil { - record.Secret = *args.Secret + ack.Secret = *args.Secret } if args.IsDelete != nil { - record.IsDeleted = *args.IsDelete + ack.IsDeleted = *args.IsDelete } - record.UpdatedAt = time.Now() + ack.UpdatedAt = time.Now() - err = svc.providers.StateStore().Put(stk, record) + err = svc.providers.StateStore().Put(stk, ack) return } diff --git a/s3/services/object/clean_read_closer.go b/s3/api/services/object/clean_read_closer.go similarity index 100% rename from s3/services/object/clean_read_closer.go rename to s3/api/services/object/clean_read_closer.go diff --git a/s3/services/object/options.go b/s3/api/services/object/options.go similarity index 100% rename 
from s3/services/object/options.go rename to s3/api/services/object/options.go diff --git a/s3/services/object/proto.go b/s3/api/services/object/proto.go similarity index 55% rename from s3/services/object/proto.go rename to s3/api/services/object/proto.go index f3edf1413..2fbe251de 100644 --- a/s3/services/object/proto.go +++ b/s3/api/services/object/proto.go @@ -3,7 +3,7 @@ package object import ( "context" "errors" - "github.com/bittorrent/go-btfs/s3/utils/hash" + "github.com/bittorrent/go-btfs/s3/hash" "io" "time" ) @@ -13,67 +13,72 @@ var ( ErrBucketNotEmpty = errors.New("bucket not empty") ErrObjectNotFound = errors.New("object not found") ErrUploadNotFound = errors.New("upload not found") - ErrNotAllowed = errors.New("not allowed") + ErrNotAllowed = errors.New("authentication not allowed") ErrBucketAlreadyExists = errors.New("bucket already exists") + ErrPartNotExists = errors.New("part not exists") + ErrPartETagNotMatch = errors.New("part etag not match") + ErrPartTooSmall = errors.New("part size too small") + ErrCanceled = context.Canceled + ErrTimout = context.DeadlineExceeded ) type Service interface { CreateBucket(ctx context.Context, args *CreateBucketArgs) (bucket *Bucket, err error) GetBucket(ctx context.Context, args *GetBucketArgs) (bucket *Bucket, err error) DeleteBucket(ctx context.Context, args *DeleteBucketArgs) (err error) - ListBuckets(ctx context.Context, args *ListBucketsArgs) (list []*Bucket, err error) + ListBuckets(ctx context.Context, args *ListBucketsArgs) (list *BucketList, err error) PutBucketACL(ctx context.Context, args *PutBucketACLArgs) (err error) - GetBucketACL(ctx context.Context, args *GetBucketACLArgs) (acl string, err error) + GetBucketACL(ctx context.Context, args *GetBucketACLArgs) (acl *ACL, err error) PutObject(ctx context.Context, args *PutObjectArgs) (object *Object, err error) CopyObject(ctx context.Context, args *CopyObjectArgs) (object *Object, err error) GetObject(ctx context.Context, args *GetObjectArgs) (object 
*Object, body io.ReadCloser, err error) DeleteObject(ctx context.Context, args *DeleteObjectArgs) (err error) - DeleteObjects(ctx context.Context, args *DeleteObjectsArgs) (deletedObjects []*DeletedObject, err error) + DeleteObjects(ctx context.Context, args *DeleteObjectsArgs) (deletes []*DeletedObject, err error) ListObjects(ctx context.Context, args *ListObjectsArgs) (list *ObjectsList, err error) - ListObjectsV2(ctx context.Context, user, bucket, prefix, token, delimiter string, max int64, owner bool, after string) (list *ObjectsListV2, err error) - GetObjectACL(ctx context.Context, user, bucname, objname string) (acl string, err error) + ListObjectsV2(ctx context.Context, args *ListObjectsV2Args) (list *ObjectsListV2, err error) + GetObjectACL(ctx context.Context, args *GetObjectACLArgs) (acl *ACL, err error) - CreateMultipartUpload(ctx context.Context, user, bucname, objname string, meta map[string]*string) (multipart *Multipart, err error) - UploadPart(ctx context.Context, user, bucname, objname, uplid string, partId int, reader *hash.Reader, size int64) (part *Part, err error) - AbortMultipartUpload(ctx context.Context, user, bucname, objname, uplid string) (err error) - CompleteMultiPartUpload(ctx context.Context, user, bucname, objname, uplid string, parts []*CompletePart) (object *Object, err error) + CreateMultipartUpload(ctx context.Context, args *CreateMultipartUploadArgs) (multipart *Multipart, err error) + UploadPart(ctx context.Context, args *UploadPartArgs) (part *Part, err error) + AbortMultipartUpload(ctx context.Context, args *AbortMultipartUploadArgs) (err error) + CompleteMultiPartUpload(ctx context.Context, args *CompleteMultipartUploadArgs) (object *Object, err error) } type CreateBucketArgs struct { - AccessKey string - ACL string - Bucket string - Region string + UserId string + ACL string + Bucket string + Region string } type GetBucketArgs struct { - AccessKey string - Bucket string + UserId string + Bucket string } type 
DeleteBucketArgs struct { - AccessKey string - Bucket string + UserId string + Bucket string } type ListBucketsArgs struct { - AccessKey string + UserId string } type GetBucketACLArgs struct { - AccessKey string - Bucket string + UserId string + Bucket string } type PutBucketACLArgs struct { - AccessKey string - ACL string - Bucket string + UserId string + ACL string + Bucket string } type PutObjectArgs struct { - AccessKey string + UserId string Body *hash.Reader Bucket string Object string @@ -84,7 +89,7 @@ type PutObjectArgs struct { } type CopyObjectArgs struct { - AccessKey string + UserId string Bucket string Object string SrcBucket string @@ -96,20 +101,20 @@ type CopyObjectArgs struct { } type GetObjectArgs struct { - AccessKey string - Bucket string - Object string - WithBody bool + UserId string + Bucket string + Object string + WithBody bool } type DeleteObjectArgs struct { - AccessKey string - Bucket string - Object string + UserId string + Bucket string + Object string } type DeleteObjectsArgs struct { - AccessKey string + UserId string Bucket string ToDeleteObjects []*ToDeleteObject Quite bool @@ -121,7 +126,7 @@ type ToDeleteObject struct { } type ListObjectsArgs struct { - AccessKey string + UserId string Bucket string MaxKeys int64 Marker string @@ -130,12 +135,69 @@ type ListObjectsArgs struct { EncodingType string } +type ListObjectsV2Args struct { + UserId string + Bucket string + MaxKeys int64 + Prefix string + Delimiter string + EncodingType string + Token string + After string + FetchOwner bool +} + +type GetObjectACLArgs struct { + UserId string + Bucket string + Object string +} + +type CreateMultipartUploadArgs struct { + UserId string + Bucket string + Object string + ContentEncoding string + ContentType string + Expires time.Time +} + +type UploadPartArgs struct { + UserId string + Body *hash.Reader + Bucket string + Object string + UploadId string + PartNumber int64 + ContentLength int64 +} + +type AbortMultipartUploadArgs struct { + 
UserId string + Body *hash.Reader + Bucket string + Object string + UploadId string +} + +type CompleteMultipartUploadArgs struct { + UserId string + Bucket string + Object string + UploadId string + CompletedParts CompletedParts +} + +type ACL struct { + Owner string + ACL string +} + type DeletedObject struct { Object string DeleteErr error } -// Bucket contains bucket metadata. type Bucket struct { Name string Region string @@ -144,6 +206,11 @@ type Bucket struct { Created time.Time } +type BucketList struct { + Owner string + Buckets []*Bucket +} + type Object struct { Bucket string Name string @@ -164,50 +231,45 @@ type Object struct { } type Multipart struct { - Bucket string - Object string - UploadID string - Initiated time.Time - MetaData map[string]*string - Parts []*Part + Bucket string + Object string + UploadID string + Initiated time.Time + ContentType string + ContentEncoding string + Expires time.Time + Parts []*Part } type Part struct { ETag string `json:"etag,omitempty"` CID string `json:"cid,omitempty"` - Number int `json:"number"` + Number int64 `json:"number"` Size int64 `json:"size"` ModTime time.Time `json:"mod_time"` } type ObjectsList struct { - Bucket string - MaxKeys int64 - Marker string - Prefix string - Delimiter string - EncodingType string - IsTruncated bool - NextMarker string - Objects []*Object - Prefixes []string + Args *ListObjectsArgs + Owner string + IsTruncated bool + NextMarker string + Objects []*Object + Prefixes []string } type ObjectsListV2 struct { + Args *ListObjectsV2Args + Owner string IsTruncated bool - ContinuationToken string NextContinuationToken string Objects []*Object Prefixes []string } type CompletePart struct { - PartNumber int - ETag string - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string + PartNumber int64 + ETag string } type CompletedParts []*CompletePart @@ -215,7 +277,3 @@ type CompletedParts []*CompletePart func (a CompletedParts) Len() int { return len(a) 
} func (a CompletedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a CompletedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } - -type CompleteMultipartUpload struct { - Parts []*CompletePart `xml:"Part"` -} diff --git a/s3/services/object/service.go b/s3/api/services/object/service.go similarity index 98% rename from s3/services/object/service.go rename to s3/api/services/object/service.go index be7b38c36..e99fd58dd 100644 --- a/s3/services/object/service.go +++ b/s3/api/services/object/service.go @@ -4,13 +4,12 @@ import ( "context" "fmt" "github.com/bittorrent/go-btfs/s3/action" + "github.com/bittorrent/go-btfs/s3/api/providers" "github.com/bittorrent/go-btfs/s3/ctxmu" "github.com/bittorrent/go-btfs/s3/policy" "io" "strings" "time" - - "github.com/bittorrent/go-btfs/s3/providers" ) var _ Service = (*service)(nil) diff --git a/s3/services/object/service_bucket.go b/s3/api/services/object/service_bucket.go similarity index 85% rename from s3/services/object/service_bucket.go rename to s3/api/services/object/service_bucket.go index 11726ffd1..de61066d5 100644 --- a/s3/services/object/service_bucket.go +++ b/s3/api/services/object/service_bucket.go @@ -3,8 +3,8 @@ package object import ( "context" "errors" + "github.com/bittorrent/go-btfs/s3/api/providers" "github.com/bittorrent/go-btfs/s3/policy" - "github.com/bittorrent/go-btfs/s3/providers" "time" "github.com/bittorrent/go-btfs/s3/action" @@ -37,7 +37,7 @@ func (s *service) CreateBucket(ctx context.Context, args *CreateBucketArgs) (buc } // Check action ACL - allow := s.checkACL(args.AccessKey, policy.Private, args.AccessKey, action.CreateBucketAction) + allow := s.checkACL(args.UserId, policy.Private, args.UserId, action.CreateBucketAction) if !allow { err = ErrNotAllowed return @@ -50,7 +50,7 @@ func (s *service) CreateBucket(ctx context.Context, args *CreateBucketArgs) (buc bucket = &Bucket{ Name: args.Bucket, Region: args.Region, - Owner: args.AccessKey, + Owner: args.UserId, 
ACL: args.ACL, Created: now, } @@ -88,7 +88,7 @@ func (s *service) GetBucket(ctx context.Context, args *GetBucketArgs) (bucket *B } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.HeadBucketAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.UserId, action.HeadBucketAction) if !allow { err = ErrNotAllowed } @@ -123,7 +123,7 @@ func (s *service) DeleteBucket(ctx context.Context, args *DeleteBucketArgs) (err } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.DeleteBucketAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.UserId, action.DeleteBucketAction) if !allow { err = ErrNotAllowed return @@ -146,18 +146,23 @@ func (s *service) DeleteBucket(ctx context.Context, args *DeleteBucketArgs) (err } // ListBuckets list all buckets of the specified user -func (s *service) ListBuckets(ctx context.Context, args *ListBucketsArgs) (list []*Bucket, err error) { +func (s *service) ListBuckets(ctx context.Context, args *ListBucketsArgs) (list *BucketList, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // Check action ACL - allow := s.checkACL(args.AccessKey, policy.Private, args.AccessKey, action.ListBucketAction) + allow := s.checkACL(args.UserId, policy.Private, args.UserId, action.ListBucketAction) if !allow { err = ErrNotAllowed return } + // List + list = &BucketList{ + Owner: args.UserId, + } + // All buckets prefix bucketsPrefix := s.getAllBucketsKeyPrefix() @@ -185,8 +190,8 @@ func (s *service) ListBuckets(ctx context.Context, args *ListBucketsArgs) (list } // Collect user's bucket - if bucket.Owner == args.AccessKey { - list = append(list, bucket) + if bucket.Owner == args.UserId { + list.Buckets = append(list.Buckets, bucket) } return @@ -222,7 +227,7 @@ func (s *service) PutBucketACL(ctx context.Context, args *PutBucketACLArgs) (err } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, 
action.PutBucketAclAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.UserId, action.PutBucketAclAction) if !allow { err = ErrNotAllowed return @@ -237,8 +242,8 @@ func (s *service) PutBucketACL(ctx context.Context, args *PutBucketACLArgs) (err return } -// GetBucketACL get user specified bucket ACL field value -func (s *service) GetBucketACL(ctx context.Context, args *GetBucketACLArgs) (acl string, err error) { +// GetBucketACL get user specified bucket ACL +func (s *service) GetBucketACL(ctx context.Context, args *GetBucketACLArgs) (acl *ACL, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() @@ -264,14 +269,17 @@ func (s *service) GetBucketACL(ctx context.Context, args *GetBucketACLArgs) (acl } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.GetBucketAclAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.UserId, action.GetBucketAclAction) if !allow { err = ErrNotAllowed return } - // Get ACL field value - acl = bucket.ACL + // ACL + acl = &ACL{ + Owner: bucket.Owner, + ACL: bucket.ACL, + } return } diff --git a/s3/services/object/service_multipart.go b/s3/api/services/object/service_multipart.go similarity index 71% rename from s3/services/object/service_multipart.go rename to s3/api/services/object/service_multipart.go index 67d196a53..b08c9e0f9 100644 --- a/s3/services/object/service_multipart.go +++ b/s3/api/services/object/service_multipart.go @@ -4,27 +4,23 @@ import ( "context" "errors" "github.com/bittorrent/go-btfs/s3/action" + "github.com/bittorrent/go-btfs/s3/api/providers" "github.com/bittorrent/go-btfs/s3/consts" "github.com/bittorrent/go-btfs/s3/etag" - "github.com/bittorrent/go-btfs/s3/providers" - "github.com/bittorrent/go-btfs/s3/s3utils" - "github.com/bittorrent/go-btfs/s3/utils/hash" "github.com/google/uuid" "io" - "net/http" "regexp" - "strings" "time" ) // CreateMultipartUpload create user specified multipart upload -func (s *service) 
CreateMultipartUpload(ctx context.Context, user, bucname, objname string, meta map[string]*string) (multipart *Multipart, err error) { +func (s *service) CreateMultipartUpload(ctx context.Context, args *CreateMultipartUploadArgs) (multipart *Multipart, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // Bucket key - buckey := s.getBucketKey(bucname) + buckey := s.getBucketKey(args.Bucket) // RLock bucket err = s.lock.RLock(ctx, buckey) @@ -44,7 +40,7 @@ func (s *service) CreateMultipartUpload(ctx context.Context, user, bucname, objn } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.CreateMultipartUploadAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.UserId, action.CreateMultipartUploadAction) if !allow { err = ErrNotAllowed return @@ -54,7 +50,7 @@ func (s *service) CreateMultipartUpload(ctx context.Context, user, bucname, objn uplid := uuid.NewString() // upload key - uplkey := s.getUploadKey(bucname, objname, uplid) + uplkey := s.getUploadKey(args.Bucket, args.Object, uplid) // Lock upload err = s.lock.Lock(ctx, uplkey) @@ -63,13 +59,18 @@ func (s *service) CreateMultipartUpload(ctx context.Context, user, bucname, objn } defer s.lock.Unlock(uplkey) + // now + now := time.Now().UTC() + // Multipart upload multipart = &Multipart{ - Bucket: bucname, - Object: objname, - UploadID: uplid, - MetaData: meta, - Initiated: time.Now().UTC(), + Bucket: args.Bucket, + Object: args.Object, + UploadID: uplid, + ContentType: args.ContentType, + ContentEncoding: args.ContentEncoding, + Expires: args.Expires, + Initiated: now, } // Put multipart upload @@ -79,13 +80,13 @@ func (s *service) CreateMultipartUpload(ctx context.Context, user, bucname, objn } // UploadPart upload user specified multipart part -func (s *service) UploadPart(ctx context.Context, user, bucname, objname, uplid string, partId int, body *hash.Reader, size int64) (part *Part, err error) { +func (s *service) UploadPart(ctx 
context.Context, args *UploadPartArgs) (part *Part, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // Bucket key - buckey := s.getBucketKey(bucname) + buckey := s.getBucketKey(args.Bucket) // RLock bucket err = s.lock.RLock(ctx, buckey) @@ -105,14 +106,14 @@ func (s *service) UploadPart(ctx context.Context, user, bucname, objname, uplid } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.UploadPartAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.UserId, action.UploadPartAction) if !allow { err = ErrNotAllowed return } // Upload key - uplkey := s.getUploadKey(bucname, objname, uplid) + uplkey := s.getUploadKey(args.Bucket, args.Object, args.UploadId) // Lock upload err = s.lock.Lock(ctx, uplkey) @@ -135,7 +136,7 @@ func (s *service) UploadPart(ctx context.Context, user, bucname, objname, uplid prtkey := s.getUploadPartKey(uplkey, len(multipart.Parts)) // Store part body - cid, err := s.storeBody(ctx, body, prtkey) + cid, err := s.storeBody(ctx, args.Body, prtkey) if err != nil { return } @@ -151,13 +152,16 @@ func (s *service) UploadPart(ctx context.Context, user, bucname, objname, uplid } }() + // Now + now := time.Now().UTC() + // Part part = &Part{ - Number: partId, - ETag: body.ETag().String(), + Number: args.PartNumber, + ETag: args.Body.ETag().String(), CID: cid, - Size: size, - ModTime: time.Now().UTC(), + Size: args.ContentLength, + ModTime: now, } // Append part @@ -176,13 +180,13 @@ func (s *service) UploadPart(ctx context.Context, user, bucname, objname, uplid } // AbortMultipartUpload abort user specified multipart upload -func (s *service) AbortMultipartUpload(ctx context.Context, user, bucname, objname, uplid string) (err error) { +func (s *service) AbortMultipartUpload(ctx context.Context, args *AbortMultipartUploadArgs) (err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // Bucket key - buckey := s.getBucketKey(bucname) + buckey := 
s.getBucketKey(args.Bucket) // RLock bucket err = s.lock.RLock(ctx, buckey) @@ -202,14 +206,14 @@ func (s *service) AbortMultipartUpload(ctx context.Context, user, bucname, objna } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.AbortMultipartUploadAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.UserId, action.AbortMultipartUploadAction) if !allow { err = ErrNotAllowed return } // Multipart upload key - uplkey := s.getUploadKey(bucname, objname, uplid) + uplkey := s.getUploadKey(args.Bucket, args.Object, args.UploadId) // Lock upload err = s.lock.Lock(ctx, uplkey) @@ -244,13 +248,13 @@ func (s *service) AbortMultipartUpload(ctx context.Context, user, bucname, objna } // CompleteMultiPartUpload complete user specified multipart upload -func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, objname, uplid string, parts []*CompletePart) (object *Object, err error) { +func (s *service) CompleteMultiPartUpload(ctx context.Context, args *CompleteMultipartUploadArgs) (object *Object, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // Bucket key - buckey := s.getBucketKey(bucname) + buckey := s.getBucketKey(args.Bucket) // RLock bucket err = s.lock.RLock(ctx, buckey) @@ -270,14 +274,14 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.CompleteMultipartUploadAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.UserId, action.CompleteMultipartUploadAction) if !allow { err = ErrNotAllowed return } // Object key - objkey := s.getObjectKey(bucname, objname) + objkey := s.getObjectKey(args.Bucket, args.Object) // Lock object err = s.lock.Lock(ctx, objkey) @@ -293,7 +297,7 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob } // Upload key - uplkey := s.getUploadKey(bucname, objname, uplid) + uplkey := s.getUploadKey(args.Bucket, 
args.Object, args.UploadId) // Lock upload err = s.lock.Lock(ctx, uplkey) @@ -329,16 +333,13 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob idxmp := s.partIdxMap(multipart.Parts) // Iterate all parts to collect all body readers - for i, part := range parts { + for i, part := range args.CompletedParts { // Index in multipart.Parts partIndex, ok := idxmp[part.PartNumber] // Part not exists in multipart if !ok { - err = s3utils.InvalidPart{ - PartNumber: part.PartNumber, - GotETag: part.ETag, - } + err = ErrPartNotExists return } @@ -350,22 +351,13 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob // Check got part etag with part etag if gotPart.ETag != part.ETag { - err = s3utils.InvalidPart{ - PartNumber: part.PartNumber, - ExpETag: gotPart.ETag, - GotETag: part.ETag, - } + err = ErrPartETagNotMatch return } // All parts except the last part has to be at least 5MB. - // todo: change to '''!(gotPart.Size >= consts.MinPartSize)''' - if (i < len(parts)-1) && !(gotPart.Size >= 0) { - err = s3utils.PartTooSmall{ - PartNumber: part.PartNumber, - PartSize: gotPart.Size, - PartETag: part.ETag, - } + if (i < len(args.CompletedParts)-1) && !(gotPart.Size >= consts.MinPartSize) { + err = ErrPartTooSmall return } @@ -404,7 +396,7 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob }() // Calculate multipart etag - multiEtag, err := s.calcMultiETag(parts) + multiEtag, err := s.calcMultiETag(args.CompletedParts) if err != nil { return } @@ -414,45 +406,24 @@ func (s *service) CompleteMultiPartUpload(ctx context.Context, user, bucname, ob // Object object = &Object{ - Bucket: bucname, - Name: objname, + Bucket: args.Bucket, + Name: args.Object, ModTime: now, Size: size, IsDir: false, - ETag: multiEtag.String(), + ETag: multiEtag.String(), CID: cid, ACL: "", VersionID: "", IsLatest: true, DeleteMarker: false, - ContentType: "", - ContentEncoding: "", - Expires: time.Time{}, + 
ContentType: multipart.ContentType, + ContentEncoding: multipart.ContentEncoding, + Expires: multipart.Expires, AccTime: time.Time{}, SuccessorModTime: now, } - // Set object content type - ctyp := multipart.MetaData[strings.ToLower(consts.ContentType)] - if ctyp != nil { - object.ContentType = *ctyp - } - - // Set object content encoding - cecd := multipart.MetaData[strings.ToLower(consts.ContentEncoding)] - if cecd != nil { - object.ContentEncoding = *cecd - } - - // Set object expires - cexp := multipart.MetaData[strings.ToLower(consts.Expires)] - if cexp != nil { - exp, e := time.Parse(http.TimeFormat, *cexp) - if e != nil { - object.Expires = exp.UTC() - } - } - // Put object err = s.providers.StateStore().Put(objkey, object) if err != nil { @@ -490,8 +461,8 @@ func (s *service) getMultipart(uplkey string) (multipart *Multipart, err error) return } -func (s *service) partIdxMap(parts []*Part) map[int]int { - mp := make(map[int]int) +func (s *service) partIdxMap(parts []*Part) map[int64]int { + mp := make(map[int64]int) for i, part := range parts { mp[part.Number] = i } @@ -500,8 +471,6 @@ func (s *service) partIdxMap(parts []*Part) map[int]int { var etagRegex = regexp.MustCompile("\"*?([^\"]*?)\"*?$") -// canonicalizeETag returns ETag with leading and trailing double-quotes removed, -// if any present func (s *service) canonicalizeETag(etag string) string { return etagRegex.ReplaceAllString(etag, "$1") } diff --git a/s3/services/object/service_object.go b/s3/api/services/object/service_object.go similarity index 88% rename from s3/services/object/service_object.go rename to s3/api/services/object/service_object.go index e2e499a56..825711bc6 100644 --- a/s3/services/object/service_object.go +++ b/s3/api/services/object/service_object.go @@ -4,7 +4,8 @@ import ( "context" "errors" "github.com/bittorrent/go-btfs/s3/action" - "github.com/bittorrent/go-btfs/s3/providers" + "github.com/bittorrent/go-btfs/s3/api/providers" + "github.com/bittorrent/go-btfs/s3/utils" 
"io" "strings" "time" @@ -37,7 +38,7 @@ func (s *service) PutObject(ctx context.Context, args *PutObjectArgs) (object *O } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.PutObjectAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.UserId, action.PutObjectAction) if !allow { err = ErrNotAllowed return @@ -142,7 +143,7 @@ func (s *service) CopyObject(ctx context.Context, args *CopyObjectArgs) (dstObje } // Check source action ACL - srcAllow := s.checkACL(srcBucket.Owner, srcBucket.ACL, args.AccessKey, action.GetObjectAction) + srcAllow := s.checkACL(srcBucket.Owner, srcBucket.ACL, args.UserId, action.GetObjectAction) if !srcAllow { err = ErrNotAllowed return @@ -189,7 +190,7 @@ func (s *service) CopyObject(ctx context.Context, args *CopyObjectArgs) (dstObje } // Check destination action ACL - dstAllow := s.checkACL(dstBucket.Owner, dstBucket.ACL, args.AccessKey, action.PutObjectAction) + dstAllow := s.checkACL(dstBucket.Owner, dstBucket.ACL, args.UserId, action.PutObjectAction) if !dstAllow { err = ErrNotAllowed return @@ -303,7 +304,7 @@ func (s *service) GetObject(ctx context.Context, args *GetObjectArgs) (object *O } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.GetObjectAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.UserId, action.GetObjectAction) if !allow { err = ErrNotAllowed return @@ -388,7 +389,7 @@ func (s *service) DeleteObject(ctx context.Context, args *DeleteObjectArgs) (err } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.DeleteObjectAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.UserId, action.DeleteObjectAction) if !allow { err = ErrNotAllowed return @@ -427,7 +428,7 @@ func (s *service) DeleteObject(ctx context.Context, args *DeleteObjectArgs) (err } // DeleteObjects delete multiple user specified objects -func (s *service) DeleteObjects(ctx context.Context, args *DeleteObjectsArgs) 
(deletedObjects []*DeletedObject, err error) { +func (s *service) DeleteObjects(ctx context.Context, args *DeleteObjectsArgs) (deletes []*DeletedObject, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() @@ -453,7 +454,7 @@ func (s *service) DeleteObjects(ctx context.Context, args *DeleteObjectsArgs) (d } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.DeleteObjectAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.UserId, action.DeleteObjectAction) if !allow { err = ErrNotAllowed return @@ -465,7 +466,7 @@ func (s *service) DeleteObjects(ctx context.Context, args *DeleteObjectsArgs) (d // Collection delete result defer func() { if er != nil || !args.Quite { - deletedObjects = append(deletedObjects, &DeletedObject{ + deletes = append(deletes, &DeletedObject{ Object: deleteObj.Object, DeleteErr: er, }) @@ -521,12 +522,7 @@ func (s *service) ListObjects(ctx context.Context, args *ListObjectsArgs) (list // Object list list = &ObjectsList{ - Bucket: args.Bucket, - MaxKeys: args.MaxKeys, - Marker: args.Marker, - Prefix: args.Prefix, - Delimiter: args.Delimiter, - EncodingType: args.EncodingType, + Args: args, } // Bucket key @@ -550,12 +546,15 @@ func (s *service) ListObjects(ctx context.Context, args *ListObjectsArgs) (list } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, args.AccessKey, action.ListObjectsAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.UserId, action.ListObjectsAction) if !allow { err = ErrNotAllowed return } + // Set objects owner(objects owner is the bucket owner included them) + list.Owner = bucket.Owner + // MaxKeys is zero if args.MaxKeys == 0 { list.IsTruncated = true @@ -656,22 +655,32 @@ func (s *service) ListObjects(ctx context.Context, args *ListObjectsArgs) (list return } -func (s *service) ListObjectsV2(ctx context.Context, user string, bucket string, prefix string, token, delimiter string, max int64, owner bool, after string) 
(list *ObjectsListV2, err error) { - marker := token - if marker == "" { - marker = after +func (s *service) ListObjectsV2(ctx context.Context, args *ListObjectsV2Args) (list *ObjectsListV2, err error) { + // Args v1 + v1Args := &ListObjectsArgs{ + UserId: args.UserId, + Bucket: args.Bucket, + MaxKeys: args.MaxKeys, + Marker: utils.CoalesceStr(args.Token, args.After), + Prefix: args.Prefix, + Delimiter: args.Delimiter, + EncodingType: args.EncodingType, } - loi, err := s.ListObjects(ctx, user, bucket, prefix, delimiter, marker, max) + + // Get v1 list + v1List, err := s.ListObjects(ctx, v1Args) if err != nil { return } + // List v2 list = &ObjectsListV2{ - IsTruncated: loi.IsTruncated, - ContinuationToken: token, - NextContinuationToken: loi.NextMarker, - Objects: loi.Objects, - Prefixes: loi.Prefixes, + Args: args, + Owner: v1List.Owner, + IsTruncated: v1List.IsTruncated, + NextContinuationToken: v1List.NextMarker, + Objects: v1List.Objects, + Prefixes: v1List.Prefixes, } return } @@ -695,13 +704,13 @@ func (s *service) getObject(objkey string) (object *Object, err error) { } // GetObjectACL get user specified object ACL(bucket acl) -func (s *service) GetObjectACL(ctx context.Context, user, bucname, objname string) (acl string, err error) { +func (s *service) GetObjectACL(ctx context.Context, args *GetObjectACLArgs) (acl *ACL, err error) { // Operation context ctx, cancel := s.opctx(ctx) defer cancel() // Bucket key - buckey := s.getBucketKey(bucname) + buckey := s.getBucketKey(args.Bucket) // RLock bucket err = s.lock.RLock(ctx, buckey) @@ -721,14 +730,14 @@ func (s *service) GetObjectACL(ctx context.Context, user, bucname, objname strin } // Check action ACL - allow := s.checkACL(bucket.Owner, bucket.ACL, user, action.GetBucketAclAction) + allow := s.checkACL(bucket.Owner, bucket.ACL, args.UserId, action.GetBucketAclAction) if !allow { err = ErrNotAllowed return } // Object key - objkey := s.getObjectKey(bucname, objname) + objkey := s.getObjectKey(args.Bucket, 
args.Object) // RLock object err = s.lock.RLock(ctx, objkey) @@ -746,8 +755,11 @@ func (s *service) GetObjectACL(ctx context.Context, user, bucname, objname strin err = ErrObjectNotFound } - // Get ACL field value - acl = bucket.ACL + // ACL + acl = &ACL{ + Owner: bucket.Owner, + ACL: bucket.ACL, + } return } diff --git a/s3/services/sign/options.go b/s3/api/services/sign/options.go similarity index 100% rename from s3/services/sign/options.go rename to s3/api/services/sign/options.go diff --git a/s3/services/sign/proto.go b/s3/api/services/sign/proto.go similarity index 81% rename from s3/services/sign/proto.go rename to s3/api/services/sign/proto.go index a9b17d8fc..e2af57ca7 100644 --- a/s3/services/sign/proto.go +++ b/s3/api/services/sign/proto.go @@ -1,7 +1,7 @@ package sign import ( - "github.com/bittorrent/go-btfs/s3/responses" + "github.com/bittorrent/go-btfs/s3/api/responses" "net/http" ) diff --git a/s3/services/sign/service.go b/s3/api/services/sign/service.go similarity index 81% rename from s3/services/sign/service.go rename to s3/api/services/sign/service.go index 0a89e22eb..35dedceec 100644 --- a/s3/services/sign/service.go +++ b/s3/api/services/sign/service.go @@ -1,7 +1,7 @@ package sign import ( - "github.com/bittorrent/go-btfs/s3/responses" + "github.com/bittorrent/go-btfs/s3/api/responses" "net/http" "sync" ) @@ -36,11 +36,11 @@ func (s *service) VerifyRequestSignature(r *http.Request) (ack string, rerr *res switch GetRequestAuthType(r) { case AuthTypeUnknown: return - case AuthTypeSigned, AuthTypePresigned: - ack, rerr = s.isReqAuthenticated(r, "", ServiceS3) + case AuthTypeSigned: + ack, rerr = s.reqSignatureV4Verify(r, "") return case AuthTypeStreamingSigned: - ack, rerr = s.setReqBodySignV4ChunkedReader(r, "", ServiceS3) + ack, rerr = s.setReqBodySignV4ChunkedReader(r, "") return default: rerr = responses.ErrSignatureVersionNotSupported diff --git a/s3/services/sign/signature-auth-type.go b/s3/api/services/sign/signature-type.go similarity 
index 100% rename from s3/services/sign/signature-auth-type.go rename to s3/api/services/sign/signature-type.go diff --git a/s3/services/sign/signature-v4-parser.go b/s3/api/services/sign/signature-v4-parser.go similarity index 62% rename from s3/services/sign/signature-v4-parser.go rename to s3/api/services/sign/signature-v4-parser.go index d02da5db0..026b6d10b 100644 --- a/s3/services/sign/signature-v4-parser.go +++ b/s3/api/services/sign/signature-v4-parser.go @@ -18,9 +18,8 @@ package sign import ( + "github.com/bittorrent/go-btfs/s3/api/responses" "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/responses" - "net/url" "strings" "time" ) @@ -48,7 +47,7 @@ func (c credentialHeader) getScope() string { } // parse credentialHeader string into its structured form. -func parseCredentialHeader(credElement string, region string, stype serviceType) (ch credentialHeader, rerr *responses.Error) { +func parseCredentialHeader(credElement string, region string) (ch credentialHeader, rerr *responses.Error) { creds := strings.SplitN(strings.TrimSpace(credElement), "=", 2) if len(creds) != 2 { return ch, responses.ErrMissingFields @@ -87,11 +86,7 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) if !isValidRegion(sRegion, region) { return ch, responses.ErrAuthorizationHeaderMalformed } - if credElements[2] != string(stype) { - switch stype { - case ServiceSTS: - return ch, responses.ErrAuthorizationHeaderMalformed - } + if credElements[2] != "s3" { return ch, responses.ErrAuthorizationHeaderMalformed } cred.scope.service = credElements[2] @@ -141,98 +136,11 @@ type signValues struct { Signature string } -// preSignValues data type represents structued form of AWS Signature V4 query string. -type preSignValues struct { - signValues - Date time.Time - Expires time.Duration -} - -// Parses signature version '4' query string of the following form. 
-// -// querystring = X-Amz-Algorithm=algorithm -// querystring += &X-Amz-Credential= urlencode(accessKey + '/' + credential_scope) -// querystring += &X-Amz-Date=date -// querystring += &X-Amz-Expires=timeout interval -// querystring += &X-Amz-SignedHeaders=signed_headers -// querystring += &X-Amz-Signature=signature -// -// verifies if any of the necessary query params are missing in the presigned request. -func doesV4PresignParamsExist(query url.Values) *responses.Error { - v4PresignQueryParams := []string{consts.AmzAlgorithm, consts.AmzCredential, consts.AmzSignature, consts.AmzDate, consts.AmzSignedHeaders, consts.AmzExpires} - for _, v4PresignQueryParam := range v4PresignQueryParams { - if _, ok := query[v4PresignQueryParam]; !ok { - return responses.ErrInvalidQueryParams - } - } - return nil -} - -// Parses all the presigned signature values into separate elements. -func parsePreSignV4(query url.Values, region string, stype serviceType) (psv preSignValues, rerr *responses.Error) { - // verify whether the required query params exist. - rerr = doesV4PresignParamsExist(query) - if rerr != nil { - return psv, rerr - } - - // Verify if the query algorithm is supported or not. - if query.Get(consts.AmzAlgorithm) != signV4Algorithm { - return psv, responses.ErrAuthorizationHeaderMalformed - } - - // Initialize signature version '4' structured header. - preSignV4Values := preSignValues{} - - // Save credential. - preSignV4Values.Credential, rerr = parseCredentialHeader("Credential="+query.Get(consts.AmzCredential), region, stype) - if rerr != nil { - return psv, rerr - } - - var e error - // Save date in native time.Time. - preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get(consts.AmzDate)) - if e != nil { - return psv, responses.ErrAuthorizationHeaderMalformed - } - - // Save expires in native time.Duration. 
- preSignV4Values.Expires, e = time.ParseDuration(query.Get(consts.AmzExpires) + "s") - if e != nil { - return psv, responses.ErrAuthorizationHeaderMalformed - } - - if preSignV4Values.Expires < 0 { - return psv, responses.ErrAuthorizationHeaderMalformed - } - - // Check if Expiry time is less than 7 days (value in seconds). - if preSignV4Values.Expires.Seconds() > 604800 { - return psv, responses.ErrAuthorizationHeaderMalformed - } - - // Save signed headers. - preSignV4Values.SignedHeaders, rerr = parseSignedHeader("SignedHeaders=" + query.Get(consts.AmzSignedHeaders)) - if rerr != nil { - return psv, rerr - } - - // Save signature. - preSignV4Values.Signature, rerr = parseSignature("Signature=" + query.Get(consts.AmzSignature)) - if rerr != nil { - return psv, rerr - } - - // Return structed form of signature query string. - return preSignV4Values, nil -} - // Parses signature version '4' header of the following form. // // Authorization: algorithm Credential=accessKeyID/credScope, \ // SignedHeaders=signedHeaders, Signature=signature -func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues, aec *responses.Error) { +func parseSignV4(v4Auth string, region string) (sv signValues, aec *responses.Error) { // credElement is fetched first to skip replacing the space in access key. credElement := strings.TrimPrefix(strings.Split(strings.TrimSpace(v4Auth), ",")[0], signV4Algorithm) // Replace all spaced strings, some clients can send spaced @@ -259,8 +167,8 @@ func parseSignV4(v4Auth string, region string, stype serviceType) (sv signValues signV4Values := signValues{} var s3Err *responses.Error - // Save credentail values. - signV4Values.Credential, s3Err = parseCredentialHeader(strings.TrimSpace(credElement), region, stype) + // Save credential values. 
+ signV4Values.Credential, s3Err = parseCredentialHeader(strings.TrimSpace(credElement), region) if s3Err != nil { return sv, s3Err } diff --git a/s3/services/sign/streaming-signature-v4.go b/s3/api/services/sign/signature-v4-streaming.go similarity index 77% rename from s3/services/sign/streaming-signature-v4.go rename to s3/api/services/sign/signature-v4-streaming.go index 99e991bd3..d03b31ae6 100644 --- a/s3/services/sign/streaming-signature-v4.go +++ b/s3/api/services/sign/signature-v4-streaming.go @@ -6,13 +6,10 @@ import ( "crypto/sha256" "encoding/hex" "errors" - "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/utils" - s3hash "github.com/bittorrent/go-btfs/s3/utils/hash" + "github.com/bittorrent/go-btfs/s3/api/responses" "hash" "io" "net/http" - "strings" "time" "github.com/bittorrent/go-btfs/s3/consts" @@ -21,17 +18,16 @@ import ( // Streaming AWS Signature Version '4' constants. const ( - emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" - signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD" - streamingContentEncoding = "aws-chunked" + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + streamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + signV4ChunkedAlgorithm = "AWS4-HMAC-SHA256-PAYLOAD" ) // errSignatureMismatch means signature did not match. var errSignatureMismatch = errors.New("Signature does not match") // getChunkSignature - get chunk signature. -func getChunkSignature(secret, seedSignature string, region string, stype serviceType, date time.Time, hashedChunk string) string { +func getChunkSignature(secret, seedSignature string, region string, date time.Time, hashedChunk string) string { // Calculate string to sign. 
stringToSign := signV4ChunkedAlgorithm + "\n" + date.Format(iso8601Format) + "\n" + @@ -41,10 +37,10 @@ func getChunkSignature(secret, seedSignature string, region string, stype servic hashedChunk // Get hmac signing key. - signingKey := utils.GetSigningKey(secret, date, region, string(stype)) + signingKey := GetSigningKey(secret, date, region) // Calculate signature. - newSignature := utils.GetSignature(signingKey, stringToSign) + newSignature := GetSignature(signingKey, stringToSign) return newSignature } @@ -54,7 +50,7 @@ func getChunkSignature(secret, seedSignature string, region string, stype servic // // returns signature, error otherwise if the signature mismatches or any other // error while parsing and validating. -func (s *service) calculateSeedSignature(r *http.Request, iregion string, stype serviceType) (ack, sec string, signature string, region string, date time.Time, rerr *responses.Error) { +func (s *service) calculateSeedSignature(r *http.Request, iregion string) (ack, sec string, signature string, region string, date time.Time, rerr *responses.Error) { // Copy request. req := *r @@ -62,7 +58,7 @@ func (s *service) calculateSeedSignature(r *http.Request, iregion string, stype v4Auth := req.Header.Get(consts.Authorization) // Parse signature version '4' header. - signV4Values, rerr := parseSignV4(v4Auth, "", stype) + signV4Values, rerr := parseSignV4(v4Auth, "") if rerr != nil { return } @@ -115,16 +111,16 @@ func (s *service) calculateSeedSignature(r *http.Request, iregion string, stype queryStr := req.Form.Encode() // Get canonical request. - canonicalRequest := utils.GetCanonicalRequest(extractedSignedHeaders, payload, queryStr, req.URL.Path, req.Method) + canonicalRequest := GetCanonicalRequest(extractedSignedHeaders, payload, queryStr, req.URL.Path, req.Method) // Get string to sign from canonical request. 
- stringToSign := utils.GetStringToSign(canonicalRequest, date, signV4Values.Credential.getScope()) + stringToSign := GetStringToSign(canonicalRequest, date, signV4Values.Credential.getScope()) // Get hmac signing key. - signingKey := utils.GetSigningKey(sec, signV4Values.Credential.scope.date, region, string(stype)) + signingKey := GetSigningKey(sec, signV4Values.Credential.scope.date, region) // Calculate signature. - newSignature := utils.GetSignature(signingKey, stringToSign) + newSignature := GetSignature(signingKey, stringToSign) // Verify if signature match. if !compareSignatureV4(newSignature, signV4Values.Signature) { @@ -147,61 +143,33 @@ var errMalformedEncoding = errors.New("malformed chunked encoding") // chunk is considered too big if its bigger than > 16MiB. var errChunkTooBig = errors.New("chunk too big: choose chunk size <= 16MiB") -func (s *service) setReqBodySignV4ChunkedReader(r *http.Request, region string, stype serviceType) (ack string, rerr *responses.Error) { - ack, sec, seedSignature, region, seedDate, rerr := s.calculateSeedSignature(r, region, stype) +func (s *service) setReqBodySignV4ChunkedReader(r *http.Request, region string) (ack string, rerr *responses.Error) { + ack, sec, seedSignature, region, seedDate, rerr := s.calculateSeedSignature(r, region) if rerr != nil { return } - crdr := &s3ChunkedReader{ + r.Body = &s3ChunkedReader{ reader: bufio.NewReader(r.Body), secret: sec, seedSignature: seedSignature, seedDate: seedDate, region: region, - stype: stype, chunkSHA256Writer: sha256.New(), buffer: make([]byte, 64*1024), } - size := r.ContentLength - - if size == -1 { - rerr = responses.ErrMissingContentLength - return - } - - if size > consts.MaxObjectSize { - rerr = responses.ErrEntityTooLarge - return - } - - md5Hex, sha256Hex, rerr := s.getClientCheckSum(r) - if rerr != nil { - return - } - - hrdr, err := s3hash.NewReader(crdr, size, md5Hex, sha256Hex, size) - if err != nil { - rerr = responses.ErrInternalError - return - } - - 
r.Body = hrdr - return } // Represents the overall state that is required for decoding a // AWS Signature V4 chunked reader. type s3ChunkedReader struct { - reader *bufio.Reader - secret string - seedSignature string - seedDate time.Time - region string - stype serviceType - + reader *bufio.Reader + secret string + seedSignature string + seedDate time.Time + region string chunkSHA256Writer hash.Hash // Calculates sha256 of chunk data. buffer []byte offset int @@ -358,7 +326,7 @@ func (cr *s3ChunkedReader) Read(buf []byte) (n int, err error) { // Once we have read the entire chunk successfully, we verify // that the received signature matches our computed signature. cr.chunkSHA256Writer.Write(cr.buffer) - newSignature := getChunkSignature(cr.secret, cr.seedSignature, cr.region, cr.stype, cr.seedDate, hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))) + newSignature := getChunkSignature(cr.secret, cr.seedSignature, cr.region, cr.seedDate, hex.EncodeToString(cr.chunkSHA256Writer.Sum(nil))) if !compareSignatureV4(string(signature[16:]), newSignature) { cr.err = errSignatureMismatch return n, cr.err @@ -451,42 +419,3 @@ func parseChunkSignature(chunk []byte) []byte { chunkSplits := bytes.SplitN(chunk, []byte(s3ChunkSignatureStr), 2) return chunkSplits[1] } - -// parse hex to uint64. -func parseHexUint(v []byte) (n uint64, err error) { - for i, b := range v { - switch { - case '0' <= b && b <= '9': - b -= '0' - case 'a' <= b && b <= 'f': - b = b - 'a' + 10 - case 'A' <= b && b <= 'F': - b = b - 'A' + 10 - default: - return 0, errors.New("invalid byte in chunk length") - } - if i == 16 { - return 0, errors.New("http chunk length too large") - } - n <<= 4 - n |= uint64(b) - } - return -} - -// Trims away `aws-chunked` from the content-encoding header if present. -// Streaming signature clients can have custom content-encoding such as -// `aws-chunked,gzip` here we need to only save `gzip`. 
-// For more refer http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html -func TrimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string) { - if contentEnc == "" { - return contentEnc - } - var newEncs []string - for _, enc := range strings.Split(contentEnc, ",") { - if enc != streamingContentEncoding { - newEncs = append(newEncs, enc) - } - } - return strings.Join(newEncs, ",") -} diff --git a/s3/api/services/sign/signature-v4-utils.go b/s3/api/services/sign/signature-v4-utils.go new file mode 100644 index 000000000..5e8d03a1a --- /dev/null +++ b/s3/api/services/sign/signature-v4-utils.go @@ -0,0 +1,276 @@ +package sign + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "crypto/subtle" + "encoding/hex" + "github.com/bittorrent/go-btfs/s3/api/responses" + "github.com/bittorrent/go-btfs/s3/consts" + "net/http" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +func GetContentSHA256Checksum(r *http.Request) (string, error) { + v, ok := r.Header[consts.AmzContentSha256] + if ok { + return v[0], nil + } + return consts.EmptySHA256, nil +} + +// isValidRegion - verify if incoming region value is valid with configured Region. +func isValidRegion(reqRegion string, confRegion string) bool { + if confRegion == "" { + return true + } + if confRegion == "US" { + confRegion = consts.DefaultBucketRegion + } + // Some older s3 clients set region as "US" instead of + // globalDefaultRegion, handle it. 
+ if reqRegion == "US" { + reqRegion = consts.DefaultBucketRegion + } + return reqRegion == confRegion +} + +func contains(slice interface{}, elem interface{}) bool { + v := reflect.ValueOf(slice) + if v.Kind() == reflect.Slice { + for i := 0; i < v.Len(); i++ { + if v.Index(i).Interface() == elem { + return true + } + } + } + return false +} + +// extractSignedHeaders extract signed headers from Authorization header +func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, *responses.Error) { + reqHeaders := r.Header + reqQueries := r.Form + // find whether "host" is part of list of signed headers. + // if not return ErrUnsignedHeaders. "host" is mandatory. + if !contains(signedHeaders, "host") { + return nil, responses.ErrUnsignedHeaders + } + extractedSignedHeaders := make(http.Header) + for _, header := range signedHeaders { + // `host` will not be found in the headers, can be found in r.Host. + // but its alway necessary that the list of signed headers containing host in it. + val, ok := reqHeaders[http.CanonicalHeaderKey(header)] + if !ok { + // try to set headers from Query String + val, ok = reqQueries[header] + } + if ok { + extractedSignedHeaders[http.CanonicalHeaderKey(header)] = val + continue + } + switch header { + case "expect": + // Golang http server strips off 'Expect' header, if the + // client sent this as part of signed headers we need to + // handle otherwise we would see a signature mismatch. + // `aws-cli` sets this as part of signed headers. + // + // According to + // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20 + // Expect header is always of form: + // + // Expect = "Expect" ":" 1#expectation + // expectation = "100-continue" | expectation-extension + // + // So it safe to assume that '100-continue' is what would + // be sent, for the time being keep this work around. + // Adding a *TODO* to remove this later when Golang server + // doesn't filter out the 'Expect' header. 
+ extractedSignedHeaders.Set(header, "100-continue") + case "host": + // Go http server removes "host" from Request.Header + + //extractedSignedHeaders.Set(header, r.Host) + // todo use r.Host, or filedag-web deal with + //value := strings.Split(r.Host, ":") + extractedSignedHeaders.Set(header, r.Host) + case "transfer-encoding": + // Go http server removes "host" from Request.Header + extractedSignedHeaders[http.CanonicalHeaderKey(header)] = r.TransferEncoding + case "content-length": + // Signature-V4 spec excludes Content-Length from signed headers list for signature calculation. + // But some clients deviate from this rule. Hence we consider Content-Length for signature + // calculation to be compatible with such clients. + extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) + default: + return nil, responses.ErrUnsignedHeaders + } + } + return extractedSignedHeaders, nil +} + +// if object matches reserved string, no need to encode them +var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. 
+func EncodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} + +// GetCanonicalRequest generate a canonical request of style +// +// canonicalRequest = +// +// \n +// \n +// \n +// \n +// \n +// +func GetCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string { + rawQuery := strings.ReplaceAll(queryStr, "+", "%20") + encodedPath := EncodePath(urlPath) + canonicalRequest := strings.Join([]string{ + method, + encodedPath, + rawQuery, + getCanonicalHeaders(extractedSignedHeaders), + GetSignedHeaders(extractedSignedHeaders), + payload, + }, "\n") + return canonicalRequest +} + +// GetSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names +func GetSignedHeaders(signedHeaders http.Header) string { + var headers []string + for k := range signedHeaders { + headers = append(headers, strings.ToLower(k)) + } + sort.Strings(headers) + return strings.Join(headers, ";") +} + +// getCanonicalHeaders generate a list of request headers with their values +func getCanonicalHeaders(signedHeaders http.Header) string { + var headers []string + vals := make(http.Header) + for k, vv := range signedHeaders { + headers = 
append(headers, strings.ToLower(k)) + vals[strings.ToLower(k)] = vv + } + sort.Strings(headers) + + var buf bytes.Buffer + for _, k := range headers { + buf.WriteString(k) + buf.WriteByte(':') + for idx, v := range vals[k] { + if idx > 0 { + buf.WriteByte(',') + } + buf.WriteString(signV4TrimAll(v)) + } + buf.WriteByte('\n') + } + return buf.String() +} + +// GetStringToSign a string based on selected query values. +func GetStringToSign(canonicalRequest string, t time.Time, scope string) string { + stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n" + stringToSign += scope + "\n" + canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest)) + stringToSign += hex.EncodeToString(canonicalRequestBytes[:]) + return stringToSign +} + +// GetSigningKey hmac seed to calculate final signature. +func GetSigningKey(secretKey string, t time.Time, region string) []byte { + date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd))) + regionBytes := sumHMAC(date, []byte(region)) + service := sumHMAC(regionBytes, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// GetSignature final signature in hexadecimal form. +func GetSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// sumHMAC calculate hmac between two input byte array. 
+func sumHMAC(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall() +// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +func signV4TrimAll(input string) string { + // Compress adjacent spaces (a space is determined by + // unicode.IsSpace() internally here) to one space and return + return strings.Join(strings.Fields(input), " ") +} + +// getScope generate a string of a specific date, an AWS region, and a service. +func getScope(t time.Time, region string) string { + scope := strings.Join([]string{ + t.Format(yyyymmdd), + region, + "s3", + "aws4_request", + }, consts.SlashSeparator) + return scope +} + +// compareSignatureV4 returns true if and only if both signatures +// are equal. The signatures are expected to be HEX encoded strings +// according to the AWS S3 signature V4 spec. +func compareSignatureV4(sig1, sig2 string) bool { + // The CTC using []byte(str) works because the hex encoding + // is unique for a sequence of bytes. See also compareSignatureV2. + return subtle.ConstantTimeCompare([]byte(sig1), []byte(sig2)) == 1 +} diff --git a/s3/api/services/sign/signature-v4.go b/s3/api/services/sign/signature-v4.go new file mode 100644 index 000000000..ce17b86ca --- /dev/null +++ b/s3/api/services/sign/signature-v4.go @@ -0,0 +1,119 @@ +package sign + +import ( + "github.com/bittorrent/go-btfs/s3/api/responses" + "github.com/bittorrent/go-btfs/s3/consts" + "net/http" + "time" +) + +// AWS Signature Version '4' constants. 
const (
	signV2Algorithm = "AWS"
	signV4Algorithm = "AWS4-HMAC-SHA256"
	iso8601Format   = "20060102T150405Z" // X-Amz-Date layout (Go reference time)
	yyyymmdd        = "20060102"         // credential-scope date layout
)

// reqSignatureV4Verify verifies a Signature V4 signed request and returns
// the access key that signed it.
func (s *service) reqSignatureV4Verify(r *http.Request, region string) (ack string, rerr *responses.Error) {
	sha256sum, err := GetContentSHA256Checksum(r)
	if err != nil {
		rerr = responses.ErrInternalError
		return
	}
	ack, rerr = s.doesSignatureMatch(sha256sum, r, region)
	return
}

// doesSignatureMatch - Verify authorization header with calculated header in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
//
// returns nil error if signature matches.
func (s *service) doesSignatureMatch(hashedPayload string, r *http.Request, region string) (ack string, rerr *responses.Error) {
	// Copy request.
	req := *r

	// Save authorization header.
	v4Auth := req.Header.Get(consts.Authorization)

	// Parse signature version '4' header.
	signV4Values, rerr := parseSignV4(v4Auth, region)
	if rerr != nil {
		return
	}

	// Extract all the signed headers along with their values.
	extractedSignedHeaders, rerr := extractSignedHeaders(signV4Values.SignedHeaders, r)
	if rerr != nil {
		return
	}

	// Look up the secret for the access key named in the credential.
	ack = signV4Values.Credential.accessKey
	secret, rerr := s.checkKeyValid(ack)
	if rerr != nil {
		return
	}

	// Extract date, if not present throw error.
	var date string
	if date = req.Header.Get(consts.AmzDate); date == "" {
		if date = r.Header.Get(consts.Date); date == "" {
			rerr = responses.ErrMissingDateHeader
			return
		}
	}

	// Parse date header.
	t, err := time.Parse(iso8601Format, date)
	if err != nil {
		rerr = responses.ErrAuthorizationHeaderMalformed
		return
	}

	// Query string.
	queryStr := req.URL.Query().Encode()

	// Get canonical request.
	canonicalRequest := GetCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method)

	// Get string to sign from canonical request.
	stringToSign := GetStringToSign(canonicalRequest, t, signV4Values.Credential.getScope())

	// Get hmac signing key.
	signingKey := GetSigningKey(secret, signV4Values.Credential.scope.date,
		signV4Values.Credential.scope.region)

	// Calculate signature.
	newSignature := GetSignature(signingKey, stringToSign)

	// Verify if signature match (constant-time comparison).
	if !compareSignatureV4(newSignature, signV4Values.Signature) {
		rerr = responses.ErrSignatureDoesNotMatch
		return
	}

	// Return error none.
	return
}

// checkKeyValid checks that the access key exists and is enabled, and
// returns its secret on success.
func (s *service) checkKeyValid(ack string) (secret string, rerr *responses.Error) {
	secret, exists, enable, err := s.getSecret(ack)
	if err != nil {
		rerr = responses.ErrInternalError
		return
	}

	if !exists {
		rerr = responses.ErrInvalidAccessKeyID
		return
	}

	if !enable {
		rerr = responses.ErrAccessKeyDisabled
		return
	}

	return
}
diff --git a/s3/consts/consts.go b/s3/consts/consts.go
index beea060bc..8a0690239 100644
--- a/s3/consts/consts.go
+++ b/s3/consts/consts.go
@@ -3,32 +3,14 @@ package consts
 import (
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/dustin/go-humanize"
-	"time"
 )
 
-// some const
 const (
-	// Iso8601TimeFormat RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
-	Iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
-
-	StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
-
-	// MaxLocationConstraintSize Limit of location constraint XML for unauthenticated PUT bucket operations.
-	MaxLocationConstraintSize = 3 * humanize.MiByte
-	EmptySHA256               = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
-	UnsignedSHA256            = "UNSIGNED-PAYLOAD"
-	StsRequestBodyLimit       = 10 * (1 << 20) // 10 MiB
-	SlashSeparator            = "/"
-
-	MaxSkewTime = 15 * time.Minute // 15 minutes skew allowed.
-
-	// STS API version.
- StsAPIVersion = "2011-06-15" - StsVersion = "Version" - StsAction = "Action" - AssumeRole = "AssumeRole" - SignV4Algorithm = "AWS4-HMAC-SHA256" - + StreamingContentSHA256 = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + EmptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + UnsignedSHA256 = "UNSIGNED-PAYLOAD" + SlashSeparator = "/" + StsAction = "Action" StreamingContentEncoding = "aws-chunked" DefaultEncodingType = "url" DefaultContentType = "binary/octet-stream" @@ -50,90 +32,10 @@ var SupportedBucketACLs = map[string]bool{ // Standard S3 HTTP request constants const ( - IfModifiedSince = "If-Modified-Since" - IfUnmodifiedSince = "If-Unmodified-Since" - IfMatch = "If-Match" - IfNoneMatch = "If-None-Match" - - // S3 storage class - AmzStorageClass = "x-amz-storage-class" - - // S3 object version ID - AmzVersionID = "x-amz-version-id" - AmzDeleteMarker = "x-amz-delete-marker" - - // S3 object tagging - AmzObjectTagging = "X-Amz-Tagging" - AmzTagCount = "x-amz-tagging-count" - AmzTagDirective = "X-Amz-Tagging-Directive" - - // S3 transition restore - AmzRestore = "x-amz-restore" - AmzRestoreExpiryDays = "X-Amz-Restore-Expiry-Days" - AmzRestoreRequestDate = "X-Amz-Restore-Request-Date" - AmzRestoreOutputPath = "x-amz-restore-output-path" - - // S3 extensions - AmzCopySourceIfModifiedSince = "x-amz-copy-source-if-modified-since" - AmzCopySourceIfUnmodifiedSince = "x-amz-copy-source-if-unmodified-since" - - AmzCopySourceIfNoneMatch = "x-amz-copy-source-if-none-match" - AmzCopySourceIfMatch = "x-amz-copy-source-if-match" - - AmzCopySource = "X-Amz-Copy-Source" - AmzCopySourceVersionID = "X-Amz-Copy-Source-Version-Id" - AmzCopySourceRange = "X-Amz-Copy-Source-Range" - AmzMetadataDirective = "X-Amz-Metadata-Directive" - AmzObjectLockMode = "X-Amz-Object-Lock-Mode" - AmzObjectLockRetainUntilDate = "X-Amz-Object-Lock-Retain-Until-Date" - AmzObjectLockLegalHold = "X-Amz-Object-Lock-Legal-Hold" - AmzObjectLockBypassGovernance = 
"X-Amz-Bypass-Governance-Retention" - AmzBucketReplicationStatus = "X-Amz-Replication-Status" - AmzSnowballExtract = "X-Amz-Meta-Snowball-Auto-Extract" - - // Multipart parts count - AmzMpPartsCount = "x-amz-mp-parts-count" - - // Object date/time of expiration - AmzExpiration = "x-amz-expiration" - - // Dummy putBucketACL - AmzACL = "x-amz-acl" - - // Signature V4 related contants. - AmzContentSha256 = "X-Amz-Content-Sha256" - AmzDate = "X-Amz-Date" - AmzAlgorithm = "X-Amz-Algorithm" - AmzExpires = "X-Amz-Expires" - AmzSignedHeaders = "X-Amz-SignedHeaders" - AmzSignature = "X-Amz-Signature" - AmzCredential = "X-Amz-Credential" - AmzSecurityToken = "X-Amz-Security-Token" - AmzDecodedContentLength = "X-Amz-Decoded-Content-Length" - - AmzMetaUnencryptedContentLength = "X-Amz-Meta-X-Amz-Unencrypted-Content-Length" - AmzMetaUnencryptedContentMD5 = "X-Amz-Meta-X-Amz-Unencrypted-Content-Md5" - - // AWS server-side encryption headers for SSE-S3, SSE-KMS and SSE-C. - AmzServerSideEncryption = "X-Amz-Server-Side-Encryption" - AmzServerSideEncryptionKmsID = AmzServerSideEncryption + "-Aws-Kms-Key-Id" - AmzServerSideEncryptionKmsContext = AmzServerSideEncryption + "-Context" - AmzServerSideEncryptionCustomerAlgorithm = AmzServerSideEncryption + "-Customer-Algorithm" - AmzServerSideEncryptionCustomerKey = AmzServerSideEncryption + "-Customer-Key" - AmzServerSideEncryptionCustomerKeyMD5 = AmzServerSideEncryption + "-Customer-Key-Md5" - AmzServerSideEncryptionCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" - AmzServerSideEncryptionCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" - AmzServerSideEncryptionCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" - - AmzEncryptionAES = "AES256" - AmzEncryptionKMS = "aws:kms" - - // Signature v2 related constants - AmzSignatureV2 = "Signature" - AmzAccessKeyID = "AWSAccessKeyId" - - // Response request id. 
- AmzRequestID = "x-amz-request-id" + AmzACL = "x-amz-acl" + AmzContentSha256 = "X-Amz-Content-Sha256" + AmzDate = "X-Amz-Date" + AmzRequestID = "x-amz-request-id" ) // Standard S3 HTTP response constants @@ -176,36 +78,18 @@ const ( // object const const ( - MaxObjectSize = 5 * humanize.TiByte - - // Minimum Part size for multipart upload is 5MiB - MinPartSize = 5 * humanize.MiByte - - // Maximum Part size for multipart upload is 5GiB - MaxPartSize = 5 * humanize.GiByte - - // Maximum Part ID for multipart upload is 10000 - // (Acceptable values range from 1 to 10000 inclusive) - MaxPartID = 10000 - - MaxObjectList = 1000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse. - MaxDeleteList = 1000 // Limit number of objects deleted in a delete call. - MaxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse. - MaxPartsList = 10000 // Limit number of parts in a listPartsResponse. + MaxXMLBodySize = 5 * humanize.MiByte + MaxObjectSize = 5 * humanize.TiByte + MinPartSize = 5 * humanize.MiByte + MaxPartSize = 5 * humanize.GiByte + MinPartNumber = 1 + MaxPartNumber = 10000 + MaxObjectList = 1000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse. + MaxDeleteList = 1000 // Limit number of objects deleted in a delete call. ) // Common http query params S3 API const ( - VersionID = "versionId" - + MaxKeys = "max-keys" PartNumber = "partNumber" - - UploadID = "uploadId" -) - -// limit -const ( - // The maximum allowed time difference between the incoming request - // date and server date during signature verification. - GlobalMaxSkewTime = 15 * time.Minute // 15 minutes skew allowed. 
) diff --git a/s3/handlers/handlers_multipart.go b/s3/handlers/handlers_multipart.go deleted file mode 100644 index 5c1bc0de6..000000000 --- a/s3/handlers/handlers_multipart.go +++ /dev/null @@ -1,249 +0,0 @@ -package handlers - -import ( - "github.com/aws/aws-sdk-go/service/s3" - "github.com/bittorrent/go-btfs/s3/cctx" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/requests" - "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/s3utils" - "github.com/bittorrent/go-btfs/s3/services/object" - "github.com/bittorrent/go-btfs/s3/utils/hash" - "net/http" - "sort" -) - -func (h *Handlers) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - var input s3.CreateMultipartUploadInput - - err = requests.ParseInput(r, &input) - if err != nil { - rerr := responses.ErrBadRequest - responses.WriteErrorResponse(w, r, rerr) - return - } - - bucname, objname := *input.Bucket, *input.Key - - err = s3utils.CheckNewMultipartArgs(ctx, bucname, objname) - if err != nil { - rerr := h.toRespErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - - meta := input.Metadata - - mtp, err := h.objsvc.CreateMultipartUpload(ctx, ack, bucname, objname, meta) - if err != nil { - rerr := h.toRespErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - - output := new(s3.CreateMultipartUploadOutput) - output.SetBucket(bucname) - output.SetKey(objname) - output.SetUploadId(mtp.UploadID) - - responses.WriteSuccessResponse(w, output, "InitiateMultipartUploadResult") - - return -} - -func (h *Handlers) UploadPartHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - var input s3.UploadPartInput - - err = requests.ParseInput(r, &input) - if err != nil { 
- rerr := responses.ErrBadRequest - responses.WriteErrorResponse(w, r, rerr) - return - } - - bucname, objname := *input.Bucket, *input.Key - - err = s3utils.CheckPutObjectPartArgs(ctx, bucname, objname) - if err != nil { - rerr := h.toRespErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - - uploadId, partId := *input.UploadId, int(*input.PartNumber) - if partId > consts.MaxPartID { - rerr := responses.ErrInvalidMaxParts - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - size := r.ContentLength - - if size <= 0 { - rerr := responses.ErrEntityTooSmall - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - if size > consts.MaxPartSize { - rerr := responses.ErrEntityTooLarge - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - hrdr, ok := r.Body.(*hash.Reader) - if !ok { - rerr := responses.ErrInternalError - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - part, err := h.objsvc.UploadPart(ctx, ack, bucname, objname, uploadId, partId, hrdr, size) - if err != nil { - rerr := h.toRespErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - - output := new(s3.UploadPartOutput) - output.SetETag(`"` + part.ETag + `"`) - w.Header().Set(consts.Cid, part.CID) - - responses.WriteSuccessResponse(w, output, "") - - return -} - -func (h *Handlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - var input s3.AbortMultipartUploadInput - - err = requests.ParseInput(r, &input) - if err != nil { - rerr := responses.ErrBadRequest - responses.WriteErrorResponse(w, r, rerr) - return - } - - bucname, objname := *input.Bucket, *input.Key - - err = s3utils.CheckAbortMultipartArgs(ctx, bucname, objname) - if err != nil { - rerr := h.toRespErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - - uploadId := *input.UploadId 
- - err = h.objsvc.AbortMultipartUpload(ctx, ack, bucname, objname, uploadId) - if err != nil { - rerr := h.toRespErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - - output := new(s3.AbortMultipartUploadOutput) - - responses.WriteSuccessResponse(w, output, "") - - return -} - -func (h *Handlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - var input s3.CompleteMultipartUploadInput - - err = requests.ParseInput(r, &input) - if err != nil { - rerr := responses.ErrBadRequest - responses.WriteErrorResponse(w, r, rerr) - return - } - - bucname, objname := *input.Bucket, *input.Key - - err = s3utils.CheckCompleteMultipartArgs(ctx, bucname, objname) - if err != nil { - rerr := h.toRespErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - - if r.ContentLength <= 0 { - rerr := responses.ErrMissingContentLength - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - if len(input.MultipartUpload.Parts) == 0 { - rerr := responses.ErrMalformedXML - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - complUpload := new(object.CompleteMultipartUpload) - - for _, part := range input.MultipartUpload.Parts { - complUpload.Parts = append(complUpload.Parts, &object.CompletePart{ - PartNumber: int(*part.PartNumber), - ETag: *part.ETag, - }) - } - - if !sort.IsSorted(object.CompletedParts(complUpload.Parts)) { - rerr := responses.ErrInvalidPartOrder - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - uploadId := *input.UploadId - - obj, err := h.objsvc.CompleteMultiPartUpload(ctx, ack, bucname, objname, uploadId, complUpload.Parts) - if err != nil { - rerr := h.toRespErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - - output := new(s3.CompleteMultipartUploadOutput) - output.SetBucket(bucname) - output.SetKey(objname) - 
output.SetETag(`"` + obj.ETag + `"`) - w.Header().Set(consts.Cid, obj.CID) - - responses.WriteSuccessResponse(w, output, "CompleteMultipartUploadResult") -} diff --git a/s3/handlers/handlers_object.go b/s3/handlers/handlers_object.go deleted file mode 100644 index 507b56a44..000000000 --- a/s3/handlers/handlers_object.go +++ /dev/null @@ -1,366 +0,0 @@ -package handlers - -import ( - "encoding/base64" - "github.com/bittorrent/go-btfs/s3/cctx" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/requests" - "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/s3utils" - "net/http" - "net/url" - "path" - "strconv" - "strings" -) - -// PutObjectHandler . -func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - args, err := requests.ParsePutObjectRequest(r) - if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) - return - } - - obj, err := h.objsvc.PutObject(ctx, args) - if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) - return - } - - responses.WritePutObjectResponse(w, r, obj) - return -} - -// CopyObjectHandler . -func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - args, err := requests.ParseCopyObjectRequest(r) - if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) - return - } - - obj, err := h.objsvc.CopyObject(ctx, args) - if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) - return - } - - responses.WriteCopyObjectResponse(w, r, obj) - return -} - -// HeadObjectHandler . 
-func (h *Handlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - args, err := requests.ParseHeadObjectRequest(r) - if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) - return - } - - obj, _, err := h.objsvc.GetObject(ctx, args) - if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) - return - } - - responses.WriteHeadObjectResponse(w, r, obj) - return -} - -// GetObjectHandler . -func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - args, err := requests.ParseGetObjectRequest(r) - if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) - return - } - - obj, body, err := h.objsvc.GetObject(ctx, args) - if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) - return - } - - responses.WriteGetObjectResponse(w, r, obj, body) - return -} - -// DeleteObjectHandler . -func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - args, err := requests.ParseDeleteObjectRequest(r) - if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) - return - } - err = h.objsvc.DeleteObject(ctx, args) - if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) - return - } - - responses.WriteDeleteObjectResponse(w, r, nil) - return -} - -// DeleteObjectsHandler . 
-func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - args, err := requests.ParseDeleteObjectsRequest(r) - if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) - return - } - - deletedObjects, err := h.objsvc.DeleteObjects(ctx, args) - if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) - return - } - - responses.WriteDeleteObjectsResponse(w, r, h.toRespErr, deletedObjects) - return -} - -// ListObjectsHandler . -func (h *Handlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - args, err := requests.ParseListObjectsRequest(r) - if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) - return - } - - list, err := h.objsvc.ListObjects(ctx, args) - if err != nil { - responses.WriteErrorResponse(w, r, h.toRespErr(err)) - return - } - - responses.WriteListObjectsResponse(w, r, ack, list) - return -} - -func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - ack := cctx.GetAccessKey(r) - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - bucname, rerr := requests.ParseBucket(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - urlValues := r.Form - // Extract all the listObjectsV2 query params to their native values. 
- prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, rerr := getListObjectsV2Args(urlValues) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - marker := token - if marker == "" { - marker = startAfter - } - err = s3utils.CheckListObjsArgs(ctx, bucname, prefix, marker) - if err != nil { - rerr = h.toRespErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - - // Validate the query params before beginning to serve the request. - // fetch-owner is not validated since it is a boolean - rerr = validateListObjectsArgs(token, delimiter, encodingType, maxKeys) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - list, err := h.objsvc.ListObjectsV2(ctx, ack, bucname, prefix, token, delimiter, - maxKeys, fetchOwner, startAfter) - if err != nil { - rerr = h.toRespErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - - responses.WriteListObjectsV2Response(w, r, ack, bucname, prefix, token, startAfter, - delimiter, encodingType, maxKeys, list) -} - -// GetObjectACLHandler - GET Object ACL -func (h *Handlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - var err error - defer func() { - cctx.SetHandleInf(r, h.name(), err) - }() - - args, err := requests.ParseGetBucketACLRequest() - - bucname, objname, rerr := requests.ParseBucketAndObject(r) - if rerr != nil { - err = rerr - responses.WriteErrorResponse(w, r, rerr) - return - } - - acl, err := h.objsvc.GetObjectACL(ctx, ack, bucname, objname) - if err != nil { - rerr = h.toRespErr(err) - responses.WriteErrorResponse(w, r, rerr) - return - } - - responses.WriteGetObjectACLResponse(w, r, ack, acl) -} - -// Parse bucket url queries -func getListObjectsV1Args(values url.Values) ( - prefix, marker, delimiter string, maxkeys int64, encodingType string, rerr *responses.Error) { - - if values.Get("max-keys") != "" { - var err error - if maxkeys, err = 
strconv.ParseInt(values.Get("max-keys"), 10, 64); err != nil { - rerr = responses.ErrInvalidMaxKeys - return - } - } else { - maxkeys = consts.MaxObjectList - } - - prefix = trimLeadingSlash(values.Get("prefix")) - marker = trimLeadingSlash(values.Get("marker")) - delimiter = values.Get("delimiter") - encodingType = values.Get("encoding-type") - return -} - -// Parse bucket url queries for ListObjects V2. -func getListObjectsV2Args(values url.Values) ( - prefix, token, startAfter, delimiter string, - fetchOwner bool, maxkeys int64, encodingType string, rerr *responses.Error) { - - // The continuation-token cannot be empty. - if val, ok := values["continuation-token"]; ok { - if len(val[0]) == 0 { - rerr = responses.ErrInvalidToken - return - } - } - - if values.Get("max-keys") != "" { - var err error - if maxkeys, err = strconv.ParseInt(values.Get("max-keys"), 10, 64); err != nil { - rerr = responses.ErrInvalidMaxKeys - return - } - // Over flowing count - reset to maxObjectList. - if maxkeys > consts.MaxObjectList { - maxkeys = consts.MaxObjectList - } - } else { - maxkeys = consts.MaxObjectList - } - - prefix = trimLeadingSlash(values.Get("prefix")) - startAfter = trimLeadingSlash(values.Get("start-after")) - delimiter = values.Get("delimiter") - fetchOwner = values.Get("fetch-owner") == "true" - encodingType = values.Get("encoding-type") - - if token = values.Get("continuation-token"); token != "" { - decodedToken, err := base64.StdEncoding.DecodeString(token) - if err != nil { - rerr = responses.ErrIncorrectContinuationToken - return - } - token = string(decodedToken) - } - return -} - -func trimLeadingSlash(ep string) string { - if len(ep) > 0 && ep[0] == '/' { - // Path ends with '/' preserve it - if ep[len(ep)-1] == '/' && len(ep) > 1 { - ep = path.Clean(ep) - ep += "/" - } else { - ep = path.Clean(ep) - } - ep = ep[1:] - } - return ep -} - -// Validate all the ListObjects query arguments, returns an APIErrorCode -// if one of the args do not meet the 
required conditions. -// - delimiter if set should be equal to '/', otherwise the request is rejected. -// - marker if set should have a common prefix with 'prefix' param, otherwise -// the request is rejected. -func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int64) (rerr *responses.Error) { - // Max keys cannot be negative. - if maxKeys < 0 { - return responses.ErrInvalidMaxKeys - } - - if encodingType != "" { - // AWS S3 spec only supports 'url' encoding type - if !strings.EqualFold(encodingType, "url") { - return responses.ErrInvalidEncodingMethod - } - } - - return nil -} diff --git a/s3/handlers/utils.go b/s3/handlers/utils.go deleted file mode 100644 index fb7d2f8ca..000000000 --- a/s3/handlers/utils.go +++ /dev/null @@ -1,147 +0,0 @@ -package handlers - -import ( - "context" - "errors" - "github.com/bittorrent/go-btfs/s3/consts" - "net/http" - "net/textproto" - "strings" -) - -const streamingContentEncoding = "aws-chunked" - -// errInvalidArgument means that input argument is invalid. -var errInvalidArgument = errors.New("Invalid arguments specified") - -// Supported headers that needs to be extracted. -var supportedHeaders = []string{ - consts.ContentType, - consts.CacheControl, - consts.ContentLength, - consts.ContentEncoding, - consts.ContentDisposition, - consts.AmzStorageClass, - consts.AmzObjectTagging, - consts.Expires, - consts.AmzBucketReplicationStatus, - // Add more supported headers here. -} - -// userMetadataKeyPrefixes contains the prefixes of used-defined metadata keys. -// All values stored with a key starting with one of the following prefixes -// must be extracted from the header. 
-var userMetadataKeyPrefixes = []string{ - "x-amz-meta-", -} - -// matches k1 with all keys, returns 'true' if one of them matches -func equals(k1 string, keys ...string) bool { - for _, k2 := range keys { - if strings.EqualFold(k1, k2) { - return true - } - } - return false -} - -// extractMetadata extracts metadata from HTTP header and HTTP queryString. -// Note: The key has been converted to lowercase letters -func extractMetadata(ctx context.Context, r *http.Request) (metadata map[string]string, err error) { - query := r.Form - header := r.Header - metadata = make(map[string]string) - // Extract all query values. - err = extractMetadataFromMime(ctx, textproto.MIMEHeader(query), metadata) - if err != nil { - return nil, err - } - - // Extract all header values. - err = extractMetadataFromMime(ctx, textproto.MIMEHeader(header), metadata) - if err != nil { - return nil, err - } - - // Set content-type to default value if it is not set. - if _, ok := metadata[strings.ToLower(consts.ContentType)]; !ok { - metadata[strings.ToLower(consts.ContentType)] = "binary/octet-stream" - } - - // https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w - for k := range metadata { - if equals(k, consts.AmzMetaUnencryptedContentLength, consts.AmzMetaUnencryptedContentMD5) { - delete(metadata, k) - } - } - - if contentEncoding, ok := metadata[strings.ToLower(consts.ContentEncoding)]; ok { - contentEncoding = trimAwsChunkedContentEncoding(contentEncoding) - if contentEncoding != "" { - // Make sure to trim and save the content-encoding - // parameter for a streaming signature which is set - // to a custom value for example: "aws-chunked,gzip". - metadata[strings.ToLower(consts.ContentEncoding)] = contentEncoding - } else { - // Trimmed content encoding is empty when the header - // value is set to "aws-chunked" only. 
- - // Make sure to delete the content-encoding parameter - // for a streaming signature which is set to value - // for example: "aws-chunked" - delete(metadata, strings.ToLower(consts.ContentEncoding)) - } - } - - // Success. - return metadata, nil -} - -// extractMetadata extracts metadata from map values. -func extractMetadataFromMime(ctx context.Context, v textproto.MIMEHeader, m map[string]string) error { - if v == nil { - return errInvalidArgument - } - - nv := make(textproto.MIMEHeader, len(v)) - for k, kv := range v { - // Canonicalize all headers, to remove any duplicates. - nv[strings.ToLower(k)] = kv - } - - // Save all supported headers. - for _, supportedHeader := range supportedHeaders { - value, ok := nv[strings.ToLower(supportedHeader)] - if ok { - m[strings.ToLower(supportedHeader)] = strings.Join(value, ",") - } - } - - for key := range v { - lowerKey := strings.ToLower(key) - for _, prefix := range userMetadataKeyPrefixes { - if !strings.HasPrefix(lowerKey, strings.ToLower(prefix)) { - continue - } - value, ok := nv[lowerKey] - if ok { - m[lowerKey] = strings.Join(value, ",") - break - } - } - } - return nil -} - -func trimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string) { - if contentEnc == "" { - return contentEnc - } - var newEncs []string - for _, enc := range strings.Split(contentEnc, ",") { - if enc != streamingContentEncoding { - newEncs = append(newEncs, enc) - } - } - return strings.Join(newEncs, ",") -} diff --git a/s3/utils/hash/errors.go b/s3/hash/errors.go similarity index 100% rename from s3/utils/hash/errors.go rename to s3/hash/errors.go diff --git a/s3/utils/hash/reader.go b/s3/hash/reader.go similarity index 100% rename from s3/utils/hash/reader.go rename to s3/hash/reader.go diff --git a/s3/policy/policy.go b/s3/policy/policy.go index a59b76558..45adbc268 100644 --- a/s3/policy/policy.go +++ b/s3/policy/policy.go @@ -5,27 +5,20 @@ import ( ) const ( - // PublicReadWrite 公开读写,适用于桶ACL和对象ACL PublicReadWrite 
= "public-read-write" - - // PublicRead 公开读,适用于桶ACL和对象ACL - PublicRead = "public-read" - - // Private 私有,适用于桶ACL和对象ACL - Private = "private" + PublicRead = "public-read" + Private = "private" ) -// 支持匿名公开读写的action集合 var rwActionMap = map[s3action.Action]struct{}{ - s3action.ListObjectsAction: {}, - s3action.ListObjectsV2Action: {}, - s3action.HeadObjectAction: {}, - s3action.PutObjectAction: {}, - s3action.GetObjectAction: {}, - s3action.CopyObjectAction: {}, - s3action.DeleteObjectAction: {}, - s3action.DeleteObjectsAction: {}, - + s3action.ListObjectsAction: {}, + s3action.ListObjectsV2Action: {}, + s3action.HeadObjectAction: {}, + s3action.PutObjectAction: {}, + s3action.GetObjectAction: {}, + s3action.CopyObjectAction: {}, + s3action.DeleteObjectAction: {}, + s3action.DeleteObjectsAction: {}, s3action.CreateMultipartUploadAction: {}, s3action.AbortMultipartUploadAction: {}, s3action.CompleteMultipartUploadAction: {}, @@ -38,7 +31,6 @@ func checkActionInPublicReadWrite(action s3action.Action) bool { return ok } -// 支持匿名公开读的action集合 var rdActionMap = map[s3action.Action]struct{}{ s3action.ListObjectsAction: {}, s3action.ListObjectsV2Action: {}, @@ -53,17 +45,14 @@ func checkActionInPublicRead(action s3action.Action) bool { } func IsAllowed(own bool, acl string, action s3action.Action) (allow bool) { - // 1.如果是自己,都能操作 if own { return true } - // 2.如果是别人,不能操作bucket if action.IsBucketAction() { return false } - // 2.如果是别人,区分acl操作object if action.IsObjectAction() { switch acl { case Private: diff --git a/s3/requests/input_errors.go b/s3/requests/input_errors.go deleted file mode 100644 index b2da8113a..000000000 --- a/s3/requests/input_errors.go +++ /dev/null @@ -1,65 +0,0 @@ -package requests - -import ( - "fmt" - "reflect" -) - -// ErrInvalidInputValue . -type ErrInvalidInputValue struct { - er error -} - -func (err ErrInvalidInputValue) Error() string { - return fmt.Sprintf("invalid input value: %v", err.er) -} - -// ErrTypeNotSet . 
-type ErrTypeNotSet struct { - typ reflect.Type -} - -func (err ErrTypeNotSet) Error() string { - return fmt.Sprintf("type <%s> not set", err.typ.String()) -} - -// ErrFailedDecodeXML . -type ErrFailedDecodeXML struct { - err error -} - -func (err ErrFailedDecodeXML) Error() string { - return fmt.Sprintf("decode xml: %v", err.err) -} - -// ErrWithUnsupportedParam . -type ErrWithUnsupportedParam struct { - param string -} - -func (err ErrWithUnsupportedParam) Error() string { - return fmt.Sprintf("param %s is unsported", err.param) -} - -// ErrFailedParseValue . -type ErrFailedParseValue struct { - name string - err error -} - -func (err ErrFailedParseValue) Name() string { - return err.name -} - -func (err ErrFailedParseValue) Error() string { - return fmt.Sprintf("parse <%s> value: %v", err.name, err.err) -} - -// ErrMissingRequiredParam . -type ErrMissingRequiredParam struct { - param string -} - -func (err ErrMissingRequiredParam) Error() string { - return fmt.Sprintf("missing required param <%s>", err.param) -} diff --git a/s3/requests/validate_errors.go b/s3/requests/validate_errors.go deleted file mode 100644 index 839a6ed83..000000000 --- a/s3/requests/validate_errors.go +++ /dev/null @@ -1,22 +0,0 @@ -package requests - -import "errors" - -var ( - ErrBucketNameInvalid = errors.New("the bucket name is invalid") - ErrObjectNameInvalid = errors.New("the object name is invalid") - ErrObjectNameTooLong = errors.New("the object name cannot be longer than 1024 characters") - ErrObjectNamePrefixSlash = errors.New("the object name cannot start with slash") - ErrRegionUnsupported = errors.New("the location is not supported by this server") - ErrACLUnsupported = errors.New("the ACL is not supported by this server") - ErrInvalidContentMd5 = errors.New("the content md5 is invalid") - ErrInvalidChecksumSha256 = errors.New("the checksum-sha256 is invalid") - ErrContentLengthMissing = errors.New("the content-length is missing") - ErrContentLengthTooSmall = errors.New("the 
content-length is too small") - ErrContentLengthTooLarge = errors.New("the content-length is too large") - ErrCopySrcInvalid = errors.New("the copy-source is invalid") - ErrCopyDestInvalid = errors.New("the copy-destination is invalid") - ErrMaxKeysInvalid = errors.New("the max-keys is invalid") - ErrEncodingTypeInvalid = errors.New("the encoding-type is invalid") - ErrMarkerPrefixCombinationInvalid = errors.New("the marker-prefix combination is invalid") -) diff --git a/s3/responses/object_header.go b/s3/responses/object_header.go deleted file mode 100644 index 401957956..000000000 --- a/s3/responses/object_header.go +++ /dev/null @@ -1,61 +0,0 @@ -package responses - -import ( - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/services/object" - "net/http" - "net/url" - "strconv" - "strings" -) - -// SetObjectHeaders Write object header -func SetObjectHeaders(w http.ResponseWriter, r *http.Request, objInfo *object.Object) { - // Set last modified time. - lastModified := objInfo.ModTime.UTC().Format(http.TimeFormat) - w.Header().Set(consts.LastModified, lastModified) - - // Set Etag if available. - if objInfo.ETag != "" { - w.Header()[consts.ETag] = []string{"\"" + objInfo.ETag + "\""} - } - - if objInfo.ContentType != "" { - w.Header().Set(consts.ContentType, objInfo.ContentType) - } - - if objInfo.ContentEncoding != "" { - w.Header().Set(consts.ContentEncoding, objInfo.ContentEncoding) - } - - if !objInfo.Expires.IsZero() { - w.Header().Set(consts.Expires, objInfo.Expires.UTC().Format(http.TimeFormat)) - } - - // Set content length - w.Header().Set(consts.ContentLength, strconv.FormatInt(objInfo.Size, 10)) - - // Set the relevant version ID as part of the response header. - if objInfo.VersionID != "" { - w.Header()[consts.AmzVersionID] = []string{objInfo.VersionID} - } - -} - -// SetHeadGetRespHeaders - set any requested parameters as response headers. 
-func SetHeadGetRespHeaders(w http.ResponseWriter, reqParams url.Values) { - for k, v := range reqParams { - if header, ok := supportedHeadGetReqParams[strings.ToLower(k)]; ok { - w.Header()[header] = v - } - } -} - -// supportedHeadGetReqParams - supported request parameters for GET and HEAD presigned request. -var supportedHeadGetReqParams = map[string]string{ - "response-expires": consts.Expires, - "response-content-type": consts.ContentType, - "response-content-encoding": consts.ContentEncoding, - "response-content-language": consts.ContentLanguage, - "response-content-disposition": consts.ContentDisposition, -} diff --git a/s3/responses/responses_common.go b/s3/responses/responses_common.go deleted file mode 100644 index d1098cdf2..000000000 --- a/s3/responses/responses_common.go +++ /dev/null @@ -1,56 +0,0 @@ -package responses - -import ( - "github.com/aws/aws-sdk-go/service/s3" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/protocol" - "net/http" - "path" -) - -func owner(accessKey string) *s3.Owner { - return new(s3.Owner).SetID(accessKey).SetDisplayName(accessKey) -} - -func ownerFullControlGrant(accessKey string) *s3.Grant { - return new(s3.Grant).SetGrantee(new(s3.Grantee).SetType(s3.TypeCanonicalUser).SetID(accessKey).SetDisplayName(accessKey)).SetPermission(s3.PermissionFullControl) -} - -var ( - allUsersReadGrant = new(s3.Grant).SetGrantee(new(s3.Grantee).SetType(s3.TypeGroup).SetURI(consts.AllUsersURI)).SetPermission(s3.PermissionRead) - allUsersWriteGrant = new(s3.Grant).SetGrantee(new(s3.Grantee).SetType(s3.TypeGroup).SetURI(consts.AllUsersURI)).SetPermission(s3.PermissionWrite) -) - -type ErrorOutput struct { - _ struct{} `type:"structure"` - Code string `locationName:"Code" type:"string"` - Message string `locationName:"Message" type:"string"` - Resource string `locationName:"Resource" type:"string"` - RequestID string `locationName:"RequestID" type:"string"` -} - -func NewErrOutput(r *http.Request, rerr *Error) 
*ErrorOutput { - return &ErrorOutput{ - Code: rerr.Code(), - Message: rerr.Description(), - Resource: pathClean(r.URL.Path), - RequestID: "", // this field value will be automatically filled - } -} - -func WriteErrorResponse(w http.ResponseWriter, r *http.Request, rerr *Error) { - output := NewErrOutput(r, rerr) - _ = protocol.WriteResponse(w, rerr.HTTPStatusCode(), output, "Error") -} - -func WriteSuccessResponse(w http.ResponseWriter, output interface{}, locationName string) { - _ = protocol.WriteResponse(w, http.StatusOK, output, locationName) -} - -func pathClean(p string) string { - cp := path.Clean(p) - if cp == "." { - return "" - } - return cp -} diff --git a/s3/s3.go b/s3/s3.go index c7ff7bc07..9512a5473 100644 --- a/s3/s3.go +++ b/s3/s3.go @@ -2,14 +2,14 @@ package s3 import ( config "github.com/bittorrent/go-btfs-config" + "github.com/bittorrent/go-btfs/s3/api/handlers" + "github.com/bittorrent/go-btfs/s3/api/providers" + "github.com/bittorrent/go-btfs/s3/api/routers" + "github.com/bittorrent/go-btfs/s3/api/server" + "github.com/bittorrent/go-btfs/s3/api/services/accesskey" + "github.com/bittorrent/go-btfs/s3/api/services/object" + "github.com/bittorrent/go-btfs/s3/api/services/sign" "github.com/bittorrent/go-btfs/s3/ctxmu" - "github.com/bittorrent/go-btfs/s3/handlers" - "github.com/bittorrent/go-btfs/s3/providers" - "github.com/bittorrent/go-btfs/s3/routers" - "github.com/bittorrent/go-btfs/s3/server" - "github.com/bittorrent/go-btfs/s3/services/accesskey" - "github.com/bittorrent/go-btfs/s3/services/object" - "github.com/bittorrent/go-btfs/s3/services/sign" "github.com/bittorrent/go-btfs/transaction/storage" "sync" ) diff --git a/s3/s3utils/utils.go b/s3/s3utils/utils.go deleted file mode 100644 index f8d9b083a..000000000 --- a/s3/s3utils/utils.go +++ /dev/null @@ -1,380 +0,0 @@ -package s3utils - -import ( - "context" - "errors" - "fmt" - "github.com/google/uuid" - "regexp" - "strings" - "unicode/utf8" -) - -// GenericError - generic object layer 
error. -type GenericError struct { - Bucket string - Object string - Err error -} - -// Bucket related errors. - -// BucketNameInvalid - bucket name provided is invalid. -type BucketNameInvalid GenericError - -// Error returns string an error formatted as the given text. -func (e BucketNameInvalid) Error() string { - return "bucket name invalid: " + e.Bucket -} - -// Object related errors. - -// ObjectNameInvalid - object name provided is invalid. -type ObjectNameInvalid GenericError - -// ObjectNameTooLong - object name too long. -type ObjectNameTooLong GenericError - -// ObjectNamePrefixAsSlash - object name has a slash as prefix. -type ObjectNamePrefixAsSlash GenericError - -// Error returns string an error formatted as the given text. -func (e ObjectNameInvalid) Error() string { - return "Object name invalid: " + e.Bucket + "/" + e.Object -} - -// Error returns string an error formatted as the given text. -func (e ObjectNameTooLong) Error() string { - return "Object name too long: " + e.Bucket + "/" + e.Object -} - -// Error returns string an error formatted as the given text. -func (e ObjectNamePrefixAsSlash) Error() string { - return "Object name contains forward slash as prefix: " + e.Bucket + "/" + e.Object -} - -// InvalidUploadIDKeyCombination - invalid upload id and key marker combination. -type InvalidUploadIDKeyCombination struct { - UploadIDMarker, KeyMarker string -} - -func (e InvalidUploadIDKeyCombination) Error() string { - return fmt.Sprintf("Invalid combination of uploadID marker '%s' and marker '%s'", e.UploadIDMarker, e.KeyMarker) -} - -// InvalidMarkerPrefixCombination - invalid marker and prefix combination. -type InvalidMarkerPrefixCombination struct { - Marker, Prefix string -} - -func (e InvalidMarkerPrefixCombination) Error() string { - return fmt.Sprintf("Invalid combination of marker '%s' and prefix '%s'", e.Marker, e.Prefix) -} - -// Multipart related errors. - -// MalformedUploadID malformed upload id. 
-type MalformedUploadID struct { - UploadID string -} - -func (e MalformedUploadID) Error() string { - return "Malformed upload id " + e.UploadID -} - -// InvalidUploadID invalid upload id. -type InvalidUploadID struct { - Bucket string - Object string - UploadID string -} - -func (e InvalidUploadID) Error() string { - return "Invalid upload id " + e.UploadID -} - -// InvalidPart One or more of the specified parts could not be found -type InvalidPart struct { - PartNumber int - ExpETag string - GotETag string -} - -func (e InvalidPart) Error() string { - return fmt.Sprintf("Specified part could not be found. PartNumber %d, Expected %s, got %s", - e.PartNumber, e.ExpETag, e.GotETag) -} - -// PartTooSmall - error if part size is less than 5MB. -type PartTooSmall struct { - PartSize int64 - PartNumber int - PartETag string -} - -func (e PartTooSmall) Error() string { - return fmt.Sprintf("Part size for %d should be at least 5MB", e.PartNumber) -} - -// PartTooBig returned if size of part is bigger than the allowed limit. -type PartTooBig struct{} - -func (e PartTooBig) Error() string { - return "Part size bigger than the allowed limit" -} - -// We support '.' with bucket names but we fallback to using path -// style requests instead for such buckets. -var ( - validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`) - validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) - ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) -) - -// Common checker for both stricter and basic validation. 
-func checkBucketName(bucketName string, strict bool) (err error) { - if strings.TrimSpace(bucketName) == "" { - return errors.New("bucket name cannot be empty") - } - if len(bucketName) < 3 { - return errors.New("bucket name cannot be shorter than 3 characters") - } - if len(bucketName) > 63 { - return errors.New("bucket name cannot be longer than 63 characters") - } - if ipAddress.MatchString(bucketName) { - return errors.New("bucket name cannot be an ip address") - } - if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") { - return errors.New("bucket name contains invalid characters") - } - if strict { - if !validBucketNameStrict.MatchString(bucketName) { - err = errors.New("bucket name contains invalid characters") - } - return err - } - if !validBucketName.MatchString(bucketName) { - err = errors.New("bucket name contains invalid characters") - } - return err -} - -// CheckValidBucketName - checks if we have a valid input bucket name. -func CheckValidBucketName(bucketName string) (err error) { - return checkBucketName(bucketName, false) -} - -// CheckValidBucketNameStrict - checks if we have a valid input bucket name. -// This is a stricter version. -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html -func CheckValidBucketNameStrict(bucketName string) (err error) { - return checkBucketName(bucketName, true) -} - -// Checks on GetObject arguments, bucket and object. -func CheckGetObjArgs(ctx context.Context, bucket, object string) error { - return checkBucketAndObjectNames(ctx, bucket, object) -} - -// Checks on DeleteObject arguments, bucket and object. -func CheckDelObjArgs(ctx context.Context, bucket, object string) error { - return checkBucketAndObjectNames(ctx, bucket, object) -} - -// Checks bucket and object name validity, returns nil if both are valid. -func checkBucketAndObjectNames(ctx context.Context, bucket, object string) error { - // Verify if bucket is valid. 
- if CheckValidBucketName(bucket) != nil { - return BucketNameInvalid{Bucket: bucket} - } - // Verify if object is valid. - if len(object) == 0 { - return ObjectNameInvalid{Bucket: bucket, Object: object} - } - if !IsValidObjectPrefix(object) { - return ObjectNameInvalid{Bucket: bucket, Object: object} - } - return nil -} - -// Checks for all ListObjects arguments validity. -func CheckListObjsArgs(ctx context.Context, bucket, prefix, marker string) error { - // Validates object prefix validity after bucket exists. - if !IsValidObjectPrefix(prefix) { - return ObjectNameInvalid{ - Bucket: bucket, - Object: prefix, - } - } - // Verify if marker has prefix. - if marker != "" && !strings.HasPrefix(marker, prefix) { - return InvalidMarkerPrefixCombination{ - Marker: marker, - Prefix: prefix, - } - } - return nil -} - -// Checks for all ListMultipartUploads arguments validity. -func CheckListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string) error { - if err := CheckListObjsArgs(ctx, bucket, prefix, keyMarker); err != nil { - return err - } - if uploadIDMarker != "" { - if strings.HasSuffix(keyMarker, SlashSeparator) { - return InvalidUploadIDKeyCombination{ - UploadIDMarker: uploadIDMarker, - KeyMarker: keyMarker, - } - } - if _, err := uuid.Parse(uploadIDMarker); err != nil { - return MalformedUploadID{ - UploadID: uploadIDMarker, - } - } - } - return nil -} - -// Checks for NewMultipartUpload arguments validity, also validates if bucket exists. -func CheckNewMultipartArgs(ctx context.Context, bucket, object string) error { - return checkObjectArgs(ctx, bucket, object) -} - -// Checks for PutObjectPart arguments validity, also validates if bucket exists. -func CheckPutObjectPartArgs(ctx context.Context, bucket, object string) error { - return checkObjectArgs(ctx, bucket, object) -} - -// Checks for ListParts arguments validity, also validates if bucket exists. 
-func CheckListPartsArgs(ctx context.Context, bucket, object string) error { - return checkObjectArgs(ctx, bucket, object) -} - -// Checks for CompleteMultipartUpload arguments validity, also validates if bucket exists. -func CheckCompleteMultipartArgs(ctx context.Context, bucket, object string) error { - return checkObjectArgs(ctx, bucket, object) -} - -// Checks for AbortMultipartUpload arguments validity, also validates if bucket exists. -func CheckAbortMultipartArgs(ctx context.Context, bucket, object string) error { - return checkObjectArgs(ctx, bucket, object) -} - -// Checks Object arguments validity, also validates if bucket exists. -func checkObjectArgs(ctx context.Context, bucket, object string) error { - if err := checkObjectNameForLengthAndSlash(bucket, object); err != nil { - return err - } - - // Validates object name validity after bucket exists. - if !IsValidObjectName(object) { - return ObjectNameInvalid{ - Bucket: bucket, - Object: object, - } - } - - return nil -} - -// Checks for PutObject arguments validity, also validates if bucket exists. -func CheckPutObjectArgs(ctx context.Context, bucket, object string) error { - if err := checkObjectNameForLengthAndSlash(bucket, object); err != nil { - return err - } - if len(object) == 0 || - !IsValidObjectPrefix(object) { - return ObjectNameInvalid{ - Bucket: bucket, - Object: object, - } - } - return nil -} - -// SlashSeparator - slash separator. -const SlashSeparator = "/" - -// IsValidObjectName verifies an object name in accordance with Amazon's -// requirements. It cannot exceed 1024 characters and must be a valid UTF8 -// string. -// -// See: -// http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html -// -// You should avoid the following characters in a key name because of -// significant special handling for consistency across all -// applications. -// -// Rejects strings with following characters. 
-// -// - Backslash ("\") -// -// additionally minio does not support object names with trailing SlashSeparator. -func IsValidObjectName(object string) bool { - if len(object) == 0 { - return false - } - if strings.HasSuffix(object, SlashSeparator) { - return false - } - return IsValidObjectPrefix(object) -} - -// IsValidObjectPrefix verifies whether the prefix is a valid object name. -// Its valid to have a empty prefix. -func IsValidObjectPrefix(object string) bool { - if hasBadPathComponent(object) { - return false - } - if !utf8.ValidString(object) { - return false - } - if strings.Contains(object, `//`) { - return false - } - return true -} - -// checkObjectNameForLengthAndSlash -check for the validity of object name length and prefis as slash -func checkObjectNameForLengthAndSlash(bucket, object string) error { - // Check for the length of object name - if len(object) > 1024 { - return ObjectNameTooLong{ - Bucket: bucket, - Object: object, - } - } - // Check for slash as prefix in object name - if strings.HasPrefix(object, SlashSeparator) { - return ObjectNamePrefixAsSlash{ - Bucket: bucket, - Object: object, - } - } - return nil -} - -// Bad path components to be rejected by the path validity handler. -const ( - dotdotComponent = ".." - dotComponent = "." -) - -// Check if the incoming path has bad path components, -// such as ".." and "." -func hasBadPathComponent(path string) bool { - path = strings.TrimSpace(path) - for _, p := range strings.Split(path, SlashSeparator) { - switch strings.TrimSpace(p) { - case dotdotComponent: - return true - case dotComponent: - return true - } - } - return false -} diff --git a/s3/services/sign/signature-v4-utils.go b/s3/services/sign/signature-v4-utils.go deleted file mode 100644 index e64b8d23d..000000000 --- a/s3/services/sign/signature-v4-utils.go +++ /dev/null @@ -1,234 +0,0 @@ -/* - * The following code tries to reverse engineer the Amazon S3 APIs, - * and is mostly copied from minio implementation. 
- */ - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - -package sign - -import ( - "bytes" - "crypto/sha256" - "encoding/hex" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/responses" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" -) - -// http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the -// client did not calculate sha256 of the payload. -const unsignedPayload = "UNSIGNED-PAYLOAD" - -// SkipContentSha256Cksum returns true if caller needs to skip -// payload checksum, false if not. -func SkipContentSha256Cksum(r *http.Request) bool { - var ( - v []string - ok bool - ) - - if isRequestPresignedSignatureV4(r) { - v, ok = r.Form[consts.AmzContentSha256] - if !ok { - v, ok = r.Header[consts.AmzContentSha256] - } - } else { - v, ok = r.Header[consts.AmzContentSha256] - } - - // Skip if no header was set. - if !ok { - return true - } - - // If x-amz-content-sha256 is set and the value is not - // 'UNSIGNED-PAYLOAD' we should validate the content sha256. - switch v[0] { - case unsignedPayload: - return true - case consts.EmptySHA256: - // some broken clients set empty-sha256 - // with > 0 content-length in the body, - // we should skip such clients and allow - // blindly such insecure clients only if - // S3 strict compatibility is disabled. 
- if r.ContentLength > 0 { - // We return true only in situations when - // deployment has asked MinIO to allow for - // such broken clients and content-length > 0. - return true - } - } - return false -} - -// Returns SHA256 for calculating canonical-request. -func GetContentSha256Cksum(r *http.Request, stype serviceType) (string, error) { - if stype == ServiceSTS { - payload, err := ioutil.ReadAll(io.LimitReader(r.Body, consts.StsRequestBodyLimit)) - if err != nil { - return "", err - } - sum256 := sha256.Sum256(payload) - r.Body = ioutil.NopCloser(bytes.NewReader(payload)) - return hex.EncodeToString(sum256[:]), nil - } - - var ( - defaultSha256Cksum string - v []string - ok bool - ) - - // For a presigned request we look at the query param for sha256. - if isRequestPresignedSignatureV4(r) { - // X-Amz-Content-Sha256, if not set in presigned requests, checksum - // will default to 'UNSIGNED-PAYLOAD'. - defaultSha256Cksum = unsignedPayload - v, ok = r.Form[consts.AmzContentSha256] - if !ok { - v, ok = r.Header[consts.AmzContentSha256] - } - } else { - // X-Amz-Content-Sha256, if not set in signed requests, checksum - // will default to sha256([]byte("")). - defaultSha256Cksum = consts.EmptySHA256 - v, ok = r.Header[consts.AmzContentSha256] - } - - // We found 'X-Amz-Content-Sha256' return the captured value. - if ok { - return v[0], nil - } - - // We couldn't find 'X-Amz-Content-Sha256'. - return defaultSha256Cksum, nil -} - -// isValidRegion - verify if incoming region value is valid with configured Region. -func isValidRegion(reqRegion string, confRegion string) bool { - if confRegion == "" { - return true - } - if confRegion == "US" { - confRegion = consts.DefaultBucketRegion - } - // Some older s3 clients set region as "US" instead of - // globalDefaultRegion, handle it. 
- if reqRegion == "US" { - reqRegion = consts.DefaultBucketRegion - } - return reqRegion == confRegion -} - -// check if the access key is valid and recognized, additionally -// also returns if the access key is owner/admin. -func (s *service) checkKeyValid(ack string) (secret string, rerr *responses.Error) { - secret, exists, enable, err := s.getSecret(ack) - if err != nil { - rerr = responses.ErrInternalError - return - } - - if !exists { - rerr = responses.ErrInvalidAccessKeyID - return - } - - if !enable { - rerr = responses.ErrAccessKeyDisabled - return - } - - return -} - -func contains(slice interface{}, elem interface{}) bool { - v := reflect.ValueOf(slice) - if v.Kind() == reflect.Slice { - for i := 0; i < v.Len(); i++ { - if v.Index(i).Interface() == elem { - return true - } - } - } - return false -} - -// extractSignedHeaders extract signed headers from Authorization header -func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, *responses.Error) { - reqHeaders := r.Header - reqQueries := r.Form - // find whether "host" is part of list of signed headers. - // if not return ErrUnsignedHeaders. "host" is mandatory. - if !contains(signedHeaders, "host") { - return nil, responses.ErrUnsignedHeaders - } - extractedSignedHeaders := make(http.Header) - for _, header := range signedHeaders { - // `host` will not be found in the headers, can be found in r.Host. - // but its alway necessary that the list of signed headers containing host in it. - val, ok := reqHeaders[http.CanonicalHeaderKey(header)] - if !ok { - // try to set headers from Query String - val, ok = reqQueries[header] - } - if ok { - extractedSignedHeaders[http.CanonicalHeaderKey(header)] = val - continue - } - switch header { - case "expect": - // Golang http server strips off 'Expect' header, if the - // client sent this as part of signed headers we need to - // handle otherwise we would see a signature mismatch. - // `aws-cli` sets this as part of signed headers. 
- // - // According to - // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.20 - // Expect header is always of form: - // - // Expect = "Expect" ":" 1#expectation - // expectation = "100-continue" | expectation-extension - // - // So it safe to assume that '100-continue' is what would - // be sent, for the time being keep this work around. - // Adding a *TODO* to remove this later when Golang server - // doesn't filter out the 'Expect' header. - extractedSignedHeaders.Set(header, "100-continue") - case "host": - // Go http server removes "host" from Request.Header - - //extractedSignedHeaders.Set(header, r.Host) - // todo use r.Host, or filedag-web deal with - //value := strings.Split(r.Host, ":") - extractedSignedHeaders.Set(header, r.Host) - case "transfer-encoding": - // Go http server removes "host" from Request.Header - extractedSignedHeaders[http.CanonicalHeaderKey(header)] = r.TransferEncoding - case "content-length": - // Signature-V4 spec excludes Content-Length from signed headers list for signature calculation. - // But some clients deviate from this rule. Hence we consider Content-Length for signature - // calculation to be compatible with such clients. - extractedSignedHeaders.Set(header, strconv.FormatInt(r.ContentLength, 10)) - default: - return nil, responses.ErrUnsignedHeaders - } - } - return extractedSignedHeaders, nil -} diff --git a/s3/services/sign/signature-v4.go b/s3/services/sign/signature-v4.go deleted file mode 100644 index 9fb12a15b..000000000 --- a/s3/services/sign/signature-v4.go +++ /dev/null @@ -1,272 +0,0 @@ -/* - * The following code tries to reverse engineer the Amazon S3 APIs, - * and is mostly copied from minio implementation. - */ - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - -package sign - -import ( - "crypto/subtle" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/set" - "github.com/bittorrent/go-btfs/s3/utils" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// AWS Signature Version '4' constants. -const ( - signV2Algorithm = "AWS" - signV4Algorithm = "AWS4-HMAC-SHA256" - iso8601Format = "20060102T150405Z" - yyyymmdd = "20060102" -) - -type serviceType string - -const ( - ServiceS3 serviceType = "s3" - //ServiceSTS STS - ServiceSTS serviceType = "sts" -) - -// compareSignatureV4 returns true if and only if both signatures -// are equal. The signatures are expected to be HEX encoded strings -// according to the AWS S3 signature V4 spec. -func compareSignatureV4(sig1, sig2 string) bool { - // The CTC using []byte(str) works because the hex encoding - // is unique for a sequence of bytes. See also compareSignatureV2. - return subtle.ConstantTimeCompare([]byte(sig1), []byte(sig2)) == 1 -} - -// doesPresignedSignatureMatch - Verify query headers with presigned signature -// - http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html -// -// returns nil if the signature matches. -func (s *service) doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) (ack string, rerr *responses.Error) { - // Copy request - req := *r - - // Parse request query string. 
- pSignValues, rerr := parsePreSignV4(req.Form, region, stype) - if rerr != nil { - return - } - - ack = pSignValues.Credential.accessKey - secret, rerr := s.checkKeyValid(ack) - if rerr != nil { - return - } - - // Extract all the signed headers along with its values. - extractedSignedHeaders, rerr := extractSignedHeaders(pSignValues.SignedHeaders, r) - if rerr != nil { - return - } - - // If the host which signed the request is slightly ahead in time (by less than MaxSkewTime) the - // request should still be allowed. - if pSignValues.Date.After(time.Now().UTC().Add(consts.MaxSkewTime)) { - rerr = responses.ErrRequestNotReadyYet - return - } - - if time.Now().UTC().Sub(pSignValues.Date) > pSignValues.Expires { - rerr = responses.ErrExpiredPresignRequest - return - } - - // Save the date and expires. - t := pSignValues.Date - expireSeconds := int(pSignValues.Expires / time.Second) - - // Construct new query. - query := make(url.Values) - clntHashedPayload := req.Form.Get(consts.AmzContentSha256) - if clntHashedPayload != "" { - query.Set(consts.AmzContentSha256, hashedPayload) - } - - token := req.Form.Get(consts.AmzSecurityToken) - if token != "" { - rerr = responses.ErrSignatureVersionNotSupported - return - } - - query.Set(consts.AmzAlgorithm, signV4Algorithm) - - // Construct the query. 
- query.Set(consts.AmzDate, t.Format(iso8601Format)) - query.Set(consts.AmzExpires, strconv.Itoa(expireSeconds)) - query.Set(consts.AmzSignedHeaders, utils.GetSignedHeaders(extractedSignedHeaders)) - query.Set(consts.AmzCredential, ack+consts.SlashSeparator+pSignValues.Credential.getScope()) - - defaultSigParams := set.CreateStringSet( - consts.AmzContentSha256, - consts.AmzSecurityToken, - consts.AmzAlgorithm, - consts.AmzDate, - consts.AmzExpires, - consts.AmzSignedHeaders, - consts.AmzCredential, - consts.AmzSignature, - ) - - // Add missing query parameters if any provided in the request URL - for k, v := range req.Form { - if !defaultSigParams.Contains(k) { - query[k] = v - } - } - - // Get the encoded query. - encodedQuery := query.Encode() - - // Verify if date query is same. - if req.Form.Get(consts.AmzDate) != query.Get(consts.AmzDate) { - rerr = responses.ErrSignatureDoesNotMatch - return - } - // Verify if expires query is same. - if req.Form.Get(consts.AmzExpires) != query.Get(consts.AmzExpires) { - rerr = responses.ErrSignatureDoesNotMatch - return - } - // Verify if signed headers query is same. - if req.Form.Get(consts.AmzSignedHeaders) != query.Get(consts.AmzSignedHeaders) { - rerr = responses.ErrSignatureDoesNotMatch - return - } - // Verify if credential query is same. - if req.Form.Get(consts.AmzCredential) != query.Get(consts.AmzCredential) { - rerr = responses.ErrSignatureDoesNotMatch - return - } - // Verify if sha256 payload query is same. - if clntHashedPayload != "" && clntHashedPayload != query.Get(consts.AmzContentSha256) { - rerr = responses.ErrContentSHA256Mismatch - return - } - - // Verify finally if signature is same. - - // Get canonical request. - presignedCanonicalReq := utils.GetCanonicalRequest(extractedSignedHeaders, hashedPayload, encodedQuery, req.URL.Path, req.Method) - - // Get string to sign from canonical request. 
- presignedStringToSign := utils.GetStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope()) - - // Get hmac presigned signing key. - presignedSigningKey := utils.GetSigningKey(secret, pSignValues.Credential.scope.date, - pSignValues.Credential.scope.region, string(stype)) - - // Get new signature. - newSignature := utils.GetSignature(presignedSigningKey, presignedStringToSign) - - // Verify signature. - if !compareSignatureV4(req.Form.Get(consts.AmzSignature), newSignature) { - rerr = responses.ErrSignatureDoesNotMatch - return - } - - return -} - -// doesSignatureMatch - Verify authorization header with calculated header in accordance with -// - http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html -// -// returns nil if signature matches. -func (s *service) doesSignatureMatch(hashedPayload string, r *http.Request, region string, stype serviceType) (ack string, rerr *responses.Error) { - // Copy request. - req := *r - - // Save authorization header. - v4Auth := req.Header.Get(consts.Authorization) - - // Parse signature version '4' header. - signV4Values, rerr := parseSignV4(v4Auth, region, stype) - if rerr != nil { - return - } - - // Extract all the signed headers along with its values. - extractedSignedHeaders, rerr := extractSignedHeaders(signV4Values.SignedHeaders, r) - if rerr != nil { - return - } - - ack = signV4Values.Credential.accessKey - secret, rerr := s.checkKeyValid(ack) - if rerr != nil { - return - } - - // Extract date, if not present throw error. - var date string - if date = req.Header.Get(consts.AmzDate); date == "" { - if date = r.Header.Get(consts.Date); date == "" { - rerr = responses.ErrMissingDateHeader - return - } - } - - // Parse date header. - t, err := time.Parse(iso8601Format, date) - if err != nil { - rerr = responses.ErrAuthorizationHeaderMalformed - return - } - - // Query string. - queryStr := req.URL.Query().Encode() - - // Get canonical request. 
- canonicalRequest := utils.GetCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method) - - // Get string to sign from canonical request. - stringToSign := utils.GetStringToSign(canonicalRequest, t, signV4Values.Credential.getScope()) - - // Get hmac signing key. - signingKey := utils.GetSigningKey(secret, signV4Values.Credential.scope.date, - signV4Values.Credential.scope.region, string(stype)) - - // Calculate signature. - newSignature := utils.GetSignature(signingKey, stringToSign) - - // Verify if signature match. - if !compareSignatureV4(newSignature, signV4Values.Signature) { - rerr = responses.ErrSignatureDoesNotMatch - return - } - - // Return error none. - return -} - -// getScope generate a string of a specific date, an AWS region, and a service. -func getScope(t time.Time, region string) string { - scope := strings.Join([]string{ - t.Format(yyyymmdd), - region, - string(ServiceS3), - "aws4_request", - }, consts.SlashSeparator) - return scope -} diff --git a/s3/services/sign/signature.go b/s3/services/sign/signature.go deleted file mode 100644 index 6cf160aa1..000000000 --- a/s3/services/sign/signature.go +++ /dev/null @@ -1,98 +0,0 @@ -package sign - -import ( - "encoding/hex" - "github.com/bittorrent/go-btfs/s3/consts" - "github.com/bittorrent/go-btfs/s3/etag" - "github.com/bittorrent/go-btfs/s3/responses" - "github.com/bittorrent/go-btfs/s3/utils/hash" - "net/http" -) - -// isReqAuthenticated Verify if request has valid AWS Signature Version '4'. 
-func (s *service) isReqAuthenticated(r *http.Request, region string, stype serviceType) (ack string, rerr *responses.Error) { - ack, rerr = s.reqSignatureV4Verify(r, region, stype) - if rerr != nil { - return - } - - size := r.ContentLength - - if size == -1 { - rerr = responses.ErrMissingContentLength - return - } - - if size > consts.MaxObjectSize { - rerr = responses.ErrEntityTooLarge - return - } - - md5Hex, sha256Hex, rerr := s.getClientCheckSum(r) - if rerr != nil { - return - } - - reader, err := hash.NewReader(r.Body, size, md5Hex, sha256Hex, size) - if err != nil { - rerr = responses.ErrInternalError - return - } - - r.Body = reader - - return -} - -func (s *service) getClientCheckSum(r *http.Request) (md5TagStr, sha256SumStr string, rerr *responses.Error) { - eTag, err := etag.FromContentMD5(r.Header) - if err != nil { - rerr = responses.ErrInvalidDigest - return - } - md5TagStr = eTag.String() - - skipSHA256 := SkipContentSha256Cksum(r) - if skipSHA256 { - return - } - - var ( - contentSHA256 []byte - sha256Sum []string - ) - - if isRequestPresignedSignatureV4(r) { - sha256Sum = r.Form[consts.AmzContentSha256] - } else { - sha256Sum = r.Header[consts.AmzContentSha256] - } - - if len(sha256Sum) > 0 { - contentSHA256, err = hex.DecodeString(sha256Sum[0]) - if err != nil || len(contentSHA256) == 0 { - rerr = responses.ErrContentSHA256Mismatch - return - } - sha256SumStr = hex.EncodeToString(contentSHA256) - } - - return -} - -func (s *service) reqSignatureV4Verify(r *http.Request, region string, stype serviceType) (ack string, rerr *responses.Error) { - sha256sum, err := GetContentSha256Cksum(r, stype) - if err != nil { - rerr = responses.ErrInternalError - return - } - switch { - case IsRequestSignatureV4(r): - ack, rerr = s.doesSignatureMatch(sha256sum, r, region, stype) - case isRequestPresignedSignatureV4(r): - ack, rerr = s.doesPresignedSignatureMatch(sha256sum, r, region, stype) - default: - rerr = responses.ErrAccessDenied - } - return -} diff --git 
a/s3/utils/bgcontext.go b/s3/utils/bgcontext.go deleted file mode 100644 index 3ad10e230..000000000 --- a/s3/utils/bgcontext.go +++ /dev/null @@ -1,35 +0,0 @@ -package utils - -import ( - "context" - "time" -) - -// BgContext returns a context that can be used for async operations. -// Cancellation/timeouts are removed, so parent cancellations/timeout will -// not propagate from parent. -// Context values are preserved. -// This can be used for goroutines that live beyond the parent context. -func BgContext(parent context.Context) context.Context { - return bgCtx{parent: parent} -} - -type bgCtx struct { - parent context.Context -} - -func (a bgCtx) Done() <-chan struct{} { - return nil -} - -func (a bgCtx) Err() error { - return nil -} - -func (a bgCtx) Deadline() (deadline time.Time, ok bool) { - return time.Time{}, false -} - -func (a bgCtx) Value(key interface{}) interface{} { - return a.parent.Value(key) -} diff --git a/s3/utils/if.go b/s3/utils/coalesce.go similarity index 100% rename from s3/utils/if.go rename to s3/utils/coalesce.go diff --git a/s3/utils/levels.go b/s3/utils/levels.go deleted file mode 100644 index 07af02999..000000000 --- a/s3/utils/levels.go +++ /dev/null @@ -1,15 +0,0 @@ -package utils - -import ( - logging "github.com/ipfs/go-log/v2" - "os" -) - -func SetupLogLevels() { - if _, set := os.LookupEnv("GOLOG_LOG_LEVEL"); !set { - _ = logging.SetLogLevel("*", "INFO") - - } else { - _ = logging.SetLogLevel("*", os.Getenv("GOLOG_LOG_LEVEL")) - } -} diff --git a/s3/utils/signature.go b/s3/utils/signature.go deleted file mode 100644 index 002d3d36f..000000000 --- a/s3/utils/signature.go +++ /dev/null @@ -1,359 +0,0 @@ -package utils - -import ( - "bytes" - "crypto/hmac" - "crypto/md5" - "crypto/sha256" - "encoding/base64" - "encoding/hex" - "io" - "net/http" - "regexp" - "sort" - "strings" - "testing" - "time" - "unicode/utf8" - - "github.com/bittorrent/go-btfs/s3/consts" -) - -var ignoredHeaders = map[string]bool{ - "Authorization": true, - 
"Content-Type": true, - "Content-Length": true, - "User-Agent": true, -} - -// AWS Signature Version '4' constants. -const ( - signV4Algorithm = "AWS4-HMAC-SHA256" - iso8601Format = "20060102T150405Z" - yyyymmdd = "20060102" -) - -type ServiceType string - -// MustNewSignedV4Request NewSignedV4Request -func MustNewSignedV4Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, st ServiceType, accessKey, secretKey string, t *testing.T) *http.Request { - req, err := NewRequest(method, urlStr, contentLength, body) - if err != nil { - t.Fatalf("newTestRequest fail err:%v", err) - } - - if err := SignRequestV4(req, accessKey, secretKey, st); err != nil { - t.Fatalf("Unable to inititalized new signed http request %s", err) - } - return req -} - -// SignRequestV4 Sign given request using Signature V4. -func SignRequestV4(req *http.Request, accessKey, secretKey string, st ServiceType) error { - // Get hashed payload. - hashedPayload := getContentSha256Cksum(req) - currTime := time.Now().UTC() - - // Set x-amz-date. - req.Header.Set("x-amz-date", currTime.Format(iso8601Format)) - req.Header.Set(consts.ContentType, "application/x-www-form-urlencoded") - // Query string. - // final Authorization header - // Get header keys. - // Get header map. - headerMap := make(map[string][]string) - for k, vv := range req.Header { - // If request header key is not in ignored headers, then add it. - if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; !ok { - headerMap[strings.ToLower(k)] = vv - } - } - headers := []string{"host"} - for k := range headerMap { - headers = append(headers, k) - } - sort.Strings(headers) - - // Get canonical headers. 
- var buf bytes.Buffer - for _, k := range headers { - buf.WriteString(k) - buf.WriteByte(':') - switch { - case k == "host": - buf.WriteString(req.URL.Host) - fallthrough - default: - for idx, v := range headerMap[k] { - if idx > 0 { - buf.WriteByte(',') - } - buf.WriteString(v) - } - buf.WriteByte('\n') - } - } - headerMap["host"] = append(headerMap["host"], req.URL.Host) - - // Get signed headers. - signedHeaders := strings.Join(headers, ";") - //a,_:=io.ReadAll(req.Body) - //b:=req.URL.Query().Encode() - //req.Form=url.Values{} - //req.Form.Add(b,string(a)) - //queryStr := req.Form.Encode() - queryStr := req.URL.Query().Encode() - region := consts.DefaultBucketRegion - // Get scope. - scope := strings.Join([]string{ - currTime.Format(yyyymmdd), - region, - string(st), - "aws4_request", - }, "/") - // Get canonical request. - canonicalRequest := GetCanonicalRequest(headerMap, hashedPayload, queryStr, req.URL.Path, req.Method) - // Get string to sign from canonical request. - stringToSign := GetStringToSign(canonicalRequest, currTime, scope) - - // Get hmac signing key. - signingKey := GetSigningKey(secretKey, currTime, region, string(st)) - - // Calculate signature. 
- newSignature := GetSignature(signingKey, stringToSign) - - parts := []string{ - "AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope, - "SignedHeaders=" + signedHeaders, - "Signature=" + newSignature, - } - author := strings.Join(parts, ", ") - req.Header.Set("Authorization", author) - - return nil -} - -// if object matches reserved string, no need to encode them -var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") - -// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences -// -// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 -// non english characters cannot be parsed due to the nature in which url.Encode() is written -// -// This function on the other hand is a direct replacement for url.Encode() technique to support -// pretty much every UTF-8 character. -func EncodePath(pathName string) string { - if reservedObjectNames.MatchString(pathName) { - return pathName - } - var encodedPathname string - for _, s := range pathName { - if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) - encodedPathname = encodedPathname + string(s) - continue - } - switch s { - case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) - encodedPathname = encodedPathname + string(s) - continue - default: - len := utf8.RuneLen(s) - if len < 0 { - // if utf8 cannot convert return the same string as is - return pathName - } - u := make([]byte, len) - utf8.EncodeRune(u, s) - for _, r := range u { - hex := hex.EncodeToString([]byte{r}) - encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) - } - } - } - return encodedPathname -} - -// NewRequest Returns new HTTP request object. 
-func NewRequest(method, urlStr string, contentLength int64, body io.ReadSeeker) (*http.Request, error) { - if method == "" { - method = "POST" - } - - // Save for subsequent use - var hashedPayload string - var md5Base64 string - switch { - case body == nil: - hashedPayload = getSHA256Hash([]byte{}) - default: - payloadBytes, err := io.ReadAll(body) - if err != nil { - return nil, err - } - hashedPayload = getSHA256Hash(payloadBytes) - md5Base64 = getMD5HashBase64(payloadBytes) - } - // Seek back to beginning. - if body != nil { - body.Seek(0, 0) - } else { - body = bytes.NewReader([]byte("")) - } - req, err := http.NewRequest(method, urlStr, body) - if err != nil { - return nil, err - } - if md5Base64 != "" { - req.Header.Set("Content-Md5", md5Base64) - } - req.Header.Set("x-amz-content-sha256", hashedPayload) - - // Add Content-Length - req.ContentLength = contentLength - - return req, nil -} - -// getSHA256Hash returns SHA-256 hash in hex encoding of given data. -func getSHA256Hash(data []byte) string { - return hex.EncodeToString(getSHA256Sum(data)) -} - -// getMD5HashBase64 returns MD5 hash in base64 encoding of given data. -func getMD5HashBase64(data []byte) string { - return base64.StdEncoding.EncodeToString(getMD5Sum(data)) -} - -// getSHA256Hash returns SHA-256 sum of given data. -func getSHA256Sum(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -// getMD5Sum returns MD5 sum of given data. -func getMD5Sum(data []byte) []byte { - hash := md5.New() - hash.Write(data) - return hash.Sum(nil) -} - -// Returns SHA256 for calculating canonical-request. -func getContentSha256Cksum(r *http.Request) string { - var ( - defaultSha256Cksum string - v []string - ok bool - ) - - // X-Amz-Content-Sha256, if not set in signed requests, checksum - // will default to sha256([]byte("")). 
- defaultSha256Cksum = consts.EmptySHA256 - v, ok = r.Header[consts.AmzContentSha256] - - // We found 'X-Amz-Content-Sha256' return the captured value. - if ok { - return v[0] - } - - // We couldn't find 'X-Amz-Content-Sha256'. - return defaultSha256Cksum -} - -// GetCanonicalRequest generate a canonical request of style -// -// canonicalRequest = -// -// \n -// \n -// \n -// \n -// \n -// -func GetCanonicalRequest(extractedSignedHeaders http.Header, payload, queryStr, urlPath, method string) string { - rawQuery := strings.ReplaceAll(queryStr, "+", "%20") - encodedPath := EncodePath(urlPath) - canonicalRequest := strings.Join([]string{ - method, - encodedPath, - rawQuery, - getCanonicalHeaders(extractedSignedHeaders), - GetSignedHeaders(extractedSignedHeaders), - payload, - }, "\n") - return canonicalRequest -} - -// GetSignedHeaders generate a string i.e alphabetically sorted, semicolon-separated list of lowercase request header names -func GetSignedHeaders(signedHeaders http.Header) string { - var headers []string - for k := range signedHeaders { - headers = append(headers, strings.ToLower(k)) - } - sort.Strings(headers) - return strings.Join(headers, ";") -} - -// getCanonicalHeaders generate a list of request headers with their values -func getCanonicalHeaders(signedHeaders http.Header) string { - var headers []string - vals := make(http.Header) - for k, vv := range signedHeaders { - headers = append(headers, strings.ToLower(k)) - vals[strings.ToLower(k)] = vv - } - sort.Strings(headers) - - var buf bytes.Buffer - for _, k := range headers { - buf.WriteString(k) - buf.WriteByte(':') - for idx, v := range vals[k] { - if idx > 0 { - buf.WriteByte(',') - } - buf.WriteString(signV4TrimAll(v)) - } - buf.WriteByte('\n') - } - return buf.String() -} - -// GetStringToSign a string based on selected query values. 
-func GetStringToSign(canonicalRequest string, t time.Time, scope string) string { - stringToSign := signV4Algorithm + "\n" + t.Format(iso8601Format) + "\n" - stringToSign += scope + "\n" - canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest)) - stringToSign += hex.EncodeToString(canonicalRequestBytes[:]) - return stringToSign -} - -// GetSigningKey hmac seed to calculate final signature. -func GetSigningKey(secretKey string, t time.Time, region string, serviceType string) []byte { - date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd))) - regionBytes := sumHMAC(date, []byte(region)) - service := sumHMAC(regionBytes, []byte(serviceType)) - signingKey := sumHMAC(service, []byte("aws4_request")) - return signingKey -} - -// GetSignature final signature in hexadecimal form. -func GetSignature(signingKey []byte, stringToSign string) string { - return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) -} - -// sumHMAC calculate hmac between two input byte array. 
-func sumHMAC(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall() -// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html -func signV4TrimAll(input string) string { - // Compress adjacent spaces (a space is determined by - // unicode.IsSpace() internally here) to one space and return - return strings.Join(strings.Fields(input), " ") -} diff --git a/s3/utils/utils.go b/s3/utils/utils.go deleted file mode 100644 index 73842b663..000000000 --- a/s3/utils/utils.go +++ /dev/null @@ -1,9 +0,0 @@ -package utils - -func CloneMapSS(src map[string]string) map[string]string { - r := make(map[string]string, len(src)) - for k, v := range src { - r[k] = v - } - return r -} diff --git a/s3/utils/xml.go b/s3/utils/xml.go deleted file mode 100644 index 3374b0db1..000000000 --- a/s3/utils/xml.go +++ /dev/null @@ -1,26 +0,0 @@ -package utils - -import ( - "encoding/xml" - "io" -) - -// XmlDecoder provide decoded value in xml. -func XmlDecoder(body io.Reader, v interface{}, size int64) error { - var lbody io.Reader - if size > 0 { - lbody = io.LimitReader(body, size) - } else { - lbody = body - } - d := xml.NewDecoder(lbody) - // Ignore any encoding set in the XML body - d.CharsetReader = nopCharsetConverter - return d.Decode(v) -} - -// nopCharsetConverter is a dummy charset convert which just copies input to output, -// it is used to ignore custom encoding charset in S3 XML body. 
-func nopCharsetConverter(label string, input io.Reader) (io.Reader, error) { - return input, nil -} From a18e40eeddc5a729f4cef84a1a85946de9c725de Mon Sep 17 00:00:00 2001 From: Steve Date: Tue, 19 Sep 2023 19:20:29 +0800 Subject: [PATCH 112/139] fix: args parse --- s3/api/contexts/contexts.go | 9 +-- s3/api/contexts/contexts_handle_inf.go | 8 ++- s3/api/handlers/handlers.go | 9 ++- s3/api/handlers/handlers_bucket.go | 54 +++++++++--------- s3/api/handlers/handlers_middlewares.go | 7 +-- s3/api/handlers/handlers_multipart.go | 33 +++++------ s3/api/handlers/handlers_object.go | 73 +++++++++++++------------ s3/api/handlers/proto.go | 4 +- s3/api/requests/parsers.go | 9 +-- s3/api/requests/parsers_bucket.go | 2 +- s3/api/requests/validates.go | 1 - s3/api/routers/routers.go | 4 +- 12 files changed, 112 insertions(+), 101 deletions(-) diff --git a/s3/api/contexts/contexts.go b/s3/api/contexts/contexts.go index ca7211b75..a475de203 100644 --- a/s3/api/contexts/contexts.go +++ b/s3/api/contexts/contexts.go @@ -5,11 +5,12 @@ import ( "net/http" ) -type key *struct{} +type key string -var ( - keyOfAccessKey = new(struct{}) - keyOfHandleInf = new(struct{}) +const ( + keyOfAccessKey key = "ctx-access-key" + keyOfHandleInf key = "ctx-handle-inf" + keyOfRequestArgs key = "ctx-request-args" ) func set(r *http.Request, k key, v any) { diff --git a/s3/api/contexts/contexts_handle_inf.go b/s3/api/contexts/contexts_handle_inf.go index c1a25ef6e..30bd65465 100644 --- a/s3/api/contexts/contexts_handle_inf.go +++ b/s3/api/contexts/contexts_handle_inf.go @@ -7,17 +7,19 @@ import ( type handleInfo struct { name string err error + args interface{} } -func SetHandleInf(r *http.Request, name string, err error) { - set(r, keyOfHandleInf, handleInfo{name, err}) +func SetHandleInf(r *http.Request, name string, err error, args interface{}) { + set(r, keyOfHandleInf, handleInfo{name, err, args}) return } -func GetHandleInf(r *http.Request) (name string, err error) { +func GetHandleInf(r 
*http.Request) (name string, err error, args interface{}) { v := get(r, keyOfHandleInf) inf, _ := v.(handleInfo) name = inf.name err = inf.err + args = inf.args return } diff --git a/s3/api/handlers/handlers.go b/s3/api/handlers/handlers.go index d9ae7697f..e1e3c2a2a 100644 --- a/s3/api/handlers/handlers.go +++ b/s3/api/handlers/handlers.go @@ -10,6 +10,7 @@ import ( "github.com/bittorrent/go-btfs/s3/hash" "net/http" "runtime" + "strings" ) var _ Handlerser = (*Handlers)(nil) @@ -36,12 +37,16 @@ func NewHandlers( return } -// name returns name of the called handler +// name returns name of the handler function func (h *Handlers) name() string { pc := make([]uintptr, 1) runtime.Callers(3, pc) f := runtime.FuncForPC(pc[0]) - return f.Name() + ps := strings.Split(f.Name(), ".") + if len(ps) > 0 { + return ps[len(ps)-1] + } + return "UnknownHandler" } // toResponseErr convert internal error to response error diff --git a/s3/api/handlers/handlers_bucket.go b/s3/api/handlers/handlers_bucket.go index d1771b319..cb44b632e 100644 --- a/s3/api/handlers/handlers_bucket.go +++ b/s3/api/handlers/handlers_bucket.go @@ -4,23 +4,24 @@ import ( "github.com/bittorrent/go-btfs/s3/api/contexts" "github.com/bittorrent/go-btfs/s3/api/requests" "github.com/bittorrent/go-btfs/s3/api/responses" + "github.com/bittorrent/go-btfs/s3/api/services/object" "net/http" ) func (h *Handlers) CreateBucketHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() var err error + var args *object.CreateBucketArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseCreateBucketRequest(r) + args, err = requests.ParseCreateBucketRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - buc, err := h.objsvc.CreateBucket(ctx, args) + buc, err := h.objsvc.CreateBucket(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return @@ -31,19 
+32,19 @@ func (h *Handlers) CreateBucketHandler(w http.ResponseWriter, r *http.Request) { } func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() var err error + var args *object.GetBucketArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseHeadBucketRequest(r) + args, err = requests.ParseHeadBucketRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - buc, err := h.objsvc.GetBucket(ctx, args) + buc, err := h.objsvc.GetBucket(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return @@ -54,19 +55,19 @@ func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { } func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() var err error + var args *object.DeleteBucketArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseDeleteBucketRequest(r) + args, err = requests.ParseDeleteBucketRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - err = h.objsvc.DeleteBucket(ctx, args) + err = h.objsvc.DeleteBucket(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return @@ -78,11 +79,12 @@ func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { var err error + var args *object.ListBucketsArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseListBucketsRequest(r) + args, err = requests.ParseListBucketsRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return @@ -98,48 +100,48 @@ func (h *Handlers) 
ListBucketsHandler(w http.ResponseWriter, r *http.Request) { return } -func (h *Handlers) GetBucketAclHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() +func (h *Handlers) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) { var err error + var args *object.PutBucketACLArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseGetBucketACLRequest(r) + args, err = requests.ParsePutBucketAclRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - acl, err := h.objsvc.GetBucketACL(ctx, args) + err = h.objsvc.PutBucketACL(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - responses.WriteGetBucketACLResponse(w, r, acl) + responses.WritePutBucketAclResponse(w, r) return } -func (h *Handlers) PutBucketAclHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() +func (h *Handlers) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) { var err error + var args *object.GetBucketACLArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParsePutBucketAclRequest(r) + args, err = requests.ParseGetBucketACLRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - err = h.objsvc.PutBucketACL(ctx, args) + acl, err := h.objsvc.GetBucketACL(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - responses.WritePutBucketAclResponse(w, r) + responses.WriteGetBucketACLResponse(w, r, acl) return } diff --git a/s3/api/handlers/handlers_middlewares.go b/s3/api/handlers/handlers_middlewares.go index cf7181b6c..340248524 100644 --- a/s3/api/handlers/handlers_middlewares.go +++ b/s3/api/handlers/handlers_middlewares.go @@ -38,12 +38,11 @@ func (h *Handlers) Cors(handler http.Handler) http.Handler { func (h 
*Handlers) Log(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { start := time.Now() - fmt.Printf("s3-api: [I] %s | <%-4s> | %s\n", start.Format(time.RFC3339), r.Method, r.URL) handler.ServeHTTP(w, r) - hname, herr := contexts.GetHandleInf(r) + hname, herr, args := contexts.GetHandleInf(r) end := time.Now() ela := end.Sub(start) - fmt.Printf("s3-api: [O] %s | <%-4s> | %s | %s | %v | %s \n", end.Format(time.RFC3339), r.Method, r.URL, hname, herr, ela) + fmt.Printf("s3-api: | %s | <%-4s> | %s | %s | %v | %+v | %s \n", end.Format(time.RFC3339), r.Method, r.URL, hname, herr, args, ela) }) } @@ -69,7 +68,7 @@ func (h *Handlers) Sign(handler http.Handler) http.Handler { var err *responses.Error defer func() { if err != nil { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, nil) } }() diff --git a/s3/api/handlers/handlers_multipart.go b/s3/api/handlers/handlers_multipart.go index 386c65fc2..7dbb1b44f 100644 --- a/s3/api/handlers/handlers_multipart.go +++ b/s3/api/handlers/handlers_multipart.go @@ -4,23 +4,24 @@ import ( "github.com/bittorrent/go-btfs/s3/api/contexts" "github.com/bittorrent/go-btfs/s3/api/requests" "github.com/bittorrent/go-btfs/s3/api/responses" + "github.com/bittorrent/go-btfs/s3/api/services/object" "net/http" ) func (h *Handlers) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() var err error + var args *object.CreateMultipartUploadArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseCreateMultipartUploadRequest(r) + args, err = requests.ParseCreateMultipartUploadRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - multipart, err := h.objsvc.CreateMultipartUpload(ctx, args) + multipart, err := h.objsvc.CreateMultipartUpload(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, 
r, h.toResponseErr(err)) return @@ -31,19 +32,19 @@ func (h *Handlers) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.R } func (h *Handlers) UploadPartHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() var err error + var args *object.UploadPartArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseUploadPartRequest(r) + args, err = requests.ParseUploadPartRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - part, err := h.objsvc.UploadPart(ctx, args) + part, err := h.objsvc.UploadPart(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return @@ -54,19 +55,19 @@ func (h *Handlers) UploadPartHandler(w http.ResponseWriter, r *http.Request) { } func (h *Handlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() var err error + var args *object.AbortMultipartUploadArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseAbortMultipartUploadRequest(r) + args, err = requests.ParseAbortMultipartUploadRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - err = h.objsvc.AbortMultipartUpload(ctx, args) + err = h.objsvc.AbortMultipartUpload(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return @@ -77,19 +78,19 @@ func (h *Handlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Re } func (h *Handlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() var err error + var args *object.CompleteMultipartUploadArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseCompleteMultipartUploadRequest(r) + args, err = 
requests.ParseCompleteMultipartUploadRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - obj, err := h.objsvc.CompleteMultiPartUpload(ctx, args) + obj, err := h.objsvc.CompleteMultiPartUpload(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return diff --git a/s3/api/handlers/handlers_object.go b/s3/api/handlers/handlers_object.go index edad88e95..f635388be 100644 --- a/s3/api/handlers/handlers_object.go +++ b/s3/api/handlers/handlers_object.go @@ -4,24 +4,25 @@ import ( "github.com/bittorrent/go-btfs/s3/api/contexts" "github.com/bittorrent/go-btfs/s3/api/requests" "github.com/bittorrent/go-btfs/s3/api/responses" + "github.com/bittorrent/go-btfs/s3/api/services/object" "net/http" ) // PutObjectHandler . func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() var err error + var args *object.PutObjectArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParsePutObjectRequest(r) + args, err = requests.ParsePutObjectRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - obj, err := h.objsvc.PutObject(ctx, args) + obj, err := h.objsvc.PutObject(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return @@ -33,19 +34,19 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { // CopyObjectHandler . 
func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() var err error + var args *object.CopyObjectArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseCopyObjectRequest(r) + args, err = requests.ParseCopyObjectRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - obj, err := h.objsvc.CopyObject(ctx, args) + obj, err := h.objsvc.CopyObject(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return @@ -57,19 +58,19 @@ func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { // HeadObjectHandler . func (h *Handlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() var err error + var args *object.GetObjectArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseHeadObjectRequest(r) + args, err = requests.ParseHeadObjectRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - obj, _, err := h.objsvc.GetObject(ctx, args) + obj, _, err := h.objsvc.GetObject(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return @@ -81,19 +82,19 @@ func (h *Handlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { // GetObjectHandler . 
func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() var err error + var args *object.GetObjectArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseGetObjectRequest(r) + args, err = requests.ParseGetObjectRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - obj, body, err := h.objsvc.GetObject(ctx, args) + obj, body, err := h.objsvc.GetObject(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return @@ -105,18 +106,18 @@ func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { // DeleteObjectHandler . func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() var err error + var args *object.DeleteObjectArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseDeleteObjectRequest(r) + args, err = requests.ParseDeleteObjectRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - err = h.objsvc.DeleteObject(ctx, args) + err = h.objsvc.DeleteObject(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return @@ -128,19 +129,19 @@ func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { // DeleteObjectsHandler . 
func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() var err error + var args *object.DeleteObjectsArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseDeleteObjectsRequest(r) + args, err = requests.ParseDeleteObjectsRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - deletes, err := h.objsvc.DeleteObjects(ctx, args) + deletes, err := h.objsvc.DeleteObjects(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return @@ -152,19 +153,19 @@ func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) // ListObjectsHandler . func (h *Handlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() var err error + var args *object.ListObjectsArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseListObjectsRequest(r) + args, err = requests.ParseListObjectsRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - list, err := h.objsvc.ListObjects(ctx, args) + list, err := h.objsvc.ListObjects(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return @@ -176,19 +177,19 @@ func (h *Handlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) { // ListObjectsV2Handler . 
func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() var err error + var args *object.ListObjectsV2Args defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseListObjectsV2Request(r) + args, err = requests.ParseListObjectsV2Request(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - list, err := h.objsvc.ListObjectsV2(ctx, args) + list, err := h.objsvc.ListObjectsV2(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return @@ -200,19 +201,19 @@ func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) // GetObjectACLHandler - GET Object ACL func (h *Handlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() var err error + var args *object.GetObjectACLArgs defer func() { - contexts.SetHandleInf(r, h.name(), err) + contexts.SetHandleInf(r, h.name(), err, args) }() - args, err := requests.ParseGetObjectACLRequest(r) + args, err = requests.ParseGetObjectACLRequest(r) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return } - acl, err := h.objsvc.GetObjectACL(ctx, args) + acl, err := h.objsvc.GetObjectACL(r.Context(), args) if err != nil { responses.WriteErrorResponse(w, r, h.toResponseErr(err)) return diff --git a/s3/api/handlers/proto.go b/s3/api/handlers/proto.go index ab6853100..4f55ff5f6 100644 --- a/s3/api/handlers/proto.go +++ b/s3/api/handlers/proto.go @@ -17,8 +17,8 @@ type Handlerser interface { HeadBucketHandler(w http.ResponseWriter, r *http.Request) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) ListBucketsHandler(w http.ResponseWriter, r *http.Request) - PutBucketAclHandler(w http.ResponseWriter, r *http.Request) - GetBucketAclHandler(w http.ResponseWriter, r *http.Request) + PutBucketACLHandler(w http.ResponseWriter, r *http.Request) + GetBucketACLHandler(w 
http.ResponseWriter, r *http.Request) // Object diff --git a/s3/api/requests/parsers.go b/s3/api/requests/parsers.go index 4bc95c271..37c6f89c4 100644 --- a/s3/api/requests/parsers.go +++ b/s3/api/requests/parsers.go @@ -94,13 +94,11 @@ func parseLocationField(vars map[string]string, query url.Values, headers http.H ) switch loca { case "querystring": - has = query.Has(name) - val = query.Get(name) + val, has = query.Get(name), query.Has(name) case "uri": val, has = vars[name] case "header": - _, has = headers[name] - val = headers.Get(name) + val, has = headers.Get(name), len(headers.Values(name)) > 0 case "headers": vals, has = getHeaderValues(headers, name) isVals = true @@ -115,6 +113,9 @@ func parseLocationField(vars map[string]string, query url.Values, headers http.H err = ErrMissingRequiredParam{name} return } + if !has { + return + } if isVals { err = parseValues(vals, fv) } else { diff --git a/s3/api/requests/parsers_bucket.go b/s3/api/requests/parsers_bucket.go index 479a37029..a4d441038 100644 --- a/s3/api/requests/parsers_bucket.go +++ b/s3/api/requests/parsers_bucket.go @@ -100,7 +100,7 @@ func ParsePutBucketAclRequest(r *http.Request) (args *object.PutBucketACLArgs, e if err != nil { return } - args.Bucket, err = ValidateBucketACL(input.ACL) + args.ACL, err = ValidateBucketACL(input.ACL) return } diff --git a/s3/api/requests/validates.go b/s3/api/requests/validates.go index 839ba7415..467c4b004 100644 --- a/s3/api/requests/validates.go +++ b/s3/api/requests/validates.go @@ -49,7 +49,6 @@ func ValidateBucketACL(acl *string) (val string, err error) { } if !consts.SupportedBucketACLs[val] { err = ErrACLUnsupported - return } return } diff --git a/s3/api/routers/routers.go b/s3/api/routers/routers.go index 1bfab0270..4ef8187c8 100644 --- a/s3/api/routers/routers.go +++ b/s3/api/routers/routers.go @@ -61,7 +61,7 @@ func (routers *Routers) Register() http.Handler { bucket.Methods(http.MethodGet).Path("/{Key:.+}").HandlerFunc(hs.GetObjectHandler) // 
GetBucketACL - bucket.Methods(http.MethodGet).HandlerFunc(hs.GetBucketAclHandler).Queries("acl", "") + bucket.Methods(http.MethodGet).HandlerFunc(hs.GetBucketACLHandler).Queries("acl", "") // ListObjectsV2 bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsV2Handler).Queries("list-type", "2") @@ -70,7 +70,7 @@ func (routers *Routers) Register() http.Handler { bucket.Methods(http.MethodGet).HandlerFunc(hs.ListObjectsHandler) // PutBucketACL - bucket.Methods(http.MethodPut).HandlerFunc(hs.PutBucketAclHandler).Queries("acl", "") + bucket.Methods(http.MethodPut).HandlerFunc(hs.PutBucketACLHandler).Queries("acl", "") // CreateBucket bucket.Methods(http.MethodPut).HandlerFunc(hs.CreateBucketHandler) From 962a5ad9ddd6a3706d07103b2f2eebb4e8cd83cd Mon Sep 17 00:00:00 2001 From: Steve Date: Tue, 19 Sep 2023 20:29:37 +0800 Subject: [PATCH 113/139] fix: get object unlock --- s3/api/services/object/service_object.go | 27 +++++++++++++----------- 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/s3/api/services/object/service_object.go b/s3/api/services/object/service_object.go index 825711bc6..265f7fb65 100644 --- a/s3/api/services/object/service_object.go +++ b/s3/api/services/object/service_object.go @@ -278,6 +278,11 @@ func (s *service) GetObject(ctx context.Context, args *GetObjectArgs) (object *O ctx, cancel := s.opctx(ctx) defer cancel() + // Unlock-later is a flag mark if the bucket or object will be unlocked later + // if the flag is true, the bucket and object should not be unlocked as soon as leave the function call + // they will be automatically unlocked after completely written the object body or write object body timeout + unlockLater := false + // bucket key buckey := s.getBucketKey(args.Bucket) @@ -287,8 +292,7 @@ func (s *service) GetObject(ctx context.Context, args *GetObjectArgs) (object *O return } defer func() { - // RUnlock bucket just if getting failed - if err != nil { + if !unlockLater { s.lock.RUnlock(buckey) } }() @@ -319,8 +323,7 
@@ func (s *service) GetObject(ctx context.Context, args *GetObjectArgs) (object *O return } defer func() { - // RUnlock object just if getting failed - if err != nil { + if !unlockLater { s.lock.RUnlock(objkey) } }() @@ -346,18 +349,18 @@ func (s *service) GetObject(ctx context.Context, args *GetObjectArgs) (object *O return } + // Set unlock-later flag to true to enable the bucket and object + // will not be unlocked before completely written the response body + unlockLater = true + // Wrap the body with timeout and unlock hooks, // this will enable the bucket and object keep rlocked until // read timout or read closed. Normally, these locks will // be released as soon as leave from the call - body = WrapCleanReadCloser( - body, - s.closeBodyTimeout, - func() { - s.lock.RUnlock(objkey) // Note: Release object first - s.lock.RUnlock(buckey) - }, - ) + body = WrapCleanReadCloser(body, s.closeBodyTimeout, func() { + s.lock.RUnlock(objkey) // Note: Release object first + s.lock.RUnlock(buckey) + }) return } From cd8ef905d14f5d6ec7bdf78f1790bb2b81915177 Mon Sep 17 00:00:00 2001 From: Steve Date: Wed, 20 Sep 2023 00:07:13 +0800 Subject: [PATCH 114/139] fix: object acl writer --- s3/api/responses/writers_bucket.go | 3 ++- s3/api/responses/writers_object.go | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/s3/api/responses/writers_bucket.go b/s3/api/responses/writers_bucket.go index ef7548475..6024f29ac 100644 --- a/s3/api/responses/writers_bucket.go +++ b/s3/api/responses/writers_bucket.go @@ -1,6 +1,7 @@ package responses import ( + "fmt" "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/api/services/object" "github.com/bittorrent/go-btfs/s3/consts" @@ -72,7 +73,7 @@ func WriteGetBucketACLResponse(w http.ResponseWriter, r *http.Request, acl *obje case s3.BucketCannedACLPublicReadWrite: grants = append(grants, s3AllUsersReadGrant, s3AllUsersWriteGrant) default: - panic("unknown acl") + panic(fmt.Sprintf("unknwon acl <%s>", 
acl.ACL)) } output.SetGrants(grants) w.Header().Add(consts.AmzACL, acl.ACL) diff --git a/s3/api/responses/writers_object.go b/s3/api/responses/writers_object.go index 6158a92cd..a5ac45c98 100644 --- a/s3/api/responses/writers_object.go +++ b/s3/api/responses/writers_object.go @@ -2,6 +2,7 @@ package responses import ( "encoding/base64" + "fmt" "github.com/aws/aws-sdk-go/service/s3" "github.com/bittorrent/go-btfs/s3/api/services/object" "github.com/bittorrent/go-btfs/s3/consts" @@ -166,14 +167,14 @@ func WriteGetObjectACLResponse(w http.ResponseWriter, r *http.Request, acl *obje output.SetOwner(newS3Owner(acl.Owner)) grants := make([]*s3.Grant, 0) grants = append(grants, newS3FullControlGrant(acl.Owner)) - switch acl.Owner { + switch acl.ACL { case s3.BucketCannedACLPrivate: case s3.BucketCannedACLPublicRead: grants = append(grants, s3AllUsersReadGrant) case s3.BucketCannedACLPublicReadWrite: grants = append(grants, s3AllUsersReadGrant, s3AllUsersWriteGrant) default: - panic("unknown acl") + panic(fmt.Sprintf("unknwo acl <%s>", acl.ACL)) } output.SetGrants(grants) WriteSuccessResponse(w, output, "AccessControlPolicy") From 5a6bdc5b043eacd86a4b52d1a2e4af440e86b1b9 Mon Sep 17 00:00:00 2001 From: Steve Date: Wed, 20 Sep 2023 00:26:30 +0800 Subject: [PATCH 115/139] fix: delete objects error --- s3/api/services/object/service_object.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/s3/api/services/object/service_object.go b/s3/api/services/object/service_object.go index 265f7fb65..283642f58 100644 --- a/s3/api/services/object/service_object.go +++ b/s3/api/services/object/service_object.go @@ -498,7 +498,7 @@ func (s *service) DeleteObjects(ctx context.Context, args *DeleteObjectsArgs) (d return } if object == nil { - err = ErrObjectNotFound + er = ErrObjectNotFound return } From 39b0fddd8fba5dc52a4fa3ca6704e26929ee36c0 Mon Sep 17 00:00:00 2001 From: Steve Date: Wed, 20 Sep 2023 01:14:38 +0800 Subject: [PATCH 116/139] fix: Sign handler name --- 
s3/api/handlers/handlers_middlewares.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/s3/api/handlers/handlers_middlewares.go b/s3/api/handlers/handlers_middlewares.go index 340248524..e5a01d020 100644 --- a/s3/api/handlers/handlers_middlewares.go +++ b/s3/api/handlers/handlers_middlewares.go @@ -68,7 +68,7 @@ func (h *Handlers) Sign(handler http.Handler) http.Handler { var err *responses.Error defer func() { if err != nil { - contexts.SetHandleInf(r, h.name(), err, nil) + contexts.SetHandleInf(r, "Sign", err, nil) } }() From 9fc88cd063d3f02332bc8b59ff9ce64b68ce915e Mon Sep 17 00:00:00 2001 From: Steve Date: Wed, 20 Sep 2023 16:52:46 +0800 Subject: [PATCH 117/139] fix: object name escape --- s3/api/requests/validates.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/s3/api/requests/validates.go b/s3/api/requests/validates.go index 467c4b004..892916e3b 100644 --- a/s3/api/requests/validates.go +++ b/s3/api/requests/validates.go @@ -185,7 +185,7 @@ func ValidateCopySource(copySource *string) (val1, val2 string, err error) { if copySource == nil { return } - src, err := url.QueryUnescape(*copySource) + src, err := url.PathUnescape(*copySource) if err != nil { src = *copySource err = nil From 40735b22fa9853b123bfefc8103a5334ea86bc8d Mon Sep 17 00:00:00 2001 From: Steve Date: Wed, 20 Sep 2023 17:14:09 +0800 Subject: [PATCH 118/139] fix: copy source validate --- s3/api/requests/validates.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/s3/api/requests/validates.go b/s3/api/requests/validates.go index 892916e3b..ea7ae64d4 100644 --- a/s3/api/requests/validates.go +++ b/s3/api/requests/validates.go @@ -187,10 +187,10 @@ func ValidateCopySource(copySource *string) (val1, val2 string, err error) { } src, err := url.PathUnescape(*copySource) if err != nil { - src = *copySource - err = nil + err = ErrCopySrcInvalid + return } - src = strings.TrimPrefix(*copySource, consts.SlashSeparator) + src = 
strings.TrimPrefix(src, consts.SlashSeparator) idx := strings.Index(src, consts.SlashSeparator) if idx < 0 { err = ErrCopySrcInvalid From 7c6bb5bc4e8d1ae4af4db99cf8edf4aa911ca9d6 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Wed, 20 Sep 2023 23:08:44 +0800 Subject: [PATCH 119/139] chore: fix --- core/commands/cheque/cheque.go | 11 +- core/commands/cheque/fix_cheque_cashout.go | 60 +++++++++++ settlement/swap/vault/cashout.go | 117 +++++++++++++++++++++ 3 files changed, 183 insertions(+), 5 deletions(-) create mode 100644 core/commands/cheque/fix_cheque_cashout.go diff --git a/core/commands/cheque/cheque.go b/core/commands/cheque/cheque.go index b9bf4783c..2d719749b 100644 --- a/core/commands/cheque/cheque.go +++ b/core/commands/cheque/cheque.go @@ -65,11 +65,12 @@ var ChequeCmd = &cmds.Command{ Vault services include issue cheque to peer, receive cheque and store operations.`, }, Subcommands: map[string]*cmds.Command{ - "cash": CashChequeCmd, - "cashstatus": ChequeCashStatusCmd, - "cashlist": ChequeCashListCmd, - "price": StorePriceCmd, - "price-all": StorePriceAllCmd, + "cash": CashChequeCmd, + "cashstatus": ChequeCashStatusCmd, + "cashlist": ChequeCashListCmd, + "price": StorePriceCmd, + "price-all": StorePriceAllCmd, + "fix_cheque_cashout": FixChequeCashOutCmd, "send": SendChequeCmd, "sendlist": ListSendChequesCmd, diff --git a/core/commands/cheque/fix_cheque_cashout.go b/core/commands/cheque/fix_cheque_cashout.go new file mode 100644 index 000000000..7dba765a9 --- /dev/null +++ b/core/commands/cheque/fix_cheque_cashout.go @@ -0,0 +1,60 @@ +package cheque + +import ( + "fmt" + cmds "github.com/bittorrent/go-btfs-cmds" + "github.com/bittorrent/go-btfs/chain" + "github.com/bittorrent/go-btfs/chain/tokencfg" + "github.com/bittorrent/go-btfs/utils" + "golang.org/x/net/context" + "io" +) + +var FixChequeCashOutCmd = &cmds.Command{ + Helptext: cmds.HelpText{ + Tagline: "List cheque(s) received from peers.", + }, + Run: func(req *cmds.Request, res 
cmds.ResponseEmitter, env cmds.Environment) error { + fmt.Println("FixChequeCashOutCmd ... ") + + err := utils.CheckSimpleMode(env) + if err != nil { + return err + } + + for _, tokenAddr := range tokencfg.MpTokenAddr { + fmt.Println("FixChequeCashOutCmd ... 2") + cheques, err := chain.SettleObject.SwapService.LastReceivedCheques(tokenAddr) + fmt.Println("FixChequeCashOutCmd ... 3", cheques) + if err != nil { + return err + } + for _, v := range cheques { + err := chain.SettleObject.CashoutService.AdjustCashCheque( + context.Background(), v.Vault, v.Beneficiary, tokenAddr) + if err != nil { + return err + } + } + } + + return cmds.EmitOnce(res, nil) + }, + Type: ListChequeRet{}, + Encoders: cmds.EncoderMap{ + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *ListChequeRet) error { + fmt.Fprintf(w, "\t%-55s\t%-46s\t%-46s\t%-46s\tamount: \n", "peerID:", "vault:", "beneficiary:", "cashout_amount:") + for iter := 0; iter < out.Len; iter++ { + fmt.Fprintf(w, "\t%-55s\t%-46s\t%-46s\t%d\t%d \n", + out.Cheques[iter].PeerID, + out.Cheques[iter].Beneficiary, + out.Cheques[iter].Vault, + out.Cheques[iter].Payout.Uint64(), + out.Cheques[iter].CashedAmount.Uint64(), + ) + } + + return nil + }), + }, +} diff --git a/settlement/swap/vault/cashout.go b/settlement/swap/vault/cashout.go index c6ee34bb5..12a27923a 100644 --- a/settlement/swap/vault/cashout.go +++ b/settlement/swap/vault/cashout.go @@ -28,6 +28,7 @@ type CashoutService interface { CashCheque(ctx context.Context, vault, recipient common.Address, token common.Address) (common.Hash, error) // CashoutStatus gets the status of the latest cashout transaction for the vault CashoutStatus(ctx context.Context, vaultAddress common.Address, token common.Address) (*CashoutStatus, error) + AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address) error HasCashoutAction(ctx context.Context, peer common.Address, token common.Address) (bool, error) CashoutResults() 
([]CashOutResult, error) } @@ -237,6 +238,10 @@ func (s *cashoutService) storeCashResult(ctx context.Context, vault common.Addre Status: "fail", } + fmt.Println("test exit.") + time.Sleep(time.Second * 3) + return nil + _, err := s.transactionService.WaitForReceipt(ctx, txHash) if err != nil { log.Infof("storeCashResult err:%+v", err) @@ -304,6 +309,118 @@ func (s *cashoutService) storeCashResult(ctx context.Context, vault common.Addre return nil } +// AdjustCashCheque . +func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address) error { + fmt.Println("AdjustCashCheque ... ") + // 1.totalReceivedCashed + totalReceivedCashed := big.NewInt(0) + if err := s.store.Get(tokencfg.AddToken(statestore.TotalReceivedCashedKey, token), &totalReceivedCashed); err != nil || err == storage.ErrNotFound { + fmt.Println("AdjustCashCheque ... 1 err = ", err) + return err + } + + // 2.alreadyPaidOut in renter contract + // blockchain calls below + contract := newVaultContractMuti(vaultAddress, s.transactionService) + alreadyPaidOutOnline, err := contract.PaidOut(ctx, recipient, token) + if err != nil { + fmt.Println("AdjustCashCheque ... 2 err = ", err) + return err + } + + // 3.compare it to fix. + diff := big.NewInt(0).Sub(alreadyPaidOutOnline, totalReceivedCashed) + fmt.Println("AdjustCashCheque: ", alreadyPaidOutOnline.String(), totalReceivedCashed.String(), diff.String()) + if diff.Cmp(big.NewInt(0)) > 0 { + fmt.Println("AdjustCashCheque: diff > 0") + //return nil + err := s.fixStoreCashResult(vaultAddress, diff, token) + if err != nil { + return err + } + } + return nil +} + +func (s *cashoutService) fixStoreCashResult(vault common.Address, shouldPaidOut *big.Int, token common.Address) error { + txHash := common.Hash{} //fix: 0x0000... 
+ cashResult := CashOutResult{ + TxHash: txHash, + Vault: vault, + Token: token, + Amount: shouldPaidOut, + CashTime: time.Now().Unix(), + Status: "success", + } + + //_, err := s.transactionService.WaitForReceipt(ctx, txHash) + //if err != nil { + // log.Infof("storeCashResult err:%+v", err) + //} else { + // cs, err := s.CashoutStatus(ctx, vault, token) + // if err != nil { + // log.Infof("CashOutStats:get cashout status err:%+v", err) + // if cs.UncashedAmount != nil { + // cashResult.Amount = cs.UncashedAmount + // } + // } else { + // // update totalReceivedCashed + // totalPaidOut := big.NewInt(0) + // if cs.Last != nil && cs.Last.Result != nil && cs.Last.Result.TotalPayout != nil { + // totalPaidOut = cs.Last.Result.TotalPayout + // } + // if cs.Last != nil && !cs.Last.Reverted { + // cashResult.Status = "success" + // } + + cashResult.Amount = shouldPaidOut + totalReceivedCashed := big.NewInt(0) + if err := s.store.Get(tokencfg.AddToken(statestore.TotalReceivedCashedKey, token), &totalReceivedCashed); err == nil || err == storage.ErrNotFound { + totalReceivedCashed = totalReceivedCashed.Add(totalReceivedCashed, shouldPaidOut) + err := s.store.Put(tokencfg.AddToken(statestore.TotalReceivedCashedKey, token), totalReceivedCashed) + if err != nil { + log.Infof("CashOutStats:put totalReceivedCashdKey err:%+v", err) + } + } + + totalDailyReceivedCashed := big.NewInt(0) + if err := s.store.Get(statestore.GetTodayTotalDailyReceivedCashedKey(token), &totalDailyReceivedCashed); err == nil || err == storage.ErrNotFound { + totalDailyReceivedCashed = totalDailyReceivedCashed.Add(totalDailyReceivedCashed, shouldPaidOut) + err := s.store.Put(statestore.GetTodayTotalDailyReceivedCashedKey(token), totalDailyReceivedCashed) + if err != nil { + log.Infof("CashOutStats:put totalReceivedDailyCashdKey err:%+v", err) + } + } + + // update TotalReceivedCountCashed + uncashed := 0 + err := s.store.Get(statestore.PeerReceivedUncashRecordsCountKey(vault, token), &uncashed) + if err 
!= nil { + log.Infof("CashOutStats:put totalReceivedCountCashed err:%+v", err) + } else { + cashedCount := 0 + err := s.store.Get(tokencfg.AddToken(statestore.TotalReceivedCashedCountKey, token), &cashedCount) + if err == nil || err == storage.ErrNotFound { + err := s.store.Put(tokencfg.AddToken(statestore.TotalReceivedCashedCountKey, token), cashedCount+uncashed) + if err != nil { + log.Infof("CashOutStats:put totalReceivedCashedConuntKey err:%+v", err) + } else { + err := s.store.Put(statestore.PeerReceivedUncashRecordsCountKey(vault, token), 0) + if err != nil { + log.Infof("CashOutStats:put totalReceivedCashedConuntKey err:%+v", err) + } + } + } + } + //} + //} + err = s.store.Put(statestore.CashoutResultKey(vault), &cashResult) + if err != nil { + log.Infof("CashOutStats:put cashoutResultKey err:%+v", err) + } + return nil +} + // CashoutStatus gets the status of the latest cashout transaction for the vault func (s *cashoutService) CashoutStatus(ctx context.Context, vaultAddress common.Address, token common.Address) (*CashoutStatus, error) { cheque, err := s.chequeStore.LastReceivedCheque(vaultAddress, token) From 7034f17d326df834fe3dd162777991949445e351 Mon Sep 17 00:00:00 2001 From: Steve Date: Thu, 21 Sep 2023 00:02:44 +0800 Subject: [PATCH 120/139] opt: s3 log --- s3/api/handlers/handlers_middlewares.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/s3/api/handlers/handlers_middlewares.go b/s3/api/handlers/handlers_middlewares.go index e5a01d020..dba27f7ef 100644 --- a/s3/api/handlers/handlers_middlewares.go +++ b/s3/api/handlers/handlers_middlewares.go @@ -42,7 +42,7 @@ func (h *Handlers) Log(handler http.Handler) http.Handler { hname, herr, args := contexts.GetHandleInf(r) end := time.Now() ela := end.Sub(start) - fmt.Printf("s3-api: | %s | <%-4s> | %s | %s | %v | %+v | %s \n", end.Format(time.RFC3339), r.Method, r.URL, hname, herr, args, ela) + fmt.Printf("s3-api: | %s | <%-4s> | %s | %s | %+v | %v | %s \n", end.Format(time.RFC3339), 
r.Method, r.URL, hname, args, herr, ela) }) } From 75a8e21b6ad502c6a28e3a80dc941f4a1d35cea5 Mon Sep 17 00:00:00 2001 From: Steve Date: Thu, 21 Sep 2023 00:09:30 +0800 Subject: [PATCH 121/139] opt: s3 api log --- s3/api/contexts/contexts_handle_inf.go | 8 +++--- s3/api/handlers/handlers_bucket.go | 24 ++++++++--------- s3/api/handlers/handlers_middlewares.go | 2 +- s3/api/handlers/handlers_multipart.go | 16 +++++------ s3/api/handlers/handlers_object.go | 36 ++++++++++++------------- 5 files changed, 43 insertions(+), 43 deletions(-) diff --git a/s3/api/contexts/contexts_handle_inf.go b/s3/api/contexts/contexts_handle_inf.go index 30bd65465..5204fee19 100644 --- a/s3/api/contexts/contexts_handle_inf.go +++ b/s3/api/contexts/contexts_handle_inf.go @@ -6,16 +6,16 @@ import ( type handleInfo struct { name string - err error args interface{} + err error } -func SetHandleInf(r *http.Request, name string, err error, args interface{}) { - set(r, keyOfHandleInf, handleInfo{name, err, args}) +func SetHandleInf(r *http.Request, name string, args interface{}, err error) { + set(r, keyOfHandleInf, handleInfo{name, args, err}) return } -func GetHandleInf(r *http.Request) (name string, err error, args interface{}) { +func GetHandleInf(r *http.Request) (name string, args interface{}, err error) { v := get(r, keyOfHandleInf) inf, _ := v.(handleInfo) name = inf.name diff --git a/s3/api/handlers/handlers_bucket.go b/s3/api/handlers/handlers_bucket.go index cb44b632e..4389d0e92 100644 --- a/s3/api/handlers/handlers_bucket.go +++ b/s3/api/handlers/handlers_bucket.go @@ -9,10 +9,10 @@ import ( ) func (h *Handlers) CreateBucketHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.CreateBucketArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseCreateBucketRequest(r) @@ -32,10 +32,10 @@ func (h *Handlers) CreateBucketHandler(w http.ResponseWriter, r 
*http.Request) { } func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.GetBucketArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseHeadBucketRequest(r) @@ -55,10 +55,10 @@ func (h *Handlers) HeadBucketHandler(w http.ResponseWriter, r *http.Request) { } func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.DeleteBucketArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseDeleteBucketRequest(r) @@ -78,10 +78,10 @@ func (h *Handlers) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) { } func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.ListBucketsArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseListBucketsRequest(r) @@ -101,10 +101,10 @@ func (h *Handlers) ListBucketsHandler(w http.ResponseWriter, r *http.Request) { } func (h *Handlers) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.PutBucketACLArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParsePutBucketAclRequest(r) @@ -124,10 +124,10 @@ func (h *Handlers) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) { } func (h *Handlers) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.GetBucketACLArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseGetBucketACLRequest(r) diff --git 
a/s3/api/handlers/handlers_middlewares.go b/s3/api/handlers/handlers_middlewares.go index dba27f7ef..a62b68c15 100644 --- a/s3/api/handlers/handlers_middlewares.go +++ b/s3/api/handlers/handlers_middlewares.go @@ -39,7 +39,7 @@ func (h *Handlers) Log(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { start := time.Now() handler.ServeHTTP(w, r) - hname, herr, args := contexts.GetHandleInf(r) + hname, args, herr := contexts.GetHandleInf(r) end := time.Now() ela := end.Sub(start) fmt.Printf("s3-api: | %s | <%-4s> | %s | %s | %+v | %v | %s \n", end.Format(time.RFC3339), r.Method, r.URL, hname, args, herr, ela) diff --git a/s3/api/handlers/handlers_multipart.go b/s3/api/handlers/handlers_multipart.go index 7dbb1b44f..c88c5f802 100644 --- a/s3/api/handlers/handlers_multipart.go +++ b/s3/api/handlers/handlers_multipart.go @@ -9,10 +9,10 @@ import ( ) func (h *Handlers) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.CreateMultipartUploadArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseCreateMultipartUploadRequest(r) @@ -32,10 +32,10 @@ func (h *Handlers) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.R } func (h *Handlers) UploadPartHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.UploadPartArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseUploadPartRequest(r) @@ -55,10 +55,10 @@ func (h *Handlers) UploadPartHandler(w http.ResponseWriter, r *http.Request) { } func (h *Handlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.AbortMultipartUploadArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + 
contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseAbortMultipartUploadRequest(r) @@ -78,10 +78,10 @@ func (h *Handlers) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Re } func (h *Handlers) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.CompleteMultipartUploadArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseCompleteMultipartUploadRequest(r) diff --git a/s3/api/handlers/handlers_object.go b/s3/api/handlers/handlers_object.go index f635388be..eb4abd0cd 100644 --- a/s3/api/handlers/handlers_object.go +++ b/s3/api/handlers/handlers_object.go @@ -10,10 +10,10 @@ import ( // PutObjectHandler . func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.PutObjectArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParsePutObjectRequest(r) @@ -34,10 +34,10 @@ func (h *Handlers) PutObjectHandler(w http.ResponseWriter, r *http.Request) { // CopyObjectHandler . func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.CopyObjectArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseCopyObjectRequest(r) @@ -58,10 +58,10 @@ func (h *Handlers) CopyObjectHandler(w http.ResponseWriter, r *http.Request) { // HeadObjectHandler . 
func (h *Handlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.GetObjectArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseHeadObjectRequest(r) @@ -82,10 +82,10 @@ func (h *Handlers) HeadObjectHandler(w http.ResponseWriter, r *http.Request) { // GetObjectHandler . func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.GetObjectArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseGetObjectRequest(r) @@ -106,10 +106,10 @@ func (h *Handlers) GetObjectHandler(w http.ResponseWriter, r *http.Request) { // DeleteObjectHandler . func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.DeleteObjectArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseDeleteObjectRequest(r) @@ -129,10 +129,10 @@ func (h *Handlers) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) { // DeleteObjectsHandler . func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.DeleteObjectsArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseDeleteObjectsRequest(r) @@ -153,10 +153,10 @@ func (h *Handlers) DeleteObjectsHandler(w http.ResponseWriter, r *http.Request) // ListObjectsHandler . 
func (h *Handlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.ListObjectsArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseListObjectsRequest(r) @@ -177,10 +177,10 @@ func (h *Handlers) ListObjectsHandler(w http.ResponseWriter, r *http.Request) { // ListObjectsV2Handler . func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.ListObjectsV2Args + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseListObjectsV2Request(r) @@ -201,10 +201,10 @@ func (h *Handlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) // GetObjectACLHandler - GET Object ACL func (h *Handlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) { - var err error var args *object.GetObjectACLArgs + var err error defer func() { - contexts.SetHandleInf(r, h.name(), err, args) + contexts.SetHandleInf(r, h.name(), args, err) }() args, err = requests.ParseGetObjectACLRequest(r) From 95d2aaca1c3e3d54f399d3d25747b7bd3f731c77 Mon Sep 17 00:00:00 2001 From: Steve Date: Thu, 21 Sep 2023 15:29:15 +0800 Subject: [PATCH 122/139] fix: allow Cache-Control header in PutObject and CopyObject Action --- s3/api/requests/parsers_object.go | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/s3/api/requests/parsers_object.go b/s3/api/requests/parsers_object.go index 67d8fe369..726d5b180 100644 --- a/s3/api/requests/parsers_object.go +++ b/s3/api/requests/parsers_object.go @@ -11,9 +11,12 @@ import ( ) var putObjectSupports = fields{ - "Body": true, - "Bucket": true, - "Key": true, + "Body": true, + "Bucket": true, + "Key": true, + // The browser some time automatically add this CacheControl header + // just allow, do not handle + 
"CacheControl": true, "ContentLength": true, "ContentEncoding": true, "ContentType": true, @@ -71,9 +74,12 @@ func ParsePutObjectRequest(r *http.Request) (args *object.PutObjectArgs, err err } var copyObjectSupports = fields{ - "Bucket": true, - "Key": true, - "CopySource": true, + "Bucket": true, + "Key": true, + "CopySource": true, + // The browser some time automatically add this CacheControl header + // just allow, do not handle + "CacheControl": true, "ContentEncoding": true, "ContentType": true, "Expires": true, From 81db736ae5ee695cdf862b5a2f71932e4bf1abcd Mon Sep 17 00:00:00 2001 From: Shawn-Huang-Tron <107823650+Shawn-Huang-Tron@users.noreply.github.com> Date: Thu, 21 Sep 2023 15:46:14 +0800 Subject: [PATCH 123/139] feat: log details (#349) * feat: add backup and recovery command * feat: beautify the log * feat: init add recovery option * fix: format error * feat: log details --- core/commands/backup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/commands/backup.go b/core/commands/backup.go index 989c569e2..1de94bf16 100644 --- a/core/commands/backup.go +++ b/core/commands/backup.go @@ -119,7 +119,7 @@ var RecoveryCmd = &cmds.Command{ if err := UnTar(backupPath, dstPath); err != nil { err = UnZip(backupPath, dstPath) if err != nil { - return errors.New("your file format is not tar.gz or zip, please check again") + return errors.New("your file is not exists or your file format is not tar.gz or zip, please check again") } } fmt.Println("Recovery successful!") From 16db48aa00fa9b8dfaf136f3f40900ca16da8a5a Mon Sep 17 00:00:00 2001 From: Steve Date: Thu, 21 Sep 2023 16:17:20 +0800 Subject: [PATCH 124/139] opt: add access-key command taglines --- core/commands/accesskey.go | 30 ++++++++---------------------- 1 file changed, 8 insertions(+), 22 deletions(-) diff --git a/core/commands/accesskey.go b/core/commands/accesskey.go index 013e0ca63..4b1683ad1 100644 --- a/core/commands/accesskey.go +++ b/core/commands/accesskey.go @@ -9,9 +9,7 
@@ import ( var AccessKeyCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "", - ShortDescription: ` -`, + Tagline: "Manage S3-Compatible-API access-keys.", }, Subcommands: map[string]*cmds.Command{ "generate": accessKeyGenerateCmd, @@ -38,9 +36,7 @@ func checkDaemon(env cmds.Environment) (err error) { var accessKeyGenerateCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "", - ShortDescription: ` -`, + Tagline: "Generate a new access-key record.", }, Arguments: []cmds.Argument{}, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) { @@ -59,9 +55,7 @@ var accessKeyGenerateCmd = &cmds.Command{ var accessKeyEnableCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "", - ShortDescription: ` -`, + Tagline: "Set status of the specified access-key to enable.", }, Arguments: []cmds.Argument{ cmds.StringArg("key", true, true, "The key").EnableStdin(), @@ -79,9 +73,7 @@ var accessKeyEnableCmd = &cmds.Command{ var accessKeyDisableCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "", - ShortDescription: ` -`, + Tagline: "Set status of the specified access-key to enable.", }, Arguments: []cmds.Argument{ cmds.StringArg("key", true, true, "The key").EnableStdin(), @@ -99,7 +91,7 @@ var accessKeyDisableCmd = &cmds.Command{ var accessKeyResetCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "", + Tagline: "Rest secret of the specified access-key.", ShortDescription: ` `, }, @@ -119,9 +111,7 @@ var accessKeyResetCmd = &cmds.Command{ var accessKeyDeleteCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "", - ShortDescription: ` -`, + Tagline: "Delete the specified access-key", }, Arguments: []cmds.Argument{ cmds.StringArg("key", true, true, "The key").EnableStdin(), @@ -139,9 +129,7 @@ var accessKeyDeleteCmd = &cmds.Command{ var accessKeyGetCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "", - ShortDescription: ` -`, + Tagline: "Get an access-key detail info.", }, Arguments: []cmds.Argument{ 
cmds.StringArg("key", true, true, "The key").EnableStdin(), @@ -163,9 +151,7 @@ var accessKeyGetCmd = &cmds.Command{ var accessKeyListCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "", - ShortDescription: ` -`, + Tagline: "List all access-keys.", }, Arguments: []cmds.Argument{}, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) { From f8b2589a0a99fccb816cff32ad3e47def5a804b2 Mon Sep 17 00:00:00 2001 From: Steve Date: Thu, 21 Sep 2023 16:39:34 +0800 Subject: [PATCH 125/139] opt: add accesskey command description --- core/commands/accesskey.go | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/core/commands/accesskey.go b/core/commands/accesskey.go index 4b1683ad1..3e82dacf9 100644 --- a/core/commands/accesskey.go +++ b/core/commands/accesskey.go @@ -9,7 +9,8 @@ import ( var AccessKeyCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "Manage S3-Compatible-API access-keys.", + Tagline: "Manage S3-Compatible-API access-keys.", + ShortDescription: "Commands for generate, update, get and list access-keys stored in this node.", }, Subcommands: map[string]*cmds.Command{ "generate": accessKeyGenerateCmd, @@ -36,7 +37,8 @@ func checkDaemon(env cmds.Environment) (err error) { var accessKeyGenerateCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "Generate a new access-key record.", + Tagline: "Generate a new access-key record.", + ShortDescription: "Outputs the new created access-key record.", }, Arguments: []cmds.Argument{}, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) { @@ -55,7 +57,8 @@ var accessKeyGenerateCmd = &cmds.Command{ var accessKeyEnableCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "Set status of the specified access-key to enable.", + Tagline: "Set status of the specified access-key to enable.", + ShortDescription: "Outputs empty if the access-key has been set to enable or it was already enabled.", }, 
Arguments: []cmds.Argument{ cmds.StringArg("key", true, true, "The key").EnableStdin(), @@ -73,7 +76,8 @@ var accessKeyEnableCmd = &cmds.Command{ var accessKeyDisableCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "Set status of the specified access-key to enable.", + Tagline: "Set status of the specified access-key to enable.", + ShortDescription: "Outputs empty if the access-key has been set to disable or it was already disabled.", }, Arguments: []cmds.Argument{ cmds.StringArg("key", true, true, "The key").EnableStdin(), @@ -91,9 +95,8 @@ var accessKeyDisableCmd = &cmds.Command{ var accessKeyResetCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "Rest secret of the specified access-key.", - ShortDescription: ` -`, + Tagline: "Rest secret of the specified access-key.", + ShortDescription: "Outputs the updated access-key record if it's secret has been reset.", }, Arguments: []cmds.Argument{ cmds.StringArg("key", true, true, "The key").EnableStdin(), @@ -111,7 +114,8 @@ var accessKeyResetCmd = &cmds.Command{ var accessKeyDeleteCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "Delete the specified access-key", + Tagline: "Delete the specified access-key", + ShortDescription: "Outputs empty if access-key record has been deleted.", }, Arguments: []cmds.Argument{ cmds.StringArg("key", true, true, "The key").EnableStdin(), @@ -129,7 +133,8 @@ var accessKeyDeleteCmd = &cmds.Command{ var accessKeyGetCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "Get an access-key detail info.", + Tagline: "Get an access-key detail info.", + ShortDescription: "Outputs access-key record for the specified key.", }, Arguments: []cmds.Argument{ cmds.StringArg("key", true, true, "The key").EnableStdin(), @@ -151,7 +156,8 @@ var accessKeyGetCmd = &cmds.Command{ var accessKeyListCmd = &cmds.Command{ Helptext: cmds.HelpText{ - Tagline: "List all access-keys.", + Tagline: "List all access-keys.", + ShortDescription: "Outputs all non-deleted access-keys stored in 
current node.", }, Arguments: []cmds.Argument{}, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) (err error) { From ce2eacc6b0c8cb1dd5c412c1f36999cd225473e4 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Thu, 21 Sep 2023 20:55:06 +0800 Subject: [PATCH 126/139] chore: add out of FixChequeCashOutCmd --- core/commands/cheque/fix_cheque_cashout.go | 21 +++++-- settlement/swap/vault/cashout.go | 68 ++++++++-------------- 2 files changed, 41 insertions(+), 48 deletions(-) diff --git a/core/commands/cheque/fix_cheque_cashout.go b/core/commands/cheque/fix_cheque_cashout.go index 7dba765a9..cca2724c7 100644 --- a/core/commands/cheque/fix_cheque_cashout.go +++ b/core/commands/cheque/fix_cheque_cashout.go @@ -22,6 +22,9 @@ var FixChequeCashOutCmd = &cmds.Command{ return err } + listRet := ListChequeRet{} + listRet.Cheques = make([]cheque, 0, 0) + for _, tokenAddr := range tokencfg.MpTokenAddr { fmt.Println("FixChequeCashOutCmd ... 2") cheques, err := chain.SettleObject.SwapService.LastReceivedCheques(tokenAddr) @@ -29,21 +32,31 @@ var FixChequeCashOutCmd = &cmds.Command{ if err != nil { return err } - for _, v := range cheques { - err := chain.SettleObject.CashoutService.AdjustCashCheque( + + for k, v := range cheques { + _, err := chain.SettleObject.CashoutService.AdjustCashCheque( context.Background(), v.Vault, v.Beneficiary, tokenAddr) if err != nil { return err } + + var record cheque + record.PeerID = k + record.Token = v.Token.String() + record.Beneficiary = v.Beneficiary.String() + record.Vault = v.Vault.String() + record.Payout = v.CumulativePayout + listRet.Cheques = append(listRet.Cheques, record) } } + listRet.Len = len(listRet.Cheques) - return cmds.EmitOnce(res, nil) + return cmds.EmitOnce(res, &listRet) }, Type: ListChequeRet{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *ListChequeRet) error { - fmt.Fprintf(w, "\t%-55s\t%-46s\t%-46s\t%-46s\tamount: \n", 
"peerID:", "vault:", "beneficiary:", "cashout_amount:") + fmt.Fprintf(w, "fix: \n\t%-55s\t%-46s\t%-46s\t%-46s\tamount: \n", "peerID:", "vault:", "beneficiary:", "cashout_amount:") for iter := 0; iter < out.Len; iter++ { fmt.Fprintf(w, "\t%-55s\t%-46s\t%-46s\t%d\t%d \n", out.Cheques[iter].PeerID, diff --git a/settlement/swap/vault/cashout.go b/settlement/swap/vault/cashout.go index 12a27923a..084be65cc 100644 --- a/settlement/swap/vault/cashout.go +++ b/settlement/swap/vault/cashout.go @@ -28,7 +28,7 @@ type CashoutService interface { CashCheque(ctx context.Context, vault, recipient common.Address, token common.Address) (common.Hash, error) // CashoutStatus gets the status of the latest cashout transaction for the vault CashoutStatus(ctx context.Context, vaultAddress common.Address, token common.Address) (*CashoutStatus, error) - AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address) error + AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address) (cashResult *CashOutResult, err error) HasCashoutAction(ctx context.Context, peer common.Address, token common.Address) (bool, error) CashoutResults() ([]CashOutResult, error) } @@ -310,13 +310,14 @@ func (s *cashoutService) storeCashResult(ctx context.Context, vault common.Addre } // AdjustCashCheque . -func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address) error { +func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address) (cashResult *CashOutResult, err error) { fmt.Println("AdjustCashCheque ... 
") // 1.totalReceivedCashed totalReceivedCashed := big.NewInt(0) - if err := s.store.Get(tokencfg.AddToken(statestore.TotalReceivedCashedKey, token), &totalReceivedCashed); err != nil || err == storage.ErrNotFound { + err = s.store.Get(tokencfg.AddToken(statestore.TotalReceivedCashedKey, token), &totalReceivedCashed) + if err != nil && err != storage.ErrNotFound { fmt.Println("AdjustCashCheque ... 1 err = ", err) - return err + return nil, err } // 2.alreadyPaidOut in renter contract @@ -325,7 +326,7 @@ func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, rec alreadyPaidOutOnline, err := contract.PaidOut(ctx, recipient, token) if err != nil { fmt.Println("AdjustCashCheque ... 2 err = ", err) - return err + return nil, err } // 3.compare it to fix. @@ -334,17 +335,18 @@ func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, rec if diff.Cmp(big.NewInt(0)) > 0 { fmt.Println("AdjustCashCheque: diff > 0") //return nil - err := s.fixStoreCashResult(vaultAddress, diff, token) + cashResult, err = s.fixStoreCashResult(vaultAddress, diff, token) if err != nil { - return err + return nil, err } } - return nil + + return } -func (s *cashoutService) fixStoreCashResult(vault common.Address, shouldPaidOut *big.Int, token common.Address) error { - txHash := common.Hash{} //fix: 0x0000... - cashResult := CashOutResult{ +func (s *cashoutService) fixStoreCashResult(vault common.Address, shouldPaidOut *big.Int, token common.Address) (cashResult *CashOutResult, err error) { + txHash := common.Hash{} //fix txHash: 0x0000... 
+ cashResult = &CashOutResult{ TxHash: txHash, Vault: vault, Token: token, @@ -353,72 +355,50 @@ func (s *cashoutService) fixStoreCashResult(vault common.Address, shouldPaidOut Status: "success", } - //_, err := s.transactionService.WaitForReceipt(ctx, txHash) - //if err != nil { - // log.Infof("storeCashResult err:%+v", err) - //} else { - // cs, err := s.CashoutStatus(ctx, vault, token) - // if err != nil { - // log.Infof("CashOutStats:get cashout status err:%+v", err) - // if cs.UncashedAmount != nil { - // cashResult.Amount = cs.UncashedAmount - // } - // } else { - // // update totalReceivedCashed - // totalPaidOut := big.NewInt(0) - // if cs.Last != nil && cs.Last.Result != nil && cs.Last.Result.TotalPayout != nil { - // totalPaidOut = cs.Last.Result.TotalPayout - // } - // if cs.Last != nil && !cs.Last.Reverted { - // cashResult.Status = "success" - // } - - cashResult.Amount = shouldPaidOut totalReceivedCashed := big.NewInt(0) - if err := s.store.Get(tokencfg.AddToken(statestore.TotalReceivedCashedKey, token), &totalReceivedCashed); err == nil || err == storage.ErrNotFound { + if err = s.store.Get(tokencfg.AddToken(statestore.TotalReceivedCashedKey, token), &totalReceivedCashed); err == nil || err == storage.ErrNotFound { totalReceivedCashed = totalReceivedCashed.Add(totalReceivedCashed, shouldPaidOut) err := s.store.Put(tokencfg.AddToken(statestore.TotalReceivedCashedKey, token), totalReceivedCashed) if err != nil { - log.Infof("CashOutStats:put totalReceivedCashdKey err:%+v", err) + log.Infof("fixStoreCashResult:put totalReceivedCashdKey err:%+v", err) } } totalDailyReceivedCashed := big.NewInt(0) - if err := s.store.Get(statestore.GetTodayTotalDailyReceivedCashedKey(token), &totalDailyReceivedCashed); err == nil || err == storage.ErrNotFound { + if err = s.store.Get(statestore.GetTodayTotalDailyReceivedCashedKey(token), &totalDailyReceivedCashed); err == nil || err == storage.ErrNotFound { totalDailyReceivedCashed = 
totalDailyReceivedCashed.Add(totalDailyReceivedCashed, shouldPaidOut) err := s.store.Put(statestore.GetTodayTotalDailyReceivedCashedKey(token), totalDailyReceivedCashed) if err != nil { - log.Infof("CashOutStats:put totalReceivedDailyCashdKey err:%+v", err) + log.Infof("fixStoreCashResult:put totalReceivedDailyCashdKey err:%+v", err) } } // update TotalReceivedCountCashed uncashed := 0 - err := s.store.Get(statestore.PeerReceivedUncashRecordsCountKey(vault, token), &uncashed) + err = s.store.Get(statestore.PeerReceivedUncashRecordsCountKey(vault, token), &uncashed) if err != nil { - log.Infof("CashOutStats:put totalReceivedCountCashed err:%+v", err) + log.Infof("fixStoreCashResult:put totalReceivedCountCashed err:%+v", err) } else { cashedCount := 0 err := s.store.Get(tokencfg.AddToken(statestore.TotalReceivedCashedCountKey, token), &cashedCount) if err == nil || err == storage.ErrNotFound { err := s.store.Put(tokencfg.AddToken(statestore.TotalReceivedCashedCountKey, token), cashedCount+uncashed) if err != nil { - log.Infof("CashOutStats:put totalReceivedCashedConuntKey err:%+v", err) + log.Infof("fixStoreCashResult:put totalReceivedCashedConuntKey err:%+v", err) } else { err := s.store.Put(statestore.PeerReceivedUncashRecordsCountKey(vault, token), 0) if err != nil { - log.Infof("CashOutStats:put totalReceivedCashedConuntKey err:%+v", err) + log.Infof("fixStoreCashResult:put totalReceivedCashedConuntKey err:%+v", err) } } } } - //} - //} + err = s.store.Put(statestore.CashoutResultKey(vault), &cashResult) if err != nil { - log.Infof("CashOutStats:put cashoutResultKey err:%+v", err) + log.Infof("fixStoreCashResult:put cashoutResultKey err:%+v", err) } - return nil + return } // CashoutStatus gets the status of the latest cashout transaction for the vault From 6ff651d56c1cf2446697ef876188493e8ea20ee1 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Thu, 21 Sep 2023 21:56:39 +0800 Subject: [PATCH 127/139] chore: --- core/commands/cheque/cheque.go | 14 
++++++ core/commands/cheque/fix_cheque_cashout.go | 50 ++++++++++++++-------- settlement/swap/vault/cashout.go | 15 ++++--- 3 files changed, 53 insertions(+), 26 deletions(-) diff --git a/core/commands/cheque/cheque.go b/core/commands/cheque/cheque.go index 2d719749b..1b8bc8229 100644 --- a/core/commands/cheque/cheque.go +++ b/core/commands/cheque/cheque.go @@ -36,6 +36,20 @@ type ListChequeRet struct { Len int } +type fixCheque struct { + PeerID string + Token string + Beneficiary string + Vault string + CashedAmount *big.Int + FixCashedAmount *big.Int +} + +type ListFixChequeRet struct { + FixCheques []fixCheque + Len int +} + type ReceiveCheque struct { PeerID string Token common.Address diff --git a/core/commands/cheque/fix_cheque_cashout.go b/core/commands/cheque/fix_cheque_cashout.go index cca2724c7..48de1adbc 100644 --- a/core/commands/cheque/fix_cheque_cashout.go +++ b/core/commands/cheque/fix_cheque_cashout.go @@ -22,8 +22,8 @@ var FixChequeCashOutCmd = &cmds.Command{ return err } - listRet := ListChequeRet{} - listRet.Cheques = make([]cheque, 0, 0) + listRet := ListFixChequeRet{} + listRet.FixCheques = make([]fixCheque, 0) for _, tokenAddr := range tokencfg.MpTokenAddr { fmt.Println("FixChequeCashOutCmd ... 2") @@ -34,36 +34,48 @@ var FixChequeCashOutCmd = &cmds.Command{ } for k, v := range cheques { - _, err := chain.SettleObject.CashoutService.AdjustCashCheque( + fmt.Println("FixChequeCashOutCmd ... 4") + + cashOutAmount, newCashOutAmount, err := chain.SettleObject.CashoutService.AdjustCashCheque( context.Background(), v.Vault, v.Beneficiary, tokenAddr) if err != nil { return err } - var record cheque - record.PeerID = k - record.Token = v.Token.String() - record.Beneficiary = v.Beneficiary.String() - record.Vault = v.Vault.String() - record.Payout = v.CumulativePayout - listRet.Cheques = append(listRet.Cheques, record) + fmt.Println("FixChequeCashOutCmd ... 
5", cashOutAmount.String(), newCashOutAmount.String()) + + if newCashOutAmount != nil && newCashOutAmount.Uint64() > 0 { + var record fixCheque + record.PeerID = k + record.Token = v.Token.String() + record.Beneficiary = v.Beneficiary.String() + record.Vault = v.Vault.String() + record.CashedAmount = cashOutAmount + record.FixCashedAmount = newCashOutAmount + + listRet.FixCheques = append(listRet.FixCheques, record) + } + + fmt.Println("FixChequeCashOutCmd ... 6") } } - listRet.Len = len(listRet.Cheques) + listRet.Len = len(listRet.FixCheques) + + fmt.Println("listRet = ", listRet) return cmds.EmitOnce(res, &listRet) }, - Type: ListChequeRet{}, + Type: ListFixChequeRet{}, Encoders: cmds.EncoderMap{ - cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *ListChequeRet) error { - fmt.Fprintf(w, "fix: \n\t%-55s\t%-46s\t%-46s\t%-46s\tamount: \n", "peerID:", "vault:", "beneficiary:", "cashout_amount:") + cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *ListFixChequeRet) error { + fmt.Fprintf(w, "fix: \n\t%-55s\t%-46s\t%-46s\t%-46s\tfix_cash_amount: \n", "peerID:", "vault:", "beneficiary:", "cash_amount:") for iter := 0; iter < out.Len; iter++ { fmt.Fprintf(w, "\t%-55s\t%-46s\t%-46s\t%d\t%d \n", - out.Cheques[iter].PeerID, - out.Cheques[iter].Beneficiary, - out.Cheques[iter].Vault, - out.Cheques[iter].Payout.Uint64(), - out.Cheques[iter].CashedAmount.Uint64(), + out.FixCheques[iter].PeerID, + out.FixCheques[iter].Vault, + out.FixCheques[iter].Beneficiary, + out.FixCheques[iter].CashedAmount.Uint64(), + out.FixCheques[iter].FixCashedAmount.Uint64(), ) } diff --git a/settlement/swap/vault/cashout.go b/settlement/swap/vault/cashout.go index 084be65cc..8dccaa65a 100644 --- a/settlement/swap/vault/cashout.go +++ b/settlement/swap/vault/cashout.go @@ -28,7 +28,7 @@ type CashoutService interface { CashCheque(ctx context.Context, vault, recipient common.Address, token common.Address) (common.Hash, error) // CashoutStatus gets the 
status of the latest cashout transaction for the vault CashoutStatus(ctx context.Context, vaultAddress common.Address, token common.Address) (*CashoutStatus, error) - AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address) (cashResult *CashOutResult, err error) + AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address) (totalCashOutAmount, newCashOutAmount *big.Int, err error) HasCashoutAction(ctx context.Context, peer common.Address, token common.Address) (bool, error) CashoutResults() ([]CashOutResult, error) } @@ -310,14 +310,14 @@ func (s *cashoutService) storeCashResult(ctx context.Context, vault common.Addre } // AdjustCashCheque . -func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address) (cashResult *CashOutResult, err error) { +func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address) (totalCashOutAmount, newCashOutAmount *big.Int, err error) { fmt.Println("AdjustCashCheque ... ") // 1.totalReceivedCashed totalReceivedCashed := big.NewInt(0) err = s.store.Get(tokencfg.AddToken(statestore.TotalReceivedCashedKey, token), &totalReceivedCashed) if err != nil && err != storage.ErrNotFound { fmt.Println("AdjustCashCheque ... 1 err = ", err) - return nil, err + return nil, nil, err } // 2.alreadyPaidOut in renter contract @@ -326,7 +326,7 @@ func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, rec alreadyPaidOutOnline, err := contract.PaidOut(ctx, recipient, token) if err != nil { fmt.Println("AdjustCashCheque ... 2 err = ", err) - return nil, err + return nil, nil, err } // 3.compare it to fix. 
@@ -335,13 +335,14 @@ func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, rec if diff.Cmp(big.NewInt(0)) > 0 { fmt.Println("AdjustCashCheque: diff > 0") //return nil - cashResult, err = s.fixStoreCashResult(vaultAddress, diff, token) + cashResult, err := s.fixStoreCashResult(vaultAddress, diff, token) if err != nil { - return nil, err + return nil, nil, err } + newCashOutAmount = cashResult.Amount } - return + return alreadyPaidOutOnline, newCashOutAmount, nil } func (s *cashoutService) fixStoreCashResult(vault common.Address, shouldPaidOut *big.Int, token common.Address) (cashResult *CashOutResult, err error) { From 88c3a686962b2234fd770034feec3d332ab2f113 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Fri, 22 Sep 2023 08:57:09 +0800 Subject: [PATCH 128/139] chore --- core/commands/cheque/cheque.go | 12 ++++++------ core/commands/cheque/fix_cheque_cashout.go | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/core/commands/cheque/cheque.go b/core/commands/cheque/cheque.go index 1b8bc8229..7e578acc8 100644 --- a/core/commands/cheque/cheque.go +++ b/core/commands/cheque/cheque.go @@ -37,12 +37,12 @@ type ListChequeRet struct { } type fixCheque struct { - PeerID string - Token string - Beneficiary string - Vault string - CashedAmount *big.Int - FixCashedAmount *big.Int + PeerID string + Token string + Beneficiary string + Vault string + TotalCashedAmount *big.Int + FixCashedAmount *big.Int } type ListFixChequeRet struct { diff --git a/core/commands/cheque/fix_cheque_cashout.go b/core/commands/cheque/fix_cheque_cashout.go index 48de1adbc..60e3f0243 100644 --- a/core/commands/cheque/fix_cheque_cashout.go +++ b/core/commands/cheque/fix_cheque_cashout.go @@ -36,13 +36,13 @@ var FixChequeCashOutCmd = &cmds.Command{ for k, v := range cheques { fmt.Println("FixChequeCashOutCmd ... 
4") - cashOutAmount, newCashOutAmount, err := chain.SettleObject.CashoutService.AdjustCashCheque( + totalCashOutAmount, newCashOutAmount, err := chain.SettleObject.CashoutService.AdjustCashCheque( context.Background(), v.Vault, v.Beneficiary, tokenAddr) if err != nil { return err } - fmt.Println("FixChequeCashOutCmd ... 5", cashOutAmount.String(), newCashOutAmount.String()) + fmt.Println("FixChequeCashOutCmd ... 5", totalCashOutAmount.String(), newCashOutAmount.String()) if newCashOutAmount != nil && newCashOutAmount.Uint64() > 0 { var record fixCheque @@ -50,7 +50,7 @@ var FixChequeCashOutCmd = &cmds.Command{ record.Token = v.Token.String() record.Beneficiary = v.Beneficiary.String() record.Vault = v.Vault.String() - record.CashedAmount = cashOutAmount + record.TotalCashedAmount = totalCashOutAmount record.FixCashedAmount = newCashOutAmount listRet.FixCheques = append(listRet.FixCheques, record) @@ -68,13 +68,13 @@ var FixChequeCashOutCmd = &cmds.Command{ Type: ListFixChequeRet{}, Encoders: cmds.EncoderMap{ cmds.Text: cmds.MakeTypedEncoder(func(req *cmds.Request, w io.Writer, out *ListFixChequeRet) error { - fmt.Fprintf(w, "fix: \n\t%-55s\t%-46s\t%-46s\t%-46s\tfix_cash_amount: \n", "peerID:", "vault:", "beneficiary:", "cash_amount:") + fmt.Fprintf(w, "fix: \n\t%-55s\t%-46s\t%-46s\t%-46s\tfix_cash_amount: \n", "peerID:", "vault:", "beneficiary:", "total_cash_amount:") for iter := 0; iter < out.Len; iter++ { fmt.Fprintf(w, "\t%-55s\t%-46s\t%-46s\t%d\t%d \n", out.FixCheques[iter].PeerID, out.FixCheques[iter].Vault, out.FixCheques[iter].Beneficiary, - out.FixCheques[iter].CashedAmount.Uint64(), + out.FixCheques[iter].TotalCashedAmount.Uint64(), out.FixCheques[iter].FixCashedAmount.Uint64(), ) } From 85fb908b3b0c42ce05c321113a1e245106afaf78 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Fri, 22 Sep 2023 13:46:34 +0800 Subject: [PATCH 129/139] chore: --- cmd/btfs/daemon.go | 1 + core/commands/cheque/fix_cheque_cashout.go | 2 +- 
settlement/swap/vault/cashout.go | 73 ++++++++++-- settlement/swap/vault/cashout_status_store.go | 105 ++++++++++++++++++ spin/cheque_cash_out.go | 9 ++ 5 files changed, 182 insertions(+), 8 deletions(-) create mode 100644 settlement/swap/vault/cashout_status_store.go create mode 100644 spin/cheque_cash_out.go diff --git a/cmd/btfs/daemon.go b/cmd/btfs/daemon.go index 889e5d984..01eee1216 100644 --- a/cmd/btfs/daemon.go +++ b/cmd/btfs/daemon.go @@ -728,6 +728,7 @@ If the user need to start multiple nodes on the same machine, the configuration spin.Analytics(api, cctx.ConfigRoot, node, version.CurrentVersionNumber, hValue) spin.Hosts(node, env) spin.Contracts(node, req, env, nodepb.ContractStat_HOST.String()) + spin.RestartFixChequeCashOut() } // Give the user some immediate feedback when they hit C-c diff --git a/core/commands/cheque/fix_cheque_cashout.go b/core/commands/cheque/fix_cheque_cashout.go index 60e3f0243..ea4343424 100644 --- a/core/commands/cheque/fix_cheque_cashout.go +++ b/core/commands/cheque/fix_cheque_cashout.go @@ -37,7 +37,7 @@ var FixChequeCashOutCmd = &cmds.Command{ fmt.Println("FixChequeCashOutCmd ... 
4") totalCashOutAmount, newCashOutAmount, err := chain.SettleObject.CashoutService.AdjustCashCheque( - context.Background(), v.Vault, v.Beneficiary, tokenAddr) + context.Background(), v.Vault, v.Beneficiary, tokenAddr, false) if err != nil { return err } diff --git a/settlement/swap/vault/cashout.go b/settlement/swap/vault/cashout.go index 8dccaa65a..1299ab647 100644 --- a/settlement/swap/vault/cashout.go +++ b/settlement/swap/vault/cashout.go @@ -28,9 +28,10 @@ type CashoutService interface { CashCheque(ctx context.Context, vault, recipient common.Address, token common.Address) (common.Hash, error) // CashoutStatus gets the status of the latest cashout transaction for the vault CashoutStatus(ctx context.Context, vaultAddress common.Address, token common.Address) (*CashoutStatus, error) - AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address) (totalCashOutAmount, newCashOutAmount *big.Int, err error) + AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address, passFlag bool) (totalCashOutAmount, newCashOutAmount *big.Int, err error) HasCashoutAction(ctx context.Context, peer common.Address, token common.Address) (bool, error) CashoutResults() ([]CashOutResult, error) + RestartFixChequeCashOut() } type cashoutService struct { @@ -179,6 +180,10 @@ func (s *cashoutService) CashoutResults() ([]CashOutResult, error) { // CashCheque sends a cashout transaction for the last cheque of the vault func (s *cashoutService) CashCheque(ctx context.Context, vault, recipient common.Address, token common.Address) (common.Hash, error) { + if RestartFixCashOutStatusLock { + return common.Hash{}, errors.New("Just started, it can not cash cheque, you will wait for about 40s to do it. 
") + } + cheque, err := s.chequeStore.LastReceivedCheque(vault, token) if err != nil { return common.Hash{}, err @@ -216,6 +221,19 @@ func (s *cashoutService) CashCheque(ctx context.Context, vault, recipient common return common.Hash{}, err } + // 1.add cash out status + cashOutStateInfo := CashOutStatusStoreInfo{ + Token: token, + Vault: cheque.Vault, + Beneficiary: cheque.Beneficiary, + CumulativePayout: cheque.CumulativePayout, + TxHash: txHash.String(), + } + err = s.AddCashOutStatusStore(cashOutStateInfo) + if err != nil { + return common.Hash{}, err + } + // WaitForReceipt takes long time go func() { defer func() { @@ -224,6 +242,13 @@ func (s *cashoutService) CashCheque(ctx context.Context, vault, recipient common } }() s.storeCashResult(context.Background(), vault, txHash, cheque, token) + + // 2.delete cash out status + err = s.DeleteCashOutStatusStore(cashOutStateInfo) + if err != nil { + fmt.Printf("delete cashout status, err = %v \n", err) + return + } }() return txHash, nil } @@ -238,10 +263,6 @@ func (s *cashoutService) storeCashResult(ctx context.Context, vault common.Addre Status: "fail", } - fmt.Println("test exit.") - time.Sleep(time.Second * 3) - return nil - _, err := s.transactionService.WaitForReceipt(ctx, txHash) if err != nil { log.Infof("storeCashResult err:%+v", err) @@ -310,7 +331,13 @@ func (s *cashoutService) storeCashResult(ctx context.Context, vault common.Addre } // AdjustCashCheque . -func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address) (totalCashOutAmount, newCashOutAmount *big.Int, err error) { +func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address, passFlag bool) (totalCashOutAmount, newCashOutAmount *big.Int, err error) { + if RestartFixCashOutStatusLock { + if !passFlag { + return nil, nil, errors.New("Just started, it can not fix cash out status, you will wait for about 40s to do it. 
") + } + } + fmt.Println("AdjustCashCheque ... ") // 1.totalReceivedCashed totalReceivedCashed := big.NewInt(0) @@ -334,7 +361,7 @@ func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, rec fmt.Println("AdjustCashCheque: ", alreadyPaidOutOnline.String(), totalReceivedCashed.String(), diff.String()) if diff.Cmp(big.NewInt(0)) > 0 { fmt.Println("AdjustCashCheque: diff > 0") - //return nil + cashResult, err := s.fixStoreCashResult(vaultAddress, diff, token) if err != nil { return nil, nil, err @@ -345,6 +372,38 @@ func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, rec return alreadyPaidOutOnline, newCashOutAmount, nil } +func (s *cashoutService) RestartFixChequeCashOut() { + if RestartFixCashOutStatusLock { + list, err := s.GetAllCashOutStatusStore() + if err != nil { + fmt.Printf("RestartFixChequeCashOut: GetAllCashOutStatusStore err = %v \n", err) + return + } + + if len(list) > 0 { + fmt.Println("wait 30s, for fixing cash out status") + + // wait 30s, for online cashing out ok. + time.Sleep(time.Second * RestartWaitCashOutOnlineTime) + + for _, v := range list { + _, _, err := s.AdjustCashCheque(context.Background(), v.Vault, v.Beneficiary, v.Token, true) + if err != nil { + fmt.Printf("RestartFixChequeCashOut: AdjustCashCheque err = %v, info = %+v \n", err, v) + continue + } + + err = s.DeleteCashOutStatusStore(v) + if err != nil { + fmt.Printf("RestartFixChequeCashOut: DeleteCashOutStatusStore err = %v, info = %+v \n", err, v) + } + } + } + RestartFixCashOutStatusLock = false + } + return +} + func (s *cashoutService) fixStoreCashResult(vault common.Address, shouldPaidOut *big.Int, token common.Address) (cashResult *CashOutResult, err error) { txHash := common.Hash{} //fix txHash: 0x0000... 
cashResult = &CashOutResult{ diff --git a/settlement/swap/vault/cashout_status_store.go b/settlement/swap/vault/cashout_status_store.go new file mode 100644 index 000000000..2a0c45aec --- /dev/null +++ b/settlement/swap/vault/cashout_status_store.go @@ -0,0 +1,105 @@ +package vault + +import ( + "errors" + "fmt" + "github.com/ethereum/go-ethereum/common" + "math/big" + "time" +) + +var RestartFixCashOutStatusLock bool = true +var RestartWaitCashOutOnlineTime time.Duration = 30 //seconds + +// CashOutStatus from leveldb +var prefixKeyCashOutStatusStore = "keyCashOutStatusStore" // + txHash. +type CashOutStatusStoreInfo struct { + Token common.Address + Vault common.Address + Beneficiary common.Address + CumulativePayout *big.Int + TxHash string +} + +func getkeyCashOutStatusStore(txHash string) string { + return fmt.Sprintf("%s-%s", prefixKeyCashOutStatusStore, txHash) +} + +// AddCashOutStatusStore . +func (s *cashoutService) AddCashOutStatusStore(info CashOutStatusStoreInfo) (err error) { + if s.store == nil { + return errors.New("please start btfs node, at first! ") + } + + err = s.store.Put(getkeyCashOutStatusStore(info.TxHash), info) + if err != nil { + return err + } + fmt.Println("... AddCashOutStatusStore ok! info = ", info) + + return nil +} + +// DeleteCashOutStatusStore . +func (s *cashoutService) DeleteCashOutStatusStore(info CashOutStatusStoreInfo) (err error) { + if s.store == nil { + return errors.New("please start btfs node, at first! ") + } + + err = s.store.Delete(getkeyCashOutStatusStore(info.TxHash)) + if err != nil { + if err.Error() == "storage: not found" { + return nil + } else { + return err + } + } + + fmt.Println("... DeleteCashOutStatusStore ok! info = ", info) + return +} + +// GetCashOutStatusStore . +func (s *cashoutService) GetCashOutStatusStore(txHash string) (bl bool, err error) { + if s.store == nil { + return bl, errors.New("please start btfs node, at first! 
") + } + + var info CashOutStatusStoreInfo + err = s.store.Get(getkeyCashOutStatusStore(txHash), &info) + if err != nil { + if err.Error() == "storage: not found" { + return false, nil + } else { + return false, err + } + } + + fmt.Println("... GetCashOutStatusStore ok! info = ", info) + + return true, nil +} + +// GetAllCashOutStatusStore . +func (s *cashoutService) GetAllCashOutStatusStore() (infoList []CashOutStatusStoreInfo, err error) { + if s.store == nil { + return nil, errors.New("please start btfs node, at first! ") + } + + infoList = make([]CashOutStatusStoreInfo, 0) + err = s.store.Iterate(prefixKeyCashOutStatusStore, func(key, val []byte) (stop bool, err error) { + var info CashOutStatusStoreInfo + err = s.store.Get(string(key), &info) + if err != nil { + return false, err + } + infoList = append(infoList, info) + return false, nil + }) + if err != nil { + return nil, err + } + + fmt.Println("... GetAllCashOutStatusStore ok! infoList = ", infoList) + return infoList, nil +} diff --git a/spin/cheque_cash_out.go b/spin/cheque_cash_out.go new file mode 100644 index 000000000..bfe5b2fd8 --- /dev/null +++ b/spin/cheque_cash_out.go @@ -0,0 +1,9 @@ +package spin + +import ( + "github.com/bittorrent/go-btfs/chain" +) + +func RestartFixChequeCashOut() { + chain.SettleObject.CashoutService.RestartFixChequeCashOut() +} From b05bc7934495d277aa4a1cbc7e8b77f4c41a4945 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Fri, 22 Sep 2023 13:54:09 +0800 Subject: [PATCH 130/139] chore: --- settlement/swap/vault/cashout.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/settlement/swap/vault/cashout.go b/settlement/swap/vault/cashout.go index 1299ab647..e0824a0d8 100644 --- a/settlement/swap/vault/cashout.go +++ b/settlement/swap/vault/cashout.go @@ -28,7 +28,7 @@ type CashoutService interface { CashCheque(ctx context.Context, vault, recipient common.Address, token common.Address) (common.Hash, error) // CashoutStatus gets the status of 
the latest cashout transaction for the vault CashoutStatus(ctx context.Context, vaultAddress common.Address, token common.Address) (*CashoutStatus, error) - AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address, passFlag bool) (totalCashOutAmount, newCashOutAmount *big.Int, err error) + AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address, restartPassFlag bool) (totalCashOutAmount, newCashOutAmount *big.Int, err error) HasCashoutAction(ctx context.Context, peer common.Address, token common.Address) (bool, error) CashoutResults() ([]CashOutResult, error) RestartFixChequeCashOut() @@ -331,9 +331,9 @@ func (s *cashoutService) storeCashResult(ctx context.Context, vault common.Addre } // AdjustCashCheque . -func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address, passFlag bool) (totalCashOutAmount, newCashOutAmount *big.Int, err error) { +func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address, restartPassFlag bool) (totalCashOutAmount, newCashOutAmount *big.Int, err error) { if RestartFixCashOutStatusLock { - if !passFlag { + if !restartPassFlag { return nil, nil, errors.New("Just started, it can not fix cash out status, you will wait for about 40s to do it. 
") } } From 2736d71f0a9c73cf2b3f2b4686d6947ad579aa6b Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Fri, 22 Sep 2023 15:29:29 +0800 Subject: [PATCH 131/139] chore: --- core/commands/cheque/fix_cheque_cashout.go | 14 ++----------- settlement/swap/vault/cashout.go | 20 +++++++++---------- settlement/swap/vault/cashout_status_store.go | 6 ------ 3 files changed, 12 insertions(+), 28 deletions(-) diff --git a/core/commands/cheque/fix_cheque_cashout.go b/core/commands/cheque/fix_cheque_cashout.go index ea4343424..f4908454c 100644 --- a/core/commands/cheque/fix_cheque_cashout.go +++ b/core/commands/cheque/fix_cheque_cashout.go @@ -6,6 +6,7 @@ import ( "github.com/bittorrent/go-btfs/chain" "github.com/bittorrent/go-btfs/chain/tokencfg" "github.com/bittorrent/go-btfs/utils" + "github.com/google/martian/log" "golang.org/x/net/context" "io" ) @@ -15,8 +16,6 @@ var FixChequeCashOutCmd = &cmds.Command{ Tagline: "List cheque(s) received from peers.", }, Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error { - fmt.Println("FixChequeCashOutCmd ... ") - err := utils.CheckSimpleMode(env) if err != nil { return err @@ -26,24 +25,17 @@ var FixChequeCashOutCmd = &cmds.Command{ listRet.FixCheques = make([]fixCheque, 0) for _, tokenAddr := range tokencfg.MpTokenAddr { - fmt.Println("FixChequeCashOutCmd ... 2") cheques, err := chain.SettleObject.SwapService.LastReceivedCheques(tokenAddr) - fmt.Println("FixChequeCashOutCmd ... 3", cheques) if err != nil { return err } for k, v := range cheques { - fmt.Println("FixChequeCashOutCmd ... 4") - totalCashOutAmount, newCashOutAmount, err := chain.SettleObject.CashoutService.AdjustCashCheque( context.Background(), v.Vault, v.Beneficiary, tokenAddr, false) if err != nil { return err } - - fmt.Println("FixChequeCashOutCmd ... 
5", totalCashOutAmount.String(), newCashOutAmount.String()) - if newCashOutAmount != nil && newCashOutAmount.Uint64() > 0 { var record fixCheque record.PeerID = k @@ -55,13 +47,11 @@ var FixChequeCashOutCmd = &cmds.Command{ listRet.FixCheques = append(listRet.FixCheques, record) } - - fmt.Println("FixChequeCashOutCmd ... 6") } } listRet.Len = len(listRet.FixCheques) - fmt.Println("listRet = ", listRet) + log.Infof("FixChequeCashOutCmd, listRet = %+v", listRet) return cmds.EmitOnce(res, &listRet) }, diff --git a/settlement/swap/vault/cashout.go b/settlement/swap/vault/cashout.go index e0824a0d8..5e2da6b37 100644 --- a/settlement/swap/vault/cashout.go +++ b/settlement/swap/vault/cashout.go @@ -246,7 +246,7 @@ func (s *cashoutService) CashCheque(ctx context.Context, vault, recipient common // 2.delete cash out status err = s.DeleteCashOutStatusStore(cashOutStateInfo) if err != nil { - fmt.Printf("delete cashout status, err = %v \n", err) + log.Errorf("delete cashout status, err = %v", err) return } }() @@ -338,12 +338,10 @@ func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, rec } } - fmt.Println("AdjustCashCheque ... ") // 1.totalReceivedCashed totalReceivedCashed := big.NewInt(0) err = s.store.Get(tokencfg.AddToken(statestore.TotalReceivedCashedKey, token), &totalReceivedCashed) if err != nil && err != storage.ErrNotFound { - fmt.Println("AdjustCashCheque ... 1 err = ", err) return nil, nil, err } @@ -352,16 +350,17 @@ func (s *cashoutService) AdjustCashCheque(ctx context.Context, vaultAddress, rec contract := newVaultContractMuti(vaultAddress, s.transactionService) alreadyPaidOutOnline, err := contract.PaidOut(ctx, recipient, token) if err != nil { - fmt.Println("AdjustCashCheque ... 2 err = ", err) return nil, nil, err } // 3.compare it to fix. 
diff := big.NewInt(0).Sub(alreadyPaidOutOnline, totalReceivedCashed) - fmt.Println("AdjustCashCheque: ", alreadyPaidOutOnline.String(), totalReceivedCashed.String(), diff.String()) - if diff.Cmp(big.NewInt(0)) > 0 { - fmt.Println("AdjustCashCheque: diff > 0") + log.Infof("AdjustCashCheque: diff > 0, vault=%s, recipient=%s, online=%s, local=%s, diff=%s", + vaultAddress.String(), recipient.String(), + alreadyPaidOutOnline.String(), totalReceivedCashed.String(), diff.String(), + ) + if diff.Cmp(big.NewInt(0)) > 0 { cashResult, err := s.fixStoreCashResult(vaultAddress, diff, token) if err != nil { return nil, nil, err @@ -376,7 +375,7 @@ func (s *cashoutService) RestartFixChequeCashOut() { if RestartFixCashOutStatusLock { list, err := s.GetAllCashOutStatusStore() if err != nil { - fmt.Printf("RestartFixChequeCashOut: GetAllCashOutStatusStore err = %v \n", err) + log.Infof("RestartFixChequeCashOut: GetAllCashOutStatusStore err = %v", err) return } @@ -389,13 +388,14 @@ func (s *cashoutService) RestartFixChequeCashOut() { for _, v := range list { _, _, err := s.AdjustCashCheque(context.Background(), v.Vault, v.Beneficiary, v.Token, true) if err != nil { - fmt.Printf("RestartFixChequeCashOut: AdjustCashCheque err = %v, info = %+v \n", err, v) + log.Infof("RestartFixChequeCashOut: AdjustCashCheque err = %v, info = %+v", err, v) continue } err = s.DeleteCashOutStatusStore(v) if err != nil { - fmt.Printf("RestartFixChequeCashOut: DeleteCashOutStatusStore err = %v, info = %+v \n", err, v) + log.Infof("RestartFixChequeCashOut: DeleteCashOutStatusStore err = %v, info = %+v", err, v) + continue } } } diff --git a/settlement/swap/vault/cashout_status_store.go b/settlement/swap/vault/cashout_status_store.go index 2a0c45aec..c547670ab 100644 --- a/settlement/swap/vault/cashout_status_store.go +++ b/settlement/swap/vault/cashout_status_store.go @@ -35,7 +35,6 @@ func (s *cashoutService) AddCashOutStatusStore(info CashOutStatusStoreInfo) (err if err != nil { return err } - 
fmt.Println("... AddCashOutStatusStore ok! info = ", info) return nil } @@ -54,8 +53,6 @@ func (s *cashoutService) DeleteCashOutStatusStore(info CashOutStatusStoreInfo) ( return err } } - - fmt.Println("... DeleteCashOutStatusStore ok! info = ", info) return } @@ -75,8 +72,6 @@ func (s *cashoutService) GetCashOutStatusStore(txHash string) (bl bool, err erro } } - fmt.Println("... GetCashOutStatusStore ok! info = ", info) - return true, nil } @@ -100,6 +95,5 @@ func (s *cashoutService) GetAllCashOutStatusStore() (infoList []CashOutStatusSto return nil, err } - fmt.Println("... GetAllCashOutStatusStore ok! infoList = ", infoList) return infoList, nil } From 08c2a19e0963ab36280a6f3f4672530805247a97 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Fri, 22 Sep 2023 17:56:27 +0800 Subject: [PATCH 132/139] chore --- go.mod | 1 + go.sum | 1 + settlement/swap/vault/cashout.go | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 5417489f9..7efd9b0dd 100644 --- a/go.mod +++ b/go.mod @@ -33,6 +33,7 @@ require ( github.com/go-bindata/go-bindata/v3 v3.1.3 github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.3 + github.com/google/martian v2.1.0+incompatible github.com/google/uuid v1.3.0 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d diff --git a/go.sum b/go.sum index 3b0cea235..ccd628da3 100644 --- a/go.sum +++ b/go.sum @@ -570,6 +570,7 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod 
h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= diff --git a/settlement/swap/vault/cashout.go b/settlement/swap/vault/cashout.go index 5e2da6b37..308c36612 100644 --- a/settlement/swap/vault/cashout.go +++ b/settlement/swap/vault/cashout.go @@ -380,7 +380,7 @@ func (s *cashoutService) RestartFixChequeCashOut() { } if len(list) > 0 { - fmt.Println("wait 30s, for fixing cash out status") + log.Infof("wait 30s, for fixing cash out status") // wait 30s, for online cashing out ok. time.Sleep(time.Second * RestartWaitCashOutOnlineTime) From fd09b33c2a1a3d0e5cedf24d97ab2162c42cb2c0 Mon Sep 17 00:00:00 2001 From: Steve Date: Fri, 22 Sep 2023 18:37:26 +0800 Subject: [PATCH 133/139] chore: add accesskey commands test path --- core/commands/commands_test.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/core/commands/commands_test.go b/core/commands/commands_test.go index 1982d51b9..bf6b2c2c4 100644 --- a/core/commands/commands_test.go +++ b/core/commands/commands_test.go @@ -342,6 +342,13 @@ func TestCommands(t *testing.T) { "/bittorrent/scrape", "/bittorrent/metainfo", "/bittorrent/bencode", + "/accesskey/generate", + "/accesskey/enable", + "/accesskey/disable", + "/accesskey/reset", + "/accesskey/delete", + "/accesskey/get", + "/accesskey/list", } cmdSet := make(map[string]struct{}) From 08cc667de30ae89d63aa7aa724f0bd9ee7ff9f83 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Fri, 22 Sep 2023 18:40:53 +0800 Subject: [PATCH 134/139] chore: --- settlement/swap/vault/cashout_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/settlement/swap/vault/cashout_test.go b/settlement/swap/vault/cashout_test.go index 252d130f4..bca0f953b 100644 --- a/settlement/swap/vault/cashout_test.go +++ b/settlement/swap/vault/cashout_test.go @@ -23,6 +23,7 @@ 
var ( ) func TestCashout(t *testing.T) { + vault.RestartFixCashOutStatusLock = false vaultAddress := common.HexToAddress("abcd") recipientAddress := common.HexToAddress("efff") txHash := common.HexToHash("dddd") @@ -123,6 +124,7 @@ func TestCashout(t *testing.T) { } func TestCashoutBounced(t *testing.T) { + vault.RestartFixCashOutStatusLock = false vaultAddress := common.HexToAddress("abcd") recipientAddress := common.HexToAddress("efff") txHash := common.HexToHash("dddd") @@ -227,6 +229,7 @@ func TestCashoutBounced(t *testing.T) { } func TestCashoutStatusReverted(t *testing.T) { + vault.RestartFixCashOutStatusLock = false vaultAddress := common.HexToAddress("abcd") recipientAddress := common.HexToAddress("efff") txHash := common.HexToHash("dddd") @@ -307,6 +310,7 @@ func TestCashoutStatusReverted(t *testing.T) { } func TestCashoutStatusPending(t *testing.T) { + vault.RestartFixCashOutStatusLock = false vaultAddress := common.HexToAddress("abcd") recipientAddress := common.HexToAddress("efff") txHash := common.HexToHash("dddd") From 5aca67c0fa3f0f57499b6cf33ed95fd6aac0878e Mon Sep 17 00:00:00 2001 From: Steve Date: Fri, 22 Sep 2023 18:43:18 +0800 Subject: [PATCH 135/139] chore: add accesskey test path --- core/commands/commands_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/core/commands/commands_test.go b/core/commands/commands_test.go index bf6b2c2c4..6870f21d7 100644 --- a/core/commands/commands_test.go +++ b/core/commands/commands_test.go @@ -342,6 +342,7 @@ func TestCommands(t *testing.T) { "/bittorrent/scrape", "/bittorrent/metainfo", "/bittorrent/bencode", + "/accesskey", "/accesskey/generate", "/accesskey/enable", "/accesskey/disable", From 5913d937a82c91e163af67d41d11f0248ece4dfe Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Fri, 22 Sep 2023 18:55:18 +0800 Subject: [PATCH 136/139] chore: --- settlement/swap/swap_test.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git 
a/settlement/swap/swap_test.go b/settlement/swap/swap_test.go index 74c778f5b..a603ed26f 100644 --- a/settlement/swap/swap_test.go +++ b/settlement/swap/swap_test.go @@ -124,10 +124,12 @@ func (m *addressbookMock) PutVault(peer string, vault common.Address) error { } type cashoutMock struct { - cashCheque func(ctx context.Context, vault, recipient common.Address, token common.Address) (common.Hash, error) - cashoutStatus func(ctx context.Context, vaultAddress common.Address, token common.Address) (*vault.CashoutStatus, error) - cashoutResults func() ([]vault.CashOutResult, error) - hasCashoutAction func(ctx context.Context, peer common.Address, token common.Address) (bool, error) + cashCheque func(ctx context.Context, vault, recipient common.Address, token common.Address) (common.Hash, error) + cashoutStatus func(ctx context.Context, vaultAddress common.Address, token common.Address) (*vault.CashoutStatus, error) + cashoutResults func() ([]vault.CashOutResult, error) + hasCashoutAction func(ctx context.Context, peer common.Address, token common.Address) (bool, error) + adjustCashCheque func(ctx context.Context, vaultAddress, recipient common.Address, token common.Address, restartPassFlag bool) (totalCashOutAmount, newCashOutAmount *big.Int, err error) + restartFixChequeCashOut func() } func (m *cashoutMock) CashCheque(ctx context.Context, vault, recipient common.Address, token common.Address) (common.Hash, error) { @@ -142,6 +144,12 @@ func (m *cashoutMock) CashoutResults() ([]vault.CashOutResult, error) { func (m *cashoutMock) HasCashoutAction(ctx context.Context, peer common.Address, token common.Address) (bool, error) { return m.hasCashoutAction(ctx, peer, token) } +func (m *cashoutMock) AdjustCashCheque(ctx context.Context, vaultAddress, recipient common.Address, token common.Address, restartPassFlag bool) (totalCashOutAmount, newCashOutAmount *big.Int, err error) { + return m.adjustCashCheque(ctx, vaultAddress, recipient, token, restartPassFlag) +} +func (m 
*cashoutMock) RestartFixChequeCashOut() { + m.restartFixChequeCashOut() +} func TestReceiveCheque(t *testing.T) { store := mockstore.NewStateStore() vaultService := mockvault.NewVault( From 585c44999b485a0515f76e0cfebfe150f2f37f27 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Fri, 22 Sep 2023 19:33:01 +0800 Subject: [PATCH 137/139] chore: --- core/commands/commands_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/core/commands/commands_test.go b/core/commands/commands_test.go index 5b4eadcc1..183a4401a 100644 --- a/core/commands/commands_test.go +++ b/core/commands/commands_test.go @@ -349,6 +349,15 @@ func TestCommands(t *testing.T) { "/multibase/list", "/backup", "/recovery", + "/accesskey/delete", + "/cheque/fix_cheque_cashout", + "/accesskey/list", + "/accesskey/get", + "/accesskey/enable", + "/accesskey/disable", + "/accesskey/reset", + "/accesskey", + "/accesskey/generate", } cmdSet := make(map[string]struct{}) From 71f6bccdaccffd2e6320f0365cb32b961c84f024 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Fri, 22 Sep 2023 19:39:57 +0800 Subject: [PATCH 138/139] chore: --- core/commands/commands_test.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/core/commands/commands_test.go b/core/commands/commands_test.go index 183a4401a..5b4eadcc1 100644 --- a/core/commands/commands_test.go +++ b/core/commands/commands_test.go @@ -349,15 +349,6 @@ func TestCommands(t *testing.T) { "/multibase/list", "/backup", "/recovery", - "/accesskey/delete", - "/cheque/fix_cheque_cashout", - "/accesskey/list", - "/accesskey/get", - "/accesskey/enable", - "/accesskey/disable", - "/accesskey/reset", - "/accesskey", - "/accesskey/generate", } cmdSet := make(map[string]struct{}) From a812b319876015eadb6d91c345522bf51b2ed732 Mon Sep 17 00:00:00 2001 From: fish <920886811@163.com> Date: Fri, 22 Sep 2023 19:43:24 +0800 Subject: [PATCH 139/139] chore: --- core/commands/commands_test.go | 1 + 1 file changed, 1 insertion(+) diff --git 
a/core/commands/commands_test.go b/core/commands/commands_test.go index 609d65114..aafe6fc7b 100644 --- a/core/commands/commands_test.go +++ b/core/commands/commands_test.go @@ -357,6 +357,7 @@ func TestCommands(t *testing.T) { "/accesskey/delete", "/accesskey/get", "/accesskey/list", + "/cheque/fix_cheque_cashout", } cmdSet := make(map[string]struct{})