diff --git a/internal/grpc/services/gateway/storageprovider.go b/internal/grpc/services/gateway/storageprovider.go index 90a5a04272..2595fc5b55 100644 --- a/internal/grpc/services/gateway/storageprovider.go +++ b/internal/grpc/services/gateway/storageprovider.go @@ -123,7 +123,7 @@ func (s *svc) ListStorageSpaces(ctx context.Context, req *provider.ListStorageSp } } c, err := s.findByID(ctx, &provider.ResourceId{ - OpaqueId: id.OpaqueId, + StorageId: id.OpaqueId, // TODO fix id is nil }) if err != nil { return &provider.ListStorageSpacesResponse{ @@ -196,6 +196,11 @@ func (s *svc) getHome(_ context.Context) string { func (s *svc) InitiateFileDownload(ctx context.Context, req *provider.InitiateFileDownloadRequest) (*gateway.InitiateFileDownloadResponse, error) { log := appctx.GetLogger(ctx) + + if isStorageSpaceReference(req.Ref) { + return s.initiateFileDownload(ctx, req) + } + p, st := s.getPath(ctx, req.Ref) if st.Code != rpc.Code_CODE_OK { return &gateway.InitiateFileDownloadResponse{ @@ -419,6 +424,9 @@ func (s *svc) initiateFileDownload(ctx context.Context, req *provider.InitiateFi func (s *svc) InitiateFileUpload(ctx context.Context, req *provider.InitiateFileUploadRequest) (*gateway.InitiateFileUploadResponse, error) { log := appctx.GetLogger(ctx) + if isStorageSpaceReference(req.Ref) { + return s.initiateFileUpload(ctx, req) + } p, st := s.getPath(ctx, req.Ref) if st.Code != rpc.Code_CODE_OK { return &gateway.InitiateFileUploadResponse{ @@ -662,6 +670,11 @@ func (s *svc) GetPath(ctx context.Context, req *provider.GetPathRequest) (*provi func (s *svc) CreateContainer(ctx context.Context, req *provider.CreateContainerRequest) (*provider.CreateContainerResponse, error) { log := appctx.GetLogger(ctx) + + if isStorageSpaceReference(req.Ref) { + return s.createContainer(ctx, req) + } + p, st := s.getPath(ctx, req.Ref) if st.Code != rpc.Code_CODE_OK { return &provider.CreateContainerResponse{ @@ -1161,7 +1174,15 @@ func (s *svc) stat(ctx context.Context, req *provider.StatRequest) (*provider.St Status: status.NewInternal(ctx, err, "error connecting to storage provider="+providers[0].Address), }, nil } - return c.Stat(ctx, req) + rsp, err := c.Stat(ctx, req) + if err != nil || rsp.Status.Code != rpc.Code_CODE_OK { + return rsp, err + } + if !isStorageSpaceReference(req.Ref) { + rsp.Info.Path = path.Join(providers[0].ProviderPath, rsp.Info.Path) + } + + return rsp, nil } infoFromProviders := make([]*provider.ResourceInfo, len(providers)) @@ -1209,18 +1230,20 @@ func (s *svc) statOnProvider(ctx context.Context, req *provider.StatRequest, res return } - resPath := path.Clean(req.Ref.GetPath()) - newPath := req.Ref.GetPath() - if resPath != "" && !strings.HasPrefix(resPath, p.ProviderPath) { - newPath = p.ProviderPath - } - r, err := c.Stat(ctx, &provider.StatRequest{ - Ref: &provider.Reference{ + if !isStorageSpaceReference(req.Ref) { + resPath := path.Clean(req.Ref.GetPath()) + newPath := req.Ref.GetPath() + if resPath != "" && !strings.HasPrefix(resPath, p.ProviderPath) { + newPath = p.ProviderPath + } + req.Ref = &provider.Reference{ Spec: &provider.Reference_Path{ Path: newPath, }, - }, - }) + } + } + + r, err := c.Stat(ctx, req) if err != nil { *e = errors.Wrap(err, "gateway: error calling ListContainer") return @@ -1232,6 +1255,11 @@ func (s *svc) statOnProvider(ctx context.Context, req *provider.StatRequest, res } func (s *svc) Stat(ctx context.Context, req *provider.StatRequest) (*provider.StatResponse, error) { + + if isStorageSpaceReference(req.Ref) { + return s.stat(ctx, req) + } + 
p, st := s.getPath(ctx, req.Ref, req.ArbitraryMetadataKeys...) if st.Code != rpc.Code_CODE_OK { return &provider.StatResponse{ @@ -1532,6 +1560,10 @@ func (s *svc) listSharesFolder(ctx context.Context) (*provider.ListContainerResp return lcr, nil } +func isStorageSpaceReference(ref *provider.Reference) bool { + return strings.HasPrefix(ref.GetId().GetOpaqueId(), "/") +} + func (s *svc) listContainer(ctx context.Context, req *provider.ListContainerRequest) (*provider.ListContainerResponse, error) { providers, err := s.findProviders(ctx, req.Ref) if err != nil { @@ -1560,7 +1592,7 @@ func (s *svc) listContainer(ctx context.Context, req *provider.ListContainerRequ }, nil } for _, inf := range infoFromProviders[i] { - if parent := path.Dir(inf.Path); resPath != "" && resPath != parent { + if parent := path.Dir(inf.Path); resPath != "." && resPath != parent { parts := strings.Split(strings.TrimPrefix(inf.Path, resPath), "/") p := path.Join(resPath, parts[1]) indirects[p] = append(indirects[p], inf) @@ -1598,27 +1630,40 @@ func (s *svc) listContainerOnProvider(ctx context.Context, req *provider.ListCon return } - resPath := path.Clean(req.Ref.GetPath()) - newPath := req.Ref.GetPath() - if resPath != "" && !strings.HasPrefix(resPath, p.ProviderPath) { - newPath = p.ProviderPath - } - r, err := c.ListContainer(ctx, &provider.ListContainerRequest{ - Ref: &provider.Reference{ + if !isStorageSpaceReference(req.Ref) { + resPath := path.Clean(req.Ref.GetPath()) + newPath := req.Ref.GetPath() + if resPath != "" && !strings.HasPrefix(resPath, p.ProviderPath) { + newPath = p.ProviderPath + } + req.Ref = &provider.Reference{ Spec: &provider.Reference_Path{ Path: newPath, }, - }, - }) + } + } + + r, err := c.ListContainer(ctx, req) if err != nil { *e = errors.Wrap(err, "gateway: error calling ListContainer") return } + + if !isStorageSpaceReference(req.Ref) { + for i := range r.Infos { + r.Infos[i].Path = path.Join(p.ProviderPath, r.Infos[i].Path) + } + } *res = r.Infos } func (s *svc) ListContainer(ctx context.Context, req *provider.ListContainerRequest) (*provider.ListContainerResponse, error) { log := appctx.GetLogger(ctx) + + if isStorageSpaceReference(req.Ref) { + return s.listContainer(ctx, req) + } + p, st := s.getPath(ctx, req.Ref, req.ArbitraryMetadataKeys...) 
if st.Code != rpc.Code_CODE_OK { return &provider.ListContainerResponse{ diff --git a/internal/grpc/services/storageprovider/storageprovider.go b/internal/grpc/services/storageprovider/storageprovider.go index 7785771fcb..3163b902b3 100644 --- a/internal/grpc/services/storageprovider/storageprovider.go +++ b/internal/grpc/services/storageprovider/storageprovider.go @@ -44,6 +44,10 @@ import ( "google.golang.org/grpc" ) +type ctxKey int + +const spaceRootKey ctxKey = 0 + func init() { rgrpc.Register("storageprovider", New) } @@ -196,7 +200,7 @@ func registerMimeTypes(mimes map[string]string) { } func (s *service) SetArbitraryMetadata(ctx context.Context, req *provider.SetArbitraryMetadataRequest) (*provider.SetArbitraryMetadataResponse, error) { - newRef, err := s.unwrap(ctx, req.Ref) + ctx, newRef, err := s.unwrap(ctx, req.Ref) if err != nil { err := errors.Wrap(err, "storageprovidersvc: error unwrapping path") return &provider.SetArbitraryMetadataResponse{ @@ -226,7 +230,7 @@ func (s *service) SetArbitraryMetadata(ctx context.Context, req *provider.SetArb } func (s *service) UnsetArbitraryMetadata(ctx context.Context, req *provider.UnsetArbitraryMetadataRequest) (*provider.UnsetArbitraryMetadataResponse, error) { - newRef, err := s.unwrap(ctx, req.Ref) + ctx, newRef, err := s.unwrap(ctx, req.Ref) if err != nil { err := errors.Wrap(err, "storageprovidersvc: error unwrapping path") return &provider.UnsetArbitraryMetadataResponse{ @@ -262,40 +266,49 @@ func (s *service) InitiateFileDownload(ctx context.Context, req *provider.Initia // For example, https://data-server.example.org/home/docs/myfile.txt // or ownclouds://data-server.example.org/home/docs/myfile.txt log := appctx.GetLogger(ctx) + u := *s.dataServerURL - newRef, err := s.unwrap(ctx, req.Ref) - if err != nil { - return &provider.InitiateFileDownloadResponse{ - Status: status.NewInternal(ctx, err, "error unwrapping path"), - }, nil + log.Info().Str("data-server", u.String()).Interface("ref", req.Ref).Msg("file download") + + protocol := &provider.FileDownloadProtocol{Expose: s.conf.ExposeDataServer} + + if isStorageSpaceReference(req.Ref) { + protocol.Protocol = "spaces" + u.Path = path.Join(u.Path, "spaces", req.Ref.GetId().OpaqueId) + } else { + ctx, newRef, err := s.unwrap(ctx, req.Ref) + if err != nil { + return &provider.InitiateFileDownloadResponse{ + Status: status.NewInternal(ctx, err, "error unwrapping path"), + }, nil + } + // Currently, we only support the simple protocol for GET requests + // Once we have multiple protocols, this would be moved to the fs layer + protocol.Protocol = "simple" + u.Path = path.Join(u.Path, "simple", newRef.GetPath()) } - // Currently, we only support the simple protocol for GET requests - // Once we have multiple protocols, this would be moved to the fs layer - u.Path = path.Join(u.Path, "simple", newRef.GetPath()) + protocol.DownloadEndpoint = u.String() - log.Info().Str("data-server", u.String()).Str("fn", req.Ref.GetPath()).Msg("file download") - res := &provider.InitiateFileDownloadResponse{ - Protocols: []*provider.FileDownloadProtocol{ - &provider.FileDownloadProtocol{ - Protocol: "simple", - DownloadEndpoint: u.String(), - Expose: s.conf.ExposeDataServer, - }, - }, - Status: status.NewOK(ctx), - } - return res, nil + return &provider.InitiateFileDownloadResponse{ + Protocols: []*provider.FileDownloadProtocol{protocol}, + Status: status.NewOK(ctx), + }, nil } func (s *service) InitiateFileUpload(ctx context.Context, req *provider.InitiateFileUploadRequest) 
(*provider.InitiateFileUploadResponse, error) { // TODO(labkode): same considerations as download log := appctx.GetLogger(ctx) - newRef, err := s.unwrap(ctx, req.Ref) + ctx, newRef, err := s.unwrap(ctx, req.Ref) if err != nil { - return &provider.InitiateFileUploadResponse{ - Status: status.NewInternal(ctx, err, "error unwrapping path"), - }, nil + switch err.(type) { + case errtypes.IsNotFound: + newRef = req.Ref + default: + return &provider.InitiateFileUploadResponse{ + Status: status.NewInternal(ctx, err, "error unwrapping path"), + }, nil + } } if newRef.GetPath() == "/" { return &provider.InitiateFileUploadResponse{ @@ -429,8 +442,24 @@ func (s *service) CreateStorageSpace(ctx context.Context, req *provider.CreateSt } func (s *service) ListStorageSpaces(ctx context.Context, req *provider.ListStorageSpacesRequest) (*provider.ListStorageSpacesResponse, error) { + spaces, err := s.storage.ListStorageSpaces(ctx, req.Filters) + if err != nil { + var st *rpc.Status + switch err.(type) { + case errtypes.IsNotFound: + st = status.NewNotFound(ctx, "not found when listing spaces") + case errtypes.PermissionDenied: + st = status.NewPermissionDenied(ctx, err, "permission denied") + default: + st = status.NewInternal(ctx, err, "error listing spaces") + } + return &provider.ListStorageSpacesResponse{ + Status: st, + }, nil + } return &provider.ListStorageSpacesResponse{ - Status: status.NewUnimplemented(ctx, errtypes.NotSupported("ListStorageSpaces not implemented"), "ListStorageSpaces not implemented"), + Status: status.NewOK(ctx), + StorageSpaces: spaces, }, nil } @@ -447,14 +476,53 @@ func (s *service) DeleteStorageSpace(ctx context.Context, req *provider.DeleteSt } func (s *service) CreateContainer(ctx context.Context, req *provider.CreateContainerRequest) (*provider.CreateContainerResponse, error) { - newRef, err := s.unwrap(ctx, req.Ref) + var err error + var parentRef *provider.Reference + var name string + switch { + case isStorageSpaceReference(req.Ref): + parts := strings.SplitN(req.Ref.GetId().OpaqueId, "/", 3) + if len(parts) != 3 { + return &provider.CreateContainerResponse{ + Status: status.NewInvalidArg(ctx, "invalid reference, name required"), + }, nil + } + req.Ref.GetId().OpaqueId = path.Join("/", parts[1], path.Dir(parts[2])) + parentRef = req.Ref + name = path.Base(parts[2]) + case req.Ref.GetPath() != "": + ctx, ref, err := s.unwrap(ctx, req.Ref) + if err != nil { + return &provider.CreateContainerResponse{ + Status: status.NewInternal(ctx, err, "error unwrapping path"), + }, nil + } + parentRef = &provider.Reference{ + Spec: &provider.Reference_Path{ + Path: path.Dir(ref.GetPath()), + }, + } + name = path.Base(ref.GetPath()) + default: + return &provider.CreateContainerResponse{ + Status: status.NewInvalidArg(ctx, "invalid reference, name required"), + }, nil + } + var st *rpc.Status if err != nil { + switch err.(type) { + case errtypes.IsNotFound: + st = status.NewNotFound(ctx, "path not found when unwrapping") + case errtypes.PermissionDenied: + st = status.NewPermissionDenied(ctx, err, "permission denied") + default: + st = status.NewInternal(ctx, err, "error unwrapping: "+req.String()) + } return &provider.CreateContainerResponse{ - Status: status.NewInternal(ctx, err, "error unwrapping path"), + Status: st, }, nil } - - if err := s.storage.CreateDir(ctx, newRef.GetPath()); err != nil { + if err := s.storage.CreateDir(ctx, parentRef, name); err != nil { var st *rpc.Status switch err.(type) { case errtypes.IsNotFound: @@ -478,7 +546,7 @@ func (s *service) 
CreateContainer(ctx context.Context, req *provider.CreateConta } func (s *service) Delete(ctx context.Context, req *provider.DeleteRequest) (*provider.DeleteResponse, error) { - newRef, err := s.unwrap(ctx, req.Ref) + ctx, newRef, err := s.unwrap(ctx, req.Ref) if err != nil { return &provider.DeleteResponse{ Status: status.NewInternal(ctx, err, "error unwrapping path"), @@ -512,17 +580,22 @@ func (s *service) Delete(ctx context.Context, req *provider.DeleteRequest) (*pro } func (s *service) Move(ctx context.Context, req *provider.MoveRequest) (*provider.MoveResponse, error) { - sourceRef, err := s.unwrap(ctx, req.Source) + ctx, sourceRef, err := s.unwrap(ctx, req.Source) if err != nil { return &provider.MoveResponse{ Status: status.NewInternal(ctx, err, "error unwrapping source path"), }, nil } - targetRef, err := s.unwrap(ctx, req.Destination) + ctx, targetRef, err := s.unwrap(ctx, req.Destination) if err != nil { - return &provider.MoveResponse{ - Status: status.NewInternal(ctx, err, "error unwrapping destination path"), - }, nil + switch err.(type) { + case errtypes.IsNotFound: + targetRef = req.Destination + default: + return &provider.MoveResponse{ + Status: status.NewInternal(ctx, err, "error unwrapping destination path"), + }, nil + } } if err := s.storage.Move(ctx, sourceRef, targetRef); err != nil { @@ -554,16 +627,24 @@ func (s *service) Stat(ctx context.Context, req *provider.StatRequest) (*provide trace.StringAttribute("ref", req.Ref.String()), ) - newRef, err := s.unwrap(ctx, req.Ref) + ctx, newRef, err := s.unwrap(ctx, req.Ref) + var st *rpc.Status if err != nil { + switch err.(type) { + case errtypes.IsNotFound: + st = status.NewNotFound(ctx, "path not found when unwrapping") + case errtypes.PermissionDenied: + st = status.NewPermissionDenied(ctx, err, "permission denied") + default: + st = status.NewInternal(ctx, err, "error unwrapping: "+req.String()) + } return &provider.StatResponse{ - Status: status.NewInternal(ctx, err, "error unwrapping path"), + Status: st, }, nil } md, err := s.storage.GetMD(ctx, newRef, req.ArbitraryMetadataKeys) if err != nil { - var st *rpc.Status switch err.(type) { case errtypes.IsNotFound: st = status.NewNotFound(ctx, "path not found when stating") @@ -593,7 +674,7 @@ func (s *service) ListContainerStream(req *provider.ListContainerStreamRequest, ctx := ss.Context() log := appctx.GetLogger(ctx) - newRef, err := s.unwrap(ctx, req.Ref) + ctx, newRef, err := s.unwrap(ctx, req.Ref) if err != nil { res := &provider.ListContainerStreamResponse{ Status: status.NewInternal(ctx, err, "error unwrapping path"), @@ -651,7 +732,7 @@ func (s *service) ListContainerStream(req *provider.ListContainerStreamRequest, } func (s *service) ListContainer(ctx context.Context, req *provider.ListContainerRequest) (*provider.ListContainerResponse, error) { - newRef, err := s.unwrap(ctx, req.Ref) + ctx, newRef, err := s.unwrap(ctx, req.Ref) if err != nil { return &provider.ListContainerResponse{ Status: status.NewInternal(ctx, err, "error unwrapping path"), @@ -691,7 +772,7 @@ func (s *service) ListContainer(ctx context.Context, req *provider.ListContainer } func (s *service) ListFileVersions(ctx context.Context, req *provider.ListFileVersionsRequest) (*provider.ListFileVersionsResponse, error) { - newRef, err := s.unwrap(ctx, req.Ref) + ctx, newRef, err := s.unwrap(ctx, req.Ref) if err != nil { return &provider.ListFileVersionsResponse{ Status: status.NewInternal(ctx, err, "error unwrapping path"), @@ -722,7 +803,7 @@ func (s *service) ListFileVersions(ctx 
context.Context, req *provider.ListFileVe } func (s *service) RestoreFileVersion(ctx context.Context, req *provider.RestoreFileVersionRequest) (*provider.RestoreFileVersionResponse, error) { - newRef, err := s.unwrap(ctx, req.Ref) + ctx, newRef, err := s.unwrap(ctx, req.Ref) if err != nil { return &provider.RestoreFileVersionResponse{ Status: status.NewInternal(ctx, err, "error unwrapping path"), @@ -877,7 +958,7 @@ func (s *service) PurgeRecycle(ctx context.Context, req *provider.PurgeRecycleRe } func (s *service) ListGrants(ctx context.Context, req *provider.ListGrantsRequest) (*provider.ListGrantsResponse, error) { - newRef, err := s.unwrap(ctx, req.Ref) + ctx, newRef, err := s.unwrap(ctx, req.Ref) if err != nil { return &provider.ListGrantsResponse{ Status: status.NewInternal(ctx, err, "error unwrapping path"), @@ -908,7 +989,7 @@ func (s *service) ListGrants(ctx context.Context, req *provider.ListGrantsReques } func (s *service) AddGrant(ctx context.Context, req *provider.AddGrantRequest) (*provider.AddGrantResponse, error) { - newRef, err := s.unwrap(ctx, req.Ref) + ctx, newRef, err := s.unwrap(ctx, req.Ref) if err != nil { return &provider.AddGrantResponse{ Status: status.NewInternal(ctx, err, "error unwrapping path"), @@ -952,7 +1033,7 @@ func (s *service) UpdateGrant(ctx context.Context, req *provider.UpdateGrantRequ }, nil } - newRef, err := s.unwrap(ctx, req.Ref) + ctx, newRef, err := s.unwrap(ctx, req.Ref) if err != nil { return &provider.UpdateGrantResponse{ Status: status.NewInternal(ctx, err, "error unwrapping path"), @@ -988,7 +1069,7 @@ func (s *service) RemoveGrant(ctx context.Context, req *provider.RemoveGrantRequ }, nil } - newRef, err := s.unwrap(ctx, req.Ref) + ctx, newRef, err := s.unwrap(ctx, req.Ref) if err != nil { return &provider.RemoveGrantResponse{ Status: status.NewInternal(ctx, err, "error unwrapping path"), @@ -1034,7 +1115,7 @@ func (s *service) CreateReference(ctx context.Context, req *provider.CreateRefer }, } - newRef, err := s.unwrap(ctx, ref) + ctx, newRef, err := s.unwrap(ctx, ref) if err != nil { return &provider.CreateReferenceResponse{ Status: status.NewInternal(ctx, err, "error unwrapping path"), @@ -1100,8 +1181,71 @@ func getFS(c *config) (storage.FS, error) { return nil, errtypes.NotFound("driver not found: " + c.Driver) } -func (s *service) unwrap(ctx context.Context, ref *provider.Reference) (*provider.Reference, error) { +func (s *service) unwrap(ctx context.Context, ref *provider.Reference) (context.Context, *provider.Reference, error) { if ref.GetId() != nil { + opaqueID := ref.GetId().GetOpaqueId() + if isStorageSpaceReference(ref) { + // TODO + // Split opaque id into spaceId and relative path + // ListStorageSpaces filter by id -> root item + // Get path from root item + // put path to ctx so we can cut the prefix later in wrap + // append relative part (after space id) to root path + // get id for path (stat) + + parts := strings.SplitN(opaqueID, "/", 3) + spaceID := parts[1] + + filter := []*provider.ListStorageSpacesRequest_Filter{ + { + Type: provider.ListStorageSpacesRequest_Filter_TYPE_ID, + Term: &provider.ListStorageSpacesRequest_Filter_Id{ + Id: &provider.StorageSpaceId{ + OpaqueId: spaceID, + }, + }, + }, + } + + res, err := s.storage.ListStorageSpaces(ctx, filter) + if err != nil { + return ctx, nil, err + } + + space := res[0] + spaceRoot, err := s.storage.GetPathByID(ctx, space.Root) + if err != nil { + return ctx, nil, err + } + + ctx = context.WithValue(ctx, spaceRootKey, spaceRoot) + + var fullPath string + if 
len(parts) == 3 { + fullPath = path.Join(spaceRoot, parts[2]) + } else { + fullPath = spaceRoot + } + + r := &provider.Reference{Spec: &provider.Reference_Path{ + Path: fullPath, + }} + + info, err := s.storage.GetMD(ctx, r, nil) + if err != nil { + return ctx, nil, err + } + idRef := &provider.Reference{ + Spec: &provider.Reference_Id{ + Id: &provider.ResourceId{ + StorageId: "", // we are unwrapping on purpose, bottom layers only need OpaqueId. + OpaqueId: info.Id.OpaqueId, + }, + }, + } + return ctx, idRef, nil + } + idRef := &provider.Reference{ Spec: &provider.Reference_Id{ Id: &provider.ResourceId{ @@ -1111,18 +1255,18 @@ func (s *service) unwrap(ctx context.Context, ref *provider.Reference) (*provide }, } - return idRef, nil + return ctx, idRef, nil } if ref.GetPath() == "" { // abort, no valid id nor path - return nil, errtypes.BadRequest("ref is invalid: " + ref.String()) + return ctx, nil, errtypes.BadRequest("ref is invalid: " + ref.String()) } fn := ref.GetPath() fsfn, err := s.trimMountPrefix(fn) if err != nil { - return nil, err + return ctx, nil, err } pathRef := &provider.Reference{ @@ -1131,7 +1275,7 @@ func (s *service) unwrap(ctx context.Context, ref *provider.Reference) (*provide }, } - return pathRef, nil + return ctx, pathRef, nil } func (s *service) trimMountPrefix(fn string) (string, error) { @@ -1146,6 +1290,22 @@ func (s *service) wrap(ctx context.Context, ri *provider.ResourceInfo) error { // For wrapper drivers, the storage ID might already be set. In that case, skip setting it ri.Id.StorageId = s.mountID } - ri.Path = path.Join(s.mountPath, ri.Path) + v := ctx.Value(spaceRootKey) + if v != nil { + spaceRoot := v.(string) + ri.Path = strings.TrimPrefix(ri.Path, spaceRoot) + } return nil } + +// Id based references have two properties: +// 1. StorageID +// 2. OpaqueID +// e.g. StorageID:a-storage-provider-id OpaqueID:a-file-id-d +// We are representing space ids by putting the space id in the OpaqueID: +// e.g. StorageID:a-storage-provider-id OpaqueID:/a-storage-space-id/optional/relative/path +// In the URL it looks like this: /a-storage-provider-id!a-storage-space-id/optional/relative/path +// See https://github.com/cs3org/cs3apis/pull/125 for an extension to the CS3 ReferenceID to model this properly +func isStorageSpaceReference(ref *provider.Reference) bool { + return strings.HasPrefix(ref.GetId().GetOpaqueId(), "/") +} diff --git a/internal/http/services/dataprovider/dataprovider.go b/internal/http/services/dataprovider/dataprovider.go index a87a32081e..de4a7a785a 100644 --- a/internal/http/services/dataprovider/dataprovider.go +++ b/internal/http/services/dataprovider/dataprovider.go @@ -103,6 +103,7 @@ func getDataTXs(c *config, fs storage.FS) (map[string]http.Handler, error) { } if len(c.DataTXs) == 0 { c.DataTXs["simple"] = make(map[string]interface{}) + c.DataTXs["spaces"] = make(map[string]interface{}) c.DataTXs["tus"] = make(map[string]interface{}) } diff --git a/internal/http/services/owncloud/ocdav/avatars.go b/internal/http/services/owncloud/ocdav/avatars.go index 441abbca38..1fc12ee9a1 100644 --- a/internal/http/services/owncloud/ocdav/avatars.go +++ b/internal/http/services/owncloud/ocdav/avatars.go @@ -40,7 +40,7 @@ func (h *AvatarsHandler) Handler(s *svc) http.Handler { ctx := r.Context() log := appctx.GetLogger(ctx) - if r.Method == "OPTIONS" { + if r.Method == http.MethodOptions { // no need for the user, and we need to be able // to answer preflight checks, which have no auth headers r.URL.Path = "/" // always use / ... 
we just want the options answered so phoenix doesnt hiccup @@ -49,7 +49,7 @@ func (h *AvatarsHandler) Handler(s *svc) http.Handler { } _, r.URL.Path = router.ShiftPath(r.URL.Path) - if r.Method == "GET" && r.URL.Path == "/128.png" { + if r.Method == http.MethodGet && r.URL.Path == "/128.png" { // TODO load avatar url from user context? const img = "89504E470D0A1A0A0000000D4948445200000080000000800806000000C33E61CB00000006624B474400FF00FF00FFA0BDA793000000097048597300000B1300000B1301009A9C180000000774494D4507E3061B080516D3ECF61E000008F24944415478DAED9D7D8C1D5515C07FDB76774B775BB7454AA54BBB2D5DDD765B6BD34A140B464CB07EA0113518016B4848FC438A1A448D9A18FF40316942524D544C900F49A17C2882120A28604D506C915AB160B7B2A14275B7BB606BCB76779F7FDC79F4CE79F7BD7DEFED7B3377E69E5FF2B233DBED9B7B3EEECCB977CE3D171445511445511445098B9680645D0BAC8C7EAE020A0E5D0C027B80DDC033EA1ED96521B001D80A3C1F19BB9ECF007003F0CEE83B15CFB90C781A189986D1CB7D8E007F06AE5035FBC599C0359181AA35E6716014188A3EA3D1EFAAFDFFAF025F06DEA2EA4F97EB81935318EB047037F0396035300FE8043A8039D1A723FADD3CA01FB80AB817989CE2BB4F0237AA1992E703C00B150CB313D812057DD36555D4DB7756B8DE41E0236A9664B8A982216E897A72B3980BDC5CE1CE70AB9AA779744541984BF1DF03BA136C4B77F4F871B5E519E074355763590E8C9519A62D4DB15DDDC07E47BBC681156AB6C6D0071C7328F93A60A607ED9B017CDED1BEA35140A94C83259122ED67EE316093876DDD28E61F26A3B69EAD66AC9F61D1AB463D1F7BCF075E126D1E5233D6C74EC7E4CBEA0CB47B317048B4FD6135676D5C2E14F83A705686DA3FD771F7D229E41A823E19507D2A83729CEF90A34FCD3B35F70BA5DD906159AE14B2FC5ACD5B99F384C20E016D19966726B04FC874819AB93C434259EFCD814C2B1C2319C5C14542513FCF916C5B856C17ABB94BF915F1A9D43CCDA2AD20FEDAFA5135779CD9A287FC2D8732EE12322E52B39FE28742391B722863BF90F17635BBA115386C296630C7B2DA492CFFC16423A5CA0C0F94B214938A55E4DE9CC73945E691EEAB6C6F1C605D140314F96D8E1DE009EBB82D923D78EE14CFC63C67DA9E2D64DDA1E687D7882751E49D717452E80DE692DFC99F723C26646E0F390638579C3F1280033CEE888182758035E27C57000EF09438EF0BD9017AC5F940000EB0479CF784EC004BACE362E66FDE1916E7DD213BC07CEBF8BF8104BE72B4B330640768B58E8F0734FA39661D7785EA002DE2FA2703790448676F0DD901EC123593013D02267CB90BCF48591105E110A13051A12304E500E3BEDC0A136666858E105410683B407B20778116605699BB41700E30621DCF09E80E709A757C22640778D93A9E1B501C603BFB70C80EF092753C0B3FD6FB27815DC6E65F213B80CCFFEB0DC0F8B27CCC3F43768003E27C6D000E20339E5F08D9019E9B423979E43C71BE97C0B1B3639E0A40DE3F089983E72FC4EBEAE41DBBDED1F36937C687B4703B55BA050F72E59B488F18EA3EAE0E509A07B826C70E2083DC87D5014C143C669DAFCFB103D8B28D3B82E020E9225EEA3DCF2B839EB4E41C414BCABEC19E4022635BC67D3E346886278AF99138BF3487C6DF2CCE7FA2FD3EEE8876EF78368732CA6251AD6AF6D2D180BDA54B9E6AEC2E25BE25CD633EF53C5FD86E1DCF06DE9D2307D8487C09FC1DDADF4B5981C98E29F692277224DB1F2DB926D0BD04CAF2AC784E2ECB814CB236D05E3573792E10CABA270732FD46C874A19AB9320396B286C9F664C9424C1188A23C2FFA38FCF20D3B185C80D9222EAB7C0C7893757EA7F6EFA9E9A174E3C7AC22B797D3E0AF4AEE168AFB520665F8AA90E101356BF57489DEB39F6C958D6FA77467D337AB59ABA705784828F033196AFF15A2ED8F12D6DAC786B086D22D57B2B07A688EA3DDEBD59CF5F103A1C86D1968F336D1E69FAA19EB6701A6744C5666079789B61ED367FFF4F99650EA11FC5C42D64A3CB3A9007C57CDD7189E168AFDBE876DBC91FCE734A4463F66F3485BC11FF4A87D978AB68D11C632B744B99AD2DD44CFF1A05DEB89BFC62E00D7AAB99AC30EA1E8D7800E8F82BE02709F9AA9799C46E9DE820748A7E2F65B8997BA2F06A81D6AA6E6D289C9A9B7153F98F070EB3D8E9E3F4AFCCD9FD244563B0C3044325BB17DC271ED02F02E354BF2C1D70987219AB9A6E0DA32C6FFA49A231DFACA18647B13AE7553996B6D5333A4CB324CA125DB2813C0CA065EA3D731D42B00B7A9FAFDC136CCFF68ECEE638BA2EF94A38F3655BB1FC8F705CDD87CF23E718D6FAADAFD19168E0A
E3346338D625AE314C7CB58F921232FBA6995BCFDD21AEF551557FBAB4736AA38924B26F36503AF9A3A95E29F26002C33F89CC58BE4BCD900E1FA2741E3E89A8BC8D78E2C704F03E3547B2F43AC6E4572778FD2D8EEBF7A859926101F04A0AB77E89DCF5FC1029EF0016024B89EFBE5D00FE413AAF83DB319341765B4E92EF4297A97215A519C2C749E60D603916112FFD52DC14F2323557633803F3EEFD49C73377043F52C2CE1141617149DB4398323767AA19AB6739F005E09798248F51DC6FE00EE357DD80D3817F9769EBAB517CF040143C6AB018B10CB818F80EA61ED0781905CACF0EFC4CBBEAC45434A9468613983AC15F073691AF8A6815E9C1E4CF8F44069FAC5261C5D2EAABF07BE6AD0593A3F05C0D724D46BA18C2AC77E8CE93C1DB804F03B746B7F4420D9F21E07EE02BC0BA0CCADE0F5C8399391CA851F641E076E072329864DA1605463B6A10780CB38E6E2F701D8D7D97EF13E702D7037F8F460B6355EAE741E06D789E7FB004933675A04AA186819B31397C6B896FA516029D98E4D64B22BD1DA9426703C08F7DEA20B380B7535A0ACDF59C3B0CEC06BE019CA531B0933E4C8EE100A51948AE6078252916FADA8829803C51A191AF005FC32CA298AFF6ADA9632D8E628017A77874EE8E3A6162F402BFA8D0A8039852E8FD6AC786B10E938CF27205BD6F4F628EE18B94AED22D7E0E621226B40C7AF368053E4EE9CA287B7E6173332EDC8149B4745DF477C087D53689B3391A4DB86C720B0DAEA774D07191A3C0F96A87D4D952663839D8A85BCE2EC7977F9B6C54EC0A851EE0670E3BED9EEEDCC1FB1D51E73AD5B7B75CE888D12E99CE17CAE95B5D04E93F17519A2B5917B2FAC53ED56D6678840614A9FEACF8924DAAD7CCB0BEDA3B77A569C47788F3DFAB5E33C37E71BEB81E07586E1D1F45F7BACF1AF67ECC67D4E30036AFAB03648A494CB26A91B28B6567D5F0A573D07570596176E40045C3774ED7011670EA3DBFE23F2DC2E8EDF538408B389EA77ACD2C6DF5C40007556FB9E1AFD5F472175762B66D9B2D6EFF05F19332E7D4F877AE7F6FF66327EF8FB53F015BB50F288AA2288AA2288A62F83FEC37068C6750398B0000000049454E44AE426082" decoded, err := hex.DecodeString(img) diff --git a/internal/http/services/owncloud/ocdav/copy.go b/internal/http/services/owncloud/ocdav/copy.go index 1a7e29a704..deb2eaa184 100644 --- a/internal/http/services/owncloud/ocdav/copy.go +++ b/internal/http/services/owncloud/ocdav/copy.go @@ -23,6 +23,7 @@ import ( "fmt" "net/http" "path" + "strconv" "strings" gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" @@ -32,23 +33,24 @@ import ( "github.com/cs3org/reva/internal/http/services/datagateway" "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/rhttp" + "github.com/cs3org/reva/pkg/rhttp/router" + "github.com/pkg/errors" + "github.com/rs/zerolog" "go.opencensus.io/trace" ) -func (s *svc) handleCopy(w http.ResponseWriter, r *http.Request, ns string) { +var ( + errInvalidValue = errors.New("invalid value") +) + +func (s *svc) handlePathCopy(w http.ResponseWriter, r *http.Request, ns string) { ctx := r.Context() ctx, span := trace.StartSpan(ctx, "head") defer span.End() src := path.Join(ns, r.URL.Path) - dstHeader := r.Header.Get("Destination") - overwrite := r.Header.Get("Overwrite") - depth := r.Header.Get("Depth") - if depth == "" { - depth = "infinity" - } - dst, err := extractDestination(dstHeader, r.Context().Value(ctxKeyBaseURI).(string)) + dst, err := extractDestination(r) if err != nil { w.WriteHeader(http.StatusBadRequest) return @@ -56,23 +58,28 @@ func (s *svc) handleCopy(w http.ResponseWriter, r *http.Request, ns string) { dst = path.Join(ns, dst) sublog := appctx.GetLogger(ctx).With().Str("src", src).Str("dst", dst).Logger() - sublog.Debug().Str("overwrite", overwrite).Str("depth", depth).Msg("copy") - overwrite = strings.ToUpper(overwrite) - if overwrite == "" { - overwrite = "T" + srcRef := &provider.Reference{ + Spec: &provider.Reference_Path{Path: src}, } - if overwrite != "T" && overwrite != "F" { - w.WriteHeader(http.StatusBadRequest) - return + // check dst exists + dstRef := 
&provider.Reference{ + Spec: &provider.Reference_Path{Path: dst}, } - if depth != "infinity" && depth != "0" { - w.WriteHeader(http.StatusBadRequest) - return + intermediateDirRefFunc := func() (*provider.Reference, *rpc.Status, error) { + intermediateDir := path.Dir(dst) + ref := &provider.Reference{ + Spec: &provider.Reference_Path{Path: intermediateDir}, + } + return ref, &rpc.Status{Code: rpc.Code_CODE_OK}, nil } + srcInfo, depth, successCode, ok := s.prepareCopy(ctx, w, r, srcRef, dstRef, intermediateDirRefFunc, sublog) + if !ok { + return + } client, err := s.getClient() if err != nil { sublog.Error().Err(err).Msg("error getting grpc client") @@ -80,76 +87,215 @@ func (s *svc) handleCopy(w http.ResponseWriter, r *http.Request, ns string) { return } - // check src exists - ref := &provider.Reference{ - Spec: &provider.Reference_Path{Path: src}, - } - srcStatReq := &provider.StatRequest{Ref: ref} - srcStatRes, err := client.Stat(ctx, srcStatReq) + err = s.executePathCopy(ctx, client, srcInfo, dst, depth == "infinity") if err != nil { - sublog.Error().Err(err).Msg("error sending grpc stat request") + sublog.Error().Err(err).Str("depth", depth).Msg("error descending directory") w.WriteHeader(http.StatusInternalServerError) return } + w.WriteHeader(successCode) +} - if srcStatRes.Status.Code != rpc.Code_CODE_OK { - HandleErrorStatus(&sublog, w, srcStatRes.Status) - return +func (s *svc) executePathCopy(ctx context.Context, client gateway.GatewayAPIClient, src *provider.ResourceInfo, dst string, recurse bool) error { + log := appctx.GetLogger(ctx) + log.Debug().Str("src", src.Path).Str("dst", dst).Msg("descending") + if src.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + // create dir + createReq := &provider.CreateContainerRequest{ + Ref: &provider.Reference{ + Spec: &provider.Reference_Path{Path: dst}, + }, + } + createRes, err := client.CreateContainer(ctx, createReq) + if err != nil || createRes.Status.Code != rpc.Code_CODE_OK { + return err + } + + // TODO: also copy properties: https://tools.ietf.org/html/rfc4918#section-9.8.2 + + if !recurse { + return nil + } + + // descend for children + listReq := &provider.ListContainerRequest{ + Ref: &provider.Reference{ + Spec: &provider.Reference_Path{Path: src.Path}, + }, + } + res, err := client.ListContainer(ctx, listReq) + if err != nil { + return err + } + if res.Status.Code != rpc.Code_CODE_OK { + return fmt.Errorf("status code %d", res.Status.Code) + } + + for i := range res.Infos { + childDst := path.Join(dst, path.Base(res.Infos[i].Path)) + err := s.executePathCopy(ctx, client, res.Infos[i], childDst, recurse) + if err != nil { + return err + } + } + + } else { + // copy file + + // 1. get download url + + dReq := &provider.InitiateFileDownloadRequest{ + Ref: &provider.Reference{ + Spec: &provider.Reference_Path{Path: src.Path}, + }, + } + + dRes, err := client.InitiateFileDownload(ctx, dReq) + if err != nil { + return err + } + + if dRes.Status.Code != rpc.Code_CODE_OK { + return fmt.Errorf("status code %d", dRes.Status.Code) + } + + var downloadEP, downloadToken string + for _, p := range dRes.Protocols { + if p.Protocol == "simple" { + downloadEP, downloadToken = p.DownloadEndpoint, p.Token + } + } + + // 2. 
get upload url + + uReq := &provider.InitiateFileUploadRequest{ + Ref: &provider.Reference{ + Spec: &provider.Reference_Path{Path: dst}, + }, + Opaque: &typespb.Opaque{ + Map: map[string]*typespb.OpaqueEntry{ + "Upload-Length": { + Decoder: "plain", + // TODO: handle case where size is not known in advance + Value: []byte(strconv.FormatUint(src.GetSize(), 10)), + }, + }, + }, + } + + uRes, err := client.InitiateFileUpload(ctx, uReq) + if err != nil { + return err + } + + if uRes.Status.Code != rpc.Code_CODE_OK { + return fmt.Errorf("status code %d", uRes.Status.Code) + } + + var uploadEP, uploadToken string + for _, p := range uRes.Protocols { + if p.Protocol == "simple" { + uploadEP, uploadToken = p.UploadEndpoint, p.Token + } + } + + // 3. do download + + httpDownloadReq, err := rhttp.NewRequest(ctx, "GET", downloadEP, nil) + if err != nil { + return err + } + httpDownloadReq.Header.Set(datagateway.TokenTransportHeader, downloadToken) + + httpDownloadRes, err := s.client.Do(httpDownloadReq) + if err != nil { + return err + } + defer httpDownloadRes.Body.Close() + if httpDownloadRes.StatusCode != http.StatusOK { + return fmt.Errorf("status code %d", httpDownloadRes.StatusCode) + } + + // 4. do upload + + if src.GetSize() > 0 { + httpUploadReq, err := rhttp.NewRequest(ctx, "PUT", uploadEP, httpDownloadRes.Body) + if err != nil { + return err + } + httpUploadReq.Header.Set(datagateway.TokenTransportHeader, uploadToken) + + httpUploadRes, err := s.client.Do(httpUploadReq) + if err != nil { + return err + } + defer httpUploadRes.Body.Close() + if httpUploadRes.StatusCode != http.StatusOK { + return err + } + } } + return nil +} - // check dst exists - ref = &provider.Reference{ - Spec: &provider.Reference_Path{Path: dst}, +func (s *svc) handleSpacesCopy(w http.ResponseWriter, r *http.Request, spaceID string) { + ctx := r.Context() + ctx, span := trace.StartSpan(ctx, "head") + defer span.End() + + dst, err := extractDestination(r) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return } - dstStatReq := &provider.StatRequest{Ref: ref} - dstStatRes, err := client.Stat(ctx, dstStatReq) + + sublog := appctx.GetLogger(ctx).With().Str("spaceid", spaceID).Str("path", r.URL.Path).Logger() + + // retrieve a specific storage space + srcRef, status, err := s.lookUpStorageSpaceReference(ctx, spaceID, r.URL.Path) if err != nil { - sublog.Error().Err(err).Msg("error sending grpc stat request") + sublog.Error().Err(err).Msg("error sending a grpc request") w.WriteHeader(http.StatusInternalServerError) return } - if dstStatRes.Status.Code != rpc.Code_CODE_OK && dstStatRes.Status.Code != rpc.Code_CODE_NOT_FOUND { - HandleErrorStatus(&sublog, w, srcStatRes.Status) + + if status.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&sublog, w, status) return } - successCode := http.StatusCreated // 201 if new resource was created, see https://tools.ietf.org/html/rfc4918#section-9.8.5 - if dstStatRes.Status.Code == rpc.Code_CODE_OK { - successCode = http.StatusNoContent // 204 if target already existed, see https://tools.ietf.org/html/rfc4918#section-9.8.5 + dstSpaceID, dstRelPath := router.ShiftPath(dst) - if overwrite == "F" { - sublog.Warn().Str("overwrite", overwrite).Msg("dst already exists") - w.WriteHeader(http.StatusPreconditionFailed) // 412, see https://tools.ietf.org/html/rfc4918#section-9.8.5 - return - } + // retrieve a specific storage space + dstRef, status, err := s.lookUpStorageSpaceReference(ctx, dstSpaceID, dstRelPath) + if err != nil { + sublog.Error().Err(err).Msg("error sending a grpc 
request") + w.WriteHeader(http.StatusInternalServerError) + return + } - } else { - // check if an intermediate path / the parent exists - intermediateDir := path.Dir(dst) - ref = &provider.Reference{ - Spec: &provider.Reference_Path{Path: intermediateDir}, - } - intStatReq := &provider.StatRequest{Ref: ref} - intStatRes, err := client.Stat(ctx, intStatReq) - if err != nil { - sublog.Error().Err(err).Msg("error sending grpc stat request") - w.WriteHeader(http.StatusInternalServerError) - return - } - if intStatRes.Status.Code != rpc.Code_CODE_OK { - if intStatRes.Status.Code == rpc.Code_CODE_NOT_FOUND { - // 409 if intermediate dir is missing, see https://tools.ietf.org/html/rfc4918#section-9.8.5 - sublog.Debug().Str("parent", intermediateDir).Interface("status", intStatRes.Status).Msg("conflict") - w.WriteHeader(http.StatusConflict) - } else { - HandleErrorStatus(&sublog, w, srcStatRes.Status) - } - return - } - // TODO what if intermediate is a file? + if status.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&sublog, w, status) + return + } + + intermediateDirRefFunc := func() (*provider.Reference, *rpc.Status, error) { + intermediateDir := path.Dir(dstRelPath) + return s.lookUpStorageSpaceReference(ctx, dstSpaceID, intermediateDir) + } + + srcInfo, depth, successCode, ok := s.prepareCopy(ctx, w, r, srcRef, dstRef, intermediateDirRefFunc, sublog) + if !ok { + return + } + client, err := s.getClient() + if err != nil { + sublog.Error().Err(err).Msg("error getting grpc client") + w.WriteHeader(http.StatusInternalServerError) + return } - err = s.descend(ctx, client, srcStatRes.Info, dst, depth == "infinity") + err = s.executeSpacesCopy(ctx, client, srcInfo, dstRef, depth == "infinity") if err != nil { sublog.Error().Err(err).Str("depth", depth).Msg("error descending directory") w.WriteHeader(http.StatusInternalServerError) @@ -158,15 +304,14 @@ func (s *svc) handleCopy(w http.ResponseWriter, r *http.Request, ns string) { w.WriteHeader(successCode) } -func (s *svc) descend(ctx context.Context, client gateway.GatewayAPIClient, src *provider.ResourceInfo, dst string, recurse bool) error { +func (s *svc) executeSpacesCopy(ctx context.Context, client gateway.GatewayAPIClient, src *provider.ResourceInfo, dst *provider.Reference, recurse bool) error { log := appctx.GetLogger(ctx) - log.Debug().Str("src", src.Path).Str("dst", dst).Msg("descending") + log.Debug().Str("src", src.Path).Interface("dst", dst).Msg("descending") + if src.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { // create dir createReq := &provider.CreateContainerRequest{ - Ref: &provider.Reference{ - Spec: &provider.Reference_Path{Path: dst}, - }, + Ref: dst, } createRes, err := client.CreateContainer(ctx, createReq) if err != nil || createRes.Status.Code != rpc.Code_CODE_OK { @@ -179,12 +324,18 @@ func (s *svc) descend(ctx context.Context, client gateway.GatewayAPIClient, src return nil } - // descend for children - listReq := &provider.ListContainerRequest{ - Ref: &provider.Reference{ - Spec: &provider.Reference_Path{Path: src.Path}, - }, + spaceID, _ := router.ShiftPath(dst.GetId().OpaqueId) + + srcRef := &provider.Reference{ + Spec: &provider.Reference_Id{ + Id: &provider.ResourceId{ + StorageId: dst.GetId().StorageId, + OpaqueId: path.Join("/", spaceID, src.Path), + }}, } + + // descend for children + listReq := &provider.ListContainerRequest{Ref: srcRef} res, err := client.ListContainer(ctx, listReq) if err != nil { return err @@ -194,8 +345,16 @@ func (s *svc) descend(ctx context.Context, client 
gateway.GatewayAPIClient, src } for i := range res.Infos { - childDst := path.Join(dst, path.Base(res.Infos[i].Path)) - err := s.descend(ctx, client, res.Infos[i], childDst, recurse) + childPath := strings.TrimPrefix(res.Infos[i].Path, src.Path) + childRef := &provider.Reference{ + Spec: &provider.Reference_Id{ + Id: &provider.ResourceId{ + StorageId: srcRef.GetId().StorageId, + OpaqueId: path.Join(dst.GetId().GetOpaqueId(), childPath), + }, + }, + } + err := s.executeSpacesCopy(ctx, client, res.Infos[i], childRef, recurse) if err != nil { return err } @@ -206,9 +365,15 @@ func (s *svc) descend(ctx context.Context, client gateway.GatewayAPIClient, src // 1. get download url + spaceID, _ := router.ShiftPath(dst.GetId().OpaqueId) dReq := &provider.InitiateFileDownloadRequest{ Ref: &provider.Reference{ - Spec: &provider.Reference_Path{Path: src.Path}, + Spec: &provider.Reference_Id{ + Id: &provider.ResourceId{ + StorageId: dst.GetId().StorageId, + OpaqueId: path.Join("/", spaceID, src.Path), + }, + }, }, } @@ -223,7 +388,7 @@ func (s *svc) descend(ctx context.Context, client gateway.GatewayAPIClient, src var downloadEP, downloadToken string for _, p := range dRes.Protocols { - if p.Protocol == "simple" { + if p.Protocol == "spaces" { downloadEP, downloadToken = p.DownloadEndpoint, p.Token } } @@ -231,15 +396,13 @@ func (s *svc) descend(ctx context.Context, client gateway.GatewayAPIClient, src // 2. get upload url uReq := &provider.InitiateFileUploadRequest{ - Ref: &provider.Reference{ - Spec: &provider.Reference_Path{Path: dst}, - }, + Ref: dst, Opaque: &typespb.Opaque{ Map: map[string]*typespb.OpaqueEntry{ "Upload-Length": { Decoder: "plain", // TODO: handle case where size is not known in advance - Value: []byte(fmt.Sprintf("%d", src.GetSize())), + Value: []byte(strconv.FormatUint(src.GetSize(), 10)), }, }, }, @@ -299,3 +462,120 @@ func (s *svc) descend(ctx context.Context, client gateway.GatewayAPIClient, src } return nil } + +func (s *svc) prepareCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, srcRef, dstRef *provider.Reference, intermediateDirRef func() (*provider.Reference, *rpc.Status, error), log zerolog.Logger) (*provider.ResourceInfo, string, int, bool) { + overwrite, err := extractOverwrite(w, r) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return nil, "", 0, false + } + depth, err := extractDepth(w, r) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return nil, "", 0, false + } + + log.Debug().Str("overwrite", overwrite).Str("depth", depth).Msg("copy") + + client, err := s.getClient() + if err != nil { + log.Error().Err(err).Msg("error getting grpc client") + w.WriteHeader(http.StatusInternalServerError) + return nil, "", 0, false + } + + srcStatReq := &provider.StatRequest{Ref: srcRef} + srcStatRes, err := client.Stat(ctx, srcStatReq) + if err != nil { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return nil, "", 0, false + } + + if srcStatRes.Status.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&log, w, srcStatRes.Status) + return nil, "", 0, false + } + + dstStatReq := &provider.StatRequest{Ref: dstRef} + dstStatRes, err := client.Stat(ctx, dstStatReq) + if err != nil { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return nil, "", 0, false + } + if dstStatRes.Status.Code != rpc.Code_CODE_OK && dstStatRes.Status.Code != rpc.Code_CODE_NOT_FOUND { + HandleErrorStatus(&log, w, srcStatRes.Status) + return nil, 
"", 0, false + } + + successCode := http.StatusCreated // 201 if new resource was created, see https://tools.ietf.org/html/rfc4918#section-9.8.5 + if dstStatRes.Status.Code == rpc.Code_CODE_OK { + successCode = http.StatusNoContent // 204 if target already existed, see https://tools.ietf.org/html/rfc4918#section-9.8.5 + + if overwrite == "F" { + log.Warn().Str("overwrite", overwrite).Msg("dst already exists") + w.WriteHeader(http.StatusPreconditionFailed) // 412, see https://tools.ietf.org/html/rfc4918#section-9.8.5 + return nil, "", 0, false + } + + } else { + // check if an intermediate path / the parent exists + intermediateRef, status, err := intermediateDirRef() + if err != nil { + log.Error().Err(err).Msg("error sending a grpc request") + w.WriteHeader(http.StatusInternalServerError) + return nil, "", 0, false + } + + if status.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&log, w, status) + return nil, "", 0, false + } + intStatReq := &provider.StatRequest{Ref: intermediateRef} + intStatRes, err := client.Stat(ctx, intStatReq) + if err != nil { + log.Error().Err(err).Msg("error sending grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return nil, "", 0, false + } + if intStatRes.Status.Code != rpc.Code_CODE_OK { + if intStatRes.Status.Code == rpc.Code_CODE_NOT_FOUND { + // 409 if intermediate dir is missing, see https://tools.ietf.org/html/rfc4918#section-9.8.5 + log.Debug().Interface("parent", intermediateRef).Interface("status", intStatRes.Status).Msg("conflict") + w.WriteHeader(http.StatusConflict) + } else { + HandleErrorStatus(&log, w, srcStatRes.Status) + } + return nil, "", 0, false + } + // TODO what if intermediate is a file? + } + + return srcStatRes.Info, depth, successCode, true +} + +func extractOverwrite(w http.ResponseWriter, r *http.Request) (string, error) { + overwrite := r.Header.Get(HeaderOverwrite) + overwrite = strings.ToUpper(overwrite) + if overwrite == "" { + overwrite = "T" + } + + if overwrite != "T" && overwrite != "F" { + return "", errInvalidValue + } + + return overwrite, nil +} + +func extractDepth(w http.ResponseWriter, r *http.Request) (string, error) { + depth := r.Header.Get(HeaderDepth) + if depth == "" { + depth = "infinity" + } + if depth != "infinity" && depth != "0" { + return "", errInvalidValue + } + return depth, nil +} diff --git a/internal/http/services/owncloud/ocdav/dav.go b/internal/http/services/owncloud/ocdav/dav.go index ca5abc1884..7fae59a4aa 100644 --- a/internal/http/services/owncloud/ocdav/dav.go +++ b/internal/http/services/owncloud/ocdav/dav.go @@ -47,6 +47,7 @@ type DavHandler struct { FilesHomeHandler *WebDavHandler MetaHandler *MetaHandler TrashbinHandler *TrashbinHandler + SpacesHandler *SpacesHandler PublicFolderHandler *WebDavHandler PublicFileHandler *PublicFileHandler } @@ -70,6 +71,11 @@ func (h *DavHandler) init(c *Config) error { } h.TrashbinHandler = new(TrashbinHandler) + h.SpacesHandler = new(SpacesHandler) + if err := h.SpacesHandler.init(c); err != nil { + return err + } + h.PublicFolderHandler = new(WebDavHandler) if err := h.PublicFolderHandler.init("public", true); err != nil { // jail public file requests to /public/ prefix return err @@ -163,6 +169,11 @@ func (h *DavHandler) Handler(s *svc) http.Handler { ctx := context.WithValue(ctx, ctxKeyBaseURI, base) r = r.WithContext(ctx) h.TrashbinHandler.Handler(s).ServeHTTP(w, r) + case "spaces": + base := path.Join(ctx.Value(ctxKeyBaseURI).(string), "spaces") + ctx := context.WithValue(ctx, ctxKeyBaseURI, base) + r = r.WithContext(ctx) + 
h.SpacesHandler.Handler(s).ServeHTTP(w, r) case "public-files": base := path.Join(ctx.Value(ctxKeyBaseURI).(string), "public-files") ctx = context.WithValue(ctx, ctxKeyBaseURI, base) diff --git a/internal/http/services/owncloud/ocdav/delete.go b/internal/http/services/owncloud/ocdav/delete.go index 0bdf55561a..78269d630e 100644 --- a/internal/http/services/owncloud/ocdav/delete.go +++ b/internal/http/services/owncloud/ocdav/delete.go @@ -19,45 +19,72 @@ package ocdav import ( + "context" "net/http" "path" rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/appctx" + "github.com/rs/zerolog" "go.opencensus.io/trace" ) -func (s *svc) handleDelete(w http.ResponseWriter, r *http.Request, ns string) { +func (s *svc) handlePathDelete(w http.ResponseWriter, r *http.Request, ns string) { ctx := r.Context() - ctx, span := trace.StartSpan(ctx, "head") + ctx, span := trace.StartSpan(ctx, "delete") defer span.End() fn := path.Join(ns, r.URL.Path) sublog := appctx.GetLogger(ctx).With().Str("path", fn).Logger() + ref := &provider.Reference{ + Spec: &provider.Reference_Path{Path: fn}, + } + s.handleDelete(ctx, w, r, ref, sublog) +} +func (s *svc) handleDelete(ctx context.Context, w http.ResponseWriter, r *http.Request, ref *provider.Reference, log zerolog.Logger) { client, err := s.getClient() if err != nil { - sublog.Error().Err(err).Msg("error getting grpc client") + log.Error().Err(err).Msg("error getting grpc client") w.WriteHeader(http.StatusInternalServerError) return } - ref := &provider.Reference{ - Spec: &provider.Reference_Path{Path: fn}, - } req := &provider.DeleteRequest{Ref: ref} res, err := client.Delete(ctx, req) if err != nil { - sublog.Error().Err(err).Msg("error performing delete grpc request") + log.Error().Err(err).Msg("error performing delete grpc request") w.WriteHeader(http.StatusInternalServerError) return } if res.Status.Code != rpc.Code_CODE_OK { - HandleErrorStatus(&sublog, w, res.Status) + HandleErrorStatus(&log, w, res.Status) return } w.WriteHeader(http.StatusNoContent) } + +func (s *svc) handleSpacesDelete(w http.ResponseWriter, r *http.Request, spaceID string) { + ctx := r.Context() + ctx, span := trace.StartSpan(ctx, "spaces_delete") + defer span.End() + + sublog := appctx.GetLogger(ctx).With().Logger() + // retrieve a specific storage space + ref, rpcStatus, err := s.lookUpStorageSpaceReference(ctx, spaceID, r.URL.Path) + if err != nil { + sublog.Error().Err(err).Msg("error sending a grpc request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if rpcStatus.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&sublog, w, rpcStatus) + return + } + + s.handleDelete(ctx, w, r, ref, sublog) +} diff --git a/internal/http/services/owncloud/ocdav/get.go b/internal/http/services/owncloud/ocdav/get.go index d6ab7d6c89..760d466bff 100644 --- a/internal/http/services/owncloud/ocdav/get.go +++ b/internal/http/services/owncloud/ocdav/get.go @@ -19,6 +19,7 @@ package ocdav import ( + "context" "fmt" "io" "net/http" @@ -29,6 +30,7 @@ import ( "github.com/cs3org/reva/internal/grpc/services/storageprovider" "github.com/cs3org/reva/internal/http/services/datagateway" + "github.com/rs/zerolog" "go.opencensus.io/trace" rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" @@ -38,7 +40,7 @@ import ( "github.com/cs3org/reva/pkg/utils" ) -func (s *svc) handleGet(w http.ResponseWriter, r *http.Request, ns string) { +func (s *svc) handlePathGet(w http.ResponseWriter, r *http.Request, ns 
string) { ctx := r.Context() ctx, span := trace.StartSpan(ctx, "get") defer span.End() @@ -47,79 +49,79 @@ func (s *svc) handleGet(w http.ResponseWriter, r *http.Request, ns string) { sublog := appctx.GetLogger(ctx).With().Str("path", fn).Str("svc", "ocdav").Str("handler", "get").Logger() + ref := &provider.Reference{ + Spec: &provider.Reference_Path{Path: fn}, + } + + s.handleGet(ctx, w, r, ref, "simple", sublog) +} + +func (s *svc) handleGet(ctx context.Context, w http.ResponseWriter, r *http.Request, ref *provider.Reference, dlProtocol string, log zerolog.Logger) { client, err := s.getClient() if err != nil { - sublog.Error().Err(err).Msg("error getting grpc client") + log.Error().Err(err).Msg("error getting grpc client") w.WriteHeader(http.StatusInternalServerError) return } - sReq := &provider.StatRequest{ - Ref: &provider.Reference{ - Spec: &provider.Reference_Path{Path: fn}, - }, - } + sReq := &provider.StatRequest{Ref: ref} sRes, err := client.Stat(ctx, sReq) if err != nil { - sublog.Error().Err(err).Msg("error sending grpc stat request") + log.Error().Err(err).Msg("error sending grpc stat request") w.WriteHeader(http.StatusInternalServerError) return } if sRes.Status.Code != rpc.Code_CODE_OK { - HandleErrorStatus(&sublog, w, sRes.Status) + HandleErrorStatus(&log, w, sRes.Status) return } info := sRes.Info if info.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { - sublog.Warn().Msg("resource is a folder and cannot be downloaded") + log.Warn().Msg("resource is a folder and cannot be downloaded") w.WriteHeader(http.StatusNotImplemented) return } - dReq := &provider.InitiateFileDownloadRequest{ - Ref: &provider.Reference{ - Spec: &provider.Reference_Path{Path: fn}, - }, - } + dReq := &provider.InitiateFileDownloadRequest{Ref: ref} dRes, err := client.InitiateFileDownload(ctx, dReq) if err != nil { - sublog.Error().Err(err).Msg("error initiating file download") + log.Error().Err(err).Msg("error initiating file download") w.WriteHeader(http.StatusInternalServerError) return } if dRes.Status.Code != rpc.Code_CODE_OK { - HandleErrorStatus(&sublog, w, dRes.Status) + HandleErrorStatus(&log, w, dRes.Status) return } var ep, token string for _, p := range dRes.Protocols { - if p.Protocol == "simple" { + if p.Protocol == dlProtocol { ep, token = p.DownloadEndpoint, p.Token } } - httpReq, err := rhttp.NewRequest(ctx, "GET", ep, nil) + httpReq, err := rhttp.NewRequest(ctx, http.MethodGet, ep, nil) if err != nil { - sublog.Error().Err(err).Msg("error creating http request") + log.Error().Err(err).Msg("error creating http request") w.WriteHeader(http.StatusInternalServerError) return } httpReq.Header.Set(datagateway.TokenTransportHeader, token) - if r.Header.Get("Range") != "" { - httpReq.Header.Set("Range", r.Header.Get("Range")) + if r.Header.Get(HeaderRange) != "" { + httpReq.Header.Set(HeaderRange, r.Header.Get(HeaderRange)) } httpClient := s.client httpRes, err := httpClient.Do(httpReq) if err != nil { - sublog.Error().Err(err).Msg("error performing http request") + log.Error().Err(err).Msg("error performing http request") w.WriteHeader(http.StatusInternalServerError) return } @@ -130,38 +132,60 @@ func (s *svc) handleGet(w http.ResponseWriter, r *http.Request, ns string) { return } - w.Header().Set("Content-Type", info.MimeType) - w.Header().Set("Content-Disposition", "attachment; filename*=UTF-8''"+ + w.Header().Set(HeaderContentType, info.MimeType) + w.Header().Set(HeaderContentDisposistion, "attachment; filename*=UTF-8''"+ path.Base(info.Path)+"; 
filename=\""+path.Base(info.Path)+"\"") - w.Header().Set("ETag", info.Etag) - w.Header().Set("OC-FileId", wrapResourceID(info.Id)) - w.Header().Set("OC-ETag", info.Etag) + w.Header().Set(HeaderETag, info.Etag) + w.Header().Set(HeaderOCFileID, wrapResourceID(info.Id)) + w.Header().Set(HeaderOCETag, info.Etag) t := utils.TSToTime(info.Mtime).UTC() lastModifiedString := t.Format(time.RFC1123Z) - w.Header().Set("Last-Modified", lastModifiedString) + w.Header().Set(HeaderLastModified, lastModifiedString) if httpRes.StatusCode == http.StatusPartialContent { - w.Header().Set("Content-Range", httpRes.Header.Get("Content-Range")) - w.Header().Set("Content-Length", httpRes.Header.Get("Content-Length")) + w.Header().Set(HeaderContentRange, httpRes.Header.Get(HeaderContentRange)) + w.Header().Set(HeaderContentLength, httpRes.Header.Get(HeaderContentLength)) w.WriteHeader(http.StatusPartialContent) } else { - w.Header().Set("Content-Length", strconv.FormatUint(info.Size, 10)) + w.Header().Set(HeaderContentLength, strconv.FormatUint(info.Size, 10)) } if info.Checksum != nil { - w.Header().Set("OC-Checksum", fmt.Sprintf("%s:%s", strings.ToUpper(string(storageprovider.GRPC2PKGXS(info.Checksum.Type))), info.Checksum.Sum)) + w.Header().Set(HeaderOCChecksum, fmt.Sprintf("%s:%s", strings.ToUpper(string(storageprovider.GRPC2PKGXS(info.Checksum.Type))), info.Checksum.Sum)) } var c int64 if c, err = io.Copy(w, httpRes.Body); err != nil { - sublog.Error().Err(err).Msg("error finishing copying data to response") + log.Error().Err(err).Msg("error finishing copying data to response") } - if httpRes.Header.Get("Content-Length") != "" { - i, err := strconv.ParseInt(httpRes.Header.Get("Content-Length"), 10, 64) + if httpRes.Header.Get(HeaderContentLength) != "" { + i, err := strconv.ParseInt(httpRes.Header.Get(HeaderContentLength), 10, 64) if err != nil { - sublog.Error().Err(err).Str("content-length", httpRes.Header.Get("Content-Length")).Msg("invalid content length in datagateway response") + log.Error().Err(err).Str("content-length", httpRes.Header.Get(HeaderContentLength)).Msg("invalid content length in datagateway response") } if i != c { - sublog.Error().Int64("content-length", i).Int64("transferred-bytes", c).Msg("content length vs transferred bytes mismatch") + log.Error().Int64("content-length", i).Int64("transferred-bytes", c).Msg("content length vs transferred bytes mismatch") } } // TODO we need to send the If-Match etag in the GET to the datagateway to prevent race conditions between stating and reading the file } + +func (s *svc) handleSpacesGet(w http.ResponseWriter, r *http.Request, spaceID string) { + ctx := r.Context() + ctx, span := trace.StartSpan(ctx, "spaces_get") + defer span.End() + + sublog := appctx.GetLogger(ctx).With().Str("path", r.URL.Path).Str("spaceid", spaceID).Str("handler", "get").Logger() + + // retrieve a specific storage space + ref, rpcStatus, err := s.lookUpStorageSpaceReference(ctx, spaceID, r.URL.Path) + if err != nil { + sublog.Error().Err(err).Msg("error sending a grpc request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if rpcStatus.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&sublog, w, rpcStatus) + return + } + s.handleGet(ctx, w, r, ref, "spaces", sublog) +} diff --git a/internal/http/services/owncloud/ocdav/head.go b/internal/http/services/owncloud/ocdav/head.go index 250da299bb..336238e3f2 100644 --- a/internal/http/services/owncloud/ocdav/head.go +++ b/internal/http/services/owncloud/ocdav/head.go @@ -19,6 +19,7 @@ package ocdav import ( + 
"context" "fmt" "net/http" "path" @@ -31,10 +32,11 @@ import ( "github.com/cs3org/reva/internal/grpc/services/storageprovider" "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/utils" + "github.com/rs/zerolog" "go.opencensus.io/trace" ) -func (s *svc) handleHead(w http.ResponseWriter, r *http.Request, ns string) { +func (s *svc) handlePathHead(w http.ResponseWriter, r *http.Request, ns string) { ctx := r.Context() ctx, span := trace.StartSpan(ctx, "head") defer span.End() @@ -43,43 +45,69 @@ func (s *svc) handleHead(w http.ResponseWriter, r *http.Request, ns string) { sublog := appctx.GetLogger(ctx).With().Str("path", fn).Logger() + ref := &provider.Reference{ + Spec: &provider.Reference_Path{Path: fn}, + } + s.handleHead(ctx, w, r, ref, sublog) +} + +func (s *svc) handleHead(ctx context.Context, w http.ResponseWriter, r *http.Request, ref *provider.Reference, logger zerolog.Logger) { client, err := s.getClient() if err != nil { - sublog.Error().Err(err).Msg("error getting grpc client") + logger.Error().Err(err).Msg("error getting grpc client") w.WriteHeader(http.StatusInternalServerError) return } - ref := &provider.Reference{ - Spec: &provider.Reference_Path{Path: fn}, - } req := &provider.StatRequest{Ref: ref} res, err := client.Stat(ctx, req) if err != nil { - sublog.Error().Err(err).Msg("error sending grpc stat request") + logger.Error().Err(err).Msg("error sending grpc stat request") w.WriteHeader(http.StatusInternalServerError) return } if res.Status.Code != rpc.Code_CODE_OK { - HandleErrorStatus(&sublog, w, res.Status) + HandleErrorStatus(&logger, w, res.Status) return } info := res.Info - w.Header().Set("Content-Type", info.MimeType) - w.Header().Set("ETag", info.Etag) - w.Header().Set("OC-FileId", wrapResourceID(info.Id)) - w.Header().Set("OC-ETag", info.Etag) + w.Header().Set(HeaderContentType, info.MimeType) + w.Header().Set(HeaderETag, info.Etag) + w.Header().Set(HeaderOCFileID, wrapResourceID(info.Id)) + w.Header().Set(HeaderOCETag, info.Etag) if info.Checksum != nil { - w.Header().Set("OC-Checksum", fmt.Sprintf("%s:%s", strings.ToUpper(string(storageprovider.GRPC2PKGXS(info.Checksum.Type))), info.Checksum.Sum)) + w.Header().Set(HeaderOCChecksum, fmt.Sprintf("%s:%s", strings.ToUpper(string(storageprovider.GRPC2PKGXS(info.Checksum.Type))), info.Checksum.Sum)) } t := utils.TSToTime(info.Mtime).UTC() lastModifiedString := t.Format(time.RFC1123Z) - w.Header().Set("Last-Modified", lastModifiedString) - w.Header().Set("Content-Length", strconv.FormatUint(info.Size, 10)) + w.Header().Set(HeaderLastModified, lastModifiedString) + w.Header().Set(HeaderContentLength, strconv.FormatUint(info.Size, 10)) if info.Type != provider.ResourceType_RESOURCE_TYPE_CONTAINER { - w.Header().Set("Accept-Ranges", "bytes") + w.Header().Set(HeaderAcceptRanges, "bytes") } w.WriteHeader(http.StatusOK) } + +func (s *svc) handleSpacesHead(w http.ResponseWriter, r *http.Request, spaceID string) { + ctx := r.Context() + ctx, span := trace.StartSpan(ctx, "spaces_head") + defer span.End() + + sublog := appctx.GetLogger(ctx).With().Str("spaceid", spaceID).Str("path", r.URL.Path).Logger() + + spaceRef, status, err := s.lookUpStorageSpaceReference(ctx, spaceID, r.URL.Path) + if err != nil { + sublog.Error().Err(err).Msg("error sending a grpc request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if status.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&sublog, w, status) + return + } + + s.handleHead(ctx, w, r, spaceRef, sublog) +} diff --git 
a/internal/http/services/owncloud/ocdav/mkcol.go b/internal/http/services/owncloud/ocdav/mkcol.go index c93a8f9f67..41aeb22321 100644 --- a/internal/http/services/owncloud/ocdav/mkcol.go +++ b/internal/http/services/owncloud/ocdav/mkcol.go @@ -19,17 +19,18 @@ package ocdav import ( - "io" + "context" "net/http" "path" rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/appctx" + "github.com/rs/zerolog" "go.opencensus.io/trace" ) -func (s *svc) handleMkcol(w http.ResponseWriter, r *http.Request, ns string) { +func (s *svc) handlePathMkcol(w http.ResponseWriter, r *http.Request, ns string) { ctx := r.Context() ctx, span := trace.StartSpan(ctx, "mkcol") defer span.End() @@ -38,29 +39,54 @@ func (s *svc) handleMkcol(w http.ResponseWriter, r *http.Request, ns string) { sublog := appctx.GetLogger(ctx).With().Str("path", fn).Logger() - buf := make([]byte, 1) - _, err := r.Body.Read(buf) - if err != io.EOF { - sublog.Error().Err(err).Msg("error reading request body") + ref := &provider.Reference{ + Spec: &provider.Reference_Path{Path: fn}, + } + + s.handleMkcol(ctx, w, r, ref, sublog) +} + +func (s *svc) handleSpacesMkCol(w http.ResponseWriter, r *http.Request, spaceID string) { + ctx := r.Context() + ctx, span := trace.StartSpan(ctx, "spaces_mkcol") + defer span.End() + + sublog := appctx.GetLogger(ctx).With().Str("path", r.URL.Path).Str("spaceid", spaceID).Str("handler", "mkcol").Logger() + + ref, rpcStatus, err := s.lookUpStorageSpaceReference(ctx, spaceID, r.URL.Path) + if err != nil { + sublog.Error().Err(err).Msg("error sending a grpc request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if rpcStatus.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&sublog, w, rpcStatus) + return + } + + s.handleMkcol(ctx, w, r, ref, sublog) + +} + +func (s *svc) handleMkcol(ctx context.Context, w http.ResponseWriter, r *http.Request, ref *provider.Reference, log zerolog.Logger) { + if r.Body != http.NoBody { w.WriteHeader(http.StatusUnsupportedMediaType) return } - client, err := s.getClient() + gatewayClient, err := s.getClient() if err != nil { - sublog.Error().Err(err).Msg("error getting grpc client") + log.Error().Err(err).Msg("error getting grpc client") w.WriteHeader(http.StatusInternalServerError) return } - // check fn exists - ref := &provider.Reference{ - Spec: &provider.Reference_Path{Path: fn}, - } + // check if ref exists statReq := &provider.StatRequest{Ref: ref} - statRes, err := client.Stat(ctx, statReq) + statRes, err := gatewayClient.Stat(ctx, statReq) if err != nil { - sublog.Error().Err(err).Msg("error sending a grpc stat request") + log.Error().Err(err).Msg("error sending a grpc stat request") w.WriteHeader(http.StatusInternalServerError) return } @@ -69,15 +95,15 @@ func (s *svc) handleMkcol(w http.ResponseWriter, r *http.Request, ns string) { if statRes.Status.Code == rpc.Code_CODE_OK { w.WriteHeader(http.StatusMethodNotAllowed) // 405 if it already exists } else { - HandleErrorStatus(&sublog, w, statRes.Status) + HandleErrorStatus(&log, w, statRes.Status) } return } req := &provider.CreateContainerRequest{Ref: ref} - res, err := client.CreateContainer(ctx, req) + res, err := gatewayClient.CreateContainer(ctx, req) if err != nil { - sublog.Error().Err(err).Msg("error sending create container grpc request") + log.Error().Err(err).Msg("error sending create container grpc request") w.WriteHeader(http.StatusInternalServerError) return } @@ -85,9 +111,9 @@ func (s *svc) 
handleMkcol(w http.ResponseWriter, r *http.Request, ns string) { case rpc.Code_CODE_OK: w.WriteHeader(http.StatusCreated) case rpc.Code_CODE_NOT_FOUND: - sublog.Debug().Str("path", fn).Interface("status", statRes.Status).Msg("conflict") + log.Debug().Str("path", r.URL.Path).Interface("status", statRes.Status).Msg("conflict") w.WriteHeader(http.StatusConflict) default: - HandleErrorStatus(&sublog, w, res.Status) + HandleErrorStatus(&log, w, res.Status) } } diff --git a/internal/http/services/owncloud/ocdav/move.go b/internal/http/services/owncloud/ocdav/move.go index 77debda500..bce291f7ca 100644 --- a/internal/http/services/owncloud/ocdav/move.go +++ b/internal/http/services/owncloud/ocdav/move.go @@ -19,6 +19,7 @@ package ocdav import ( + "context" "net/http" "path" "strings" @@ -26,19 +27,19 @@ import ( rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/appctx" + "github.com/cs3org/reva/pkg/rhttp/router" + "github.com/rs/zerolog" "go.opencensus.io/trace" ) -func (s *svc) handleMove(w http.ResponseWriter, r *http.Request, ns string) { +func (s *svc) handlePathMove(w http.ResponseWriter, r *http.Request, ns string) { ctx := r.Context() ctx, span := trace.StartSpan(ctx, "move") defer span.End() src := path.Join(ns, r.URL.Path) - dstHeader := r.Header.Get("Destination") - overwrite := r.Header.Get("Overwrite") - dst, err := extractDestination(dstHeader, r.Context().Value(ctxKeyBaseURI).(string)) + dst, err := extractDestination(r) if err != nil { w.WriteHeader(http.StatusBadRequest) return @@ -46,7 +47,73 @@ func (s *svc) handleMove(w http.ResponseWriter, r *http.Request, ns string) { dst = path.Join(ns, dst) sublog := appctx.GetLogger(ctx).With().Str("src", src).Str("dst", dst).Logger() - sublog.Debug().Str("overwrite", overwrite).Msg("move") + srcRef := &provider.Reference{ + Spec: &provider.Reference_Path{Path: src}, + } + dstRef := &provider.Reference{ + Spec: &provider.Reference_Path{Path: dst}, + } + + intermediateDirRefFunc := func() (*provider.Reference, *rpc.Status, error) { + intermediateDir := path.Dir(dst) + ref := &provider.Reference{ + Spec: &provider.Reference_Path{Path: intermediateDir}, + } + return ref, &rpc.Status{Code: rpc.Code_CODE_OK}, nil + } + s.handleMove(ctx, w, r, srcRef, dstRef, intermediateDirRefFunc, sublog) +} + +func (s *svc) handleSpacesMove(w http.ResponseWriter, r *http.Request, srcSpaceID string) { + ctx := r.Context() + ctx, span := trace.StartSpan(ctx, "spaces_move") + defer span.End() + + dst, err := extractDestination(r) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + + sublog := appctx.GetLogger(ctx).With().Str("spaceid", srcSpaceID).Str("path", r.URL.Path).Logger() + // retrieve a specific storage space + srcRef, status, err := s.lookUpStorageSpaceReference(ctx, srcSpaceID, r.URL.Path) + if err != nil { + sublog.Error().Err(err).Msg("error sending a grpc request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if status.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&sublog, w, status) + return + } + + dstSpaceID, dstRelPath := router.ShiftPath(dst) + + // retrieve a specific storage space + dstRef, status, err := s.lookUpStorageSpaceReference(ctx, dstSpaceID, dstRelPath) + if err != nil { + sublog.Error().Err(err).Msg("error sending a grpc request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if status.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&sublog, w, status) + return + } + + 
intermediateDirRefFunc := func() (*provider.Reference, *rpc.Status, error) { + intermediateDir := path.Dir(dstRelPath) + return s.lookUpStorageSpaceReference(ctx, dstSpaceID, intermediateDir) + } + s.handleMove(ctx, w, r, srcRef, dstRef, intermediateDirRefFunc, sublog) +} + +func (s *svc) handleMove(ctx context.Context, w http.ResponseWriter, r *http.Request, srcRef, dstRef *provider.Reference, intermediateDirRef func() (*provider.Reference, *rpc.Status, error), log zerolog.Logger) { + overwrite := r.Header.Get(HeaderOverwrite) + log.Debug().Str("overwrite", overwrite).Msg("move") overwrite = strings.ToUpper(overwrite) if overwrite == "" { @@ -60,128 +127,122 @@ func (s *svc) handleMove(w http.ResponseWriter, r *http.Request, ns string) { client, err := s.getClient() if err != nil { - sublog.Error().Err(err).Msg("error getting grpc client") + log.Error().Err(err).Msg("error getting grpc client") w.WriteHeader(http.StatusInternalServerError) return } // check src exists - srcStatReq := &provider.StatRequest{ - Ref: &provider.Reference{ - Spec: &provider.Reference_Path{Path: src}, - }, - } + srcStatReq := &provider.StatRequest{Ref: srcRef} srcStatRes, err := client.Stat(ctx, srcStatReq) if err != nil { - sublog.Error().Err(err).Msg("error sending grpc stat request") + log.Error().Err(err).Msg("error sending grpc stat request") w.WriteHeader(http.StatusInternalServerError) return } if srcStatRes.Status.Code != rpc.Code_CODE_OK { - HandleErrorStatus(&sublog, w, srcStatRes.Status) + HandleErrorStatus(&log, w, srcStatRes.Status) return } - // check dst exists - dstStatRef := &provider.Reference{ - Spec: &provider.Reference_Path{Path: dst}, - } - dstStatReq := &provider.StatRequest{Ref: dstStatRef} + dstStatReq := &provider.StatRequest{Ref: dstRef} dstStatRes, err := client.Stat(ctx, dstStatReq) if err != nil { - sublog.Error().Err(err).Msg("error getting grpc client") + log.Error().Err(err).Msg("error getting grpc client") w.WriteHeader(http.StatusInternalServerError) return } if dstStatRes.Status.Code != rpc.Code_CODE_OK && dstStatRes.Status.Code != rpc.Code_CODE_NOT_FOUND { - HandleErrorStatus(&sublog, w, srcStatRes.Status) + HandleErrorStatus(&log, w, srcStatRes.Status) return } successCode := http.StatusCreated // 201 if new resource was created, see https://tools.ietf.org/html/rfc4918#section-9.9.4 + if dstStatRes.Status.Code == rpc.Code_CODE_OK { successCode = http.StatusNoContent // 204 if target already existed, see https://tools.ietf.org/html/rfc4918#section-9.9.4 if overwrite == "F" { - sublog.Warn().Str("overwrite", overwrite).Msg("dst already exists") + log.Warn().Str("overwrite", overwrite).Msg("dst already exists") w.WriteHeader(http.StatusPreconditionFailed) // 412, see https://tools.ietf.org/html/rfc4918#section-9.9.4 return } // delete existing tree - delReq := &provider.DeleteRequest{Ref: dstStatRef} + delReq := &provider.DeleteRequest{Ref: dstRef} delRes, err := client.Delete(ctx, delReq) if err != nil { - sublog.Error().Err(err).Msg("error sending grpc delete request") + log.Error().Err(err).Msg("error sending grpc delete request") w.WriteHeader(http.StatusInternalServerError) return } if delRes.Status.Code != rpc.Code_CODE_OK && delRes.Status.Code != rpc.Code_CODE_NOT_FOUND { - HandleErrorStatus(&sublog, w, delRes.Status) + HandleErrorStatus(&log, w, delRes.Status) return } } else { // check if an intermediate path / the parent exists - intermediateDir := path.Dir(dst) - ref2 := &provider.Reference{ - Spec: &provider.Reference_Path{Path: intermediateDir}, + dstRef, status, 
err := intermediateDirRef() + if err != nil { + log.Error().Err(err).Msg("error sending a grpc request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if status.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&log, w, status) + return } - intStatReq := &provider.StatRequest{Ref: ref2} + intStatReq := &provider.StatRequest{Ref: dstRef} intStatRes, err := client.Stat(ctx, intStatReq) if err != nil { - sublog.Error().Err(err).Msg("error sending grpc stat request") + log.Error().Err(err).Msg("error sending grpc stat request") w.WriteHeader(http.StatusInternalServerError) return } if intStatRes.Status.Code != rpc.Code_CODE_OK { if intStatRes.Status.Code == rpc.Code_CODE_NOT_FOUND { // 409 if intermediate dir is missing, see https://tools.ietf.org/html/rfc4918#section-9.8.5 - sublog.Debug().Str("parent", intermediateDir).Interface("status", intStatRes.Status).Msg("conflict") + log.Debug().Interface("parent", intermediateDirRef).Interface("status", intStatRes.Status).Msg("conflict") w.WriteHeader(http.StatusConflict) } else { - HandleErrorStatus(&sublog, w, intStatRes.Status) + HandleErrorStatus(&log, w, intStatRes.Status) } return } // TODO what if intermediate is a file? } - sourceRef := &provider.Reference{ - Spec: &provider.Reference_Path{Path: src}, - } - dstRef := &provider.Reference{ - Spec: &provider.Reference_Path{Path: dst}, - } - mReq := &provider.MoveRequest{Source: sourceRef, Destination: dstRef} + mReq := &provider.MoveRequest{Source: srcRef, Destination: dstRef} mRes, err := client.Move(ctx, mReq) if err != nil { - sublog.Error().Err(err).Msg("error sending move grpc request") + log.Error().Err(err).Msg("error sending move grpc request") w.WriteHeader(http.StatusInternalServerError) return } if mRes.Status.Code != rpc.Code_CODE_OK { - HandleErrorStatus(&sublog, w, mRes.Status) + HandleErrorStatus(&log, w, mRes.Status) return } dstStatRes, err = client.Stat(ctx, dstStatReq) if err != nil { - sublog.Error().Err(err).Msg("error sending grpc stat request") + log.Error().Err(err).Msg("error sending grpc stat request") w.WriteHeader(http.StatusInternalServerError) return } if dstStatRes.Status.Code != rpc.Code_CODE_OK { - HandleErrorStatus(&sublog, w, dstStatRes.Status) + HandleErrorStatus(&log, w, dstStatRes.Status) return } info := dstStatRes.Info - w.Header().Set("Content-Type", info.MimeType) - w.Header().Set("ETag", info.Etag) - w.Header().Set("OC-FileId", wrapResourceID(info.Id)) - w.Header().Set("OC-ETag", info.Etag) + w.Header().Set(HeaderContentType, info.MimeType) + w.Header().Set(HeaderETag, info.Etag) + w.Header().Set(HeaderOCFileID, wrapResourceID(info.Id)) + w.Header().Set(HeaderOCETag, info.Etag) w.WriteHeader(successCode) } diff --git a/internal/http/services/owncloud/ocdav/ocdav.go b/internal/http/services/owncloud/ocdav/ocdav.go index c2b7561cf2..28da43e68f 100644 --- a/internal/http/services/owncloud/ocdav/ocdav.go +++ b/internal/http/services/owncloud/ocdav/ocdav.go @@ -263,20 +263,22 @@ func addAccessHeaders(w http.ResponseWriter, r *http.Request) { } } -func extractDestination(dstHeader, baseURI string) (string, error) { +func extractDestination(r *http.Request) (string, error) { + dstHeader := r.Header.Get(HeaderDestination) if dstHeader == "" { - return "", errors.New("destination header is empty") + return "", errors.Wrap(errInvalidValue, "destination header is empty") } dstURL, err := url.ParseRequestURI(dstHeader) if err != nil { return "", err } + baseURI := r.Context().Value(ctxKeyBaseURI).(string) // TODO check if path is on same storage, 
return 502 on problems, see https://tools.ietf.org/html/rfc4918#section-9.9.4 // Strip the base URI from the destination. The destination might contain redirection prefixes which need to be handled urlSplit := strings.Split(dstURL.Path, baseURI) if len(urlSplit) != 2 { - return "", errors.New("destination path does not contain base URI") + return "", errors.Wrap(errInvalidValue, "destination path does not contain base URI") } return urlSplit[1], nil @@ -298,7 +300,7 @@ func replaceAllStringSubmatchFunc(re *regexp.Regexp, str string, repl func([]str return result + str[lastIndex:] } -var hrefre = regexp.MustCompile(`([^A-Za-z0-9_\-.~()/:@])`) +var hrefre = regexp.MustCompile(`([^A-Za-z0-9_\-.~()/:@!$])`) // encodePath encodes the path of a url. // diff --git a/internal/http/services/owncloud/ocdav/propfind.go b/internal/http/services/owncloud/ocdav/propfind.go index 71eb313918..2d08fb5298 100644 --- a/internal/http/services/owncloud/ocdav/propfind.go +++ b/internal/http/services/owncloud/ocdav/propfind.go @@ -43,6 +43,7 @@ import ( "github.com/cs3org/reva/pkg/appctx" ctxuser "github.com/cs3org/reva/pkg/user" "github.com/cs3org/reva/pkg/utils" + "github.com/rs/zerolog" ) const ( @@ -67,20 +68,9 @@ func (s *svc) handlePropfind(w http.ResponseWriter, r *http.Request, ns string) defer span.End() fn := path.Join(ns, r.URL.Path) - depth := r.Header.Get("Depth") - if depth == "" { - depth = "1" - } sublog := appctx.GetLogger(ctx).With().Str("path", fn).Logger() - // see https://tools.ietf.org/html/rfc4918#section-9.1 - if depth != "0" && depth != "1" && depth != "infinity" { - sublog.Debug().Str("depth", depth).Msgf("invalid Depth header value") - w.WriteHeader(http.StatusBadRequest) - return - } - pf, status, err := readPropfind(r.Body) if err != nil { sublog.Debug().Err(err).Msg("error reading propfind request") @@ -88,138 +78,17 @@ func (s *svc) handlePropfind(w http.ResponseWriter, r *http.Request, ns string) return } - client, err := s.getClient() - if err != nil { - sublog.Error().Err(err).Msg("error getting grpc client") - w.WriteHeader(http.StatusInternalServerError) - return - } - - metadataKeys := []string{} - if pf.Allprop != nil { - // TODO this changes the behavior and returns all properties if allprops has been set, - // but allprops should only return some default properties - // see https://tools.ietf.org/html/rfc4918#section-9.1 - // the description of arbitrary_metadata_keys in https://cs3org.github.io/cs3apis/#cs3.storage.provider.v1beta1.ListContainerRequest an others may need clarification - // tracked in https://github.com/cs3org/cs3apis/issues/104 - metadataKeys = append(metadataKeys, "*") - } else { - for i := range pf.Prop { - if requiresExplicitFetching(&pf.Prop[i]) { - metadataKeys = append(metadataKeys, metadataKeyOf(&pf.Prop[i])) - } - } - } ref := &provider.Reference{ Spec: &provider.Reference_Path{Path: fn}, } - req := &provider.StatRequest{ - Ref: ref, - ArbitraryMetadataKeys: metadataKeys, - } - res, err := client.Stat(ctx, req) - if err != nil { - sublog.Error().Err(err).Interface("req", req).Msg("error sending a grpc stat request") - w.WriteHeader(http.StatusInternalServerError) - return - } - - if res.Status.Code != rpc.Code_CODE_OK { - HandleErrorStatus(&sublog, w, res.Status) - return - } - - info := res.Info - infos := []*provider.ResourceInfo{info} - if info.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER && depth == "1" { - req := &provider.ListContainerRequest{ - Ref: ref, - ArbitraryMetadataKeys: metadataKeys, - } - res, err := 
client.ListContainer(ctx, req) - if err != nil { - sublog.Error().Err(err).Msg("error sending list container grpc request") - w.WriteHeader(http.StatusInternalServerError) - return - } - - if res.Status.Code != rpc.Code_CODE_OK { - HandleErrorStatus(&sublog, w, res.Status) - return - } - infos = append(infos, res.Infos...) - } else if depth == "infinity" { - // FIXME: doesn't work cross-storage as the results will have the wrong paths! - // use a stack to explore sub-containers breadth-first - stack := []string{info.Path} - for len(stack) > 0 { - // retrieve path on top of stack - path := stack[len(stack)-1] - ref = &provider.Reference{ - Spec: &provider.Reference_Path{Path: path}, - } - req := &provider.ListContainerRequest{ - Ref: ref, - ArbitraryMetadataKeys: metadataKeys, - } - res, err := client.ListContainer(ctx, req) - if err != nil { - sublog.Error().Err(err).Str("path", path).Msg("error sending list container grpc request") - w.WriteHeader(http.StatusInternalServerError) - return - } - if res.Status.Code != rpc.Code_CODE_OK { - HandleErrorStatus(&sublog, w, res.Status) - return - } - - infos = append(infos, res.Infos...) - - if depth != "infinity" { - break - } - - // TODO: stream response to avoid storing too many results in memory - - stack = stack[:len(stack)-1] - - // check sub-containers in reverse order and add them to the stack - // the reversed order here will produce a more logical sorting of results - for i := len(res.Infos) - 1; i >= 0; i-- { - // for i := range res.Infos { - if res.Infos[i].Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { - stack = append(stack, res.Infos[i].Path) - } - } - } - } - propRes, err := s.formatPropfind(ctx, &pf, infos, ns) - if err != nil { - sublog.Error().Err(err).Msg("error formatting propfind") - w.WriteHeader(http.StatusInternalServerError) + parentInfo, resourceInfos, ok := s.getResourceInfos(ctx, w, r, pf, ref, sublog) + if !ok { + // getResourceInfos handles responses in case of an error so we can just return here. return } - w.Header().Set("DAV", "1, 3, extended-mkcol") - w.Header().Set("Content-Type", "application/xml; charset=utf-8") - var disableTus bool - // let clients know this collection supports tus.io POST requests to start uploads - if info.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { - if info.Opaque != nil { - _, disableTus = info.Opaque.Map["disable_tus"] - } - if !disableTus { - w.Header().Add("Access-Control-Expose-Headers", "Tus-Resumable, Tus-Version, Tus-Extension") - w.Header().Set("Tus-Resumable", "1.0.0") - w.Header().Set("Tus-Version", "1.0.0") - w.Header().Set("Tus-Extension", "creation,creation-with-upload") - } - } - w.WriteHeader(http.StatusMultiStatus) - if _, err := w.Write([]byte(propRes)); err != nil { - sublog.Err(err).Msg("error writing response") - } + s.propfindResponse(ctx, w, r, ns, pf, parentInfo, resourceInfos, sublog) } func requiresExplicitFetching(n *xml.Name) bool { @@ -922,3 +791,194 @@ type propertyXML struct { // even including the DAV: namespace. 
InnerXML []byte `xml:",innerxml"` } + +func (s *svc) handleSpacesPropfind(w http.ResponseWriter, r *http.Request, spaceID string) { + ctx := r.Context() + ctx, span := trace.StartSpan(ctx, "propfind") + defer span.End() + + sublog := appctx.GetLogger(ctx).With().Str("path", r.URL.Path).Str("spaceid", spaceID).Logger() + + pf, status, err := readPropfind(r.Body) + if err != nil { + sublog.Debug().Err(err).Msg("error reading propfind request") + w.WriteHeader(status) + return + } + + // retrieve a specific storage space + ref, rpcStatus, err := s.lookUpStorageSpaceReference(ctx, spaceID, r.URL.Path) + if err != nil { + sublog.Error().Err(err).Msg("error sending a grpc request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if rpcStatus.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&sublog, w, rpcStatus) + return + } + + parentInfo, resourceInfos, ok := s.getResourceInfos(ctx, w, r, pf, ref, sublog) + if !ok { + // getResourceInfos handles responses in case of an error so we can just return here. + return + } + + // prefix space id to paths + for i := range resourceInfos { + resourceInfos[i].Path = path.Join("/", spaceID, resourceInfos[i].Path) + } + + s.propfindResponse(ctx, w, r, "", pf, parentInfo, resourceInfos, sublog) + +} + +func (s *svc) propfindResponse(ctx context.Context, w http.ResponseWriter, r *http.Request, namespace string, pf propfindXML, parentInfo *provider.ResourceInfo, resourceInfos []*provider.ResourceInfo, log zerolog.Logger) { + propRes, err := s.formatPropfind(ctx, &pf, resourceInfos, namespace) // spaces requests pass an empty namespace because their paths are relative to the storage space + if err != nil { + log.Error().Err(err).Msg("error formatting propfind") + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Header().Set(HeaderDav, "1, 3, extended-mkcol") + w.Header().Set(HeaderContentType, "application/xml; charset=utf-8") + + var disableTus bool + // let clients know this collection supports tus.io POST requests to start uploads + if parentInfo.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + if parentInfo.Opaque != nil { + _, disableTus = parentInfo.Opaque.Map["disable_tus"] + } + if !disableTus { + w.Header().Add(HeaderAccessControlExposeHeaders, strings.Join([]string{HeaderTusResumable, HeaderTusVersion, HeaderTusExtension}, ", ")) + w.Header().Set(HeaderTusResumable, "1.0.0") + w.Header().Set(HeaderTusVersion, "1.0.0") + w.Header().Set(HeaderTusExtension, "creation,creation-with-upload") + } + } + w.WriteHeader(http.StatusMultiStatus) + if _, err := w.Write([]byte(propRes)); err != nil { + log.Err(err).Msg("error writing response") + } +} + +func (s *svc) getResourceInfos(ctx context.Context, w http.ResponseWriter, r *http.Request, pf propfindXML, ref *provider.Reference, log zerolog.Logger) (*provider.ResourceInfo, []*provider.ResourceInfo, bool) { + depth := r.Header.Get(HeaderDepth) + if depth == "" { + depth = "1" + } + + // see https://tools.ietf.org/html/rfc4918#section-9.1 + if depth != "0" && depth != "1" && depth != "infinity" { + log.Debug().Str("depth", depth).Msgf("invalid Depth header value") + w.WriteHeader(http.StatusBadRequest) + return nil, nil, false + } + + // Get the gateway client + gatewayClient, err := s.getClient() + if err != nil { + log.Error().Err(err).Msg("error getting grpc client") + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + + metadataKeys := []string{} + if pf.Allprop != nil { + // TODO this changes the behavior and returns all properties if allprops has been set, + // but allprops 
should only return some default properties + // see https://tools.ietf.org/html/rfc4918#section-9.1 + // the description of arbitrary_metadata_keys in https://cs3org.github.io/cs3apis/#cs3.storage.provider.v1beta1.ListContainerRequest and others may need clarification + // tracked in https://github.com/cs3org/cs3apis/issues/104 + metadataKeys = append(metadataKeys, "*") + } else { + for i := range pf.Prop { + if requiresExplicitFetching(&pf.Prop[i]) { + metadataKeys = append(metadataKeys, metadataKeyOf(&pf.Prop[i])) + } + } + } + req := &provider.StatRequest{ + Ref: ref, + ArbitraryMetadataKeys: metadataKeys, + } + res, err := gatewayClient.Stat(ctx, req) + if err != nil { + log.Error().Err(err).Interface("req", req).Msg("error sending a grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + + if res.Status.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&log, w, res.Status) + return nil, nil, false + } + + parentInfo := res.Info + resourceInfos := []*provider.ResourceInfo{parentInfo} + if parentInfo.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER && depth == "1" { + req := &provider.ListContainerRequest{ + Ref: ref, + ArbitraryMetadataKeys: metadataKeys, + } + res, err := gatewayClient.ListContainer(ctx, req) + if err != nil { + log.Error().Err(err).Msg("error sending list container grpc request") + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + + if res.Status.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&log, w, res.Status) + return nil, nil, false + } + resourceInfos = append(resourceInfos, res.Infos...) + } else if depth == "infinity" { + // FIXME: doesn't work cross-storage as the results will have the wrong paths! + // use a stack to explore sub-containers depth-first + stack := []string{parentInfo.Path} + for len(stack) > 0 { + // retrieve path on top of stack + currentPath := stack[len(stack)-1] + ref = &provider.Reference{ + Spec: &provider.Reference_Path{Path: currentPath}, + } + req := &provider.ListContainerRequest{ + Ref: ref, + ArbitraryMetadataKeys: metadataKeys, + } + res, err := gatewayClient.ListContainer(ctx, req) + if err != nil { + log.Error().Err(err).Str("path", currentPath).Msg("error sending list container grpc request") + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + if res.Status.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&log, w, res.Status) + return nil, nil, false + } + + resourceInfos = append(resourceInfos, res.Infos...) 
+ + if depth != "infinity" { + break + } + + // TODO: stream response to avoid storing too many results in memory + + stack = stack[:len(stack)-1] + + // check sub-containers in reverse order and add them to the stack + // the reversed order here will produce a more logical sorting of results + for i := len(res.Infos) - 1; i >= 0; i-- { + // for i := range res.Infos { + if res.Infos[i].Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + stack = append(stack, res.Infos[i].Path) + } + } + } + } + + return parentInfo, resourceInfos, true +} diff --git a/internal/http/services/owncloud/ocdav/proppatch.go b/internal/http/services/owncloud/ocdav/proppatch.go index a8fe5933a0..4c1a5606cd 100644 --- a/internal/http/services/owncloud/ocdav/proppatch.go +++ b/internal/http/services/owncloud/ocdav/proppatch.go @@ -33,16 +33,14 @@ import ( provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/pkg/appctx" "github.com/pkg/errors" + "github.com/rs/zerolog" ) -func (s *svc) handleProppatch(w http.ResponseWriter, r *http.Request, ns string) { +func (s *svc) handlePathProppatch(w http.ResponseWriter, r *http.Request, ns string) { ctx := r.Context() ctx, span := trace.StartSpan(ctx, "proppatch") defer span.End() - acceptedProps := []xml.Name{} - removedProps := []xml.Name{} - fn := path.Join(ns, r.URL.Path) sublog := appctx.GetLogger(ctx).With().Str("path", fn).Logger() @@ -61,12 +59,12 @@ func (s *svc) handleProppatch(w http.ResponseWriter, r *http.Request, ns string) return } - // check if resource exists - statReq := &provider.StatRequest{ - Ref: &provider.Reference{ - Spec: &provider.Reference_Path{Path: fn}, - }, + ref := &provider.Reference{ + Spec: &provider.Reference_Path{Path: fn}, } + + // check if resource exists + statReq := &provider.StatRequest{Ref: ref} statRes, err := c.Stat(ctx, statReq) if err != nil { sublog.Error().Err(err).Msg("error sending a grpc stat request") @@ -79,97 +77,19 @@ func (s *svc) handleProppatch(w http.ResponseWriter, r *http.Request, ns string) return } - rreq := &provider.UnsetArbitraryMetadataRequest{ - Ref: &provider.Reference{ - Spec: &provider.Reference_Path{Path: fn}, - }, - ArbitraryMetadataKeys: []string{""}, - } - sreq := &provider.SetArbitraryMetadataRequest{ - Ref: &provider.Reference{ - Spec: &provider.Reference_Path{Path: fn}, - }, - ArbitraryMetadata: &provider.ArbitraryMetadata{ - Metadata: map[string]string{}, - }, - } - for i := range pp { - if len(pp[i].Props) < 1 { - continue - } - for j := range pp[i].Props { - propNameXML := pp[i].Props[j].XMLName - // don't use path.Join. It removes the double slash! 
concatenate with a / - key := fmt.Sprintf("%s/%s", pp[i].Props[j].XMLName.Space, pp[i].Props[j].XMLName.Local) - value := string(pp[i].Props[j].InnerXML) - remove := pp[i].Remove - // boolean flags may be "set" to false as well - if s.isBooleanProperty(key) { - // Make boolean properties either "0" or "1" - value = s.as0or1(value) - if value == "0" { - remove = true - } - } - // Webdav spec requires the operations to be executed in the order - // specified in the PROPPATCH request - // http://www.webdav.org/specs/rfc2518.html#rfc.section.8.2 - // FIXME: batch this somehow - if remove { - rreq.ArbitraryMetadataKeys[0] = key - res, err := c.UnsetArbitraryMetadata(ctx, rreq) - if err != nil { - sublog.Error().Err(err).Msg("error sending a grpc UnsetArbitraryMetadata request") - w.WriteHeader(http.StatusInternalServerError) - return - } - - if res.Status.Code != rpc.Code_CODE_OK { - HandleErrorStatus(&sublog, w, res.Status) - return - } - removedProps = append(removedProps, propNameXML) - } else { - sreq.ArbitraryMetadata.Metadata[key] = value - res, err := c.SetArbitraryMetadata(ctx, sreq) - if err != nil { - sublog.Error().Err(err).Str("key", key).Str("value", value).Msg("error sending a grpc SetArbitraryMetadata request") - w.WriteHeader(http.StatusInternalServerError) - return - } - - if res.Status.Code != rpc.Code_CODE_OK { - HandleErrorStatus(&sublog, w, res.Status) - return - } - - acceptedProps = append(acceptedProps, propNameXML) - delete(sreq.ArbitraryMetadata.Metadata, key) - } - } - // FIXME: in case of error, need to set all properties back to the original state, - // and return the error in the matching propstat block, if applicable - // http://www.webdav.org/specs/rfc2518.html#rfc.section.8.2 + acceptedProps, removedProps, ok := s.handleProppatch(ctx, w, r, ref, pp, sublog) + if !ok { + // handleProppatch handles responses in error cases so we can just return + return } - ref := strings.TrimPrefix(fn, ns) - ref = path.Join(ctx.Value(ctxKeyBaseURI).(string), ref) + nRef := strings.TrimPrefix(fn, ns) + nRef = path.Join(ctx.Value(ctxKeyBaseURI).(string), nRef) if statRes.Info.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { - ref += "/" + nRef += "/" } - propRes, err := s.formatProppatchResponse(ctx, acceptedProps, removedProps, ref) - if err != nil { - sublog.Error().Err(err).Msg("error formatting proppatch response") - w.WriteHeader(http.StatusInternalServerError) - return - } - w.Header().Set("DAV", "1, 3, extended-mkcol") - w.Header().Set("Content-Type", "application/xml; charset=utf-8") - w.WriteHeader(http.StatusMultiStatus) - if _, err := w.Write([]byte(propRes)); err != nil { - sublog.Err(err).Msg("error writing response") - } + s.handleProppatchResponse(ctx, w, r, acceptedProps, removedProps, nRef, sublog) } func (s *svc) formatProppatchResponse(ctx context.Context, acceptedProps []xml.Name, removedProps []xml.Name, ref string) (string, error) { @@ -353,3 +273,165 @@ func next(d *xml.Decoder) (xml.Token, error) { } var errInvalidProppatch = errors.New("webdav: invalid proppatch") + +func (s *svc) handleSpacesProppatch(w http.ResponseWriter, r *http.Request, spaceID string) { + ctx := r.Context() + ctx, span := trace.StartSpan(ctx, "spaces_proppatch") + defer span.End() + + sublog := appctx.GetLogger(ctx).With().Str("path", r.URL.Path).Str("spaceid", spaceID).Logger() + + pp, status, err := readProppatch(r.Body) + if err != nil { + sublog.Debug().Err(err).Msg("error reading proppatch") + w.WriteHeader(status) + return + } + + // retrieve a specific storage space + 
ref, rpcStatus, err := s.lookUpStorageSpaceReference(ctx, spaceID, r.URL.Path) + if err != nil { + sublog.Error().Err(err).Msg("error sending a grpc request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if rpcStatus.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&sublog, w, rpcStatus) + return + } + + c, err := s.getClient() + if err != nil { + sublog.Error().Err(err).Msg("error getting grpc client") + w.WriteHeader(http.StatusInternalServerError) + return + } + // check if resource exists + statReq := &provider.StatRequest{ + Ref: ref, + } + statRes, err := c.Stat(ctx, statReq) + if err != nil { + sublog.Error().Err(err).Msg("error sending a grpc stat request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if statRes.Status.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&sublog, w, statRes.Status) + return + } + + acceptedProps, removedProps, ok := s.handleProppatch(ctx, w, r, ref, pp, sublog) + if !ok { + // handleProppatch handles responses in error cases so we can just return + return + } + + nRef := path.Join(spaceID, statRes.Info.Path) + nRef = path.Join(ctx.Value(ctxKeyBaseURI).(string), nRef) + if statRes.Info.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + nRef += "/" + } + + s.handleProppatchResponse(ctx, w, r, acceptedProps, removedProps, nRef, sublog) +} + +func (s *svc) handleProppatch(ctx context.Context, w http.ResponseWriter, r *http.Request, ref *provider.Reference, patches []Proppatch, log zerolog.Logger) (accepted []xml.Name, removed []xml.Name, ok bool) { + c, err := s.getClient() + if err != nil { + log.Error().Err(err).Msg("error getting grpc client") + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + + rreq := &provider.UnsetArbitraryMetadataRequest{ + Ref: ref, + ArbitraryMetadataKeys: []string{""}, + } + sreq := &provider.SetArbitraryMetadataRequest{ + Ref: ref, + ArbitraryMetadata: &provider.ArbitraryMetadata{ + Metadata: map[string]string{}, + }, + } + + acceptedProps := []xml.Name{} + removedProps := []xml.Name{} + for i := range patches { + if len(patches[i].Props) < 1 { + continue + } + for j := range patches[i].Props { + propNameXML := patches[i].Props[j].XMLName + // don't use path.Join. It removes the double slash! 
concatenate with a / + key := fmt.Sprintf("%s/%s", patches[i].Props[j].XMLName.Space, patches[i].Props[j].XMLName.Local) + value := string(patches[i].Props[j].InnerXML) + remove := patches[i].Remove + // boolean flags may be "set" to false as well + if s.isBooleanProperty(key) { + // Make boolean properties either "0" or "1" + value = s.as0or1(value) + if value == "0" { + remove = true + } + } + // Webdav spec requires the operations to be executed in the order + // specified in the PROPPATCH request + // http://www.webdav.org/specs/rfc2518.html#rfc.section.8.2 + // FIXME: batch this somehow + if remove { + rreq.ArbitraryMetadataKeys[0] = key + res, err := c.UnsetArbitraryMetadata(ctx, rreq) + if err != nil { + log.Error().Err(err).Msg("error sending a grpc UnsetArbitraryMetadata request") + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + + if res.Status.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&log, w, res.Status) + return nil, nil, false + } + removedProps = append(removedProps, propNameXML) + } else { + sreq.ArbitraryMetadata.Metadata[key] = value + res, err := c.SetArbitraryMetadata(ctx, sreq) + if err != nil { + log.Error().Err(err).Str("key", key).Str("value", value).Msg("error sending a grpc SetArbitraryMetadata request") + w.WriteHeader(http.StatusInternalServerError) + return nil, nil, false + } + + if res.Status.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&log, w, res.Status) + return nil, nil, false + } + + acceptedProps = append(acceptedProps, propNameXML) + delete(sreq.ArbitraryMetadata.Metadata, key) + } + } + // FIXME: in case of error, need to set all properties back to the original state, + // and return the error in the matching propstat block, if applicable + // http://www.webdav.org/specs/rfc2518.html#rfc.section.8.2 + } + + return acceptedProps, removedProps, true +} + +func (s *svc) handleProppatchResponse(ctx context.Context, w http.ResponseWriter, r *http.Request, acceptedProps, removedProps []xml.Name, path string, log zerolog.Logger) { + propRes, err := s.formatProppatchResponse(ctx, acceptedProps, removedProps, path) + if err != nil { + log.Error().Err(err).Msg("error formatting proppatch response") + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Header().Set(HeaderDav, "1, 3, extended-mkcol") + w.Header().Set(HeaderContentType, "application/xml; charset=utf-8") + w.WriteHeader(http.StatusMultiStatus) + if _, err := w.Write([]byte(propRes)); err != nil { + log.Err(err).Msg("error writing response") + } +} diff --git a/internal/http/services/owncloud/ocdav/publicfile.go b/internal/http/services/owncloud/ocdav/publicfile.go index c5670014f0..69085d8b40 100644 --- a/internal/http/services/owncloud/ocdav/publicfile.go +++ b/internal/http/services/owncloud/ocdav/publicfile.go @@ -51,34 +51,34 @@ func (h *PublicFileHandler) Handler(s *svc) http.Handler { if relativePath != "" && relativePath != "/" { // accessing the file // PROPFIND has an implicit call - if r.Method != "PROPFIND" && !s.adjustResourcePathInURL(w, r) { + if r.Method != MethodPropfind && !s.adjustResourcePathInURL(w, r) { return } r.URL.Path = path.Base(r.URL.Path) switch r.Method { - case "PROPFIND": + case MethodPropfind: s.handlePropfindOnToken(w, r, h.namespace, false) case http.MethodGet: - s.handleGet(w, r, h.namespace) + s.handlePathGet(w, r, h.namespace) case http.MethodOptions: s.handleOptions(w, r, h.namespace) case http.MethodHead: - s.handleHead(w, r, h.namespace) + s.handlePathHead(w, r, h.namespace) case http.MethodPut: - 
s.handlePut(w, r, h.namespace) + s.handlePathPut(w, r, h.namespace) default: w.WriteHeader(http.StatusMethodNotAllowed) } } else { // accessing the virtual parent folder switch r.Method { - case "PROPFIND": + case MethodPropfind: s.handlePropfindOnToken(w, r, h.namespace, true) case http.MethodOptions: s.handleOptions(w, r, h.namespace) case http.MethodHead: - s.handleHead(w, r, h.namespace) + s.handlePathHead(w, r, h.namespace) default: w.WriteHeader(http.StatusMethodNotAllowed) } diff --git a/internal/http/services/owncloud/ocdav/put.go b/internal/http/services/owncloud/ocdav/put.go index 71a8c1b7b3..e42d447662 100644 --- a/internal/http/services/owncloud/ocdav/put.go +++ b/internal/http/services/owncloud/ocdav/put.go @@ -19,7 +19,7 @@ package ocdav import ( - "io" + "context" "net/http" "path" "strconv" @@ -35,11 +35,12 @@ import ( "github.com/cs3org/reva/pkg/rhttp" "github.com/cs3org/reva/pkg/storage/utils/chunking" "github.com/cs3org/reva/pkg/utils" + "github.com/rs/zerolog" "go.opencensus.io/trace" ) func sufferMacOSFinder(r *http.Request) bool { - return r.Header.Get("X-Expected-Entity-Length") != "" + return r.Header.Get(HeaderExpectedEntityLength) != "" } func handleMacOSFinder(w http.ResponseWriter, r *http.Request) error { @@ -61,8 +62,8 @@ func handleMacOSFinder(w http.ResponseWriter, r *http.Request) error { */ log := appctx.GetLogger(r.Context()) - content := r.Header.Get("Content-Length") - expected := r.Header.Get("X-Expected-Entity-Length") + content := r.Header.Get(HeaderContentLength) + expected := r.Header.Get(HeaderExpectedEntityLength) log.Warn().Str("content-length", content).Str("x-expected-entity-length", expected).Msg("Mac OS Finder corner-case detected") // The best mitigation to this problem is to tell users to not use crappy Finder. @@ -100,89 +101,70 @@ func isContentRange(r *http.Request) bool { in unexpected behaviour (cf PEAR::HTTP_WebDAV_Client 1.0.1), we reject all PUT requests with a Content-Range for now. 
*/ - return r.Header.Get("Content-Range") != "" + return r.Header.Get(HeaderContentRange) != "" } -func (s *svc) handlePut(w http.ResponseWriter, r *http.Request, ns string) { +func (s *svc) handlePathPut(w http.ResponseWriter, r *http.Request, ns string) { ctx := r.Context() + ctx, span := trace.StartSpan(ctx, "put") + defer span.End() + fn := path.Join(ns, r.URL.Path) sublog := appctx.GetLogger(ctx).With().Str("path", fn).Logger() - if r.Body == nil { - sublog.Debug().Msg("body is nil") - w.WriteHeader(http.StatusBadRequest) - return + ref := &provider.Reference{ + Spec: &provider.Reference_Path{ + Path: fn, + }, } - if isContentRange(r) { - sublog.Debug().Msg("Content-Range not supported for PUT") - w.WriteHeader(http.StatusNotImplemented) - return - } + s.handlePut(ctx, w, r, ref, fn, sublog) +} - if sufferMacOSFinder(r) { - err := handleMacOSFinder(w, r) - if err != nil { - sublog.Debug().Err(err).Msg("error handling Mac OS corner-case") - w.WriteHeader(http.StatusInternalServerError) - return - } +func (s *svc) handlePut(ctx context.Context, w http.ResponseWriter, r *http.Request, ref *provider.Reference, path string, log zerolog.Logger) { + if !checkPreconditions(w, r, log) { + // checkPreconditions handles error returns + return } - length, err := strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64) + length, err := getContentLength(w, r) if err != nil { - // Fallback to Upload-Length - length, err = strconv.ParseInt(r.Header.Get("Upload-Length"), 10, 64) - if err != nil { - w.WriteHeader(http.StatusBadRequest) - return - } + w.WriteHeader(http.StatusBadRequest) + return } - s.handlePutHelper(w, r, r.Body, fn, length) -} - -func (s *svc) handlePutHelper(w http.ResponseWriter, r *http.Request, content io.Reader, fn string, length int64) { - ctx := r.Context() - ctx, span := trace.StartSpan(ctx, "put") - defer span.End() - - sublog := appctx.GetLogger(ctx).With().Str("path", fn).Logger() client, err := s.getClient() if err != nil { - sublog.Error().Err(err).Msg("error getting grpc client") + log.Error().Err(err).Msg("error getting grpc client") w.WriteHeader(http.StatusInternalServerError) return } - ref := &provider.Reference{ - Spec: &provider.Reference_Path{Path: fn}, - } sReq := &provider.StatRequest{Ref: ref} sRes, err := client.Stat(ctx, sReq) if err != nil { - sublog.Error().Err(err).Msg("error sending grpc stat request") + log.Error().Err(err).Msg("error sending grpc stat request") w.WriteHeader(http.StatusInternalServerError) return } if sRes.Status.Code != rpc.Code_CODE_OK && sRes.Status.Code != rpc.Code_CODE_NOT_FOUND { - HandleErrorStatus(&sublog, w, sRes.Status) + HandleErrorStatus(&log, w, sRes.Status) return } info := sRes.Info if info != nil { if info.Type != provider.ResourceType_RESOURCE_TYPE_FILE { - sublog.Debug().Msg("resource is not a file") + log.Debug().Msg("resource is not a file") w.WriteHeader(http.StatusConflict) return } - clientETag := r.Header.Get("If-Match") + clientETag := r.Header.Get(HeaderIfMatch) serverETag := info.Etag if clientETag != "" { if clientETag != serverETag { - sublog.Debug().Str("client-etag", clientETag).Str("server-etag", serverETag).Msg("etags mismatch") + log.Debug().Str("client-etag", clientETag).Str("server-etag", serverETag).Msg("etags mismatch") w.WriteHeader(http.StatusPreconditionFailed) return } @@ -190,38 +172,38 @@ func (s *svc) handlePutHelper(w http.ResponseWriter, r *http.Request, content io } opaqueMap := map[string]*typespb.OpaqueEntry{ - "Upload-Length": { + HeaderUploadLength: { Decoder: "plain", Value: 
[]byte(strconv.FormatInt(length, 10)), }, } - if mtime := r.Header.Get("X-OC-Mtime"); mtime != "" { - opaqueMap["X-OC-Mtime"] = &typespb.OpaqueEntry{ + if mtime := r.Header.Get(HeaderOCMtime); mtime != "" { + opaqueMap[HeaderOCMtime] = &typespb.OpaqueEntry{ Decoder: "plain", Value: []byte(mtime), } // TODO: find a way to check if the storage really accepted the value - w.Header().Set("X-OC-Mtime", "accepted") + w.Header().Set(HeaderOCMtime, "accepted") } // curl -X PUT https://demo.owncloud.com/remote.php/webdav/testcs.bin -u demo:demo -d '123' -v -H 'OC-Checksum: SHA1:40bd001563085fc35165329ea1ff5c5ecbdbbeef' var cparts []string // TUS Upload-Checksum header takes precedence - if checksum := r.Header.Get("Upload-Checksum"); checksum != "" { + if checksum := r.Header.Get(HeaderUploadChecksum); checksum != "" { cparts = strings.SplitN(checksum, " ", 2) if len(cparts) != 2 { - sublog.Debug().Str("upload-checksum", checksum).Msg("invalid Upload-Checksum format, expected '[algorithm] [checksum]'") + log.Debug().Str("upload-checksum", checksum).Msg("invalid Upload-Checksum format, expected '[algorithm] [checksum]'") w.WriteHeader(http.StatusBadRequest) return } // Then try owncloud header - } else if checksum := r.Header.Get("OC-Checksum"); checksum != "" { + } else if checksum := r.Header.Get(HeaderOCChecksum); checksum != "" { cparts = strings.SplitN(checksum, ":", 2) if len(cparts) != 2 { - sublog.Debug().Str("oc-checksum", checksum).Msg("invalid OC-Checksum format, expected '[algorithm]:[checksum]'") + log.Debug().Str("oc-checksum", checksum).Msg("invalid OC-Checksum format, expected '[algorithm]:[checksum]'") w.WriteHeader(http.StatusBadRequest) return } @@ -229,7 +211,7 @@ func (s *svc) handlePutHelper(w http.ResponseWriter, r *http.Request, content io // we do not check the algorithm here, because it might depend on the storage if len(cparts) == 2 { // Translate into TUS style Upload-Checksum header - opaqueMap["Upload-Checksum"] = &typespb.OpaqueEntry{ + opaqueMap[HeaderUploadChecksum] = &typespb.OpaqueEntry{ Decoder: "plain", // algorithm is always lowercase, checksum is separated by space Value: []byte(strings.ToLower(cparts[0]) + " " + cparts[1]), @@ -244,13 +226,13 @@ func (s *svc) handlePutHelper(w http.ResponseWriter, r *http.Request, content io // where to upload the file? 
uRes, err := client.InitiateFileUpload(ctx, uReq) if err != nil { - sublog.Error().Err(err).Msg("error initiating file upload") + log.Error().Err(err).Msg("error initiating file upload") w.WriteHeader(http.StatusInternalServerError) return } if uRes.Status.Code != rpc.Code_CODE_OK { - HandleErrorStatus(&sublog, w, uRes.Status) + HandleErrorStatus(&log, w, uRes.Status) return } @@ -262,7 +244,7 @@ func (s *svc) handlePutHelper(w http.ResponseWriter, r *http.Request, content io } if length > 0 { - httpReq, err := rhttp.NewRequest(ctx, "PUT", ep, content) + httpReq, err := rhttp.NewRequest(ctx, http.MethodPut, ep, r.Body) if err != nil { w.WriteHeader(http.StatusInternalServerError) return @@ -271,7 +253,7 @@ func (s *svc) handlePutHelper(w http.ResponseWriter, r *http.Request, content io httpRes, err := s.client.Do(httpReq) if err != nil { - sublog.Error().Err(err).Msg("error doing PUT request to data service") + log.Error().Err(err).Msg("error doing PUT request to data service") w.WriteHeader(http.StatusInternalServerError) return } @@ -288,29 +270,29 @@ func (s *svc) handlePutHelper(w http.ResponseWriter, r *http.Request, content io message: "The computed checksum does not match the one received from the client.", }) if err != nil { - sublog.Error().Msgf("error marshaling xml response: %s", b) + log.Error().Msgf("error marshaling xml response: %s", b) w.WriteHeader(http.StatusInternalServerError) return } _, err = w.Write(b) if err != nil { - sublog.Err(err).Msg("error writing response") + log.Err(err).Msg("error writing response") } return } - sublog.Error().Err(err).Msg("PUT request to data server failed") + log.Error().Err(err).Msg("PUT request to data server failed") w.WriteHeader(httpRes.StatusCode) return } } - ok, err := chunking.IsChunked(fn) + ok, err := chunking.IsChunked(path) if err != nil { w.WriteHeader(http.StatusInternalServerError) return } if ok { - chunk, err := chunking.GetChunkBLOBInfo(fn) + chunk, err := chunking.GetChunkBLOBInfo(path) if err != nil { w.WriteHeader(http.StatusInternalServerError) return @@ -327,25 +309,25 @@ func (s *svc) handlePutHelper(w http.ResponseWriter, r *http.Request, content io // stat again to check the new file's metadata sRes, err = client.Stat(ctx, sReq) if err != nil { - sublog.Error().Err(err).Msg("error sending grpc stat request") + log.Error().Err(err).Msg("error sending grpc stat request") w.WriteHeader(http.StatusInternalServerError) return } if sRes.Status.Code != rpc.Code_CODE_OK { - HandleErrorStatus(&sublog, w, sRes.Status) + HandleErrorStatus(&log, w, sRes.Status) return } newInfo := sRes.Info - w.Header().Add("Content-Type", newInfo.MimeType) - w.Header().Set("ETag", newInfo.Etag) - w.Header().Set("OC-FileId", wrapResourceID(newInfo.Id)) - w.Header().Set("OC-ETag", newInfo.Etag) + w.Header().Add(HeaderContentType, newInfo.MimeType) + w.Header().Set(HeaderETag, newInfo.Etag) + w.Header().Set(HeaderOCFileID, wrapResourceID(newInfo.Id)) + w.Header().Set(HeaderOCETag, newInfo.Etag) t := utils.TSToTime(newInfo.Mtime).UTC() lastModifiedString := t.Format(time.RFC1123Z) - w.Header().Set("Last-Modified", lastModifiedString) + w.Header().Set(HeaderLastModified, lastModifiedString) // file was new if info == nil { @@ -356,3 +338,59 @@ func (s *svc) handlePutHelper(w http.ResponseWriter, r *http.Request, content io // overwrite w.WriteHeader(http.StatusNoContent) } + +func (s *svc) handleSpacesPut(w http.ResponseWriter, r *http.Request, spaceID string) { + ctx := r.Context() + + sublog := appctx.GetLogger(ctx).With().Str("spaceid", 
spaceID).Str("path", r.URL.Path).Logger() + + spaceRef, status, err := s.lookUpStorageSpaceReference(ctx, spaceID, r.URL.Path) + if err != nil { + sublog.Error().Err(err).Msg("error sending a grpc request") + w.WriteHeader(http.StatusInternalServerError) + return + } + + if status.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&sublog, w, status) + return + } + + s.handlePut(ctx, w, r, spaceRef, spaceRef.GetId().GetOpaqueId(), sublog) +} + +func checkPreconditions(w http.ResponseWriter, r *http.Request, log zerolog.Logger) bool { + if r.Body == http.NoBody { + log.Debug().Msg("body is empty") + w.WriteHeader(http.StatusBadRequest) + return false + } + + if isContentRange(r) { + log.Debug().Msg("Content-Range not supported for PUT") + w.WriteHeader(http.StatusNotImplemented) + return false + } + + if sufferMacOSFinder(r) { + err := handleMacOSFinder(w, r) + if err != nil { + log.Debug().Err(err).Msg("error handling Mac OS corner-case") + w.WriteHeader(http.StatusInternalServerError) + return false + } + } + return true +} + +func getContentLength(w http.ResponseWriter, r *http.Request) (int64, error) { + length, err := strconv.ParseInt(r.Header.Get(HeaderContentLength), 10, 64) + if err != nil { + // Fallback to Upload-Length + length, err = strconv.ParseInt(r.Header.Get(HeaderUploadLength), 10, 64) + if err != nil { + return 0, err + } + } + return length, nil +} diff --git a/internal/http/services/owncloud/ocdav/spaces.go b/internal/http/services/owncloud/ocdav/spaces.go new file mode 100644 index 0000000000..3996b57a03 --- /dev/null +++ b/internal/http/services/owncloud/ocdav/spaces.go @@ -0,0 +1,141 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package ocdav + +import ( + "context" + "fmt" + "net/http" + "path/filepath" + + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + storageProvider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/pkg/rhttp/router" +) + +// SpacesHandler handles storage space requests +type SpacesHandler struct { + gatewaySvc string +} + +func (h *SpacesHandler) init(c *Config) error { + h.gatewaySvc = c.GatewaySvc + return nil +} + +// Handler handles requests +func (h *SpacesHandler) Handler(s *svc) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // ctx := r.Context() + // log := appctx.GetLogger(ctx) + + if r.Method == http.MethodOptions { + s.handleOptions(w, r, "spaces") + return + } + + var spaceID string + spaceID, r.URL.Path = router.ShiftPath(r.URL.Path) + + if spaceID == "" { + // listing is disabled, no auth will change that + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + switch r.Method { + case MethodPropfind: + s.handleSpacesPropfind(w, r, spaceID) + case MethodProppatch: + s.handleSpacesProppatch(w, r, spaceID) + case MethodLock: + s.handleLock(w, r, spaceID) + case MethodUnlock: + s.handleUnlock(w, r, spaceID) + case MethodMkcol: + s.handleSpacesMkCol(w, r, spaceID) + case MethodMove: + s.handleSpacesMove(w, r, spaceID) + case MethodCopy: + s.handleSpacesCopy(w, r, spaceID) + case MethodReport: + s.handleReport(w, r, spaceID) + case http.MethodGet: + s.handleSpacesGet(w, r, spaceID) + case http.MethodPut: + s.handleSpacesPut(w, r, spaceID) + case http.MethodPost: + s.handleSpacesTusPost(w, r, spaceID) + case http.MethodOptions: + s.handleOptions(w, r, spaceID) + case http.MethodHead: + s.handleSpacesHead(w, r, spaceID) + case http.MethodDelete: + s.handleSpacesDelete(w, r, spaceID) + default: + http.Error(w, http.StatusText(http.StatusNotImplemented), http.StatusNotImplemented) + } + }) +} + +func (s *svc) lookUpStorageSpaceReference(ctx context.Context, spaceID string, relativePath string) (*storageProvider.Reference, *rpc.Status, error) { + // Get the gateway client + gatewayClient, err := s.getClient() + if err != nil { + return nil, nil, err + } + + // retrieve a specific storage space + lSSReq := &storageProvider.ListStorageSpacesRequest{ + Filters: []*storageProvider.ListStorageSpacesRequest_Filter{ + { + Type: storageProvider.ListStorageSpacesRequest_Filter_TYPE_ID, + Term: &storageProvider.ListStorageSpacesRequest_Filter_Id{ + Id: &storageProvider.StorageSpaceId{ + OpaqueId: spaceID, + }, + }, + }, + }, + } + + lSSRes, err := gatewayClient.ListStorageSpaces(ctx, lSSReq) + if err != nil || lSSRes.Status.Code != rpc.Code_CODE_OK { + return nil, lSSRes.Status, err + } + + if len(lSSRes.StorageSpaces) != 1 { + return nil, nil, fmt.Errorf("unexpected number of spaces") + } + space := lSSRes.StorageSpaces[0] + + // TODO: + // Use ResourceId to make request to the actual storage provider via the gateway. 
+ // - Copy the storageId from the storage space root + // - set the opaque Id to /storageSpaceId/relativePath in + // Correct fix would be to add a new Reference to the CS3API + return &storageProvider.Reference{ + Spec: &storageProvider.Reference_Id{ + Id: &storageProvider.ResourceId{ + StorageId: space.Root.StorageId, + OpaqueId: filepath.Join("/", space.Root.OpaqueId, relativePath), // FIXME this is a hack to pass storage space id and a relative path to the storage provider + }, + }, + }, lSSRes.Status, nil +} diff --git a/internal/http/services/owncloud/ocdav/trashbin.go b/internal/http/services/owncloud/ocdav/trashbin.go index c97353c76f..6db6fcb2b8 100644 --- a/internal/http/services/owncloud/ocdav/trashbin.go +++ b/internal/http/services/owncloud/ocdav/trashbin.go @@ -107,11 +107,11 @@ func (h *TrashbinHandler) Handler(s *svc) http.Handler { // return //} - if key == "" && r.Method == "PROPFIND" { + if key == "" && r.Method == MethodPropfind { h.listTrashbin(w, r, s, u) return } - if key != "" && r.Method == "MOVE" { + if key != "" && r.Method == MethodMove { // find path in url relative to trash base trashBase := ctx.Value(ctxKeyBaseURI).(string) baseURI := path.Join(path.Dir(trashBase), "files", username) @@ -119,8 +119,7 @@ func (h *TrashbinHandler) Handler(s *svc) http.Handler { r = r.WithContext(ctx) // TODO make request.php optional in destination header - dstHeader := r.Header.Get("Destination") - dst, err := extractDestination(dstHeader, baseURI) + dst, err := extractDestination(r) if err != nil { w.WriteHeader(http.StatusBadRequest) return @@ -133,7 +132,7 @@ func (h *TrashbinHandler) Handler(s *svc) http.Handler { return } - if r.Method == "DELETE" { + if r.Method == http.MethodDelete { h.delete(w, r, s, u, key) return } diff --git a/internal/http/services/owncloud/ocdav/tus.go b/internal/http/services/owncloud/ocdav/tus.go index fec279f494..4913045ea5 100644 --- a/internal/http/services/owncloud/ocdav/tus.go +++ b/internal/http/services/owncloud/ocdav/tus.go @@ -19,6 +19,7 @@ package ocdav import ( + "context" "net/http" "path" "strconv" @@ -31,28 +32,77 @@ import ( "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/rhttp" "github.com/cs3org/reva/pkg/utils" + "github.com/rs/zerolog" tusd "github.com/tus/tusd/pkg/handler" "go.opencensus.io/trace" ) -func (s *svc) handleTusPost(w http.ResponseWriter, r *http.Request, ns string) { +func (s *svc) handlePathTusPost(w http.ResponseWriter, r *http.Request, ns string) { ctx := r.Context() ctx, span := trace.StartSpan(ctx, "tus-post") defer span.End() - w.Header().Add("Access-Control-Allow-Headers", "Tus-Resumable, Upload-Length, Upload-Metadata, If-Match") - w.Header().Add("Access-Control-Expose-Headers", "Tus-Resumable, Location") + // read filename from metadata + meta := tusd.ParseMetadataHeader(r.Header.Get(HeaderUploadMetadata)) + if meta["filename"] == "" { + w.WriteHeader(http.StatusPreconditionFailed) + return + } + + // append filename to current dir + fn := path.Join(ns, r.URL.Path, meta["filename"]) + + sublog := appctx.GetLogger(ctx).With().Str("path", fn).Logger() + // check tus headers? 
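Until the CS3 API gains a dedicated storage space reference, the handler above smuggles the space id and the path relative to the space root into ResourceId.OpaqueId, while StorageId carries the storage id of the space root. A minimal, self-contained sketch of that round trip is shown below; the ids, paths and helper names are made up for illustration, but the decode side mirrors the getStorageSpaceReference helper added to the decomposedfs lookup further down in this diff.

package main

import (
	"fmt"
	"path/filepath"
	"strings"

	storageProvider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
)

// encodeSpaceRef packs the space id and the path relative to the space root
// into OpaqueId as "/<space id>/<relative path>", as lookUpStorageSpaceReference does.
func encodeSpaceRef(storageID, spaceID, relativePath string) *storageProvider.Reference {
	return &storageProvider.Reference{
		Spec: &storageProvider.Reference_Id{
			Id: &storageProvider.ResourceId{
				StorageId: storageID, // storage id of the space root, used by the gateway to route
				OpaqueId:  filepath.Join("/", spaceID, relativePath),
			},
		},
	}
}

// decodeSpaceRef splits the packed OpaqueId back into space id and relative path,
// mirroring the parsing done on the storage side.
func decodeSpaceRef(id *storageProvider.ResourceId) (spaceID, relPath string, ok bool) {
	if !strings.HasPrefix(id.OpaqueId, "/") {
		return "", "", false
	}
	parts := strings.SplitN(id.OpaqueId, "/", 3)
	switch len(parts) {
	case 2:
		return parts[1], "", true
	case 3:
		return parts[1], parts[2], true
	}
	return "", "", false
}

func main() {
	// hypothetical ids, for illustration only
	ref := encodeSpaceRef("storage-provider-id", "space-node-id", "docs/report.txt")
	spaceID, relPath, _ := decodeSpaceRef(ref.GetId())
	fmt.Println(spaceID, relPath) // space-node-id docs/report.txt
}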
+ + ref := &provider.Reference{ + Spec: &provider.Reference_Path{Path: fn}, + } + s.handleTusPost(ctx, w, r, meta, ref, sublog) +} + +func (s *svc) handleSpacesTusPost(w http.ResponseWriter, r *http.Request, spaceID string) { + ctx := r.Context() + ctx, span := trace.StartSpan(ctx, "spaces-tus-post") + defer span.End() + + // read filename from metadata + meta := tusd.ParseMetadataHeader(r.Header.Get(HeaderUploadMetadata)) + if meta["filename"] == "" { + w.WriteHeader(http.StatusPreconditionFailed) + return + } + + sublog := appctx.GetLogger(ctx).With().Str("spaceid", spaceID).Str("path", r.URL.Path).Logger() + + spaceRef, status, err := s.lookUpStorageSpaceReference(ctx, spaceID, path.Join(r.URL.Path, meta["filename"])) + if err != nil { + sublog.Error().Err(err).Msg("error sending a grpc request") + w.WriteHeader(http.StatusInternalServerError) + return + } + if status.Code != rpc.Code_CODE_OK { + HandleErrorStatus(&sublog, w, status) + return + } - w.Header().Set("Tus-Resumable", "1.0.0") + s.handleTusPost(ctx, w, r, meta, spaceRef, sublog) +} + +func (s *svc) handleTusPost(ctx context.Context, w http.ResponseWriter, r *http.Request, meta map[string]string, ref *provider.Reference, log zerolog.Logger) { + w.Header().Add(HeaderAccessControlAllowHeaders, strings.Join([]string{HeaderTusResumable, HeaderUploadLength, HeaderUploadMetadata, HeaderIfMatch}, ", ")) + w.Header().Add(HeaderAccessControlExposeHeaders, strings.Join([]string{HeaderTusResumable, HeaderLocation}, ", ")) + + w.Header().Set(HeaderTusResumable, "1.0.0") // Test if the version sent by the client is supported // GET methods are not checked since a browser may visit this URL and does // not include this header. This request is not part of the specification. - if r.Header.Get("Tus-Resumable") != "1.0.0" { + if r.Header.Get(HeaderTusResumable) != "1.0.0" { w.WriteHeader(http.StatusPreconditionFailed) return } - if r.Header.Get("Upload-Length") == "" { + if r.Header.Get(HeaderUploadLength) == "" { w.WriteHeader(http.StatusPreconditionFailed) return } @@ -61,58 +111,41 @@ func (s *svc) handleTusPost(w http.ResponseWriter, r *http.Request, ns string) { // curl -X PUT https://demo.owncloud.com/remote.php/webdav/testcs.bin -u demo:demo -d '123' -v -H 'OC-Checksum: SHA1:40bd001563085fc35165329ea1ff5c5ecbdbbeef' // TODO check Expect: 100-continue - - // read filename from metadata - meta := tusd.ParseMetadataHeader(r.Header.Get("Upload-Metadata")) - if meta["filename"] == "" { - w.WriteHeader(http.StatusPreconditionFailed) - return - } - - // append filename to current dir - fn := path.Join(ns, r.URL.Path, meta["filename"]) - - sublog := appctx.GetLogger(ctx).With().Str("path", fn).Logger() - // check tus headers? 
- // check if destination exists or is a file client, err := s.getClient() if err != nil { - sublog.Error().Err(err).Msg("error getting grpc client") + log.Error().Err(err).Msg("error getting grpc client") w.WriteHeader(http.StatusInternalServerError) return } - sReq := &provider.StatRequest{ - Ref: &provider.Reference{ - Spec: &provider.Reference_Path{Path: fn}, - }, + Ref: ref, } sRes, err := client.Stat(ctx, sReq) if err != nil { - sublog.Error().Err(err).Msg("error sending grpc stat request") + log.Error().Err(err).Msg("error sending grpc stat request") w.WriteHeader(http.StatusInternalServerError) return } if sRes.Status.Code != rpc.Code_CODE_OK && sRes.Status.Code != rpc.Code_CODE_NOT_FOUND { - HandleErrorStatus(&sublog, w, sRes.Status) + HandleErrorStatus(&log, w, sRes.Status) return } info := sRes.Info if info != nil && info.Type != provider.ResourceType_RESOURCE_TYPE_FILE { - sublog.Warn().Msg("resource is not a file") + log.Warn().Msg("resource is not a file") w.WriteHeader(http.StatusConflict) return } if info != nil { - clientETag := r.Header.Get("If-Match") + clientETag := r.Header.Get(HeaderIfMatch) serverETag := info.Etag if clientETag != "" { if clientETag != serverETag { - sublog.Warn().Str("client-etag", clientETag).Str("server-etag", serverETag).Msg("etags mismatch") + log.Warn().Str("client-etag", clientETag).Str("server-etag", serverETag).Msg("etags mismatch") w.WriteHeader(http.StatusPreconditionFailed) return } @@ -120,15 +153,15 @@ func (s *svc) handleTusPost(w http.ResponseWriter, r *http.Request, ns string) { } opaqueMap := map[string]*typespb.OpaqueEntry{ - "Upload-Length": { + HeaderUploadLength: { Decoder: "plain", - Value: []byte(r.Header.Get("Upload-Length")), + Value: []byte(r.Header.Get(HeaderUploadLength)), }, } mtime := meta["mtime"] if mtime != "" { - opaqueMap["X-OC-Mtime"] = &typespb.OpaqueEntry{ + opaqueMap[HeaderOCMtime] = &typespb.OpaqueEntry{ Decoder: "plain", Value: []byte(mtime), } @@ -136,9 +169,7 @@ func (s *svc) handleTusPost(w http.ResponseWriter, r *http.Request, ns string) { // initiateUpload uReq := &provider.InitiateFileUploadRequest{ - Ref: &provider.Reference{ - Spec: &provider.Reference_Path{Path: fn}, - }, + Ref: ref, Opaque: &typespb.Opaque{ Map: opaqueMap, }, @@ -146,13 +177,13 @@ func (s *svc) handleTusPost(w http.ResponseWriter, r *http.Request, ns string) { uRes, err := client.InitiateFileUpload(ctx, uReq) if err != nil { - sublog.Error().Err(err).Msg("error initiating file upload") + log.Error().Err(err).Msg("error initiating file upload") w.WriteHeader(http.StatusInternalServerError) return } if uRes.Status.Code != rpc.Code_CODE_OK { - HandleErrorStatus(&sublog, w, uRes.Status) + HandleErrorStatus(&log, w, uRes.Status) return } @@ -172,15 +203,15 @@ func (s *svc) handleTusPost(w http.ResponseWriter, r *http.Request, ns string) { ep += token } - w.Header().Set("Location", ep) + w.Header().Set(HeaderLocation, ep) // for creation-with-upload extension forward bytes to dataprovider // TODO check this really streams - if r.Header.Get("Content-Type") == "application/offset+octet-stream" { + if r.Header.Get(HeaderContentType) == "application/offset+octet-stream" { - length, err := strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64) + length, err := strconv.ParseInt(r.Header.Get(HeaderContentLength), 10, 64) if err != nil { - sublog.Debug().Err(err).Msg("wrong request") + log.Debug().Err(err).Msg("wrong request") w.WriteHeader(http.StatusBadRequest) return } @@ -188,73 +219,73 @@ func (s *svc) handleTusPost(w http.ResponseWriter, 
r *http.Request, ns string) { var httpRes *http.Response if length != 0 { - httpReq, err := rhttp.NewRequest(ctx, "PATCH", ep, r.Body) + httpReq, err := rhttp.NewRequest(ctx, http.MethodPatch, ep, r.Body) if err != nil { - sublog.Debug().Err(err).Msg("wrong request") + log.Debug().Err(err).Msg("wrong request") w.WriteHeader(http.StatusInternalServerError) return } - httpReq.Header.Set("Content-Type", r.Header.Get("Content-Type")) - httpReq.Header.Set("Content-Length", r.Header.Get("Content-Length")) - if r.Header.Get("Upload-Offset") != "" { - httpReq.Header.Set("Upload-Offset", r.Header.Get("Upload-Offset")) + httpReq.Header.Set(HeaderContentType, r.Header.Get(HeaderContentType)) + httpReq.Header.Set(HeaderContentLength, r.Header.Get(HeaderContentLength)) + if r.Header.Get(HeaderUploadOffset) != "" { + httpReq.Header.Set(HeaderUploadOffset, r.Header.Get(HeaderUploadOffset)) } else { - httpReq.Header.Set("Upload-Offset", "0") + httpReq.Header.Set(HeaderUploadOffset, "0") } - httpReq.Header.Set("Tus-Resumable", r.Header.Get("Tus-Resumable")) + httpReq.Header.Set(HeaderTusResumable, r.Header.Get(HeaderTusResumable)) httpRes, err = s.client.Do(httpReq) if err != nil { - sublog.Error().Err(err).Msg("error doing GET request to data service") + log.Error().Err(err).Msg("error doing PATCH request to data service") w.WriteHeader(http.StatusInternalServerError) return } defer httpRes.Body.Close() - w.Header().Set("Upload-Offset", httpRes.Header.Get("Upload-Offset")) - w.Header().Set("Tus-Resumable", httpRes.Header.Get("Tus-Resumable")) + w.Header().Set(HeaderUploadOffset, httpRes.Header.Get(HeaderUploadOffset)) + w.Header().Set(HeaderTusResumable, httpRes.Header.Get(HeaderTusResumable)) if httpRes.StatusCode != http.StatusNoContent { w.WriteHeader(httpRes.StatusCode) return } } else { - sublog.Debug().Msg("Skipping sending a Patch request as body is empty") + log.Debug().Msg("Skipping sending a Patch request as body is empty") } // check if upload was fully completed - if length == 0 || httpRes.Header.Get("Upload-Offset") == r.Header.Get("Upload-Length") { + if length == 0 || httpRes.Header.Get(HeaderUploadOffset) == r.Header.Get(HeaderUploadLength) { // get uploaded file metadata sRes, err := client.Stat(ctx, sReq) if err != nil { - sublog.Error().Err(err).Msg("error sending grpc stat request") + log.Error().Err(err).Msg("error sending grpc stat request") w.WriteHeader(http.StatusInternalServerError) return } if sRes.Status.Code != rpc.Code_CODE_OK && sRes.Status.Code != rpc.Code_CODE_NOT_FOUND { - HandleErrorStatus(&sublog, w, sRes.Status) + HandleErrorStatus(&log, w, sRes.Status) return } info := sRes.Info if info == nil { - sublog.Error().Msg("No info found for uploaded file") + log.Error().Msg("No info found for uploaded file") w.WriteHeader(http.StatusInternalServerError) return } - if httpRes != nil && httpRes.Header != nil && httpRes.Header.Get("X-OC-Mtime") != "" { + if httpRes != nil && httpRes.Header != nil && httpRes.Header.Get(HeaderOCMtime) != "" { // set the "accepted" value if returned in the upload response headers - w.Header().Set("X-OC-Mtime", httpRes.Header.Get("X-OC-Mtime")) + w.Header().Set(HeaderOCMtime, httpRes.Header.Get(HeaderOCMtime)) } - w.Header().Set("Content-Type", info.MimeType) - w.Header().Set("OC-FileId", wrapResourceID(info.Id)) - w.Header().Set("OC-ETag", info.Etag) - w.Header().Set("ETag", info.Etag) + w.Header().Set(HeaderContentType, info.MimeType) + w.Header().Set(HeaderOCFileID, wrapResourceID(info.Id)) + w.Header().Set(HeaderOCETag, info.Etag) + 
w.Header().Set(HeaderETag, info.Etag) t := utils.TSToTime(info.Mtime).UTC() lastModifiedString := t.Format(time.RFC1123Z) - w.Header().Set("Last-Modified", lastModifiedString) + w.Header().Set(HeaderLastModified, lastModifiedString) } } diff --git a/internal/http/services/owncloud/ocdav/versions.go b/internal/http/services/owncloud/ocdav/versions.go index bd261594ae..d916996a9a 100644 --- a/internal/http/services/owncloud/ocdav/versions.go +++ b/internal/http/services/owncloud/ocdav/versions.go @@ -62,11 +62,11 @@ func (h *VersionsHandler) Handler(s *svc, rid *provider.ResourceId) http.Handler s.handleOptions(w, r, "versions") return } - if key == "" && r.Method == "PROPFIND" { + if key == "" && r.Method == MethodPropfind { h.doListVersions(w, r, s, rid) return } - if key != "" && r.Method == "COPY" { + if key != "" && r.Method == MethodCopy { // TODO(jfd) it seems we cannot directly GET version content with cs3 ... // TODO(jfd) cs3api has no delete file version call // TODO(jfd) restore version to given Destination, but cs3api has no destination diff --git a/internal/http/services/owncloud/ocdav/webdav.go b/internal/http/services/owncloud/ocdav/webdav.go index b8693ef97f..a98714a5bf 100644 --- a/internal/http/services/owncloud/ocdav/webdav.go +++ b/internal/http/services/owncloud/ocdav/webdav.go @@ -23,6 +23,56 @@ import ( "path" ) +// Common Webdav methods. +// +// Unless otherwise noted, these are defined in RFC 4918 section 9. +const ( + MethodPropfind = "PROPFIND" + MethodLock = "LOCK" + MethodUnlock = "UNLOCK" + MethodProppatch = "PROPPATCH" + MethodMkcol = "MKCOL" + MethodMove = "MOVE" + MethodCopy = "COPY" + MethodReport = "REPORT" +) + +// Common HTTP headers. +const ( + HeaderAcceptRanges = "Accept-Ranges" + HeaderAccessControlAllowHeaders = "Access-Control-Allow-Headers" + HeaderAccessControlExposeHeaders = "Access-Control-Expose-Headers" + HeaderContentDisposistion = "Content-Disposition" + HeaderContentLength = "Content-Length" + HeaderContentRange = "Content-Range" + HeaderContentType = "Content-Type" + HeaderETag = "ETag" + HeaderLastModified = "Last-Modified" + HeaderLocation = "Location" + HeaderRange = "Range" + HeaderIfMatch = "If-Match" +) + +// Non standard HTTP headers. 
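The TUS handler above accepts the creation-with-upload extension: a single POST carries Tus-Resumable, Upload-Length and the base64 encoded filename in Upload-Metadata, plus the file bytes when Content-Type is application/offset+octet-stream, and the response exposes Location, Upload-Offset and the OC headers. A rough client-side sketch of that flow against the new spaces endpoint might look as follows; the URL, space id, path and credentials are placeholders, not part of this change.

package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"net/http"
	"strconv"
)

func main() {
	body := []byte("hello spaces")

	// hypothetical spaces endpoint: .../dav/spaces/<space id>/<path relative to the space root>
	url := "https://localhost:9200/remote.php/dav/spaces/space-node-id/docs"

	req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Tus-Resumable", "1.0.0")
	req.Header.Set("Upload-Length", strconv.Itoa(len(body)))
	// TUS metadata values are base64 encoded
	req.Header.Set("Upload-Metadata", "filename "+base64.StdEncoding.EncodeToString([]byte("report.txt")))
	// creation-with-upload: ship the bytes along with the creation request
	req.Header.Set("Content-Type", "application/offset+octet-stream")
	req.SetBasicAuth("einstein", "relativity") // placeholder credentials

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	// on success the handler returns the upload URL and, once all bytes are
	// written, the final Upload-Offset, OC-FileId and ETag headers
	fmt.Println(res.Status, res.Header.Get("Location"), res.Header.Get("Upload-Offset"))
}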
+const ( + HeaderOCFileID = "OC-FileId" + HeaderOCETag = "OC-ETag" + HeaderOCChecksum = "OC-Checksum" + HeaderDepth = "Depth" + HeaderDav = "DAV" + HeaderTusResumable = "Tus-Resumable" + HeaderTusVersion = "Tus-Version" + HeaderTusExtension = "Tus-Extension" + HeaderDestination = "Destination" + HeaderOverwrite = "Overwrite" + HeaderUploadChecksum = "Upload-Checksum" + HeaderUploadLength = "Upload-Length" + HeaderUploadMetadata = "Upload-Metadata" + HeaderUploadOffset = "Upload-Offset" + HeaderOCMtime = "X-OC-Mtime" + HeaderExpectedEntityLength = "X-Expected-Entity-Length" +) + // WebDavHandler implements a dav endpoint type WebDavHandler struct { namespace string @@ -40,34 +90,34 @@ func (h *WebDavHandler) Handler(s *svc) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ns := applyLayout(r.Context(), h.namespace, h.useLoggedInUserNS, r.URL.Path) switch r.Method { - case "PROPFIND": + case MethodPropfind: s.handlePropfind(w, r, ns) - case "LOCK": + case MethodLock: s.handleLock(w, r, ns) - case "UNLOCK": + case MethodUnlock: s.handleUnlock(w, r, ns) - case "PROPPATCH": - s.handleProppatch(w, r, ns) - case "MKCOL": - s.handleMkcol(w, r, ns) - case "MOVE": - s.handleMove(w, r, ns) - case "COPY": - s.handleCopy(w, r, ns) - case "REPORT": + case MethodProppatch: + s.handlePathProppatch(w, r, ns) + case MethodMkcol: + s.handlePathMkcol(w, r, ns) + case MethodMove: + s.handlePathMove(w, r, ns) + case MethodCopy: + s.handlePathCopy(w, r, ns) + case MethodReport: s.handleReport(w, r, ns) case http.MethodGet: - s.handleGet(w, r, ns) + s.handlePathGet(w, r, ns) case http.MethodPut: - s.handlePut(w, r, ns) + s.handlePathPut(w, r, ns) case http.MethodPost: - s.handleTusPost(w, r, ns) + s.handlePathTusPost(w, r, ns) case http.MethodOptions: s.handleOptions(w, r, ns) case http.MethodHead: - s.handleHead(w, r, ns) + s.handlePathHead(w, r, ns) case http.MethodDelete: - s.handleDelete(w, r, ns) + s.handlePathDelete(w, r, ns) default: w.WriteHeader(http.StatusNotFound) } diff --git a/pkg/rhttp/datatx/manager/loader/loader.go b/pkg/rhttp/datatx/manager/loader/loader.go index be814f3e86..15a11bb5f4 100644 --- a/pkg/rhttp/datatx/manager/loader/loader.go +++ b/pkg/rhttp/datatx/manager/loader/loader.go @@ -21,6 +21,7 @@ package loader import ( // Load core data transfer protocols _ "github.com/cs3org/reva/pkg/rhttp/datatx/manager/simple" + _ "github.com/cs3org/reva/pkg/rhttp/datatx/manager/spaces" _ "github.com/cs3org/reva/pkg/rhttp/datatx/manager/tus" // Add your own here ) diff --git a/pkg/rhttp/datatx/manager/simple/simple.go b/pkg/rhttp/datatx/manager/simple/simple.go index b09aa669ed..ea78faf482 100644 --- a/pkg/rhttp/datatx/manager/simple/simple.go +++ b/pkg/rhttp/datatx/manager/simple/simple.go @@ -68,7 +68,7 @@ func (m *manager) Handler(fs storage.FS) (http.Handler, error) { switch r.Method { case "GET", "HEAD": - download.GetOrHeadFile(w, r, fs) + download.GetOrHeadFile(w, r, fs, "") case "PUT": fn := r.URL.Path defer r.Body.Close() diff --git a/pkg/rhttp/datatx/manager/spaces/spaces.go b/pkg/rhttp/datatx/manager/spaces/spaces.go new file mode 100644 index 0000000000..5569d857c7 --- /dev/null +++ b/pkg/rhttp/datatx/manager/spaces/spaces.go @@ -0,0 +1,109 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package spaces + +import ( + "net/http" + + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/pkg/appctx" + "github.com/cs3org/reva/pkg/errtypes" + "github.com/cs3org/reva/pkg/rhttp/datatx" + "github.com/cs3org/reva/pkg/rhttp/datatx/manager/registry" + "github.com/cs3org/reva/pkg/rhttp/datatx/utils/download" + "github.com/cs3org/reva/pkg/rhttp/router" + "github.com/cs3org/reva/pkg/storage" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" +) + +func init() { + registry.Register("spaces", New) +} + +type config struct{} + +type manager struct { + conf *config +} + +func parseConfig(m map[string]interface{}) (*config, error) { + c := &config{} + if err := mapstructure.Decode(m, c); err != nil { + err = errors.Wrap(err, "error decoding conf") + return nil, err + } + return c, nil +} + +// New returns a datatx manager implementation that relies on HTTP PUT/GET. +func New(m map[string]interface{}) (datatx.DataTX, error) { + c, err := parseConfig(m) + if err != nil { + return nil, err + } + + return &manager{conf: c}, nil +} + +func (m *manager) Handler(fs storage.FS) (http.Handler, error) { + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + var spaceID string + spaceID, r.URL.Path = router.ShiftPath(r.URL.Path) + + sublog := appctx.GetLogger(ctx).With().Str("datatx", "spaces").Str("space", spaceID).Logger() + + switch r.Method { + case "GET", "HEAD": + download.GetOrHeadFile(w, r, fs, spaceID) + case "PUT": + fn := r.URL.Path + defer r.Body.Close() + + ref := &provider.Reference{Spec: &provider.Reference_Path{Path: fn}} + + err := fs.Upload(ctx, ref, r.Body) + switch v := err.(type) { + case nil: + w.WriteHeader(http.StatusOK) + case errtypes.PartialContent: + w.WriteHeader(http.StatusPartialContent) + case errtypes.ChecksumMismatch: + w.WriteHeader(errtypes.StatusChecksumMismatch) + case errtypes.NotFound: + w.WriteHeader(http.StatusNotFound) + case errtypes.PermissionDenied: + w.WriteHeader(http.StatusForbidden) + case errtypes.InvalidCredentials: + w.WriteHeader(http.StatusUnauthorized) + case errtypes.InsufficientStorage: + w.WriteHeader(http.StatusInsufficientStorage) + default: + sublog.Error().Err(v).Msg("error uploading file") + w.WriteHeader(http.StatusInternalServerError) + } + return + default: + w.WriteHeader(http.StatusNotImplemented) + } + }) + return h, nil +} diff --git a/pkg/rhttp/datatx/manager/tus/tus.go b/pkg/rhttp/datatx/manager/tus/tus.go index 09694cb68a..7ece3466fb 100644 --- a/pkg/rhttp/datatx/manager/tus/tus.go +++ b/pkg/rhttp/datatx/manager/tus/tus.go @@ -103,7 +103,7 @@ func (m *manager) Handler(fs storage.FS) (http.Handler, error) { case "DELETE": handler.DelFile(w, r) case "GET": - download.GetOrHeadFile(w, r, fs) + download.GetOrHeadFile(w, r, fs, "") default: w.WriteHeader(http.StatusNotImplemented) } diff --git 
a/pkg/rhttp/datatx/utils/download/download.go b/pkg/rhttp/datatx/utils/download/download.go index f7de5fab9c..8dbd78da7a 100644 --- a/pkg/rhttp/datatx/utils/download/download.go +++ b/pkg/rhttp/datatx/utils/download/download.go @@ -24,6 +24,7 @@ import ( "io" "mime/multipart" "net/http" + "path" "strconv" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" @@ -34,7 +35,7 @@ import ( ) // GetOrHeadFile returns the requested file content -func GetOrHeadFile(w http.ResponseWriter, r *http.Request, fs storage.FS) { +func GetOrHeadFile(w http.ResponseWriter, r *http.Request, fs storage.FS, spaceID string) { ctx := r.Context() sublog := appctx.GetLogger(ctx).With().Str("svc", "datatx").Str("handler", "download").Logger() @@ -46,8 +47,13 @@ func GetOrHeadFile(w http.ResponseWriter, r *http.Request, fs storage.FS) { fn = files[0] } - ref := &provider.Reference{Spec: &provider.Reference_Path{Path: fn}} - + var ref *provider.Reference + if spaceID == "" { + ref = &provider.Reference{Spec: &provider.Reference_Path{Path: fn}} + } else { + // build a storage space reference + ref = &provider.Reference{Spec: &provider.Reference_Id{Id: &provider.ResourceId{OpaqueId: path.Join("/", spaceID, fn)}}} + } // TODO check preconditions like If-Range, If-Match ... var md *provider.ResourceInfo diff --git a/pkg/storage/fs/owncloud/owncloud.go b/pkg/storage/fs/owncloud/owncloud.go index 41fdf42d5d..85aadfbf63 100644 --- a/pkg/storage/fs/owncloud/owncloud.go +++ b/pkg/storage/fs/owncloud/owncloud.go @@ -1151,7 +1151,14 @@ func (fs *ocfs) GetHome(ctx context.Context) (string, error) { return "", nil } -func (fs *ocfs) CreateDir(ctx context.Context, sp string) (err error) { +func (fs *ocfs) CreateDir(ctx context.Context, ref *provider.Reference, name string) (err error) { + + dir, err := fs.resolve(ctx, ref) + if err != nil { + return err + } + sp := filepath.Join(dir, name) + ip := fs.toInternalPath(ctx, sp) // check permissions of parent dir @@ -2211,6 +2218,10 @@ func (fs *ocfs) RestoreRecycleItem(ctx context.Context, key, restorePath string) return fs.propagate(ctx, tgt) } +func (fs *ocfs) ListStorageSpaces(ctx context.Context, filter []*provider.ListStorageSpacesRequest_Filter) ([]*provider.StorageSpace, error) { + return nil, errtypes.NotSupported("list storage spaces") +} + func (fs *ocfs) propagate(ctx context.Context, leafPath string) error { var root string if fs.c.EnableHome { diff --git a/pkg/storage/fs/s3/s3.go b/pkg/storage/fs/s3/s3.go index d7cfc4a493..5a12339328 100644 --- a/pkg/storage/fs/s3/s3.go +++ b/pkg/storage/fs/s3/s3.go @@ -287,8 +287,15 @@ func (fs *s3FS) CreateHome(ctx context.Context) error { return errtypes.NotSupported("s3fs: not supported") } -func (fs *s3FS) CreateDir(ctx context.Context, fn string) error { +func (fs *s3FS) CreateDir(ctx context.Context, ref *provider.Reference, name string) error { log := appctx.GetLogger(ctx) + + dir, err := fs.resolve(ctx, ref) + if err != nil { + return err + } + fn := path.Join(dir, name) + fn = fs.addRoot(fn) + "/" // append / to indicate folder // TODO only if fn does not end in / input := &s3.PutObjectInput{ @@ -659,3 +666,7 @@ func (fs *s3FS) ListRecycle(ctx context.Context) ([]*provider.RecycleItem, error func (fs *s3FS) RestoreRecycleItem(ctx context.Context, restoreKey, restorePath string) error { return errtypes.NotSupported("restore recycle") } + +func (fs *s3FS) ListStorageSpaces(ctx context.Context, filter []*provider.ListStorageSpacesRequest_Filter) ([]*provider.StorageSpace, error) { + return nil,
errtypes.NotSupported("list storage spaces") +} diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 53e718886c..eb0f5a04f1 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -31,7 +31,7 @@ import ( type FS interface { GetHome(ctx context.Context) (string, error) CreateHome(ctx context.Context) error - CreateDir(ctx context.Context, fn string) error + CreateDir(ctx context.Context, ref *provider.Reference, name string) error Delete(ctx context.Context, ref *provider.Reference) error Move(ctx context.Context, oldRef, newRef *provider.Reference) error GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string) (*provider.ResourceInfo, error) @@ -56,6 +56,7 @@ type FS interface { Shutdown(ctx context.Context) error SetArbitraryMetadata(ctx context.Context, ref *provider.Reference, md *provider.ArbitraryMetadata) error UnsetArbitraryMetadata(ctx context.Context, ref *provider.Reference, keys []string) error + ListStorageSpaces(ctx context.Context, filter []*provider.ListStorageSpacesRequest_Filter) ([]*provider.StorageSpace, error) } // Registry is the interface that storage registries implement diff --git a/pkg/storage/utils/decomposedfs/decomposedfs.go b/pkg/storage/utils/decomposedfs/decomposedfs.go index cfef24de56..b3e4528bf5 100644 --- a/pkg/storage/utils/decomposedfs/decomposedfs.go +++ b/pkg/storage/utils/decomposedfs/decomposedfs.go @@ -24,13 +24,16 @@ package decomposedfs import ( "context" "io" + "math" "net/url" "os" "path/filepath" "strconv" "strings" + userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/logger" @@ -235,14 +238,18 @@ func (fs *Decomposedfs) GetPathByID(ctx context.Context, id *provider.ResourceId } // CreateDir creates the specified directory -func (fs *Decomposedfs) CreateDir(ctx context.Context, fn string) (err error) { +func (fs *Decomposedfs) CreateDir(ctx context.Context, ref *provider.Reference, name string) (err error) { + var n *node.Node - if n, err = fs.lu.NodeFromPath(ctx, fn); err != nil { + if n, err = fs.lu.NodeFromResource(ctx, ref); err != nil { + return + } + if n, err = n.Child(ctx, name); err != nil { return } if n.Exists { - return errtypes.AlreadyExists(fn) + return errtypes.AlreadyExists(name) } pn, err := n.Parent() if err != nil { @@ -465,6 +472,161 @@ func (fs *Decomposedfs) Download(ctx context.Context, ref *provider.Reference) ( return reader, nil } +// ListStorageSpaces returns a list of StorageSpaces. +// The list can be filtered by space type or space id. 
+func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provider.ListStorageSpacesRequest_Filter) ([]*provider.StorageSpace, error) { + // TODO check filters + + // for, now list all user homes + // TODO make a dedicated /spaces subfolder in the storage root, next to /nodes, /blobs and /trash + // it should follow /spaces// -> ../../nodes/ and point to the root node of the space + // needs a migration step that checks if the /spaces folder exists, if not + // - it iterates over the /nodes/root folder to create /spaces/personal/ symlinks + // - it iterates over all /nodes/ entries to create /spaces/shares/ symlinks + // - should be good enough to iterate over the /nodes/ entries, because the ext attrs should indicate personal spaces or share spaces + // when the space symlink is broken delete the space? yes + // read permissions are deduced from the node? + // the spaceid can be the nodeid + + // this actually requires us to move all user homes into a subfolder of /nodes/root, + // e.g. /nodes/root/ otherwise storage space names might collide even though they are of different types + // /nodes/root/personal/foo and /nodes/root/shares/foo might be two very different spaces, a /nodes/root/foo is not expressive enough + // we would not need /nodes/root if access always happened via spaceid+relative path + + spaceType := "*" + spaceID := "*" + + for i := range filter { + switch filter[i].Type { + case provider.ListStorageSpacesRequest_Filter_TYPE_SPACE_TYPE: + spaceType = filter[i].GetSpaceType() + case provider.ListStorageSpacesRequest_Filter_TYPE_ID: + //spaceId = filter[i].GetId().OpaqueId // TODO requests needs to contain the driveid ... currently it is the storage id + } + } + + // /var/lib/ocis/storage/users/spaces/personal/nodeid + // /var/lib/ocis/storage/users/spaces/shared/nodeid + matches, err := filepath.Glob(filepath.Join(fs.o.Root, "spaces", spaceType, spaceID)) + if err != nil { + return nil, err + } + + var spaces []*provider.StorageSpace + + u, ok := user.ContextGetUser(ctx) + if !ok { + appctx.GetLogger(ctx).Debug().Msg("expected user in context") + return spaces, nil + } + + for i := range matches { + // always read link in case storage space id != node id + if target, err := os.Readlink(matches[i]); err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("match", matches[i]).Msg("could not read link, skipping") + continue + } else { + n, err := node.ReadNode(ctx, fs.lu, filepath.Base(target)) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("id", filepath.Base(target)).Msg("could not read node, skipping") + continue + } + owner, err := n.Owner() + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not read owner, skipping") + continue + } + + // filter out spaces user cannot access (currently based on stat permission) + p, err := n.ReadUserPermissions(ctx, u) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not read permissions, skipping") + continue + } + if !p.Stat { + continue + } + + // TODO apply filter + + // build return value + + space := &provider.StorageSpace{ + Id: &provider.StorageSpaceId{OpaqueId: n.ID}, // FIXME Id should just be a string + Root: &provider.ResourceId{ + StorageId: "1284d238-aa92-42ce-bdc4-0b0000009157", // FIXME storage provider id needs to be returned so the gateway can route + OpaqueId: n.ID, + }, + Name: n.Name, + SpaceType: filepath.Base(filepath.Dir(matches[i])), + // Mtime is set either as node.tmtime or as fi.mtime below + } 
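The listing above assumes a spaces index of symlinks next to /nodes, created by the tree setup migration further down in this diff. A minimal sketch of that layout and of resolving the node ids behind it, with illustrative paths and a hypothetical helper name, could look like this:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Assumed on-disk layout (illustrative):
//
//   <root>/nodes/<nodeid>                        root node of a space
//   <root>/spaces/personal/<spaceid> -> ../../nodes/<nodeid>
//   <root>/spaces/share/<spaceid>    -> ../../nodes/<nodeid>
//
// listSpaceNodeIDs resolves the node id behind every matching space symlink,
// mirroring the glob/readlink walk in ListStorageSpaces above.
func listSpaceNodeIDs(root, spaceType, spaceID string) (map[string]string, error) {
	matches, err := filepath.Glob(filepath.Join(root, "spaces", spaceType, spaceID))
	if err != nil {
		return nil, err
	}
	nodeIDs := map[string]string{}
	for _, m := range matches {
		target, err := os.Readlink(m)
		if err != nil {
			continue // skip broken entries, as the real implementation does
		}
		// the symlink target ends in the node id of the space root
		nodeIDs[m] = filepath.Base(target)
	}
	return nodeIDs, nil
}

func main() {
	ids, err := listSpaceNodeIDs("/var/lib/ocis/storage/users", "*", "*")
	if err != nil {
		panic(err)
	}
	fmt.Println(ids)
}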
+ + if space.SpaceType == "share" { + // return folder name? + space.Name = n.Name + } else { + space.Name = "root" // do not expose the id as name, this is the root of a space + // TODO read from extended attribute for project / group spaces + } + + // fill in user object if the current user is the owner + if owner.Idp == u.Id.Idp && owner.OpaqueId == u.Id.OpaqueId { + space.Owner = u + } else { + space.Owner = &userv1beta1.User{ // FIXME only return a UserID, not a full blown user object + Id: owner, + } + } + + // we set the space mtime to the root item mtime + // override the stat mtime with a tmtime if it is present + if tmt, err := n.GetTMTime(); err == nil { + un := tmt.UnixNano() + space.Mtime = &types.Timestamp{ + Seconds: uint64(un / 1000000000), + Nanos: uint32(un % 1000000000), + } + } else { + // fall back to stat mtime + if fi, err := os.Stat(matches[i]); err == nil { + un := fi.ModTime().UnixNano() + space.Mtime = &types.Timestamp{ + Seconds: uint64(un / 1000000000), + Nanos: uint32(un % 1000000000), + } + } + } + + // quota + v, err := xattr.Get(matches[i], xattrs.QuotaAttr) + switch { + case err == nil: + // make sure we have a proper signed int + // we use the same magic numbers to indicate: + // -1 = uncalculated + // -2 = unknown + // -3 = unlimited + if quota, err := strconv.ParseInt(string(v), 10, 64); err == nil { + if quota >= 0 { + space.Quota = &provider.Quota{ + QuotaMaxBytes: uint64(quota), + QuotaMaxFiles: math.MaxUint64, // TODO MaxUInt64? = unlimited? why even max files? 0 = unlimited? + } + } + } else { + appctx.GetLogger(ctx).Debug().Err(err).Str("nodepath", matches[i]).Msg("could not read quota") + } + } + + spaces = append(spaces, space) + } + } + + return spaces, nil + +} + func (fs *Decomposedfs) copyMD(s string, t string) (err error) { var attrs []string if attrs, err = xattr.List(s); err != nil { diff --git a/pkg/storage/utils/decomposedfs/lookup.go b/pkg/storage/utils/decomposedfs/lookup.go index 0de4106b73..607342dbfb 100644 --- a/pkg/storage/utils/decomposedfs/lookup.go +++ b/pkg/storage/utils/decomposedfs/lookup.go @@ -38,6 +38,20 @@ type Lookup struct { Options *options.Options } +func getStorageSpaceReference(ref *provider.ResourceId) (string, string, bool) { + if strings.HasPrefix(ref.OpaqueId, "/") { + // opaqueID looks like "/a-storage-space-id/optional/relative/path" + parts := strings.SplitN(ref.OpaqueId, "/", 3) + switch len(parts) { + case 2: + return parts[1], "", true + case 3: + return parts[1], parts[2], true + } + } + return "", "", false +} + // NodeFromResource takes in a request path or request id and converts it to a Node func (lu *Lookup) NodeFromResource(ctx context.Context, ref *provider.Reference) (*node.Node, error) { if ref.GetPath() != "" { @@ -45,6 +59,25 @@ func (lu *Lookup) NodeFromResource(ctx context.Context, ref *provider.Reference) } if ref.GetId() != nil { + // check if a storage space reference is used + if spaceID, relPath, ok := getStorageSpaceReference(ref.GetId()); ok { + // currently, the decomposed fs uses the root node id as the space id + n, err := lu.NodeFromID(ctx, &provider.ResourceId{OpaqueId: spaceID}) + if err != nil { + return nil, err + } + if relPath != "" { + // now walk the relative path + n, err = lu.WalkPath(ctx, n, relPath, func(ctx context.Context, n *node.Node) error { + return nil + }) + if err != nil { + return nil, err + } + } + return n, nil + } + return lu.NodeFromID(ctx, ref.GetId()) } diff --git a/pkg/storage/utils/decomposedfs/node/node.go 
b/pkg/storage/utils/decomposedfs/node/node.go index 55af74eb5c..09820f52a9 100644 --- a/pkg/storage/utils/decomposedfs/node/node.go +++ b/pkg/storage/utils/decomposedfs/node/node.go @@ -853,6 +853,7 @@ func (n *Node) hasUserShares(ctx context.Context) bool { return false } +// TODO make public in a comparison package func isSameUserID(i *userpb.UserId, j *userpb.UserId) bool { switch { case i == nil, j == nil: diff --git a/pkg/storage/utils/decomposedfs/tree/tree.go b/pkg/storage/utils/decomposedfs/tree/tree.go index a6f0700871..e7ea4a1574 100644 --- a/pkg/storage/utils/decomposedfs/tree/tree.go +++ b/pkg/storage/utils/decomposedfs/tree/tree.go @@ -114,9 +114,75 @@ func (t *Tree) Setup(owner string) error { if err != nil { return err } + + // create spaces folder and iterate over existing nodes to populate it + spacesPath := filepath.Join(t.root, "spaces") + fi, err := os.Stat(spacesPath) + if os.IsNotExist(err) { + // create personal spaces dir + if err := os.MkdirAll(filepath.Join(spacesPath, "personal"), 0700); err != nil { + return err + } + // create share spaces dir + if err := os.MkdirAll(filepath.Join(spacesPath, "share"), 0700); err != nil { + return err + } + + f, err := os.Open(filepath.Join(t.root, "nodes")) + if err != nil { + return err + } + nodes, err := f.Readdir(0) + if err != nil { + return err + } + + for i := range nodes { + nodePath := filepath.Join(t.root, "nodes", nodes[i].Name()) + + // is it a user root? -> create personal space + if isRootNode(nodePath) { + // create personal space + // we can reuse the node id as the space id + err = os.Symlink("../../nodes/"+nodes[i].Name(), filepath.Join(t.root, "spaces/personal", nodes[i].Name())) + if err != nil { + fmt.Printf("could not create symlink for personal space %s, %s\n", nodes[i].Name(), err) + } + } + + // is it a shared node? 
-> create shared space + if isSharedNode(nodePath) { + err = os.Symlink("../../nodes/"+nodes[i].Name(), filepath.Join(t.root, "spaces/share", nodes[i].Name())) + if err != nil { + fmt.Printf("could not create symlink for shared space %s, %s\n", nodes[i].Name(), err) + } + } + } + } else { + // check if it is a directory + if !fi.IsDir() { + return fmt.Errorf("%s is not a directory", spacesPath) + } + } + return nil } +func isRootNode(nodePath string) bool { + attrBytes, err := xattr.Get(nodePath, xattrs.ParentidAttr) + return err == nil && string(attrBytes) == "root" +} +func isSharedNode(nodePath string) bool { + if attrs, err := xattr.List(nodePath); err == nil { + for i := range attrs { + if strings.HasPrefix(attrs[i], xattrs.GrantPrefix) { + return true + } + } + } + return false +} + // GetMD returns the metadata of a node in the tree func (t *Tree) GetMD(ctx context.Context, n *node.Node) (os.FileInfo, error) { md, err := os.Stat(n.InternalPath()) diff --git a/pkg/storage/utils/eosfs/eosfs.go b/pkg/storage/utils/eosfs/eosfs.go index ba6c8de598..cb29de9721 100644 --- a/pkg/storage/utils/eosfs/eosfs.go +++ b/pkg/storage/utils/eosfs/eosfs.go @@ -954,7 +954,7 @@ func (fs *eosfs) createUserDir(ctx context.Context, u *userpb.User, path string, return nil } -func (fs *eosfs) CreateDir(ctx context.Context, p string) error { +func (fs *eosfs) CreateDir(ctx context.Context, ref *provider.Reference, name string) error { log := appctx.GetLogger(ctx) u, err := getUser(ctx) if err != nil { @@ -966,6 +966,12 @@ func (fs *eosfs) CreateDir(ctx context.Context, p string) error { return err } + dir, err := fs.resolve(ctx, u, ref) + if err != nil { + return err + } + p := path.Join(dir, name) + log.Info().Msgf("eos: createdir: path=%s", p) if fs.isShareFolder(ctx, p) { @@ -1303,6 +1309,10 @@ func (fs *eosfs) RestoreRecycleItem(ctx context.Context, key, restorePath string return fs.c.RestoreDeletedEntry(ctx, uid, gid, key) } +func (fs *eosfs) ListStorageSpaces(ctx context.Context, filter []*provider.ListStorageSpacesRequest_Filter) ([]*provider.StorageSpace, error) { + return nil, errtypes.NotSupported("list storage spaces") +} + func (fs *eosfs) convertToRecycleItem(ctx context.Context, eosDeletedItem *eosclient.DeletedEntry) (*provider.RecycleItem, error) { path, err := fs.unwrap(ctx, eosDeletedItem.RestorePath) if err != nil { diff --git a/pkg/storage/utils/localfs/localfs.go b/pkg/storage/utils/localfs/localfs.go index 3ed3873aba..ac17a17e3f 100644 --- a/pkg/storage/utils/localfs/localfs.go +++ b/pkg/storage/utils/localfs/localfs.go @@ -739,7 +739,13 @@ func (fs *localfs) createHomeInternal(ctx context.Context, fn string) error { return nil } -func (fs *localfs) CreateDir(ctx context.Context, fn string) error { +func (fs *localfs) CreateDir(ctx context.Context, ref *provider.Reference, name string) error { + + dir, err := fs.resolve(ctx, ref) + if err != nil { + return err + } + fn := path.Join(dir, name) if fs.isShareFolder(ctx, fn) { return errtypes.PermissionDenied("localfs: cannot create folder under the share folder") @@ -749,7 +755,7 @@ func (fs *localfs) CreateDir(ctx context.Context, fn string) error { if _, err := os.Stat(fn); err == nil { return errtypes.AlreadyExists(fn) } - err := os.Mkdir(fn, 0700) + err = os.Mkdir(fn, 0700) if err != nil { if os.IsNotExist(err) { return errtypes.NotFound(fn) @@ -1238,6 +1244,10 @@ func (fs *localfs) RestoreRecycleItem(ctx context.Context, restoreKey, restorePa return fs.propagate(ctx, localRestorePath) } +func (fs *localfs) ListStorageSpaces(ctx
context.Context, filter []*provider.ListStorageSpacesRequest_Filter) ([]*provider.StorageSpace, error) { + return nil, errtypes.NotSupported("list storage spaces") +} + func (fs *localfs) propagate(ctx context.Context, leafPath string) error { var root string
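Across all drivers the storage.FS interface now creates directories from a parent Reference plus a child name instead of a full path. A minimal sketch of a call site under the new signature is shown below; the parent path is hypothetical and wiring up a concrete storage.FS is out of scope here.

package example

import (
	"context"
	"fmt"

	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
	"github.com/cs3org/reva/pkg/storage"
)

// CreateChildDir shows the shape of a CreateDir call after the interface
// change: the parent is addressed by a Reference (path based here, but an id
// based reference such as the packed space reference works the same way) and
// the new folder name is passed separately. Illustrative only.
func CreateChildDir(ctx context.Context, fs storage.FS, name string) error {
	parent := &provider.Reference{
		Spec: &provider.Reference_Path{Path: "/home/einstein"}, // hypothetical parent
	}
	if err := fs.CreateDir(ctx, parent, name); err != nil {
		return fmt.Errorf("could not create %q: %w", name, err)
	}
	return nil
}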