diff --git a/changelog/unreleased/grant-locking.md b/changelog/unreleased/grant-locking.md new file mode 100644 index 0000000000..3518552692 --- /dev/null +++ b/changelog/unreleased/grant-locking.md @@ -0,0 +1,7 @@ +Bugfix: fix jsoncs3 atomic persistence + +The jsoncs3 share manager now uses etags instead of mtimes to determine when metadata needs to be updated. +As a precondition we had to change decomposedfs as well: to consistently calculate the etag for the file content +we now store the mtime in the metadata and use the metadata lock for atomicity. + +https://github.com/cs3org/reva/pull/4117 diff --git a/go.mod b/go.mod index d9a9c874dc..f5f050e1ae 100644 --- a/go.mod +++ b/go.mod @@ -82,6 +82,7 @@ require ( go.opentelemetry.io/otel/sdk v1.16.0 go.opentelemetry.io/otel/trace v1.16.0 golang.org/x/crypto v0.11.0 + golang.org/x/exp v0.0.0-20221026004748-78e5e7837ae6 golang.org/x/oauth2 v0.10.0 golang.org/x/sync v0.2.0 golang.org/x/sys v0.10.0 diff --git a/go.sum b/go.sum index b3cc61c352..5ab381a7d6 100644 --- a/go.sum +++ b/go.sum @@ -1305,6 +1305,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20221026004748-78e5e7837ae6 h1:mC6uOkPi9SUk8A59jZvw7//rlyc+MlELtQUCyOUSKZQ= +golang.org/x/exp v0.0.0-20221026004748-78e5e7837ae6/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= diff --git a/internal/grpc/services/gateway/usershareprovider.go b/internal/grpc/services/gateway/usershareprovider.go index 07e50c39e0..bbcfbf0bc4 100644 --- a/internal/grpc/services/gateway/usershareprovider.go +++ b/internal/grpc/services/gateway/usershareprovider.go @@ -52,6 +52,13 @@ func (s *svc) RemoveShare(ctx context.Context, req *collaboration.RemoveShareReq return s.removeShare(ctx, req) } +func (s *svc) UpdateShare(ctx context.Context, req *collaboration.UpdateShareRequest) (*collaboration.UpdateShareResponse, error) { + if !s.c.UseCommonSpaceRootShareLogic && refIsSpaceRoot(req.GetShare().GetResourceId()) { + return s.updateSpaceShare(ctx, req) + } + return s.updateShare(ctx, req) +} + // TODO(labkode): we need to validate share state vs storage grant and storage ref // If there are any inconsistencies, the share needs to be flagged as invalid and a background process // or active fix needs to be performed. @@ -98,7 +105,7 @@ func (s *svc) ListShares(ctx context.Context, req *collaboration.ListSharesReque return res, nil } -func (s *svc) UpdateShare(ctx context.Context, req *collaboration.UpdateShareRequest) (*collaboration.UpdateShareResponse, error) { +func (s *svc) updateShare(ctx context.Context, req *collaboration.UpdateShareRequest) (*collaboration.UpdateShareResponse, error) { c, err := pool.GetUserShareProviderClient(s.c.UserShareProviderEndpoint) if err != nil { appctx.GetLogger(ctx).
@@ -114,9 +121,14 @@ func (s *svc) UpdateShare(ctx context.Context, req *collaboration.UpdateShareReq } if s.c.CommitShareToStorageGrant { - updateGrantStatus, err := s.updateGrant(ctx, res.GetShare().GetResourceId(), - res.GetShare().GetGrantee(), - res.GetShare().GetPermissions().GetPermissions()) + creator := ctxpkg.ContextMustGetUser(ctx) + grant := &provider.Grant{ + Grantee: req.GetShare().GetGrantee(), + Permissions: req.GetShare().GetPermissions().GetPermissions(), + Expiration: req.GetShare().GetExpiration(), + Creator: creator.GetId(), + } + updateGrantStatus, err := s.updateGrant(ctx, res.GetShare().GetResourceId(), grant, nil) if err != nil { return nil, errors.Wrap(err, "gateway: error calling updateGrant") @@ -134,6 +146,51 @@ func (s *svc) UpdateShare(ctx context.Context, req *collaboration.UpdateShareReq return res, nil } +func (s *svc) updateSpaceShare(ctx context.Context, req *collaboration.UpdateShareRequest) (*collaboration.UpdateShareResponse, error) { + // If the share is a denial we call denyGrant instead. + var st *rpc.Status + var err error + // TODO: change CS3 APIs + opaque := &typesv1beta1.Opaque{ + Map: map[string]*typesv1beta1.OpaqueEntry{ + "spacegrant": {}, + }, + } + utils.AppendPlainToOpaque(opaque, "spacetype", utils.ReadPlainFromOpaque(req.Opaque, "spacetype")) + + creator := ctxpkg.ContextMustGetUser(ctx) + grant := &provider.Grant{ + Grantee: req.GetShare().GetGrantee(), + Permissions: req.GetShare().GetPermissions().GetPermissions(), + Expiration: req.GetShare().GetExpiration(), + Creator: creator.GetId(), + } + if grants.PermissionsEqual(req.Share.GetPermissions().GetPermissions(), &provider.ResourcePermissions{}) { + st, err = s.denyGrant(ctx, req.GetShare().GetResourceId(), req.GetShare().GetGrantee(), opaque) + if err != nil { + return nil, errors.Wrap(err, "gateway: error denying grant in storage") + } + } else { + st, err = s.updateGrant(ctx, req.GetShare().GetResourceId(), grant, opaque) + if err != nil { + return nil, errors.Wrap(err, "gateway: error updating grant in storage") + } + } + + res := &collaboration.UpdateShareResponse{ + Status: st, + Share: req.Share, + } + + if st.Code != rpc.Code_CODE_OK { + return res, nil + } + + s.statCache.RemoveStatContext(ctx, ctxpkg.ContextMustGetUser(ctx).GetId(), req.GetShare().GetResourceId()) + s.providerCache.RemoveListStorageProviders(req.GetShare().GetResourceId()) + return res, nil +} + // TODO(labkode): listing received shares just goes to the user share manager and gets the list of // received shares. The display name of the shares should be a friendly name, like the basename // of the original file.
@@ -333,19 +390,15 @@ func (s *svc) addGrant(ctx context.Context, id *provider.ResourceId, g *provider return grantRes.Status, nil } -func (s *svc) updateGrant(ctx context.Context, id *provider.ResourceId, g *provider.Grantee, p *provider.ResourcePermissions) (*rpc.Status, error) { +func (s *svc) updateGrant(ctx context.Context, id *provider.ResourceId, grant *provider.Grant, opaque *typesv1beta1.Opaque) (*rpc.Status, error) { ref := &provider.Reference{ ResourceId: id, } - creator := ctxpkg.ContextMustGetUser(ctx) grantReq := &provider.UpdateGrantRequest{ - Ref: ref, - Grant: &provider.Grant{ - Grantee: g, - Permissions: p, - Creator: creator.GetId(), - }, + Opaque: opaque, + Ref: ref, + Grant: grant, } c, _, err := s.find(ctx, ref) diff --git a/internal/grpc/services/storageprovider/storageprovider.go b/internal/grpc/services/storageprovider/storageprovider.go index 67156b31e8..f7812783cf 100644 --- a/internal/grpc/services/storageprovider/storageprovider.go +++ b/internal/grpc/services/storageprovider/storageprovider.go @@ -324,7 +324,13 @@ func (s *service) InitiateFileUpload(ctx context.Context, req *provider.Initiate return nil, err } switch sRes.Status.Code { - case rpc.Code_CODE_OK, rpc.Code_CODE_NOT_FOUND: + case rpc.Code_CODE_OK: + if req.GetIfNotExist() { + return &provider.InitiateFileUploadResponse{ + Status: status.NewAlreadyExists(ctx, errors.New("already exists"), "already exists"), + }, nil + } + case rpc.Code_CODE_NOT_FOUND: // Just continue with a normal upload default: return &provider.InitiateFileUploadResponse{ @@ -342,10 +348,14 @@ func (s *service) InitiateFileUpload(ctx context.Context, req *provider.Initiate } metadata["if-match"] = ifMatch } - if !validateIfUnmodifiedSince(req.GetIfUnmodifiedSince(), sRes.GetInfo()) { - return &provider.InitiateFileUploadResponse{ - Status: status.NewFailedPrecondition(ctx, errors.New("resource has been modified"), "resource has been modified"), - }, nil + ifUnmodifiedSince := req.GetIfUnmodifiedSince() + if ifUnmodifiedSince != nil { + metadata["if-unmodified-since"] = utils.TSToTime(ifUnmodifiedSince).Format(time.RFC3339Nano) + if !validateIfUnmodifiedSince(ifUnmodifiedSince, sRes.GetInfo()) { + return &provider.InitiateFileUploadResponse{ + Status: status.NewFailedPrecondition(ctx, errors.New("resource has been modified"), "resource has been modified"), + }, nil + } } ctx = ctxpkg.ContextSetLockID(ctx, req.LockId) @@ -1080,6 +1090,19 @@ func (s *service) UpdateGrant(ctx context.Context, req *provider.UpdateGrantRequ } } + // TODO: update CS3 APIs + // FIXME these should be part of the AddGrantRequest object + // https://github.com/owncloud/ocis/issues/4312 + if utils.ExistsInOpaque(req.Opaque, "spacegrant") { + ctx = context.WithValue( + ctx, + utils.SpaceGrant, + struct{ SpaceType string }{ + SpaceType: utils.ReadPlainFromOpaque(req.Opaque, "spacetype"), + }, + ) + } + // check grantee type is valid if req.Grant.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_INVALID { return &provider.UpdateGrantResponse{ diff --git a/internal/http/services/owncloud/ocdav/net/headers.go b/internal/http/services/owncloud/ocdav/net/headers.go index d5205ae433..10a475c0ab 100644 --- a/internal/http/services/owncloud/ocdav/net/headers.go +++ b/internal/http/services/owncloud/ocdav/net/headers.go @@ -33,6 +33,7 @@ const ( HeaderLocation = "Location" HeaderRange = "Range" HeaderIfMatch = "If-Match" + HeaderIfNoneMatch = "If-None-Match" HeaderPrefer = "Prefer" HeaderPreferenceApplied = "Preference-Applied" HeaderVary = "Vary" diff --git 
a/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/spaces.go b/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/spaces.go index 7956918ff8..a473ca0f43 100644 --- a/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/spaces.go +++ b/internal/http/services/owncloud/ocs/handlers/apps/sharing/shares/spaces.go @@ -39,6 +39,7 @@ import ( "github.com/cs3org/reva/v2/pkg/rgrpc/todo/pool" sdk "github.com/cs3org/reva/v2/pkg/sdk/common" "github.com/cs3org/reva/v2/pkg/storagespace" + "github.com/cs3org/reva/v2/pkg/utils" "github.com/pkg/errors" ) @@ -126,45 +127,70 @@ func (h *Handler) addSpaceMember(w http.ResponseWriter, r *http.Request, info *p } } - if role.Name != conversions.RoleManager { - ref := provider.Reference{ResourceId: info.GetId()} - p, err := h.findProvider(ctx, &ref) - if err != nil { - response.WriteOCSError(w, r, response.MetaNotFound.StatusCode, "error getting storage provider", err) - return - } + ref := provider.Reference{ResourceId: info.GetId()} + p, err := h.findProvider(ctx, &ref) + if err != nil { + response.WriteOCSError(w, r, response.MetaNotFound.StatusCode, "error getting storage provider", err) + return + } - providerClient, err := h.getStorageProviderClient(p) - if err != nil { - response.WriteOCSError(w, r, response.MetaNotFound.StatusCode, "error getting storage provider client", err) - return - } + providerClient, err := h.getStorageProviderClient(p) + if err != nil { + response.WriteOCSError(w, r, response.MetaNotFound.StatusCode, "error getting storage provider client", err) + return + } - lgRes, err := providerClient.ListGrants(ctx, &provider.ListGrantsRequest{Ref: &ref}) - if err != nil || lgRes.Status.Code != rpc.Code_CODE_OK { - response.WriteOCSError(w, r, response.MetaServerError.StatusCode, "error listing space grants", err) - return - } + lgRes, err := providerClient.ListGrants(ctx, &provider.ListGrantsRequest{Ref: &ref}) + if err != nil || lgRes.Status.Code != rpc.Code_CODE_OK { + response.WriteOCSError(w, r, response.MetaServerError.StatusCode, "error listing space grants", err) + return + } - if !isSpaceManagerRemaining(lgRes.Grants, grantee) { - response.WriteOCSError(w, r, http.StatusForbidden, "the space must have at least one manager", nil) - return - } + if !isSpaceManagerRemaining(lgRes.Grants, grantee) { + response.WriteOCSError(w, r, http.StatusForbidden, "the space must have at least one manager", nil) + return } - createShareRes, err := client.CreateShare(ctx, &collaborationv1beta1.CreateShareRequest{ - ResourceInfo: info, - Grant: &collaborationv1beta1.ShareGrant{ - Permissions: &collaborationv1beta1.SharePermissions{ - Permissions: permissions, + // we have to send the update request to the gateway to give it a chance to invalidate its cache + // TODO the gateway should no longer cache stuff because invalidation is too expensive. The decomposedfs already has a better cache.
+ if granteeExists(lgRes.Grants, grantee) { + updateShareReq := &collaborationv1beta1.UpdateShareRequest{ + // TODO: change CS3 APIs + Opaque: &types.Opaque{ + Map: map[string]*types.OpaqueEntry{ + "spacegrant": {}, + }, }, - Grantee: &grantee, - Expiration: expirationTs, - }, - }) - if err != nil || createShareRes.Status.Code != rpc.Code_CODE_OK { - response.WriteOCSError(w, r, response.MetaServerError.StatusCode, "could not add space member", err) - return + Share: &collaborationv1beta1.Share{ + ResourceId: ref.GetResourceId(), + Permissions: &collaborationv1beta1.SharePermissions{ + Permissions: permissions, + }, + Grantee: &grantee, + Expiration: expirationTs, + }, + } + updateShareReq.Opaque = utils.AppendPlainToOpaque(updateShareReq.Opaque, "spacetype", info.GetSpace().GetSpaceType()) + updateShareRes, err := client.UpdateShare(ctx, updateShareReq) + if err != nil || updateShareRes.Status.Code != rpc.Code_CODE_OK { + response.WriteOCSError(w, r, response.MetaServerError.StatusCode, "could not update space member grant", err) + return + } + } else { + createShareRes, err := client.CreateShare(ctx, &collaborationv1beta1.CreateShareRequest{ + ResourceInfo: info, + Grant: &collaborationv1beta1.ShareGrant{ + Permissions: &collaborationv1beta1.SharePermissions{ + Permissions: permissions, + }, + Grantee: &grantee, + Expiration: expirationTs, + }, + }) + if err != nil || createShareRes.Status.Code != rpc.Code_CODE_OK { + response.WriteOCSError(w, r, response.MetaServerError.StatusCode, "could not add space member grant", err) + return + } } response.WriteOCSSuccess(w, r, nil) @@ -325,6 +351,15 @@ func isSpaceManagerRemaining(grants []*provider.Grant, grantee provider.Grantee) return false } +func granteeExists(grants []*provider.Grant, grantee provider.Grantee) bool { + for _, g := range grants { + if isEqualGrantee(*g.Grantee, grantee) { + return true + } + } + return false +} + func isEqualGrantee(a, b provider.Grantee) bool { // Ideally we would want to use utils.GranteeEqual() // but the grants stored in the decomposedfs aren't complete (missing usertype and idp) diff --git a/pkg/errtypes/errtypes.go b/pkg/errtypes/errtypes.go index affdd04320..4aff212931 100644 --- a/pkg/errtypes/errtypes.go +++ b/pkg/errtypes/errtypes.go @@ -23,6 +23,7 @@ package errtypes import ( + "net/http" "strings" rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" @@ -172,6 +173,19 @@ func (e InsufficientStorage) StatusCode() int { return StatusInsufficientStorage } +// NotModified is the error to use when a resource was not modified, e.g. the requested etag did not change. +type NotModified string + +func (e NotModified) Error() string { return "error: not modified: " + string(e) } + +// NotModified implements the IsNotModified interface. +func (e NotModified) IsNotModified() {} + +// StatusCode returns http.StatusNotModified. This implementation is needed to allow TUS to cast the correct http errors. +func (e NotModified) StatusCode() int { + return http.StatusNotModified +} + // Body returns the error body.
This implementation is needed to allow TUS to cast the correct http errors func (e InsufficientStorage) Body() []byte { return []byte(e.Error()) @@ -302,3 +316,37 @@ func NewErrtypeFromStatus(status *rpc.Status) error { return InternalError(status.Message) } } + +// NewErrtypeFromHTTPStatusCode maps an http status to an errtype +func NewErrtypeFromHTTPStatusCode(code int, message string) error { + switch code { + case http.StatusOK: + return nil + case http.StatusNotFound: + return NotFound(message) + case http.StatusConflict: + return AlreadyExists(message) + case http.StatusNotImplemented: + return NotSupported(message) + case http.StatusNotModified: + return NotModified(message) + case http.StatusForbidden: + return PermissionDenied(message) + case http.StatusLocked: + return Locked(message) + case http.StatusPreconditionFailed: + return Aborted(message) + case http.StatusMethodNotAllowed: + return PreconditionFailed(message) + case http.StatusInsufficientStorage: + return InsufficientStorage(message) + case http.StatusBadRequest: + return BadRequest(message) + case http.StatusPartialContent: + return PartialContent(message) + case StatusChecksumMismatch: + return ChecksumMismatch(message) + default: + return InternalError(message) + } +} diff --git a/pkg/publicshare/manager/json/persistence/cs3/cs3.go b/pkg/publicshare/manager/json/persistence/cs3/cs3.go index e2d3fb989d..c54e933e28 100644 --- a/pkg/publicshare/manager/json/persistence/cs3/cs3.go +++ b/pkg/publicshare/manager/json/persistence/cs3/cs3.go @@ -102,9 +102,10 @@ func (p *cs3) Write(ctx context.Context, db persistence.PublicShares) error { return err } - return p.s.Upload(ctx, metadata.UploadRequest{ + _, err = p.s.Upload(ctx, metadata.UploadRequest{ Content: dbAsJSON, Path: "publicshares.json", IfUnmodifiedSince: p.db.mtime, }) + return err } diff --git a/pkg/rhttp/datatx/manager/simple/simple.go b/pkg/rhttp/datatx/manager/simple/simple.go index 5e47c9a15a..05dc7d241a 100644 --- a/pkg/rhttp/datatx/manager/simple/simple.go +++ b/pkg/rhttp/datatx/manager/simple/simple.go @@ -129,7 +129,7 @@ func (m *manager) Handler(fs storage.FS) (http.Handler, error) { w.WriteHeader(http.StatusUnauthorized) case errtypes.InsufficientStorage: w.WriteHeader(http.StatusInsufficientStorage) - case errtypes.PreconditionFailed, errtypes.Aborted: + case errtypes.PreconditionFailed, errtypes.Aborted, errtypes.AlreadyExists: w.WriteHeader(http.StatusPreconditionFailed) default: sublog.Error().Err(v).Msg("error uploading file") diff --git a/pkg/rhttp/datatx/manager/spaces/spaces.go b/pkg/rhttp/datatx/manager/spaces/spaces.go index f16f656ec9..05357ca4ef 100644 --- a/pkg/rhttp/datatx/manager/spaces/spaces.go +++ b/pkg/rhttp/datatx/manager/spaces/spaces.go @@ -144,7 +144,7 @@ func (m *manager) Handler(fs storage.FS) (http.Handler, error) { w.WriteHeader(http.StatusUnauthorized) case errtypes.InsufficientStorage: w.WriteHeader(http.StatusInsufficientStorage) - case errtypes.PreconditionFailed, errtypes.Aborted: + case errtypes.PreconditionFailed, errtypes.Aborted, errtypes.AlreadyExists: w.WriteHeader(http.StatusPreconditionFailed) default: sublog.Error().Err(v).Msg("error uploading file") diff --git a/pkg/rhttp/datatx/utils/download/download.go b/pkg/rhttp/datatx/utils/download/download.go index c40b55f9ab..cf0c863237 100644 --- a/pkg/rhttp/datatx/utils/download/download.go +++ b/pkg/rhttp/datatx/utils/download/download.go @@ -20,6 +20,7 @@ package download import ( + "context" "fmt" "io" "mime/multipart" @@ -39,6 +40,25 @@ import ( 
"github.com/rs/zerolog" ) +type contextKey struct{} + +var etagKey = contextKey{} + +// ContextWithEtag returns a new `context.Context` that holds an etag. +func ContextWithEtag(ctx context.Context, etag string) context.Context { + return context.WithValue(ctx, etagKey, etag) +} + +// EtagFromContext returns the etag previously associated with `ctx`, or +// `""` if no such etag could be found. +func EtagFromContext(ctx context.Context) string { + val := ctx.Value(etagKey) + if etag, ok := val.(string); ok { + return etag + } + return "" +} + // GetOrHeadFile returns the requested file content func GetOrHeadFile(w http.ResponseWriter, r *http.Request, fs storage.FS, spaceID string) { ctx := r.Context() @@ -75,11 +95,25 @@ func GetOrHeadFile(w http.ResponseWriter, r *http.Request, fs storage.FS, spaceI // do a stat to set a Content-Length header - if md, err = fs.GetMD(ctx, ref, nil, []string{"size", "mimetype"}); err != nil { + if md, err = fs.GetMD(ctx, ref, nil, []string{"size", "mimetype", "etag"}); err != nil { handleError(w, &sublog, err, "stat") return } + // check etag, see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-None-Match + for _, etag := range r.Header.Values(net.HeaderIfNoneMatch) { + if md.Etag == etag { + // When the condition fails for GET and HEAD methods, then the server must return + // HTTP status code 304 (Not Modified). [...] Note that the server generating a + // 304 response MUST generate any of the following header fields that would have + // been sent in a 200 (OK) response to the same request: + // Cache-Control, Content-Location, Date, ETag, Expires, and Vary. + w.Header().Set(net.HeaderETag, md.Etag) + w.WriteHeader(http.StatusNotModified) + return + } + } + // fill in storage provider id if it is missing if spaceID != "" && md.GetId().GetStorageId() == "" { md.Id.StorageId = ref.ResourceId.StorageId @@ -107,6 +141,7 @@ func GetOrHeadFile(w http.ResponseWriter, r *http.Request, fs storage.FS, spaceI } } + ctx = ContextWithEtag(ctx, md.Etag) content, err := fs.Download(ctx, ref) if err != nil { handleError(w, &sublog, err, "download") diff --git a/pkg/share/manager/jsoncs3/jsoncs3.go b/pkg/share/manager/jsoncs3/jsoncs3.go index f7f7709eec..661767f9ce 100644 --- a/pkg/share/manager/jsoncs3/jsoncs3.go +++ b/pkg/share/manager/jsoncs3/jsoncs3.go @@ -92,19 +92,22 @@ import ( 2. create /users/{userid}/created.json if it doesn't exist yet and add the space/share 3. create /users/{userid}/received.json or /groups/{groupid}/received.json if it doesn exist yet and add the space/share - When updating shares /storages/{storageid}/{spaceid}.json is updated accordingly. The mtime is used to invalidate in-memory caches: + When updating shares /storages/{storageid}/{spaceid}.json is updated accordingly. The etag is used to invalidate in-memory caches: - TODO the upload is tried with an if-unmodified-since header - - TODO when if fails, the {spaceid}.json file is downloaded, the changes are reapplied and the upload is retried with the new mtime + - TODO when if fails, the {spaceid}.json file is downloaded, the changes are reapplied and the upload is retried with the new etag When updating received shares the mountpoint and state are updated in /users/{userid}/received.json (for both user and group shares). When reading the list of received shares the /users/{userid}/received.json file and the /groups/{groupid}/received.json files are statted. 
- - if the mtime changed we download the file to update the local cache + - if the etag changed we download the file to update the local cache When reading the list of created shares the /users/{userid}/created.json file is statted - - if the mtime changed we download the file to update the local cache + - if the etag changed we download the file to update the local cache */ +// TODO implement a channel-based aggregation of sharing requests: every in-memory cache should read as many share updates for a space as are available and update them all in one go. +// Whenever a persist operation fails we check whether we can read more shares from the channel. + // name is the Tracer name used to identify this instrumentation library. const tracerName = "jsoncs3" @@ -365,7 +368,7 @@ func (m *Manager) Share(ctx context.Context, md *provider.ResourceInfo, g *colla func (m *Manager) getByID(ctx context.Context, id *collaboration.ShareId) (*collaboration.Share, error) { storageID, spaceID, _ := shareid.Decode(id.OpaqueId) - share, err := m.Cache.Get(ctx, storageID, spaceID, id.OpaqueId) + share, err := m.Cache.Get(ctx, storageID, spaceID, id.OpaqueId, false) if err != nil { return nil, err } @@ -655,41 +658,98 @@ func (m *Manager) listCreatedShares(ctx context.Context, user *userv1beta1.User, return nil, err } - var ss []*collaboration.Share - for ssid, spaceShareIDs := range list { - storageID, spaceID, _ := shareid.Decode(ssid) - spaceShares, err := m.Cache.ListSpace(ctx, storageID, spaceID) - if err != nil { - continue - } - for shareid := range spaceShareIDs.IDs { - s := spaceShares.Shares[shareid] - if s == nil { - continue + numWorkers := m.MaxConcurrency + if numWorkers == 0 || len(list) < numWorkers { + numWorkers = len(list) + } + + type w struct { + ssid string + ids sharecache.SpaceShareIDs + } + work := make(chan w) + results := make(chan *collaboration.Share) + + g, ctx := errgroup.WithContext(ctx) + + // Distribute work + g.Go(func() error { + defer close(work) + for ssid, ids := range list { + select { + case work <- w{ssid, ids}: + case <-ctx.Done(): + return ctx.Err() } - if share.IsExpired(s) { - if err := m.removeShare(ctx, s); err != nil { - log.Error().Err(err). - Msg("failed to unshare expired share") - } - if err := events.Publish(ctx, m.eventStream, events.ShareExpired{ - ShareOwner: s.GetOwner(), - ItemID: s.GetResourceId(), - ExpiredAt: time.Unix(int64(s.GetExpiration().GetSeconds()), int64(s.GetExpiration().GetNanos())), - GranteeUserID: s.GetGrantee().GetUserId(), - GranteeGroupID: s.GetGrantee().GetGroupId(), - }); err != nil { + } + return nil + }) + // Spawn workers that'll concurrently work the queue + for i := 0; i < numWorkers; i++ { + g.Go(func() error { + for w := range work { + storageID, spaceID, _ := shareid.Decode(w.ssid) + // fetch all shares from space with one request + _, err := m.Cache.ListSpace(ctx, storageID, spaceID) + if err != nil { log.Error().Err(err). - Msg("failed to publish share expired event") + Str("storageid", storageID). + Str("spaceid", spaceID). + Msg("failed to list shares in space") + continue } - continue - } - if utils.UserEqual(user.GetId(), s.GetCreator()) { - if share.MatchesFilters(s, filters) { - ss = append(ss, s) + for shareID := range w.ids.IDs { + s, err := m.Cache.Get(ctx, storageID, spaceID, shareID, true) + if err != nil || s == nil { + continue + } + if share.IsExpired(s) { + if err := m.removeShare(ctx, s); err != nil { + log.Error().Err(err).
+ Msg("failed to unshare expired share") + } + if err := events.Publish(ctx, m.eventStream, events.ShareExpired{ + ShareOwner: s.GetOwner(), + ItemID: s.GetResourceId(), + ExpiredAt: time.Unix(int64(s.GetExpiration().GetSeconds()), int64(s.GetExpiration().GetNanos())), + GranteeUserID: s.GetGrantee().GetUserId(), + GranteeGroupID: s.GetGrantee().GetGroupId(), + }); err != nil { + log.Error().Err(err). + Msg("failed to publish share expired event") + } + continue + } + if utils.UserEqual(user.GetId(), s.GetCreator()) { + if share.MatchesFilters(s, filters) { + select { + case results <- s: + case <-ctx.Done(): + return ctx.Err() + } + } + } } } - } + return nil + }) + } + + // Wait for things to settle down, then close results chan + go func() { + _ = g.Wait() // error is checked later + close(results) + }() + + ss := []*collaboration.Share{} + for n := range results { + ss = append(ss, n) + } + + if err := g.Wait(); err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + return nil, err } span.SetStatus(codes.Ok, "") @@ -722,7 +782,6 @@ func (m *Manager) ListReceivedShares(ctx context.Context, filters []*collaborati var ok bool if rs, ok = ssids[ssid]; !ok { rs = &receivedsharecache.Space{ - Mtime: spaceShareIDs.Mtime, States: make(map[string]*receivedsharecache.State, len(spaceShareIDs.IDs)), } ssids[ssid] = rs @@ -785,8 +844,17 @@ func (m *Manager) ListReceivedShares(ctx context.Context, filters []*collaborati g.Go(func() error { for w := range work { storageID, spaceID, _ := shareid.Decode(w.ssid) + // fetch all shares from space with one request + _, err := m.Cache.ListSpace(ctx, storageID, spaceID) + if err != nil { + log.Error().Err(err). + Str("storageid", storageID). + Str("spaceid", spaceID). + Msg("failed to list shares in space") + continue + } for shareID, state := range w.rspace.States { - s, err := m.Cache.Get(ctx, storageID, spaceID, shareID) + s, err := m.Cache.Get(ctx, storageID, spaceID, shareID, true) if err != nil || s == nil { continue } diff --git a/pkg/share/manager/jsoncs3/jsoncs3_test.go b/pkg/share/manager/jsoncs3/jsoncs3_test.go index 63beca5ec0..702b6defa5 100644 --- a/pkg/share/manager/jsoncs3/jsoncs3_test.go +++ b/pkg/share/manager/jsoncs3/jsoncs3_test.go @@ -24,7 +24,6 @@ import ( "os" "path/filepath" "sync" - "time" groupv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/group/v1beta1" userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" @@ -418,7 +417,10 @@ var _ = Describe("Jsoncs3", func() { Expect(s.Permissions.Permissions.InitiateFileUpload).To(BeFalse()) // Change providercache on disk - cache := m.Cache.Providers["storageid"].Spaces["spaceid"] + spaces, ok := m.Cache.Providers.Load("storageid") + Expect(ok).To(BeTrue()) + cache, ok := spaces.Spaces.Load("spaceid") + Expect(ok).To(BeTrue()) cache.Shares[share.Id.OpaqueId].Permissions.Permissions.InitiateFileUpload = true bytes, err := json.Marshal(cache) Expect(err).ToNot(HaveOccurred()) @@ -428,15 +430,8 @@ var _ = Describe("Jsoncs3", func() { // Reset providercache in memory cache.Shares[share.Id.OpaqueId].Permissions.Permissions.InitiateFileUpload = false - // Set local cache mtime to something later then on disk - m.Cache.Providers["storageid"].Spaces["spaceid"].Mtime = time.Now().Add(time.Hour) - s, err = m.GetShare(ctx, shareRef) - Expect(err).ToNot(HaveOccurred()) - Expect(s).ToNot(BeNil()) - Expect(s.Permissions.Permissions.InitiateFileUpload).To(BeFalse()) - - // Set local cache mtime to something earlier then on disk - 
m.Cache.Providers["storageid"].Spaces["spaceid"].Mtime = time.Now().Add(-time.Hour) + // Set local cache etag to something other than on disk + cache.Etag = "reset1" // trigger reload s, err = m.GetShare(ctx, shareRef) Expect(err).ToNot(HaveOccurred()) Expect(s).ToNot(BeNil()) @@ -645,7 +640,10 @@ var _ = Describe("Jsoncs3", func() { Expect(shares[0].Permissions.Permissions.InitiateFileUpload).To(BeFalse()) // Change providercache on disk - cache := m.Cache.Providers["storageid"].Spaces["spaceid"] + spaces, ok := m.Cache.Providers.Load("storageid") + Expect(ok).To(BeTrue()) + cache, ok := spaces.Spaces.Load("spaceid") + Expect(ok).To(BeTrue()) cache.Shares[share.Id.OpaqueId].Permissions.Permissions.InitiateFileUpload = true bytes, err := json.Marshal(cache) Expect(err).ToNot(HaveOccurred()) @@ -655,7 +653,7 @@ var _ = Describe("Jsoncs3", func() { // Reset providercache in memory cache.Shares[share.Id.OpaqueId].Permissions.Permissions.InitiateFileUpload = false - m.Cache.Providers["storageid"].Spaces["spaceid"].Mtime = time.Time{} // trigger reload + cache.Etag = "reset1" // trigger reload shares, err = m.ListShares(ctx, nil) Expect(err).ToNot(HaveOccurred()) Expect(len(shares)).To(Equal(1)) @@ -669,15 +667,16 @@ var _ = Describe("Jsoncs3", func() { Expect(len(shares)).To(Equal(1)) // Add a second cache to the provider cache so it can be referenced - Expect(m.Cache.Add(ctx, "storageid", "spaceid", "storageid"+shareid.IDDelimiter+"spaceid"+shareid.IDDelimiter+"secondshare", &collaboration.Share{ + secondShareID := "storageid" + shareid.IDDelimiter + "spaceid" + shareid.IDDelimiter + "secondshare" + Expect(m.Cache.Add(ctx, "storageid", "spaceid", secondShareID, &collaboration.Share{ + Id: &collaboration.ShareId{OpaqueId: secondShareID}, Creator: user1.Id, })).To(Succeed()) cache := sharecache.UserShareCache{ - Mtime: time.Now(), + Etag: "etag1", UserShares: map[string]*sharecache.SpaceShareIDs{ "storageid" + shareid.IDDelimiter + "spaceid": { - Mtime: time.Now(), IDs: map[string]struct{}{ shares[0].Id.OpaqueId: {}, "storageid" + shareid.IDDelimiter + "spaceid" + shareid.IDDelimiter + "secondshare": {}, @@ -690,7 +689,7 @@ var _ = Describe("Jsoncs3", func() { err = os.WriteFile(filepath.Join(tmpdir, "users/admin/created.json"), bytes, 0x755) Expect(err).ToNot(HaveOccurred()) - m.CreatedCache.UserShares["admin"].Mtime = time.Time{} // trigger reload + m.CreatedCache.UserShares["admin"].Etag = "reset1" // trigger reload shares, err = m.ListShares(ctx, nil) Expect(err).ToNot(HaveOccurred()) Expect(len(shares)).To(Equal(2)) @@ -730,7 +729,10 @@ var _ = Describe("Jsoncs3", func() { Expect(received[0].Share.Permissions.Permissions.InitiateFileUpload).To(BeFalse()) // Change providercache on disk - cache := m.Cache.Providers["storageid"].Spaces["spaceid"] + spaces, ok := m.Cache.Providers.Load("storageid") + Expect(ok).To(BeTrue()) + cache, ok := spaces.Spaces.Load("spaceid") + Expect(ok).To(BeTrue()) cache.Shares[share.Id.OpaqueId].Permissions.Permissions.InitiateFileUpload = true bytes, err := json.Marshal(cache) Expect(err).ToNot(HaveOccurred()) @@ -740,7 +742,7 @@ var _ = Describe("Jsoncs3", func() { // Reset providercache in memory cache.Shares[share.Id.OpaqueId].Permissions.Permissions.InitiateFileUpload = false - m.Cache.Providers["storageid"].Spaces["spaceid"].Mtime = time.Time{} // trigger reload + cache.Etag = "reset1" // trigger reload received, err = m.ListReceivedShares(granteeCtx, []*collaboration.Filter{}) Expect(err).ToNot(HaveOccurred()) Expect(len(received)).To(Equal(1)) diff --git
a/pkg/share/manager/jsoncs3/providercache/providercache.go b/pkg/share/manager/jsoncs3/providercache/providercache.go index 2d80d2c190..672d9b21c9 100644 --- a/pkg/share/manager/jsoncs3/providercache/providercache.go +++ b/pkg/share/manager/jsoncs3/providercache/providercache.go @@ -32,20 +32,26 @@ import ( provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/v2/pkg/appctx" "github.com/cs3org/reva/v2/pkg/errtypes" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/mtimesyncedcache" "github.com/cs3org/reva/v2/pkg/storage/utils/metadata" - "github.com/cs3org/reva/v2/pkg/utils" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "golang.org/x/exp/maps" ) -// name is the Tracer name used to identify this instrumentation library. -const tracerName = "providercache" +var tracer trace.Tracer + +func init() { + tracer = otel.Tracer("github.com/cs3org/reva/pkg/share/manager/jsoncs3/providercache") +} // Cache holds share information structured by provider and space type Cache struct { lockMap sync.Map - Providers map[string]*Spaces + Providers mtimesyncedcache.Map[string, *Spaces] storage metadata.Storage ttl time.Duration @@ -53,14 +59,14 @@ type Cache struct { // Spaces holds the share information for provider type Spaces struct { - Spaces map[string]*Shares + Spaces mtimesyncedcache.Map[string, *Shares] } // Shares holds the share information of one space type Shares struct { - Shares map[string]*collaboration.Share - Mtime time.Time - nextSync time.Time + Shares map[string]*collaboration.Share + + Etag string } // UnmarshalJSON overrides the default unmarshaling @@ -70,7 +76,6 @@ type Shares struct { func (s *Shares) UnmarshalJSON(data []byte) error { tmp := struct { Shares map[string]json.RawMessage - Mtime time.Time }{} err := json.Unmarshal(data, &tmp) @@ -78,7 +83,6 @@ func (s *Shares) UnmarshalJSON(data []byte) error { return err } - s.Mtime = tmp.Mtime s.Shares = make(map[string]*collaboration.Share, len(tmp.Shares)) for id, genericShare := range tmp.Shares { userShare := &collaboration.Share{ @@ -93,11 +97,20 @@ func (s *Shares) UnmarshalJSON(data []byte) error { groupShare := &collaboration.Share{ Grantee: &provider.Grantee{Id: &provider.Grantee_GroupId{}}, } - err = json.Unmarshal(genericShare, groupShare) // try to unmarshal to a group share if the user share unmarshalling failed - if err != nil { - return err + err = json.Unmarshal(genericShare, groupShare) // is this a group share? 
+ if err == nil && groupShare.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP { + s.Shares[id] = groupShare + continue + } + + invalidShare := &collaboration.Share{} + err = json.Unmarshal(genericShare, invalidShare) // invalid + if err == nil { + s.Shares[id] = invalidShare + continue } - s.Shares[id] = groupShare + + return err } return nil @@ -115,26 +128,25 @@ func (c *Cache) LockSpace(spaceID string) func() { // New returns a new Cache instance func New(s metadata.Storage, ttl time.Duration) Cache { return Cache{ - Providers: map[string]*Spaces{}, + Providers: mtimesyncedcache.Map[string, *Spaces]{}, storage: s, ttl: ttl, lockMap: sync.Map{}, } } -// Add adds a share to the cache -func (c *Cache) Add(ctx context.Context, storageID, spaceID, shareID string, share *collaboration.Share) error { - unlock := c.LockSpace(spaceID) - defer unlock() - - if c.Providers[storageID] == nil || c.Providers[storageID].Spaces[spaceID] == nil { - err := c.syncWithLock(ctx, storageID, spaceID) - if err != nil { - return err - } +func (c *Cache) isSpaceCached(storageID, spaceID string) bool { + spaces, ok := c.Providers.Load(storageID) + if !ok { + return false } + _, ok = spaces.Spaces.Load(spaceID) + return ok +} - ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Add") +// Add adds a share to the cache +func (c *Cache) Add(ctx context.Context, storageID, spaceID, shareID string, share *collaboration.Share) error { + ctx, span := tracer.Start(ctx, "Add") defer span.End() span.SetAttributes(attribute.String("cs3.storageid", storageID), attribute.String("cs3.spaceid", spaceID), attribute.String("cs3.shareid", shareID)) @@ -146,221 +158,320 @@ func (c *Cache) Add(ctx context.Context, storageID, spaceID, shareID string, sha case shareID == "": return fmt.Errorf("missing share id") } - c.initializeIfNeeded(storageID, spaceID) + + unlock := c.LockSpace(spaceID) + defer unlock() + span.AddEvent("got lock") + + var err error + if !c.isSpaceCached(storageID, spaceID) { + err = c.syncWithLock(ctx, storageID, spaceID) + if err != nil { + return err + } + } + + log := appctx.GetLogger(ctx).With(). + Str("hostname", os.Getenv("HOSTNAME")). + Str("storageID", storageID). + Str("spaceID", spaceID). + Str("shareID", shareID).Logger() persistFunc := func() error { - c.Providers[storageID].Spaces[spaceID].Shares[shareID] = share + + spaces, _ := c.Providers.Load(storageID) + space, _ := spaces.Spaces.Load(spaceID) + + log.Info().Interface("shares", maps.Keys(space.Shares)).Str("New share", shareID).Msg("Adding share to space") + space.Shares[shareID] = share return c.Persist(ctx, storageID, spaceID) } - err := persistFunc() - if _, ok := err.(errtypes.IsPreconditionFailed); ok { + for retries := 100; retries > 0; retries-- { + err = persistFunc() + switch err.(type) { + case nil: + span.SetStatus(codes.Ok, "") + return nil + case errtypes.Aborted: + log.Debug().Msg("aborted when persisting added provider share: etag changed. retrying...") + // this is the expected status code from the server when the if-match etag check fails + // continue with sync below + case errtypes.PreconditionFailed: + log.Debug().Msg("precondition failed when persisting added provider share: etag changed. retrying...") + // actually, this is the wrong status code and we treat it like errtypes.Aborted because of inconsistencies on the server side + // continue with sync below + default: + span.SetStatus(codes.Error, fmt.Sprintf("persisting added provider share failed. 
giving up: %s", err.Error())) + log.Error().Err(err).Msg("persisting added provider share failed") + return err + } if err := c.syncWithLock(ctx, storageID, spaceID); err != nil { span.RecordError(err) span.SetStatus(codes.Error, err.Error()) - + log.Error().Err(err).Msg("persisting added provider share failed. giving up.") return err } - - err = persistFunc() } + return err } // Remove removes a share from the cache func (c *Cache) Remove(ctx context.Context, storageID, spaceID, shareID string) error { + ctx, span := tracer.Start(ctx, "Remove") + defer span.End() + span.SetAttributes(attribute.String("cs3.storageid", storageID), attribute.String("cs3.spaceid", spaceID), attribute.String("cs3.shareid", shareID)) + unlock := c.LockSpace(spaceID) defer unlock() + span.AddEvent("got lock") - if c.Providers[storageID] == nil || c.Providers[storageID].Spaces[spaceID] == nil { + if !c.isSpaceCached(storageID, spaceID) { err := c.syncWithLock(ctx, storageID, spaceID) if err != nil { return err } } - ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Remove") - defer span.End() - span.SetAttributes(attribute.String("cs3.storageid", storageID), attribute.String("cs3.spaceid", spaceID), attribute.String("cs3.shareid", shareID)) - persistFunc := func() error { - if c.Providers[storageID] == nil || - c.Providers[storageID].Spaces[spaceID] == nil { + spaces, ok := c.Providers.Load(storageID) + if !ok { return nil } - delete(c.Providers[storageID].Spaces[spaceID].Shares, shareID) + space, ok := spaces.Spaces.Load(spaceID) + if !ok { + return nil + } + delete(space.Shares, shareID) return c.Persist(ctx, storageID, spaceID) } - err := persistFunc() - if _, ok := err.(errtypes.IsPreconditionFailed); ok { + + log := appctx.GetLogger(ctx).With(). + Str("hostname", os.Getenv("HOSTNAME")). + Str("storageID", storageID). + Str("spaceID", spaceID). + Str("shareID", shareID).Logger() + + var err error + for retries := 100; retries > 0; retries-- { + err = persistFunc() + switch err.(type) { + case nil: + span.SetStatus(codes.Ok, "") + return nil + case errtypes.Aborted: + log.Debug().Msg("aborted when persisting removed provider share: etag changed. retrying...") + // this is the expected status code from the server when the if-match etag check fails + // continue with sync below + case errtypes.PreconditionFailed: + log.Debug().Msg("precondition failed when persisting removed provider share: etag changed. retrying...") + // actually, this is the wrong status code and we treat it like errtypes.Aborted because of inconsistencies on the server side + // continue with sync below + default: + span.SetStatus(codes.Error, fmt.Sprintf("persisting removed provider share failed. giving up: %s", err.Error())) + log.Error().Err(err).Msg("persisting removed provider share failed") + return err + } if err := c.syncWithLock(ctx, storageID, spaceID); err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + log.Error().Err(err).Msg("persisting removed provider share failed. 
giving up.") return err } - err = persistFunc() } - return err } // Get returns one entry from the cache -func (c *Cache) Get(ctx context.Context, storageID, spaceID, shareID string) (*collaboration.Share, error) { - // sync cache, maybe our data is outdated - err := c.Sync(ctx, storageID, spaceID) - if err != nil { - return nil, err +func (c *Cache) Get(ctx context.Context, storageID, spaceID, shareID string, skipSync bool) (*collaboration.Share, error) { + ctx, span := tracer.Start(ctx, "Get") + defer span.End() + span.SetAttributes(attribute.String("cs3.storageid", storageID), attribute.String("cs3.spaceid", spaceID), attribute.String("cs3.shareid", shareID)) + + unlock := c.LockSpace(spaceID) + defer unlock() + span.AddEvent("got lock") + + if !skipSync { + // sync cache, maybe our data is outdated + err := c.syncWithLock(ctx, storageID, spaceID) + if err != nil { + return nil, err + } } - if c.Providers[storageID] == nil || - c.Providers[storageID].Spaces[spaceID] == nil { + spaces, ok := c.Providers.Load(storageID) + if !ok { return nil, nil } - return c.Providers[storageID].Spaces[spaceID].Shares[shareID], nil + space, ok := spaces.Spaces.Load(spaceID) + if !ok { + return nil, nil + } + return space.Shares[shareID], nil } // ListSpace returns the list of shares in a given space func (c *Cache) ListSpace(ctx context.Context, storageID, spaceID string) (*Shares, error) { + ctx, span := tracer.Start(ctx, "ListSpace") + defer span.End() + span.SetAttributes(attribute.String("cs3.storageid", storageID), attribute.String("cs3.spaceid", spaceID)) + + unlock := c.LockSpace(spaceID) + defer unlock() + span.AddEvent("got lock") + // sync cache, maybe our data is outdated - err := c.Sync(ctx, storageID, spaceID) + err := c.syncWithLock(ctx, storageID, spaceID) if err != nil { return nil, err } - if c.Providers[storageID] == nil || c.Providers[storageID].Spaces[spaceID] == nil { + spaces, ok := c.Providers.Load(storageID) + if !ok { + return &Shares{}, nil + } + + space, ok := spaces.Spaces.Load(spaceID) + if !ok { return &Shares{}, nil } - return c.Providers[storageID].Spaces[spaceID], nil + + shares := &Shares{ + Shares: maps.Clone(space.Shares), + Etag: space.Etag, + } + return shares, nil } // Persist persists the data of one space func (c *Cache) Persist(ctx context.Context, storageID, spaceID string) error { - ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "PersistWithTime") + ctx, span := tracer.Start(ctx, "Persist") defer span.End() span.SetAttributes(attribute.String("cs3.storageid", storageID), attribute.String("cs3.spaceid", spaceID)) - if c.Providers[storageID] == nil || c.Providers[storageID].Spaces[spaceID] == nil { - span.SetStatus(codes.Ok, "no shares in provider or space") + spaces, ok := c.Providers.Load(storageID) + if !ok { + span.AddEvent("nothing to persist") + span.SetStatus(codes.Ok, "") return nil } + space, ok := spaces.Spaces.Load(spaceID) + if !ok { + span.AddEvent("nothing to persist") + span.SetStatus(codes.Ok, "") + return nil + } + span.SetAttributes(attribute.String("BeforeEtag", space.Etag)) + log := appctx.GetLogger(ctx).With().Str("storageID", storageID).Str("spaceID", spaceID).Logger() + log = log.With().Str("BeforeEtag", space.Etag).Logger() - oldMtime := c.Providers[storageID].Spaces[spaceID].Mtime - c.Providers[storageID].Spaces[spaceID].Mtime = time.Now() - - // FIXME there is a race when between this time now and the below Uploed another process also updates the file -> we need a lock - createdBytes, err := 
json.Marshal(c.Providers[storageID].Spaces[spaceID]) + createdBytes, err := json.Marshal(space) if err != nil { - c.Providers[storageID].Spaces[spaceID].Mtime = oldMtime span.RecordError(err) span.SetStatus(codes.Error, err.Error()) return err } jsonPath := spaceJSONPath(storageID, spaceID) if err := c.storage.MakeDirIfNotExist(ctx, path.Dir(jsonPath)); err != nil { - c.Providers[storageID].Spaces[spaceID].Mtime = oldMtime span.RecordError(err) span.SetStatus(codes.Error, err.Error()) return err } - if err = c.storage.Upload(ctx, metadata.UploadRequest{ - Path: jsonPath, - Content: createdBytes, - IfUnmodifiedSince: oldMtime, - MTime: c.Providers[storageID].Spaces[spaceID].Mtime, - }); err != nil { - c.Providers[storageID].Spaces[spaceID].Mtime = oldMtime + span.SetAttributes(attribute.String("etag", space.Etag)) + + ur := metadata.UploadRequest{ + Path: jsonPath, + Content: createdBytes, + IfMatchEtag: space.Etag, + } + // when there is no etag in memory make sure the file has not been created on the server, see https://www.rfc-editor.org/rfc/rfc9110#field.if-match + // > If the field value is "*", the condition is false if the origin server has a current representation for the target resource. + if space.Etag == "" { + ur.IfNoneMatch = []string{"*"} + log.Debug().Msg("setting IfNoneMatch to *") + } else { + log.Debug().Msg("setting IfMatchEtag") + } + res, err := c.storage.Upload(ctx, ur) + if err != nil { span.RecordError(err) span.SetStatus(codes.Error, err.Error()) + log.Debug().Err(err).Msg("persisting provider cache failed") return err } + space.Etag = res.Etag span.SetStatus(codes.Ok, "") + shares := []string{} + for _, s := range space.Shares { + shares = append(shares, s.GetId().GetOpaqueId()) + } + log.Debug().Str("AfterEtag", space.Etag).Interface("Shares", shares).Msg("persisted provider cache") return nil } -// Sync updates the in-memory data with the data from the storage if it is outdated -func (c *Cache) Sync(ctx context.Context, storageID, spaceID string) error { - unlock := c.LockSpace(spaceID) - defer unlock() - - return c.syncWithLock(ctx, storageID, spaceID) -} - func (c *Cache) syncWithLock(ctx context.Context, storageID, spaceID string) error { - ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Sync") + ctx, span := tracer.Start(ctx, "syncWithLock") defer span.End() - span.SetAttributes(attribute.String("cs3.storageid", storageID), attribute.String("cs3.spaceid", spaceID)) + c.initializeIfNeeded(storageID, spaceID) - log := appctx.GetLogger(ctx).With().Str("storageID", storageID).Str("spaceID", spaceID).Logger() + spaces, _ := c.Providers.Load(storageID) + space, _ := spaces.Spaces.Load(spaceID) + span.SetAttributes(attribute.String("cs3.storageid", storageID), attribute.String("cs3.spaceid", spaceID), attribute.String("etag", space.Etag)) + log := appctx.GetLogger(ctx).With().Str("storageID", storageID).Str("spaceID", spaceID).Str("etag", space.Etag).Str("hostname", os.Getenv("HOSTNAME")).Logger() - var mtime time.Time - if c.Providers[storageID] != nil && c.Providers[storageID].Spaces[spaceID] != nil { - mtime = c.Providers[storageID].Spaces[spaceID].Mtime + dlreq := metadata.DownloadRequest{ + Path: spaceJSONPath(storageID, spaceID), + } + // when we know an etag, only download if it changed remotely + if space.Etag != "" { + dlreq.IfNoneMatch = []string{space.Etag} + } - if time.Now().Before(c.Providers[storageID].Spaces[spaceID].nextSync) { - span.AddEvent("skip sync") - span.SetStatus(codes.Ok, "") - return nil - } - 
c.Providers[storageID].Spaces[spaceID].nextSync = time.Now().Add(c.ttl) - } else { - mtime = time.Time{} // Set zero time so that data from storage always takes precedence + dlres, err := c.storage.Download(ctx, dlreq) + switch err.(type) { + case nil: + span.AddEvent("updating local cache") + case errtypes.NotFound: + span.SetStatus(codes.Ok, "") + return nil + case errtypes.NotModified: + span.SetStatus(codes.Ok, "") + return nil + default: + span.RecordError(err) + span.SetStatus(codes.Error, "downloading provider cache failed") + return err } - jsonPath := spaceJSONPath(storageID, spaceID) - info, err := c.storage.Stat(ctx, jsonPath) + span.AddEvent("updating local cache") + newShares := &Shares{} + err = json.Unmarshal(dlres.Content, newShares) if err != nil { - if _, ok := err.(errtypes.NotFound); ok { - span.AddEvent("no file") - span.SetStatus(codes.Ok, "") - return nil // Nothing to sync against - } - if _, ok := err.(*os.PathError); ok { - span.AddEvent("no dir") - span.SetStatus(codes.Ok, "") - return nil // Nothing to sync against - } - span.SetStatus(codes.Error, fmt.Sprintf("Failed to stat the provider cache: %s", err.Error())) - log.Error().Err(err).Msg("Failed to stat the provider cache") + span.RecordError(err) + span.SetStatus(codes.Error, "unmarshaling provider cache failed") + log.Error().Err(err).Msg("unmarshaling provider cache failed") return err } - // check mtime of /users/{userid}/created.json - if utils.TSToTime(info.Mtime).After(mtime) { - span.AddEvent("updating cache") - // - update cached list of created shares for the user in memory if changed - createdBlob, err := c.storage.SimpleDownload(ctx, jsonPath) - if err != nil { - span.SetStatus(codes.Error, fmt.Sprintf("Failed to download the provider cache: %s", err.Error())) - log.Error().Err(err).Msg("Failed to download the provider cache") - return err - } - newShares := &Shares{} - err = json.Unmarshal(createdBlob, newShares) - if err != nil { - span.SetStatus(codes.Error, fmt.Sprintf("Failed to unmarshal the provider cache: %s", err.Error())) - log.Error().Err(err).Msg("Failed to unmarshal the provider cache") - return err - } - newShares.Mtime = utils.TSToTime(info.Mtime) - c.initializeIfNeeded(storageID, spaceID) - c.Providers[storageID].Spaces[spaceID] = newShares - } + newShares.Etag = dlres.Etag + + spaces.Spaces.Store(spaceID, newShares) span.SetStatus(codes.Ok, "") return nil } func (c *Cache) initializeIfNeeded(storageID, spaceID string) { - if c.Providers[storageID] == nil { - c.Providers[storageID] = &Spaces{ - Spaces: map[string]*Shares{}, - } - } - if c.Providers[storageID].Spaces[spaceID] == nil { - c.Providers[storageID].Spaces[spaceID] = &Shares{ - Shares: map[string]*collaboration.Share{}, - } - } + spaces, _ := c.Providers.LoadOrStore(storageID, &Spaces{ + Spaces: mtimesyncedcache.Map[string, *Shares]{}, + }) + _, _ = spaces.Spaces.LoadOrStore(spaceID, &Shares{ + Shares: map[string]*collaboration.Share{}, + }) } func spaceJSONPath(storageID, spaceID string) string { diff --git a/pkg/share/manager/jsoncs3/providercache/providercache_test.go b/pkg/share/manager/jsoncs3/providercache/providercache_test.go index fc0fbdfa00..5d62212aa2 100644 --- a/pkg/share/manager/jsoncs3/providercache/providercache_test.go +++ b/pkg/share/manager/jsoncs3/providercache/providercache_test.go @@ -75,28 +75,36 @@ var _ = Describe("Cache", func() { Describe("Add", func() { It("adds a share", func() { - s, err := c.Get(ctx, storageID, spaceID, shareID) + s, err := c.Get(ctx, storageID, spaceID, shareID, false) 
Expect(err).ToNot(HaveOccurred()) Expect(s).To(BeNil()) Expect(c.Add(ctx, storageID, spaceID, shareID, share1)).To(Succeed()) - s, err = c.Get(ctx, storageID, spaceID, shareID) + s, err = c.Get(ctx, storageID, spaceID, shareID, false) Expect(err).ToNot(HaveOccurred()) Expect(s).ToNot(BeNil()) Expect(s).To(Equal(share1)) }) - It("sets the mtime", func() { + It("sets the etag", func() { Expect(c.Add(ctx, storageID, spaceID, shareID, share1)).To(Succeed()) - Expect(c.Providers[storageID].Spaces[spaceID].Mtime).ToNot(Equal(time.Time{})) + spaces, ok := c.Providers.Load(storageID) + Expect(ok).To(BeTrue()) + space, ok := spaces.Spaces.Load(spaceID) + Expect(ok).To(BeTrue()) + Expect(space.Etag).ToNot(BeEmpty()) }) - It("updates the mtime", func() { + It("updates the etag", func() { Expect(c.Add(ctx, storageID, spaceID, shareID, share1)).To(Succeed()) - old := c.Providers[storageID].Spaces[spaceID].Mtime + spaces, ok := c.Providers.Load(storageID) + Expect(ok).To(BeTrue()) + space, ok := spaces.Spaces.Load(spaceID) + Expect(ok).To(BeTrue()) + old := space.Etag Expect(c.Add(ctx, storageID, spaceID, shareID, share1)).To(Succeed()) - Expect(c.Providers[storageID].Spaces[spaceID].Mtime).ToNot(Equal(old)) + Expect(space.Etag).ToNot(Equal(old)) }) }) @@ -107,7 +115,7 @@ var _ = Describe("Cache", func() { Describe("Get", func() { It("returns the entry", func() { - s, err := c.Get(ctx, storageID, spaceID, shareID) + s, err := c.Get(ctx, storageID, spaceID, shareID, false) Expect(err).ToNot(HaveOccurred()) Expect(s).ToNot(BeNil()) }) @@ -115,23 +123,27 @@ var _ = Describe("Cache", func() { Describe("Remove", func() { It("removes the entry", func() { - s, err := c.Get(ctx, storageID, spaceID, shareID) + s, err := c.Get(ctx, storageID, spaceID, shareID, false) Expect(err).ToNot(HaveOccurred()) Expect(s).ToNot(BeNil()) Expect(s).To(Equal(share1)) Expect(c.Remove(ctx, storageID, spaceID, shareID)).To(Succeed()) - s, err = c.Get(ctx, storageID, spaceID, shareID) + s, err = c.Get(ctx, storageID, spaceID, shareID, false) Expect(err).ToNot(HaveOccurred()) Expect(s).To(BeNil()) }) - It("updates the mtime", func() { + It("updates the etag", func() { Expect(c.Add(ctx, storageID, spaceID, shareID, share1)).To(Succeed()) - old := c.Providers[storageID].Spaces[spaceID].Mtime + spaces, ok := c.Providers.Load(storageID) + Expect(ok).To(BeTrue()) + space, ok := spaces.Spaces.Load(spaceID) + Expect(ok).To(BeTrue()) + old := space.Etag Expect(c.Remove(ctx, storageID, spaceID, shareID)).To(Succeed()) - Expect(c.Providers[storageID].Spaces[spaceID].Mtime).ToNot(Equal(old)) + Expect(space.Etag).ToNot(Equal(old)) }) }) @@ -147,21 +159,25 @@ var _ = Describe("Cache", func() { Expect(c.Persist(ctx, storageID, spaceID)).To(Succeed()) }) - It("updates the mtime", func() { - oldMtime := c.Providers[storageID].Spaces[spaceID].Mtime + It("updates the etag", func() { + spaces, ok := c.Providers.Load(storageID) + Expect(ok).To(BeTrue()) + space, ok := spaces.Spaces.Load(spaceID) + Expect(ok).To(BeTrue()) + oldEtag := space.Etag Expect(c.Persist(ctx, storageID, spaceID)).To(Succeed()) - Expect(c.Providers[storageID].Spaces[spaceID].Mtime).ToNot(Equal(oldMtime)) + Expect(space.Etag).ToNot(Equal(oldEtag)) }) }) Describe("PersistWithTime", func() { - It("does not persist if the mtime on disk is more recent", func() { + It("does not persist if the etag changed", func() { time.Sleep(1 * time.Nanosecond) path := filepath.Join(tmpdir, "storages/storageid/spaceid.json") now := time.Now() - _ = os.Chtimes(path, now, now) + _ = os.Chtimes(path, 
now, now) // this only works for the file backend Expect(c.Persist(ctx, storageID, spaceID)).ToNot(Succeed()) }) }) diff --git a/pkg/share/manager/jsoncs3/receivedsharecache/receivedsharecache.go b/pkg/share/manager/jsoncs3/receivedsharecache/receivedsharecache.go index c4d3aca072..7c719aa486 100644 --- a/pkg/share/manager/jsoncs3/receivedsharecache/receivedsharecache.go +++ b/pkg/share/manager/jsoncs3/receivedsharecache/receivedsharecache.go @@ -22,6 +22,7 @@ import ( "context" "encoding/json" "fmt" + "os" "path" "path/filepath" "sync" @@ -32,7 +33,6 @@ import ( "github.com/cs3org/reva/v2/pkg/appctx" "github.com/cs3org/reva/v2/pkg/errtypes" "github.com/cs3org/reva/v2/pkg/storage/utils/metadata" - "github.com/cs3org/reva/v2/pkg/utils" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" ) @@ -54,15 +54,13 @@ type Cache struct { // Spaces holds the received shares of one user per space type Spaces struct { - Mtime time.Time Spaces map[string]*Space - nextSync time.Time + etag string } // Space holds the received shares of one user in one space type Space struct { - Mtime time.Time States map[string]*State } @@ -92,7 +90,10 @@ func (c *Cache) lockUser(userID string) func() { // Add adds a new entry to the cache func (c *Cache) Add(ctx context.Context, userID, spaceID string, rs *collaboration.ReceivedShare) error { + ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Grab lock") unlock := c.lockUser(userID) + span.SetAttributes(attribute.String("cs3.userid", userID)) + span.End() defer unlock() if c.ReceivedSpaces[userID] == nil { @@ -102,22 +103,14 @@ func (c *Cache) Add(ctx context.Context, userID, spaceID string, rs *collaborati } } - ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Add") + ctx, span = appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Add") defer span.End() span.SetAttributes(attribute.String("cs3.userid", userID), attribute.String("cs3.spaceid", spaceID)) persistFunc := func() error { - if c.ReceivedSpaces[userID] == nil { - c.ReceivedSpaces[userID] = &Spaces{ - Spaces: map[string]*Space{}, - } - } - if c.ReceivedSpaces[userID].Spaces[spaceID] == nil { - c.ReceivedSpaces[userID].Spaces[spaceID] = &Space{} - } + c.initializeIfNeeded(userID, spaceID) receivedSpace := c.ReceivedSpaces[userID].Spaces[spaceID] - receivedSpace.Mtime = time.Now() if receivedSpace.States == nil { receivedSpace.States = map[string]*State{} } @@ -128,22 +121,51 @@ func (c *Cache) Add(ctx context.Context, userID, spaceID string, rs *collaborati return c.persist(ctx, userID) } - err := persistFunc() - if _, ok := err.(errtypes.IsPreconditionFailed); ok { + + log := appctx.GetLogger(ctx).With(). + Str("hostname", os.Getenv("HOSTNAME")). + Str("userID", userID). + Str("spaceID", spaceID).Logger() + + var err error + for retries := 100; retries > 0; retries-- { + err = persistFunc() + switch err.(type) { + case nil: + span.SetStatus(codes.Ok, "") + return nil + case errtypes.Aborted: + log.Debug().Msg("aborted when persisting added received share: etag changed. retrying...") + // this is the expected status code from the server when the if-match etag check fails + // continue with sync below + case errtypes.PreconditionFailed: + log.Debug().Msg("precondition failed when persisting added received share: etag changed. 
retrying...") + // actually, this is the wrong status code and we treat it like errtypes.Aborted because of inconsistencies on the server side + // continue with sync below + default: + span.SetStatus(codes.Error, fmt.Sprintf("persisting added received share failed. giving up: %s", err.Error())) + log.Error().Err(err).Msg("persisting added received share failed") + return err + } if err := c.syncWithLock(ctx, userID); err != nil { span.RecordError(err) span.SetStatus(codes.Error, err.Error()) + log.Error().Err(err).Msg("persisting added received share failed. giving up.") return err } - - err = persistFunc() } return err } // Get returns one entry from the cache func (c *Cache) Get(ctx context.Context, userID, spaceID, shareID string) (*State, error) { - err := c.Sync(ctx, userID) + ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Grab lock") + unlock := c.lockUser(userID) + span.End() + span.SetAttributes(attribute.String("cs3.userid", userID)) + defer unlock() + + err := c.syncWithLock(ctx, userID) if err != nil { return nil, err } @@ -155,7 +177,10 @@ func (c *Cache) Get(ctx context.Context, userID, spaceID, shareID string) (*Stat // Sync updates the in-memory data with the data from the storage if it is outdated func (c *Cache) Sync(ctx context.Context, userID string) error { + ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Grab lock") unlock := c.lockUser(userID) + span.End() + span.SetAttributes(attribute.String("cs3.userid", userID)) defer unlock() return c.syncWithLock(ctx, userID) @@ -168,52 +193,40 @@ func (c *Cache) syncWithLock(ctx context.Context, userID string) error { log := appctx.GetLogger(ctx).With().Str("userID", userID).Logger() - var mtime time.Time - if c.ReceivedSpaces[userID] != nil { - if time.Now().Before(c.ReceivedSpaces[userID].nextSync) { - span.AddEvent("skip sync") - span.SetStatus(codes.Ok, "") - return nil - } - c.ReceivedSpaces[userID].nextSync = time.Now().Add(c.ttl) + c.initializeIfNeeded(userID, "") - mtime = c.ReceivedSpaces[userID].Mtime - } else { - mtime = time.Time{} // Set zero time so that data from storage always takes precedence + jsonPath := userJSONPath(userID) + span.AddEvent("updating cache") + // - update cached list of created shares for the user in memory if changed + dlres, err := c.storage.Download(ctx, metadata.DownloadRequest{ + Path: jsonPath, + IfNoneMatch: []string{c.ReceivedSpaces[userID].etag}, + }) + switch err.(type) { + case nil: + span.AddEvent("updating local cache") + case errtypes.NotFound: + span.SetStatus(codes.Ok, "") + return nil + case errtypes.NotModified: + span.SetStatus(codes.Ok, "") + return nil + default: + span.SetStatus(codes.Error, fmt.Sprintf("Failed to download the received share: %s", err.Error())) + log.Error().Err(err).Msg("Failed to download the received share") + return err } - jsonPath := userJSONPath(userID) - info, err := c.storage.Stat(ctx, jsonPath) // TODO we only need the mtime ... 
use fieldmask to make the request cheaper + newSpaces := &Spaces{} + err = json.Unmarshal(dlres.Content, newSpaces) if err != nil { - if _, ok := err.(errtypes.NotFound); ok { - span.AddEvent("no file") - span.SetStatus(codes.Ok, "") - return nil // Nothing to sync against - } - span.SetStatus(codes.Error, fmt.Sprintf("Failed to stat the received share: %s", err.Error())) - log.Error().Err(err).Msg("Failed to stat the received share") + span.SetStatus(codes.Error, fmt.Sprintf("Failed to unmarshal the received share: %s", err.Error())) + log.Error().Err(err).Msg("Failed to unmarshal the received share") return err } - // check mtime of /users/{userid}/created.json - if utils.TSToTime(info.Mtime).After(mtime) { - span.AddEvent("updating cache") - // - update cached list of created shares for the user in memory if changed - createdBlob, err := c.storage.SimpleDownload(ctx, jsonPath) - if err != nil { - span.SetStatus(codes.Error, fmt.Sprintf("Failed to download the received share: %s", err.Error())) - log.Error().Err(err).Msg("Failed to download the received share") - return err - } - newSpaces := &Spaces{} - err = json.Unmarshal(createdBlob, newSpaces) - if err != nil { - span.SetStatus(codes.Error, fmt.Sprintf("Failed to unmarshal the received share: %s", err.Error())) - log.Error().Err(err).Msg("Failed to unmarshal the received share") - return err - } - newSpaces.Mtime = utils.TSToTime(info.Mtime) - c.ReceivedSpaces[userID] = newSpaces - } + newSpaces.etag = dlres.Etag + + c.ReceivedSpaces[userID] = newSpaces span.SetStatus(codes.Ok, "") return nil } @@ -229,31 +242,32 @@ func (c *Cache) persist(ctx context.Context, userID string) error { return nil } - oldMtime := c.ReceivedSpaces[userID].Mtime - c.ReceivedSpaces[userID].Mtime = time.Now() - createdBytes, err := json.Marshal(c.ReceivedSpaces[userID]) if err != nil { - c.ReceivedSpaces[userID].Mtime = oldMtime span.RecordError(err) span.SetStatus(codes.Error, err.Error()) return err } jsonPath := userJSONPath(userID) if err := c.storage.MakeDirIfNotExist(ctx, path.Dir(jsonPath)); err != nil { - c.ReceivedSpaces[userID].Mtime = oldMtime span.RecordError(err) span.SetStatus(codes.Error, err.Error()) return err } - if err = c.storage.Upload(ctx, metadata.UploadRequest{ - Path: jsonPath, - Content: createdBytes, - IfUnmodifiedSince: oldMtime, - MTime: c.ReceivedSpaces[userID].Mtime, - }); err != nil { - c.ReceivedSpaces[userID].Mtime = oldMtime + ur := metadata.UploadRequest{ + Path: jsonPath, + Content: createdBytes, + IfMatchEtag: c.ReceivedSpaces[userID].etag, + } + // when there is no etag in memory make sure the file has not been created on the server, see https://www.rfc-editor.org/rfc/rfc9110#field.if-match + // > If the field value is "*", the condition is false if the origin server has a current representation for the target resource. 
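The rule in the comment above reduces to a small decision on the cached etag. A minimal sketch using the metadata.UploadRequest fields from this change (the helper name is illustrative, not part of the diff):

package sketch

import "github.com/cs3org/reva/v2/pkg/storage/utils/metadata"

// conditionalUpload guards a cache write with the writer's last known etag:
// a non-empty etag may only overwrite the file if it is unchanged (If-Match),
// while an empty etag may only create a file that does not exist yet
// (If-None-Match: "*", per RFC 9110).
func conditionalUpload(path string, content []byte, etag string) metadata.UploadRequest {
	ur := metadata.UploadRequest{Path: path, Content: content}
	if etag == "" {
		ur.IfNoneMatch = []string{"*"}
	} else {
		ur.IfMatchEtag = etag
	}
	return ur
}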
+ if c.ReceivedSpaces[userID].etag == "" { + ur.IfNoneMatch = []string{"*"} + } + + _, err = c.storage.Upload(ctx, ur) + if err != nil { span.RecordError(err) span.SetStatus(codes.Error, err.Error()) return err @@ -265,3 +279,14 @@ func (c *Cache) persist(ctx context.Context, userID string) error { func userJSONPath(userID string) string { return filepath.Join("/users", userID, "received.json") } + +func (c *Cache) initializeIfNeeded(userID, spaceID string) { + if c.ReceivedSpaces[userID] == nil { + c.ReceivedSpaces[userID] = &Spaces{ + Spaces: map[string]*Space{}, + } + } + if spaceID != "" && c.ReceivedSpaces[userID].Spaces[spaceID] == nil { + c.ReceivedSpaces[userID].Spaces[spaceID] = &Space{} + } +} diff --git a/pkg/share/manager/jsoncs3/sharecache/sharecache.go b/pkg/share/manager/jsoncs3/sharecache/sharecache.go index 1814e3447e..fe6dfde338 100644 --- a/pkg/share/manager/jsoncs3/sharecache/sharecache.go +++ b/pkg/share/manager/jsoncs3/sharecache/sharecache.go @@ -22,6 +22,7 @@ import ( "context" "encoding/json" "fmt" + "os" "path" "path/filepath" "sync" @@ -31,7 +32,6 @@ import ( "github.com/cs3org/reva/v2/pkg/errtypes" "github.com/cs3org/reva/v2/pkg/share/manager/jsoncs3/shareid" "github.com/cs3org/reva/v2/pkg/storage/utils/metadata" - "github.com/cs3org/reva/v2/pkg/utils" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" ) @@ -55,16 +55,14 @@ type Cache struct { // UserShareCache holds the space/share map for one user type UserShareCache struct { - Mtime time.Time UserShares map[string]*SpaceShareIDs - nextSync time.Time + Etag string } // SpaceShareIDs holds the unique list of share ids for a space type SpaceShareIDs struct { - Mtime time.Time - IDs map[string]struct{} + IDs map[string]struct{} } func (c *Cache) lockUser(userID string) func() { @@ -89,7 +87,10 @@ func New(s metadata.Storage, namespace, filename string, ttl time.Duration) Cach // Add adds a share to the cache func (c *Cache) Add(ctx context.Context, userid, shareID string) error { + ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Grab lock") unlock := c.lockUser(userid) + span.End() + span.SetAttributes(attribute.String("cs3.userid", userid)) defer unlock() if c.UserShares[userid] == nil { @@ -99,7 +100,7 @@ func (c *Cache) Add(ctx context.Context, userid, shareID string) error { } } - ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Add") + ctx, span = appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Add") defer span.End() span.SetAttributes(attribute.String("cs3.userid", userid), attribute.String("cs3.shareid", shareID)) @@ -107,41 +108,54 @@ func (c *Cache) Add(ctx context.Context, userid, shareID string) error { ssid := storageid + shareid.IDDelimiter + spaceid persistFunc := func() error { - now := time.Now() - if c.UserShares[userid] == nil { - c.UserShares[userid] = &UserShareCache{ - UserShares: map[string]*SpaceShareIDs{}, - } - } - if c.UserShares[userid].UserShares[ssid] == nil { - c.UserShares[userid].UserShares[ssid] = &SpaceShareIDs{ - IDs: map[string]struct{}{}, - } - } + c.initializeIfNeeded(userid, ssid) + // add share id - c.UserShares[userid].UserShares[ssid].Mtime = now c.UserShares[userid].UserShares[ssid].IDs[shareID] = struct{}{} return c.Persist(ctx, userid) } - err := persistFunc() - if _, ok := err.(errtypes.IsPreconditionFailed); ok { + log := appctx.GetLogger(ctx).With(). + Str("hostname", os.Getenv("HOSTNAME")). + Str("userID", userid). 
+ Str("shareID", shareID).Logger() + + var err error + for retries := 100; retries > 0; retries-- { + err = persistFunc() + switch err.(type) { + case nil: + span.SetStatus(codes.Ok, "") + return nil + case errtypes.Aborted: + log.Debug().Msg("aborted when persisting added share: etag changed. retrying...") + // this is the expected status code from the server when the if-match etag check fails + // continue with sync below + case errtypes.PreconditionFailed: + log.Debug().Msg("precondition failed when persisting added share: etag changed. retrying...") + // actually, this is the wrong status code and we treat it like errtypes.Aborted because of inconsistencies on the server side + // continue with sync below + default: + span.SetStatus(codes.Error, fmt.Sprintf("persisting added share failed. giving up: %s", err.Error())) + log.Error().Err(err).Msg("persisting added share failed") + return err + } if err := c.syncWithLock(ctx, userid); err != nil { span.RecordError(err) span.SetStatus(codes.Error, err.Error()) - + log.Error().Err(err).Msg("persisting added share failed. giving up.") return err } - - err = persistFunc() - // TODO try more often? } return err } // Remove removes a share for the given user func (c *Cache) Remove(ctx context.Context, userid, shareID string) error { + ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Grab lock") unlock := c.lockUser(userid) + span.End() + span.SetAttributes(attribute.String("cs3.userid", userid)) defer unlock() if c.UserShares[userid] == nil { @@ -151,7 +165,7 @@ func (c *Cache) Remove(ctx context.Context, userid, shareID string) error { } } - ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Remove") + ctx, span = appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Remove") defer span.End() span.SetAttributes(attribute.String("cs3.userid", userid), attribute.String("cs3.shareid", shareID)) @@ -167,19 +181,42 @@ func (c *Cache) Remove(ctx context.Context, userid, shareID string) error { if c.UserShares[userid].UserShares[ssid] != nil { // remove share id - c.UserShares[userid].UserShares[ssid].Mtime = time.Now() delete(c.UserShares[userid].UserShares[ssid].IDs, shareID) } return c.Persist(ctx, userid) } - err := persistFunc() - if _, ok := err.(errtypes.IsPreconditionFailed); ok { + log := appctx.GetLogger(ctx).With(). + Str("hostname", os.Getenv("HOSTNAME")). + Str("userID", userid). + Str("shareID", shareID).Logger() + + var err error + for retries := 100; retries > 0; retries-- { + err = persistFunc() + switch err.(type) { + case nil: + span.SetStatus(codes.Ok, "") + return nil + case errtypes.Aborted: + log.Debug().Msg("aborted when persisting removed share: etag changed. retrying...") + // this is the expected status code from the server when the if-match etag check fails + // continue with sync below + case errtypes.PreconditionFailed: + log.Debug().Msg("precondition failed when persisting removed share: etag changed. retrying...") + // actually, this is the wrong status code and we treat it like errtypes.Aborted because of inconsistencies on the server side + // continue with sync below + default: + span.SetStatus(codes.Error, fmt.Sprintf("persisting removed share failed. 
giving up: %s", err.Error())) + log.Error().Err(err).Msg("persisting removed share failed") + return err + } if err := c.syncWithLock(ctx, userid); err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) return err } - err = persistFunc() } return err @@ -187,7 +224,10 @@ func (c *Cache) Remove(ctx context.Context, userid, shareID string) error { // List return the list of spaces/shares for the given user/group func (c *Cache) List(ctx context.Context, userid string) (map[string]SpaceShareIDs, error) { + ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Grab lock") unlock := c.lockUser(userid) + span.End() + span.SetAttributes(attribute.String("cs3.userid", userid)) defer unlock() if err := c.syncWithLock(ctx, userid); err != nil { return nil, err @@ -200,8 +240,7 @@ func (c *Cache) List(ctx context.Context, userid string) (map[string]SpaceShareI for ssid, cached := range c.UserShares[userid].UserShares { r[ssid] = SpaceShareIDs{ - Mtime: cached.Mtime, - IDs: cached.IDs, + IDs: cached.IDs, } } return r, nil @@ -214,54 +253,44 @@ func (c *Cache) syncWithLock(ctx context.Context, userID string) error { log := appctx.GetLogger(ctx).With().Str("userID", userID).Logger() - var mtime time.Time - // - do we have a cached list of created shares for the user in memory? - if usc := c.UserShares[userID]; usc != nil { - if time.Now().Before(c.UserShares[userID].nextSync) { - span.AddEvent("skip sync") - span.SetStatus(codes.Ok, "") - return nil - } - c.UserShares[userID].nextSync = time.Now().Add(c.ttl) + c.initializeIfNeeded(userID, "") - mtime = usc.Mtime - // - y: set If-Modified-Since header to only download if it changed - } else { - mtime = time.Time{} // Set zero time so that data from storage always takes precedence + userCreatedPath := c.userCreatedPath(userID) + span.AddEvent("updating cache") + // - update cached list of created shares for the user in memory if changed + dlreq := metadata.DownloadRequest{ + Path: userCreatedPath, + } + if c.UserShares[userID].Etag != "" { + dlreq.IfNoneMatch = []string{c.UserShares[userID].Etag} } - userCreatedPath := c.userCreatedPath(userID) - info, err := c.storage.Stat(ctx, userCreatedPath) - if err != nil { - if _, ok := err.(errtypes.NotFound); ok { - span.AddEvent("no file") - span.SetStatus(codes.Ok, "") - return nil // Nothing to sync against - } - span.SetStatus(codes.Error, fmt.Sprintf("Failed to stat the share cache: %s", err.Error())) - log.Error().Err(err).Msg("Failed to stat the share cache") + dlres, err := c.storage.Download(ctx, dlreq) + switch err.(type) { + case nil: + span.AddEvent("updating local cache") + case errtypes.NotFound: + span.SetStatus(codes.Ok, "") + return nil + case errtypes.NotModified: + span.SetStatus(codes.Ok, "") + return nil + default: + span.SetStatus(codes.Error, fmt.Sprintf("Failed to download the share cache: %s", err.Error())) + log.Error().Err(err).Msg("Failed to download the share cache") return err } - // check mtime of /users/{userid}/created.json - if utils.TSToTime(info.Mtime).After(mtime) { - span.AddEvent("updating cache") - // - update cached list of created shares for the user in memory if changed - createdBlob, err := c.storage.SimpleDownload(ctx, userCreatedPath) - if err != nil { - span.SetStatus(codes.Error, fmt.Sprintf("Failed to download the share cache: %s", err.Error())) - log.Error().Err(err).Msg("Failed to download the share cache") - return err - } - newShareCache := &UserShareCache{} - err = json.Unmarshal(createdBlob, newShareCache) - if err 
!= nil { - span.SetStatus(codes.Error, fmt.Sprintf("Failed to unmarshal the share cache: %s", err.Error())) - log.Error().Err(err).Msg("Failed to unmarshal the share cache") - return err - } - newShareCache.Mtime = utils.TSToTime(info.Mtime) - c.UserShares[userID] = newShareCache + + newShareCache := &UserShareCache{} + err = json.Unmarshal(dlres.Content, newShareCache) + if err != nil { + span.SetStatus(codes.Error, fmt.Sprintf("Failed to unmarshal the share cache: %s", err.Error())) + log.Error().Err(err).Msg("Failed to unmarshal the share cache") + return err } + newShareCache.Etag = dlres.Etag + + c.UserShares[userID] = newShareCache span.SetStatus(codes.Ok, "") return nil } @@ -272,35 +301,36 @@ func (c *Cache) Persist(ctx context.Context, userid string) error { defer span.End() span.SetAttributes(attribute.String("cs3.userid", userid)) - oldMtime := c.UserShares[userid].Mtime - c.UserShares[userid].Mtime = time.Now() - createdBytes, err := json.Marshal(c.UserShares[userid]) if err != nil { - c.UserShares[userid].Mtime = oldMtime span.RecordError(err) span.SetStatus(codes.Error, err.Error()) return err } jsonPath := c.userCreatedPath(userid) if err := c.storage.MakeDirIfNotExist(ctx, path.Dir(jsonPath)); err != nil { - c.UserShares[userid].Mtime = oldMtime span.RecordError(err) span.SetStatus(codes.Error, err.Error()) return err } - if err = c.storage.Upload(ctx, metadata.UploadRequest{ - Path: jsonPath, - Content: createdBytes, - IfUnmodifiedSince: oldMtime, - MTime: c.UserShares[userid].Mtime, - }); err != nil { - c.UserShares[userid].Mtime = oldMtime + ur := metadata.UploadRequest{ + Path: jsonPath, + Content: createdBytes, + IfMatchEtag: c.UserShares[userid].Etag, + } + // when there is no etag in memory make sure the file has not been created on the server, see https://www.rfc-editor.org/rfc/rfc9110#field.if-match + // > If the field value is "*", the condition is false if the origin server has a current representation for the target resource. 
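Add and Remove above, like their receivedsharecache counterparts, wrap this conditional write in the same optimistic-concurrency loop: persist, and on an etag mismatch re-sync from storage and try again. Condensed to its skeleton (helper names are illustrative, not part of the change):

package sketch

import (
	"context"

	"github.com/cs3org/reva/v2/pkg/errtypes"
)

// persistWithRetry performs an etag-guarded write; whenever the server
// reports a concurrent modification it refreshes the local cache from
// storage and tries again, giving up after a bounded number of attempts.
func persistWithRetry(ctx context.Context, persist func() error, sync func(context.Context) error) error {
	var err error
	for retries := 100; retries > 0; retries-- {
		err = persist()
		switch err.(type) {
		case nil:
			return nil // write accepted, the cached etag is current again
		case errtypes.Aborted, errtypes.PreconditionFailed:
			// etag mismatch: another writer won the race; re-sync and retry
			if err := sync(ctx); err != nil {
				return err
			}
		default:
			return err // unrelated failure, retrying will not help
		}
	}
	return err
}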
+ if c.UserShares[userid].Etag == "" { + ur.IfNoneMatch = []string{"*"} + } + res, err := c.storage.Upload(ctx, ur) + if err != nil { span.RecordError(err) span.SetStatus(codes.Error, err.Error()) return err } + c.UserShares[userid].Etag = res.Etag span.SetStatus(codes.Ok, "") return nil } @@ -308,3 +338,16 @@ func (c *Cache) Persist(ctx context.Context, userid string) error { func (c *Cache) userCreatedPath(userid string) string { return filepath.Join("/", c.namespace, userid, c.filename) } + +func (c *Cache) initializeIfNeeded(userid, ssid string) { + if c.UserShares[userid] == nil { + c.UserShares[userid] = &UserShareCache{ + UserShares: map[string]*SpaceShareIDs{}, + } + } + if ssid != "" && c.UserShares[userid].UserShares[ssid] == nil { + c.UserShares[userid].UserShares[ssid] = &SpaceShareIDs{ + IDs: map[string]struct{}{}, + } + } +} diff --git a/pkg/share/manager/jsoncs3/sharecache/sharecache_test.go b/pkg/share/manager/jsoncs3/sharecache/sharecache_test.go index 456a3b58a9..a01dc27ff4 100644 --- a/pkg/share/manager/jsoncs3/sharecache/sharecache_test.go +++ b/pkg/share/manager/jsoncs3/sharecache/sharecache_test.go @@ -70,12 +70,12 @@ var _ = Describe("Sharecache", func() { Expect(c.Add(ctx, userid, shareID)).To(Succeed()) }) - It("updates the mtime", func() { - oldMtime := c.UserShares[userid].Mtime - Expect(oldMtime).ToNot(Equal(time.Time{})) + It("updates the etag", func() { + oldEtag := c.UserShares[userid].Etag + Expect(oldEtag).ToNot(BeEmpty()) Expect(c.Persist(ctx, userid)).To(Succeed()) - Expect(c.UserShares[userid]).ToNot(Equal(oldMtime)) + Expect(c.UserShares[userid].Etag).ToNot(Equal(oldEtag)) }) }) }) diff --git a/pkg/storage/utils/decomposedfs/decomposedfs.go b/pkg/storage/utils/decomposedfs/decomposedfs.go index 5f494e8663..5c341c7357 100644 --- a/pkg/storage/utils/decomposedfs/decomposedfs.go +++ b/pkg/storage/utils/decomposedfs/decomposedfs.go @@ -42,6 +42,7 @@ import ( "github.com/cs3org/reva/v2/pkg/events" "github.com/cs3org/reva/v2/pkg/logger" "github.com/cs3org/reva/v2/pkg/rgrpc/todo/pool" + "github.com/cs3org/reva/v2/pkg/rhttp/datatx/utils/download" "github.com/cs3org/reva/v2/pkg/storage" "github.com/cs3org/reva/v2/pkg/storage/cache" "github.com/cs3org/reva/v2/pkg/storage/utils/chunking" @@ -992,17 +993,17 @@ func (fs *Decomposedfs) Download(ctx context.Context, ref *provider.Reference) ( return fs.DownloadRevision(ctx, ref, ref.ResourceId.OpaqueId) } - node, err := fs.lu.NodeFromResource(ctx, ref) + n, err := fs.lu.NodeFromResource(ctx, ref) if err != nil { return nil, errors.Wrap(err, "Decomposedfs: error resolving ref") } - if !node.Exists { - err = errtypes.NotFound(filepath.Join(node.ParentID, node.Name)) + if !n.Exists { + err = errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) return nil, err } - rp, err := fs.p.AssemblePermissions(ctx, node) + rp, err := fs.p.AssemblePermissions(ctx, n) switch { case err != nil: return nil, err @@ -1014,9 +1015,21 @@ func (fs *Decomposedfs) Download(ctx context.Context, ref *provider.Reference) ( return nil, errtypes.NotFound(f) } - reader, err := fs.tp.ReadBlob(node) + mtime, err := n.GetMTime(ctx) + if err != nil { + return nil, errors.Wrap(err, "Decomposedfs: error getting mtime for '"+n.ID+"'") + } + currentEtag, err := node.CalculateEtag(n, mtime) + if err != nil { + return nil, errors.Wrap(err, "Decomposedfs: error calculating etag for '"+n.ID+"'") + } + expectedEtag := download.EtagFromContext(ctx) + if currentEtag != expectedEtag { + return nil, errtypes.Aborted(fmt.Sprintf("file changed from etag %s to %s", 
expectedEtag, currentEtag)) + } + reader, err := fs.tp.ReadBlob(n) if err != nil { - return nil, errors.Wrap(err, "Decomposedfs: error download blob '"+node.ID+"'") + return nil, errors.Wrap(err, "Decomposedfs: error download blob '"+n.ID+"'") } return reader, nil } diff --git a/pkg/storage/utils/decomposedfs/grants.go b/pkg/storage/utils/decomposedfs/grants.go index 6176110f18..ed3f7ca113 100644 --- a/pkg/storage/utils/decomposedfs/grants.go +++ b/pkg/storage/utils/decomposedfs/grants.go @@ -20,6 +20,7 @@ package decomposedfs import ( "context" + "os" "path/filepath" "strings" @@ -32,6 +33,7 @@ import ( "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/v2/pkg/storagespace" "github.com/cs3org/reva/v2/pkg/utils" + "github.com/rogpeppe/go-internal/lockedfile" ) // DenyGrant denies access to a resource. @@ -74,15 +76,14 @@ func (fs *Decomposedfs) DenyGrant(ctx context.Context, ref *provider.Reference, func (fs *Decomposedfs) AddGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { log := appctx.GetLogger(ctx) log.Debug().Interface("ref", ref).Interface("grant", g).Msg("AddGrant()") - grantNode, grant, err := fs.loadGrant(ctx, ref, g) + grantNode, unlockFunc, grant, err := fs.loadGrant(ctx, ref, g) if err != nil { return err } + defer unlockFunc() if grant != nil { - // grant exists -> go to UpdateGrant - // TODO: should we hard error in this case? - return fs.UpdateGrant(ctx, ref, g) + return errtypes.AlreadyExists(filepath.Join(grantNode.ParentID, grantNode.Name)) } owner := grantNode.Owner() @@ -171,10 +172,11 @@ func (fs *Decomposedfs) ListGrants(ctx context.Context, ref *provider.Reference) // RemoveGrant removes a grant from resource func (fs *Decomposedfs) RemoveGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { - grantNode, grant, err := fs.loadGrant(ctx, ref, g) + grantNode, unlockFunc, grant, err := fs.loadGrant(ctx, ref, g) if err != nil { return err } + defer unlockFunc() if grant == nil { return errtypes.NotFound("grant not found") @@ -200,14 +202,7 @@ func (fs *Decomposedfs) RemoveGrant(ctx context.Context, ref *provider.Reference return err } - var attr string - if g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP { - attr = prefixes.GrantGroupAcePrefix + g.Grantee.GetGroupId().OpaqueId - } else { - attr = prefixes.GrantUserAcePrefix + g.Grantee.GetUserId().OpaqueId - } - - if err = grantNode.RemoveXattr(ctx, attr); err != nil { + if err := grantNode.DeleteGrant(ctx, g, false); err != nil { return err } @@ -243,10 +238,11 @@ func (fs *Decomposedfs) UpdateGrant(ctx context.Context, ref *provider.Reference log := appctx.GetLogger(ctx) log.Debug().Interface("ref", ref).Interface("grant", g).Msg("UpdateGrant()") - grantNode, grant, err := fs.loadGrant(ctx, ref, g) + grantNode, unlockFunc, grant, err := fs.loadGrant(ctx, ref, g) if err != nil { return err } + defer unlockFunc() if grant == nil { // grant not found @@ -273,34 +269,42 @@ func (fs *Decomposedfs) UpdateGrant(ctx context.Context, ref *provider.Reference } // checks if the given grant exists and returns it. 
Nil grant means it doesn't exist -func (fs *Decomposedfs) loadGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (*node.Node, *provider.Grant, error) { +func (fs *Decomposedfs) loadGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (*node.Node, func(), *provider.Grant, error) { + var unlockFunc func() + n, err := fs.lu.NodeFromResource(ctx, ref) if err != nil { - return nil, nil, err + return nil, nil, nil, err } if !n.Exists { - return nil, nil, errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) + return nil, nil, nil, errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) } + f, err := lockedfile.OpenFile(fs.lu.MetadataBackend().LockfilePath(n.InternalPath()), os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return nil, nil, nil, err + } + unlockFunc = func() { f.Close() } + grants, err := n.ListGrants(ctx) if err != nil { - return nil, nil, err + return nil, nil, nil, err } for _, grant := range grants { switch grant.Grantee.GetType() { case provider.GranteeType_GRANTEE_TYPE_USER: if g.Grantee.GetUserId().GetOpaqueId() == grant.Grantee.GetUserId().GetOpaqueId() { - return n, grant, nil + return n, unlockFunc, grant, nil } case provider.GranteeType_GRANTEE_TYPE_GROUP: if g.Grantee.GetGroupId().GetOpaqueId() == grant.Grantee.GetGroupId().GetOpaqueId() { - return n, grant, nil + return n, unlockFunc, grant, nil } } } - return n, nil, nil + return n, unlockFunc, nil, nil } func (fs *Decomposedfs) storeGrant(ctx context.Context, n *node.Node, g *provider.Grant) error { @@ -323,7 +327,10 @@ func (fs *Decomposedfs) storeGrant(ctx context.Context, n *node.Node, g *provide // set the grant e := ace.FromGrant(g) principal, value := e.Marshal() - if err := n.SetXattr(ctx, prefixes.GrantPrefix+principal, value); err != nil { + attribs := node.Attributes{ + prefixes.GrantPrefix + principal: value, + } + if err := n.SetXattrsWithContext(ctx, attribs, false); err != nil { appctx.GetLogger(ctx).Error().Err(err). Str("principal", principal).Msg("Could not set grant for principal") return err diff --git a/pkg/storage/utils/decomposedfs/lookup/lookup.go b/pkg/storage/utils/decomposedfs/lookup/lookup.go index a3377083e4..9e0ffd55b1 100644 --- a/pkg/storage/utils/decomposedfs/lookup/lookup.go +++ b/pkg/storage/utils/decomposedfs/lookup/lookup.go @@ -295,7 +295,7 @@ func refFromCS3(b []byte) (*provider.Reference, error) { // The optional filter function can be used to filter by attribute name, e.g. by checking a prefix // For the source file, a shared lock is acquired. // NOTE: target resource will be write locked! -func (lu *Lookup) CopyMetadata(ctx context.Context, src, target string, filter func(attributeName string) bool) (err error) { +func (lu *Lookup) CopyMetadata(ctx context.Context, src, target string, filter func(attributeName string, value []byte) (newValue []byte, copy bool), acquireTargetLock bool) (err error) { // Acquire a read log on the source node // write lock existing node before reading treesize or tree time lock, err := lockedfile.OpenFile(lu.MetadataBackend().LockfilePath(src), os.O_RDONLY|os.O_CREATE, 0600) @@ -315,14 +315,14 @@ func (lu *Lookup) CopyMetadata(ctx context.Context, src, target string, filter f } }() - return lu.CopyMetadataWithSourceLock(ctx, src, target, filter, lock) + return lu.CopyMetadataWithSourceLock(ctx, src, target, filter, lock, acquireTargetLock) } // CopyMetadataWithSourceLock copies all extended attributes from source to target. // The optional filter function can be used to filter by attribute name, e.g. 
by checking a prefix // For the source file, a matching lockedfile is required. // NOTE: target resource will be write locked! -func (lu *Lookup) CopyMetadataWithSourceLock(ctx context.Context, sourcePath, targetPath string, filter func(attributeName string) bool, lockedSource *lockedfile.File) (err error) { +func (lu *Lookup) CopyMetadataWithSourceLock(ctx context.Context, sourcePath, targetPath string, filter func(attributeName string, value []byte) (newValue []byte, copy bool), lockedSource *lockedfile.File, acquireTargetLock bool) (err error) { switch { case lockedSource == nil: return errors.New("no lock provided") @@ -337,12 +337,16 @@ func (lu *Lookup) CopyMetadataWithSourceLock(ctx context.Context, sourcePath, ta newAttrs := make(map[string][]byte, 0) for attrName, val := range attrs { - if filter == nil || filter(attrName) { - newAttrs[attrName] = val + if filter != nil { + var ok bool + if val, ok = filter(attrName, val); !ok { + continue + } } + newAttrs[attrName] = val } - return lu.MetadataBackend().SetMultiple(ctx, targetPath, newAttrs, true) + return lu.MetadataBackend().SetMultiple(ctx, targetPath, newAttrs, acquireTargetLock) } // DetectBackendOnDisk returns the name of the metadata backend being used on disk diff --git a/pkg/storage/utils/decomposedfs/metadata.go b/pkg/storage/utils/decomposedfs/metadata.go index 4498794829..ec5fed27cb 100644 --- a/pkg/storage/utils/decomposedfs/metadata.go +++ b/pkg/storage/utils/decomposedfs/metadata.go @@ -74,7 +74,7 @@ func (fs *Decomposedfs) SetArbitraryMetadata(ctx context.Context, ref *provider. if md.Metadata != nil { if val, ok := md.Metadata["mtime"]; ok { delete(md.Metadata, "mtime") - if err := n.SetMtimeString(val); err != nil { + if err := n.SetMtimeString(ctx, val); err != nil { errs = append(errs, errors.Wrap(err, "could not set mtime")) } } @@ -184,7 +184,7 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide continue } fa := fmt.Sprintf("%s:%s:%s@%s", prefixes.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp()) - if err := n.RemoveXattr(ctx, fa); err != nil { + if err := n.RemoveXattr(ctx, fa, true); err != nil { if metadata.IsAttrUnset(err) { continue // already gone, ignore } @@ -195,7 +195,7 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide errs = append(errs, errors.Wrap(err, "could not unset favorite flag")) } default: - if err = n.RemoveXattr(ctx, prefixes.MetadataPrefix+k); err != nil { + if err = n.RemoveXattr(ctx, prefixes.MetadataPrefix+k, true); err != nil { if metadata.IsAttrUnset(err) { continue // already gone, ignore } diff --git a/pkg/storage/utils/decomposedfs/metadata/messagepack_backend.go b/pkg/storage/utils/decomposedfs/metadata/messagepack_backend.go index 3858fbe915..22485d7b3c 100644 --- a/pkg/storage/utils/decomposedfs/metadata/messagepack_backend.go +++ b/pkg/storage/utils/decomposedfs/metadata/messagepack_backend.go @@ -120,8 +120,8 @@ func (b MessagePackBackend) SetMultiple(ctx context.Context, path string, attrib } // Remove an extended attribute key -func (b MessagePackBackend) Remove(ctx context.Context, path, key string) error { - return b.saveAttributes(ctx, path, nil, []string{key}, true) +func (b MessagePackBackend) Remove(ctx context.Context, path, key string, acquireLock bool) error { + return b.saveAttributes(ctx, path, nil, []string{key}, acquireLock) } // AllWithLockedSource reads all extended attributes from the given reader (if possible). 
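CopyMetadata's filter parameter changed from a keep/skip predicate to a function that may also rewrite the value being copied; the revision-restore code further down uses exactly that to refresh the stored mtime while copying blob attributes. A sketch of such a filter, assuming only the prefixes shown in this diff (the function name is illustrative):

package sketch

import (
	"strings"
	"time"

	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes"
)

// blobAttrsWithFreshMtime keeps only the attributes that describe the blob
// and rewrites the stored mtime to the current time while copying.
func blobAttrsWithFreshMtime(attributeName string, value []byte) (newValue []byte, copy bool) {
	if attributeName == prefixes.MTimeAttr {
		return []byte(time.Now().UTC().Format(time.RFC3339Nano)), true
	}
	keep := strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) ||
		attributeName == prefixes.TypeAttr ||
		attributeName == prefixes.BlobIDAttr ||
		attributeName == prefixes.BlobsizeAttr
	return value, keep
}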
diff --git a/pkg/storage/utils/decomposedfs/metadata/metadata.go b/pkg/storage/utils/decomposedfs/metadata/metadata.go index 243895b364..57557b7078 100644 --- a/pkg/storage/utils/decomposedfs/metadata/metadata.go +++ b/pkg/storage/utils/decomposedfs/metadata/metadata.go @@ -46,7 +46,7 @@ type Backend interface { List(ctx context.Context, path string) (attribs []string, err error) Set(ctx context.Context, path, key string, val []byte) error SetMultiple(ctx context.Context, path string, attribs map[string][]byte, acquireLock bool) error - Remove(ctx context.Context, path, key string) error + Remove(ctx context.Context, path, key string, acquireLock bool) error Purge(path string) error Rename(oldPath, newPath string) error @@ -95,7 +95,7 @@ func (NullBackend) SetMultiple(ctx context.Context, path string, attribs map[str } // Remove removes an extended attribute key -func (NullBackend) Remove(ctx context.Context, path string, key string) error { +func (NullBackend) Remove(ctx context.Context, path string, key string, acquireLock bool) error { return errUnconfiguredError } diff --git a/pkg/storage/utils/decomposedfs/metadata/metadata_test.go b/pkg/storage/utils/decomposedfs/metadata/metadata_test.go index 4cd0dad629..04869cd0fd 100644 --- a/pkg/storage/utils/decomposedfs/metadata/metadata_test.go +++ b/pkg/storage/utils/decomposedfs/metadata/metadata_test.go @@ -210,7 +210,7 @@ var _ = Describe("Backend", func() { Expect(err).ToNot(HaveOccurred()) Expect(v).To(Equal([]byte("bar"))) - err = backend.Remove(context.Background(), file, "foo") + err = backend.Remove(context.Background(), file, "foo", true) Expect(err).ToNot(HaveOccurred()) _, err = backend.Get(context.Background(), file, "foo") diff --git a/pkg/storage/utils/decomposedfs/metadata/prefixes/prefixes.go b/pkg/storage/utils/decomposedfs/metadata/prefixes/prefixes.go index 301b466764..020e837fdf 100644 --- a/pkg/storage/utils/decomposedfs/metadata/prefixes/prefixes.go +++ b/pkg/storage/utils/decomposedfs/metadata/prefixes/prefixes.go @@ -66,6 +66,8 @@ const ( // that node. 
PropagationAttr string = OcisPrefix + "propagation" + // we store the mtime in the metadata so the etag for the file content can be calculated consistently + MTimeAttr string = OcisPrefix + "mtime" // the tree modification time of the tree below this node, // propagated when synctime_accounting is true and // user.ocis.propagation=1 is set diff --git a/pkg/storage/utils/decomposedfs/metadata/xattrs_backend.go b/pkg/storage/utils/decomposedfs/metadata/xattrs_backend.go index b9c5d87ef1..7c60043590 100644 --- a/pkg/storage/utils/decomposedfs/metadata/xattrs_backend.go +++ b/pkg/storage/utils/decomposedfs/metadata/xattrs_backend.go @@ -146,12 +146,14 @@ func (XattrsBackend) SetMultiple(ctx context.Context, path string, attribs map[s } // Remove an extended attribute key -func (XattrsBackend) Remove(ctx context.Context, filePath string, key string) (err error) { - lockedFile, err := lockedfile.OpenFile(filePath+filelocks.LockFileSuffix, os.O_CREATE|os.O_WRONLY, 0600) - if err != nil { - return err +func (XattrsBackend) Remove(ctx context.Context, filePath string, key string, acquireLock bool) (err error) { + if acquireLock { + lockedFile, err := lockedfile.OpenFile(filePath+filelocks.LockFileSuffix, os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + return err + } + defer cleanupLockfile(lockedFile) } - defer cleanupLockfile(lockedFile) return xattr.Remove(filePath, key) } diff --git a/pkg/storage/utils/decomposedfs/migrator/0003_switch_to_messagepack_metadata.go b/pkg/storage/utils/decomposedfs/migrator/0003_switch_to_messagepack_metadata.go index 73c919d0fa..fc9ec042a8 100644 --- a/pkg/storage/utils/decomposedfs/migrator/0003_switch_to_messagepack_metadata.go +++ b/pkg/storage/utils/decomposedfs/migrator/0003_switch_to_messagepack_metadata.go @@ -97,7 +97,7 @@ func (m Migration0003) Migrate(migrator *Migrator) (Result, error) { } for k := range attribs { - err = xattrs.Remove(context.Background(), path, k) + err = xattrs.Remove(context.Background(), path, k, false) if err != nil { migrator.log.Debug().Err(err).Str("path", path).Msg("error removing xattr") } diff --git a/pkg/storage/utils/decomposedfs/node/node.go b/pkg/storage/utils/decomposedfs/node/node.go index a3e31ee90b..45e06dd0a3 100644 --- a/pkg/storage/utils/decomposedfs/node/node.go +++ b/pkg/storage/utils/decomposedfs/node/node.go @@ -511,16 +511,21 @@ func (n *Node) LockFilePath() string { } // CalculateEtag returns a hash of fileid + tmtime (or mtime) -func CalculateEtag(nodeID string, tmTime time.Time) (string, error) { - return calculateEtag(nodeID, tmTime) +func CalculateEtag(n *Node, tmTime time.Time) (string, error) { + return calculateEtag(n, tmTime) } // calculateEtag returns a hash of fileid + tmtime (or mtime) -func calculateEtag(nodeID string, tmTime time.Time) (string, error) { +func calculateEtag(n *Node, tmTime time.Time) (string, error) { h := md5.New() - if _, err := io.WriteString(h, nodeID); err != nil { + if _, err := io.WriteString(h, n.ID); err != nil { return "", err } + /* TODO we could strengthen the etag by adding the blobid, but then all etags would change.
we would need a legacy etag check as well + if _, err := io.WriteString(h, n.BlobID); err != nil { + return "", err + } + */ if tb, err := tmTime.UTC().MarshalBinary(); err == nil { if _, err := h.Write(tb); err != nil { return "", err @@ -532,19 +537,20 @@ func calculateEtag(nodeID string, tmTime time.Time) (string, error) { } // SetMtimeString sets the mtime and atime of a node to the unixtime parsed from the given string -func (n *Node) SetMtimeString(mtime string) error { - mt, err := parseMTime(mtime) +func (n *Node) SetMtimeString(ctx context.Context, mtime string) error { + mt, err := utils.MTimeToTime(mtime) if err != nil { return err } - return n.SetMtime(mt) + return n.SetMtime(ctx, &mt) } -// SetMtime sets the mtime and atime of a node -func (n *Node) SetMtime(mtime time.Time) error { - nodePath := n.InternalPath() - // updating mtime also updates atime - return os.Chtimes(nodePath, mtime, mtime) +// SetMtime writes the UTC mtime to the extended attributes or removes the attribute if nil is passed +func (n *Node) SetMtime(ctx context.Context, t *time.Time) (err error) { + if t == nil { + return n.RemoveXattr(ctx, prefixes.MTimeAttr, true) + } + return n.SetXattrString(ctx, prefixes.MTimeAttr, t.UTC().Format(time.RFC3339Nano)) } // SetEtag sets the temporary etag of a node if it differs from the current etag @@ -555,7 +561,7 @@ func (n *Node) SetEtag(ctx context.Context, val string) (err error) { return } var etag string - if etag, err = calculateEtag(n.ID, tmTime); err != nil { + if etag, err = calculateEtag(n, tmTime); err != nil { return } @@ -666,7 +672,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi // use temporary etag if it is set if b, err := n.XattrString(ctx, prefixes.TmpEtagAttr); err == nil && b != "" { ri.Etag = fmt.Sprintf(`"%x"`, b) - } else if ri.Etag, err = calculateEtag(n.ID, tmTime); err != nil { + } else if ri.Etag, err = calculateEtag(n, tmTime); err != nil { sublog.Debug().Err(err).Msg("could not calculate etag") } @@ -883,22 +889,26 @@ func (n *Node) GetTMTime(ctx context.Context) (time.Time, error) { } // no tmtime, use mtime - return n.GetMTime() + return n.GetMTime(ctx) } -// GetMTime reads the mtime from disk -func (n *Node) GetMTime() (time.Time, error) { - fi, err := os.Lstat(n.InternalPath()) +// GetMTime reads the mtime from the extended attributes, falling back to disk +func (n *Node) GetMTime(ctx context.Context) (time.Time, error) { + b, err := n.XattrString(ctx, prefixes.MTimeAttr) if err != nil { - return time.Time{}, err + fi, err := os.Lstat(n.InternalPath()) + if err != nil { + return time.Time{}, err + } + return fi.ModTime(), nil } - return fi.ModTime(), nil + return time.Parse(time.RFC3339Nano, b) } // SetTMTime writes the UTC tmtime to the extended attributes or removes the attribute if nil is passed func (n *Node) SetTMTime(ctx context.Context, t *time.Time) (err error) { if t == nil { - return n.RemoveXattr(ctx, prefixes.TreeMTimeAttr) + return n.RemoveXattr(ctx, prefixes.TreeMTimeAttr, true) } return n.SetXattrString(ctx, prefixes.TreeMTimeAttr, t.UTC().Format(time.RFC3339Nano)) } @@ -915,7 +925,7 @@ func (n *Node) GetDTime(ctx context.Context) (tmTime time.Time, err error) { // SetDTime writes the UTC dtime to the extended attributes or removes the attribute if nil is passed func (n *Node) SetDTime(ctx context.Context, t *time.Time) (err error) { if t == nil { - return n.RemoveXattr(ctx, prefixes.DTimeAttr) + return n.RemoveXattr(ctx, prefixes.DTimeAttr, true) } return n.SetXattrString(ctx,
prefixes.DTimeAttr, t.UTC().Format(time.RFC3339Nano)) } @@ -962,7 +972,14 @@ func (n *Node) SetChecksum(ctx context.Context, csType string, h hash.Hash) (err // UnsetTempEtag removes the temporary etag attribute func (n *Node) UnsetTempEtag(ctx context.Context) (err error) { - return n.RemoveXattr(ctx, prefixes.TmpEtagAttr) + return n.RemoveXattr(ctx, prefixes.TmpEtagAttr, true) +} + +func isGrantExpired(g *provider.Grant) bool { + if g.Expiration == nil { + return false + } + return time.Now().After(time.Unix(int64(g.Expiration.Seconds), int64(g.Expiration.Nanos))) } // ReadUserPermissions will assemble the permissions for the current user on the given node without parent nodes @@ -1017,6 +1034,10 @@ func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap prov continue } + if isGrantExpired(g) { + continue + } + switch { case err == nil: // If all permissions are set to false we have a deny grant @@ -1111,6 +1132,23 @@ func (n *Node) ReadGrant(ctx context.Context, grantee string) (g *provider.Grant return e.Grant(), nil } +// DeleteGrant removes a CS3 grant from the node +func (n *Node) DeleteGrant(ctx context.Context, g *provider.Grant, acquireLock bool) (err error) { + + var attr string + if g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP { + attr = prefixes.GrantGroupAcePrefix + g.Grantee.GetGroupId().OpaqueId + } else { + attr = prefixes.GrantUserAcePrefix + g.Grantee.GetUserId().OpaqueId + } + + if err = n.RemoveXattr(ctx, attr, acquireLock); err != nil { + return err + } + + return nil +} + // ListGrants lists all grants of the current node. func (n *Node) ListGrants(ctx context.Context) ([]*provider.Grant, error) { grantees, err := n.ListGrantees(ctx) @@ -1159,17 +1197,6 @@ func (n *Node) getGranteeTypes(ctx context.Context) []provider.GranteeType { return types } -func parseMTime(v string) (t time.Time, err error) { - p := strings.SplitN(v, ".", 2) - var sec, nsec int64 - if sec, err = strconv.ParseInt(p[0], 10, 64); err == nil { - if len(p) > 1 { - nsec, err = strconv.ParseInt(p[1], 10, 64) - } - } - return time.Unix(sec, nsec), err -} - // FindStorageSpaceRoot calls n.Parent() and climbs the tree // until it finds the space root node and adds it to the node func (n *Node) FindStorageSpaceRoot(ctx context.Context) error { @@ -1198,7 +1225,7 @@ func (n *Node) UnmarkProcessing(ctx context.Context, uploadID string) error { // file started another postprocessing later - do not remove return nil } - return n.RemoveXattr(ctx, prefixes.StatusPrefix) + return n.RemoveXattr(ctx, prefixes.StatusPrefix, true) } // IsProcessing returns true if the node is currently being processed diff --git a/pkg/storage/utils/decomposedfs/node/node_test.go b/pkg/storage/utils/decomposedfs/node/node_test.go index 7c257cb559..c0fe3488f7 100644 --- a/pkg/storage/utils/decomposedfs/node/node_test.go +++ b/pkg/storage/utils/decomposedfs/node/node_test.go @@ -246,7 +246,7 @@ var _ = Describe("Node", func() { UpdateGrant: true, Stat: true, }, nil).Times(1) - err = env.Fs.AddGrant(env.Ctx, &provider.Reference{ + err = env.Fs.UpdateGrant(env.Ctx, &provider.Reference{ ResourceId: &provider.ResourceId{ SpaceId: pSpace.SpaceId, OpaqueId: pSpace.OpaqueId, @@ -273,7 +273,7 @@ var _ = Describe("Node", func() { UpdateGrant: true, Stat: true, }, nil).Times(1) - err = env.Fs.AddGrant(env.Ctx, &provider.Reference{ + err = env.Fs.UpdateGrant(env.Ctx, &provider.Reference{ ResourceId: &provider.ResourceId{ SpaceId: storageSpace.SpaceId, OpaqueId: storageSpace.OpaqueId, diff --git
a/pkg/storage/utils/decomposedfs/node/xattrs.go b/pkg/storage/utils/decomposedfs/node/xattrs.go index 39e1725476..79ba71f708 100644 --- a/pkg/storage/utils/decomposedfs/node/xattrs.go +++ b/pkg/storage/utils/decomposedfs/node/xattrs.go @@ -94,11 +94,11 @@ func (n *Node) SetXattrString(ctx context.Context, key, val string) (err error) } // RemoveXattr removes an extended attribute from the write-through cache/node -func (n *Node) RemoveXattr(ctx context.Context, key string) error { +func (n *Node) RemoveXattr(ctx context.Context, key string, acquireLock bool) error { if n.xattrsCache != nil { delete(n.xattrsCache, key) } - return n.lu.MetadataBackend().Remove(ctx, n.InternalPath(), key) + return n.lu.MetadataBackend().Remove(ctx, n.InternalPath(), key, acquireLock) } // XattrsWithReader returns the extended attributes of the node. If the attributes have already diff --git a/pkg/storage/utils/decomposedfs/revisions.go b/pkg/storage/utils/decomposedfs/revisions.go index 4b2869b621..e078d78bac 100644 --- a/pkg/storage/utils/decomposedfs/revisions.go +++ b/pkg/storage/utils/decomposedfs/revisions.go @@ -33,6 +33,7 @@ import ( "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/v2/pkg/storagespace" "github.com/pkg/errors" + "github.com/rogpeppe/go-internal/lockedfile" ) // Revision entries are stored inside the node folder and start with the same uuid as the current version. @@ -90,7 +91,7 @@ func (fs *Decomposedfs) ListRevisions(ctx context.Context, ref *provider.Referen appctx.GetLogger(ctx).Error().Err(err).Str("name", fi.Name()).Msg("error reading blobsize xattr, using 0") } rev.Size = uint64(blobSize) - etag, err := node.CalculateEtag(np, mtime) + etag, err := node.CalculateEtag(n, mtime) if err != nil { return nil, errors.Wrapf(err, "error calculating etag") } @@ -207,83 +208,98 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer return err } + // write lock node before copying metadata + f, err := lockedfile.OpenFile(fs.lu.MetadataBackend().LockfilePath(n.InternalPath()), os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return err + } + defer f.Close() + // move current version to new revision nodePath := fs.lu.InternalPath(spaceID, kp[0]) - var fi os.FileInfo - if fi, err = os.Stat(nodePath); err == nil { - // revisions are stored alongside the actual file, so a rename can be efficient and does not cross storage / partition boundaries - newRevisionPath := fs.lu.InternalPath(spaceID, kp[0]+node.RevisionIDDelimiter+fi.ModTime().UTC().Format(time.RFC3339Nano)) - - // touch new revision - if _, err := os.Create(newRevisionPath); err != nil { - return err - } - defer func() { - if returnErr != nil { - if err := os.Remove(newRevisionPath); err != nil { - log.Error().Err(err).Str("revision", filepath.Base(newRevisionPath)).Msg("could not clean up revision node") - } - if err := fs.lu.MetadataBackend().Purge(newRevisionPath); err != nil { - log.Error().Err(err).Str("revision", filepath.Base(newRevisionPath)).Msg("could not clean up revision node") - } - } - }() - - // copy blob metadata from node to new revision node - err = fs.lu.CopyMetadata(ctx, nodePath, newRevisionPath, func(attributeName string) bool { - return strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || // for checksums - attributeName == prefixes.TypeAttr || - attributeName == prefixes.BlobIDAttr || - attributeName == prefixes.BlobsizeAttr - }) - if err != nil { - return errtypes.InternalError("failed to copy blob xattrs to version node: " + err.Error()) 
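RestoreRevision now write-locks the node once via lockedfile and then performs every metadata operation under that single lock; the acquireLock parameters added to Remove, RemoveXattr and SetMultiple exist so those calls can skip re-locking. A minimal sketch of that calling convention, assuming the Lookup and LockfilePath helpers used in this diff (the function name is illustrative):

package sketch

import (
	"context"
	"os"

	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup"
	"github.com/rogpeppe/go-internal/lockedfile"
)

// removeWithHeldLock locks the node's metadata once and then asks the
// backend not to lock again, so the same lockfile is never taken twice.
func removeWithHeldLock(ctx context.Context, lu *lookup.Lookup, path, key string) error {
	f, err := lockedfile.OpenFile(lu.MetadataBackend().LockfilePath(path), os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return err
	}
	defer f.Close()
	// acquireLock=false: we already own the lock for this critical section
	return lu.MetadataBackend().Remove(ctx, path, key, false)
}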
- } - - // remember mtime from node as new revision mtime - if err = os.Chtimes(newRevisionPath, fi.ModTime(), fi.ModTime()); err != nil { - return errtypes.InternalError("failed to change mtime of version node") - } + mtime, err := n.GetMTime(ctx) + if err != nil { + log.Error().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("cannot read mtime") + return err + } - // update blob id in node + // revisions are stored alongside the actual file, so a rename can be efficient and does not cross storage / partition boundaries + newRevisionPath := fs.lu.InternalPath(spaceID, kp[0]+node.RevisionIDDelimiter+mtime.UTC().Format(time.RFC3339Nano)) - // copy blob metadata from restored revision to node - restoredRevisionPath := fs.lu.InternalPath(spaceID, revisionKey) - err = fs.lu.CopyMetadata(ctx, restoredRevisionPath, nodePath, func(attributeName string) bool { - return strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || - attributeName == prefixes.TypeAttr || - attributeName == prefixes.BlobIDAttr || - attributeName == prefixes.BlobsizeAttr - }) - if err != nil { - return errtypes.InternalError("failed to copy blob xattrs to old revision to node: " + err.Error()) + // touch new revision + if _, err := os.Create(newRevisionPath); err != nil { + return err + } + defer func() { + if returnErr != nil { + if err := os.Remove(newRevisionPath); err != nil { + log.Error().Err(err).Str("revision", filepath.Base(newRevisionPath)).Msg("could not clean up revision node") + } + if err := fs.lu.MetadataBackend().Purge(newRevisionPath); err != nil { + log.Error().Err(err).Str("revision", filepath.Base(newRevisionPath)).Msg("could not clean up revision node") + } } + }() + + // copy blob metadata from node to new revision node + err = fs.lu.CopyMetadataWithSourceLock(ctx, nodePath, newRevisionPath, func(attributeName string, value []byte) (newValue []byte, copy bool) { + return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || // for checksums + attributeName == prefixes.TypeAttr || + attributeName == prefixes.BlobIDAttr || + attributeName == prefixes.BlobsizeAttr || + attributeName == prefixes.MTimeAttr // FIXME somewhere I mix up the revision time and the mtime, causing the restore to overwrite the other existing revision + }, f, true) + if err != nil { + return errtypes.InternalError("failed to copy blob xattrs to version node: " + err.Error()) + } - revisionSize, err := fs.lu.MetadataBackend().GetInt64(ctx, restoredRevisionPath, prefixes.BlobsizeAttr) - if err != nil { - return errtypes.InternalError("failed to read blob size xattr from old revision") - } + // remember mtime from node as new revision mtime + if err = os.Chtimes(newRevisionPath, mtime, mtime); err != nil { + return errtypes.InternalError("failed to change mtime of version node") + } - // drop old revision - if err := os.Remove(restoredRevisionPath); err != nil { - log.Warn().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("could not delete old revision, continuing") - } + // update blob id in node - // explicitly update mtime of node as writing xattrs does not change mtime - now := time.Now() - if err := os.Chtimes(nodePath, now, now); err != nil { - return errtypes.InternalError("failed to change mtime of version node") + // copy blob metadata from restored revision to node + restoredRevisionPath := fs.lu.InternalPath(spaceID, revisionKey) + err = fs.lu.CopyMetadata(ctx, restoredRevisionPath, nodePath, func(attributeName
string, value []byte) (newValue []byte, copy bool) { + if attributeName == prefixes.MTimeAttr { + // update mtime + return []byte(time.Now().UTC().Format(time.RFC3339Nano)), true } + return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || + attributeName == prefixes.TypeAttr || + attributeName == prefixes.BlobIDAttr || + attributeName == prefixes.BlobsizeAttr + }, false) + if err != nil { + return errtypes.InternalError("failed to copy blob xattrs to old revision to node: " + err.Error()) + } - // revision 5, current 10 (restore a smaller blob) -> 5-10 = -5 - // revision 10, current 5 (restore a bigger blob) -> 10-5 = +5 - sizeDiff := revisionSize - n.Blobsize + revisionSize, err := fs.lu.MetadataBackend().GetInt64(ctx, restoredRevisionPath, prefixes.BlobsizeAttr) + if err != nil { + return errtypes.InternalError("failed to read blob size xattr from old revision") + } - return fs.tp.Propagate(ctx, n, sizeDiff) + // drop old revision + if err := os.Remove(restoredRevisionPath); err != nil { + log.Warn().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("could not delete old revision, continuing") } + if err := os.Remove(fs.lu.MetadataBackend().MetadataPath(restoredRevisionPath)); err != nil { + log.Warn().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("could not delete old revision metadata, continuing") + } + if err := os.Remove(fs.lu.MetadataBackend().LockfilePath(restoredRevisionPath)); err != nil { + log.Warn().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("could not delete old revision metadata lockfile, continuing") + } + if err := fs.lu.MetadataBackend().Purge(restoredRevisionPath); err != nil { + log.Warn().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("could not purge old revision from cache, continuing") + } + + // revision 5, current 10 (restore a smaller blob) -> 5-10 = -5 - // revision 10, current 5 (restore a bigger blob) -> 10-5 = +5 + sizeDiff := revisionSize - n.Blobsize - log.Error().Err(err).Interface("ref", ref).Str("originalnode", kp[0]).Str("revisionKey", revisionKey).Msg("original node does not exist") - return nil + return fs.tp.Propagate(ctx, n, sizeDiff) } // DeleteRevision deletes the specified revision of the resource diff --git a/pkg/storage/utils/decomposedfs/spaces.go b/pkg/storage/utils/decomposedfs/spaces.go index cc72cced20..e0b28313b9 100644 --- a/pkg/storage/utils/decomposedfs/spaces.go +++ b/pkg/storage/utils/decomposedfs/spaces.go @@ -806,12 +806,14 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, } } + sublog := appctx.GetLogger(ctx).With().Str("space", n.SpaceRoot.ID).Logger() + var err error // TODO apply more filters var sname string if sname, err = n.SpaceRoot.XattrString(ctx, prefixes.SpaceNameAttr); err != nil { // FIXME: Is that a severe problem? - appctx.GetLogger(ctx).Debug().Err(err).Msg("space does not have a name attribute") + sublog.Debug().Err(err).Msg("space does not have a name attribute") } /* @@ -846,15 +848,28 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, // This way we don't have to have a cron job checking the grants at regular intervals. // The tradeoff, obviously, is that this code is here.
if isGrantExpired(g) { - err := fs.RemoveGrant(ctx, &provider.Reference{ - ResourceId: &provider.ResourceId{ - SpaceId: n.SpaceRoot.SpaceID, - OpaqueId: n.ID}, - }, g) - appctx.GetLogger(ctx).Error().Err(err). - Str("space", n.SpaceRoot.ID). - Str("grantee", id). - Msg("failed to remove expired space grant") + if err := n.DeleteGrant(ctx, g, true); err != nil { + sublog.Error().Err(err).Str("grantee", id). + Msg("failed to delete expired space grant") + } + if n.IsSpaceRoot(ctx) { + // invalidate space grant + switch { + case g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_USER: + // remove from user index + if err := fs.userSpaceIndex.Remove(g.Grantee.GetUserId().GetOpaqueId(), n.SpaceID); err != nil { + sublog.Error().Err(err).Str("grantee", id). + Msg("failed to delete expired user space index") + } + case g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP: + // remove from group index + if err := fs.groupSpaceIndex.Remove(g.Grantee.GetGroupId().GetOpaqueId(), n.SpaceID); err != nil { + sublog.Error().Err(err).Str("grantee", id). + Msg("failed to delete expired group space index") + } + } + } + continue } grantExpiration[id] = g.Expiration @@ -949,7 +964,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, } } - etag, err := node.CalculateEtag(n.ID, tmtime) + etag, err := node.CalculateEtag(n, tmtime) if err != nil { return nil, err } diff --git a/pkg/storage/utils/decomposedfs/tree/tree.go b/pkg/storage/utils/decomposedfs/tree/tree.go index 4ab4bda4d1..e0610ed5d5 100644 --- a/pkg/storage/utils/decomposedfs/tree/tree.go +++ b/pkg/storage/utils/decomposedfs/tree/tree.go @@ -149,7 +149,7 @@ func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool, attributes[prefixes.StatusPrefix] = []byte(node.ProcessingStatus) } if mtime != "" { - if err := n.SetMtimeString(mtime); err != nil { + if err := n.SetMtimeString(ctx, mtime); err != nil { return errors.Wrap(err, "Decomposedfs: could not set mtime") } } @@ -469,7 +469,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { trashLink := filepath.Join(t.options.Root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2)) if err := os.MkdirAll(filepath.Dir(trashLink), 0700); err != nil { // Roll back changes - _ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr) + _ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr, true) return err } @@ -482,7 +482,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { err = os.Symlink("../../../../../nodes/"+lookup.Pathify(n.ID, 4, 2)+node.TrashIDDelimiter+deletionTime, trashLink) if err != nil { // Roll back changes - _ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr) + _ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr, true) return } @@ -495,12 +495,12 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { // To roll back changes // TODO remove symlink // Roll back changes - _ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr) + _ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr, true) return } err = t.lookup.MetadataBackend().Rename(nodePath, trashPath) if err != nil { - _ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr) + _ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr, true) _ = os.Rename(trashPath, nodePath) return } @@ -514,7 +514,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { // TODO revert the rename // TODO remove symlink // Roll back changes - _ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr) + _ = n.RemoveXattr(ctx, 
prefixes.TrashOriginAttr, true) return } diff --git a/pkg/storage/utils/decomposedfs/upload.go b/pkg/storage/utils/decomposedfs/upload.go index a77986fc0e..a27086a4df 100644 --- a/pkg/storage/utils/decomposedfs/upload.go +++ b/pkg/storage/utils/decomposedfs/upload.go @@ -189,8 +189,16 @@ func (fs *Decomposedfs) InitiateUpload(ctx context.Context, ref *provider.Refere return nil, errtypes.BadRequest("unsupported checksum algorithm: " + parts[0]) } } - if ifMatch, ok := metadata["if-match"]; ok { - info.MetaData["if-match"] = ifMatch + + // only check preconditions if they are not empty // TODO or is this a bad request? + if metadata["if-match"] != "" { + info.MetaData["if-match"] = metadata["if-match"] + } + if metadata["if-none-match"] != "" { + info.MetaData["if-none-match"] = metadata["if-none-match"] + } + if metadata["if-unmodified-since"] != "" { + info.MetaData["if-unmodified-since"] = metadata["if-unmodified-since"] } } diff --git a/pkg/storage/utils/decomposedfs/upload/processing.go b/pkg/storage/utils/decomposedfs/upload/processing.go index 7dba91a18a..d70f7f44a8 100644 --- a/pkg/storage/utils/decomposedfs/upload/processing.go +++ b/pkg/storage/utils/decomposedfs/upload/processing.go @@ -168,6 +168,9 @@ func New(ctx context.Context, info tusd.FileInfo, lu *lookup.Lookup, tp Tree, p info.Storage["NodeId"] = uuid.New().String() info.Storage["NodeExists"] = "false" } + if info.MetaData["if-none-match"] == "*" && info.Storage["NodeExists"] == "true" { + return nil, errtypes.Aborted(fmt.Sprintf("parent %s already has a child %s", n.ID, n.Name)) + } // Create binary file in the upload folder with no content log.Debug().Interface("info", info).Msg("Decomposedfs: built storage info") file, err := os.OpenFile(binPath, os.O_CREATE|os.O_WRONLY, defaultFilePerm) @@ -276,10 +279,19 @@ func CreateNodeForUpload(upload *Upload, initAttrs node.Attributes) (*node.Node, switch upload.Info.Storage["NodeExists"] { case "false": f, err = initNewNode(upload, n, uint64(fsize)) + if f != nil { + appctx.GetLogger(upload.Ctx).Info().Str("lockfile", f.Name()).Interface("err", err).Msg("got lock file from initNewNode") + } default: f, err = updateExistingNode(upload, n, spaceID, uint64(fsize)) + if f != nil { + appctx.GetLogger(upload.Ctx).Info().Str("lockfile", f.Name()).Interface("err", err).Msg("got lock file from updateExistingNode") + } } defer func() { + if f == nil { + return + } if err := f.Close(); err != nil { appctx.GetLogger(upload.Ctx).Error().Err(err).Str("nodeid", n.ID).Str("parentid", n.ParentID).Msg("could not close lock") } @@ -288,7 +300,17 @@ func CreateNodeForUpload(upload *Upload, initAttrs node.Attributes) (*node.Node, return nil, err } + mtime := time.Now() + if upload.Info.MetaData["mtime"] != "" { + // overwrite mtime if requested + mtime, err = utils.MTimeToTime(upload.Info.MetaData["mtime"]) + if err != nil { + return nil, err + } + } + // overwrite technical information + initAttrs.SetString(prefixes.MTimeAttr, mtime.UTC().Format(time.RFC3339Nano)) initAttrs.SetInt64(prefixes.TypeAttr, int64(provider.ResourceType_RESOURCE_TYPE_FILE)) initAttrs.SetString(prefixes.ParentidAttr, n.ParentID) initAttrs.SetString(prefixes.NameAttr, n.Name) @@ -302,19 +324,8 @@ func CreateNodeForUpload(upload *Upload, initAttrs node.Attributes) (*node.Node, return nil, errors.Wrap(err, "Decomposedfs: could not write metadata") } - // overwrite mtime if requested - if upload.Info.MetaData["mtime"] != "" { - if err := n.SetMtimeString(upload.Info.MetaData["mtime"]); err != nil { - return nil, err - } - } - 
// add etag to metadata - tmtime, err := n.GetMTime() - if err != nil { - return nil, err - } - upload.Info.MetaData["etag"], _ = node.CalculateEtag(n.ID, tmtime) + upload.Info.MetaData["etag"], _ = node.CalculateEtag(n, mtime) // update nodeid for later upload.Info.Storage["NodeId"] = n.ID @@ -338,7 +349,7 @@ func initNewNode(upload *Upload, n *node.Node, fsize uint64) (*lockedfile.File, } // we also need to touch the actual node file here as it stores the mtime of the resource - h, err := os.OpenFile(n.InternalPath(), os.O_CREATE, 0600) + h, err := os.OpenFile(n.InternalPath(), os.O_CREATE|os.O_EXCL, 0600) if err != nil { return f, err } @@ -351,12 +362,18 @@ func initNewNode(upload *Upload, n *node.Node, fsize uint64) (*lockedfile.File, // link child name to parent if it is new childNameLink := filepath.Join(n.ParentPath(), n.Name) relativeNodePath := filepath.Join("../../../../../", lookup.Pathify(n.ID, 4, 2)) + log := appctx.GetLogger(upload.Ctx).With().Str("childNameLink", childNameLink).Str("relativeNodePath", relativeNodePath).Logger() + log.Info().Msg("initNewNode: creating symlink") + if err = os.Symlink(relativeNodePath, childNameLink); err != nil { + log.Info().Err(err).Msg("initNewNode: symlink failed") if errors.Is(err, iofs.ErrExist) { - return nil, errtypes.AlreadyExists(n.Name) + log.Info().Err(err).Msg("initNewNode: symlink already exists") + return f, errtypes.AlreadyExists(n.Name) } return f, errors.Wrap(err, "Decomposedfs: could not symlink child entry") } + log.Info().Msg("initNewNode: symlink created") // on a new file the sizeDiff is the fileSize upload.SizeDiff = int64(fsize) @@ -365,67 +382,91 @@ } func updateExistingNode(upload *Upload, n *node.Node, spaceID string, fsize uint64) (*lockedfile.File, error) { + targetPath := n.InternalPath() + + // write lock existing node before reading any metadata + f, err := lockedfile.OpenFile(upload.lu.MetadataBackend().LockfilePath(targetPath), os.O_RDWR|os.O_CREATE, 0600) + if err != nil { + return nil, err + } + old, _ := node.ReadNode(upload.Ctx, upload.lu, spaceID, n.ID, false, nil, false) if _, err := node.CheckQuota(upload.Ctx, n.SpaceRoot, true, uint64(old.Blobsize), fsize); err != nil { - return nil, err + return f, err } - tmtime, err := old.GetTMTime(upload.Ctx) + oldNodeMtime, err := old.GetMTime(upload.Ctx) if err != nil { - return nil, err + return f, err + } + oldNodeEtag, err := node.CalculateEtag(old, oldNodeMtime) + if err != nil { + return f, err } // When the if-match header was set we need to check if the // etag still matches before finishing the upload. if ifMatch, ok := upload.Info.MetaData["if-match"]; ok { - targetEtag, err := node.CalculateEtag(n.ID, tmtime) - switch { - case err != nil: - return nil, errtypes.InternalError(err.Error()) - case ifMatch != targetEtag: - return nil, errtypes.Aborted("etag mismatch") + if ifMatch != oldNodeEtag { + return f, errtypes.Aborted("etag mismatch") } } - upload.versionsPath = upload.lu.InternalPath(spaceID, n.ID+node.RevisionIDDelimiter+tmtime.UTC().Format(time.RFC3339Nano)) - upload.SizeDiff = int64(fsize) - old.Blobsize - upload.Info.MetaData["versionsPath"] = upload.versionsPath - upload.Info.MetaData["sizeDiff"] = strconv.Itoa(int(upload.SizeDiff))
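This is the heart of the bugfix described in the changelog: `updateExistingNode` now takes the write lock first and derives `oldNodeEtag` from the node and its stored mtime, so precondition checks and later readers agree on one etag. The exact hash inputs of `node.CalculateEtag` are not shown in this diff; a hedged sketch of its presumed shape (node id plus mtime, hashed into an opaque quoted string):

```go
// Assumed shape of node.CalculateEtag after this change; uses
// crypto/md5, fmt, io and time, with Node carrying the node id.
func CalculateEtag(n *Node, tmtime time.Time) (string, error) {
	h := md5.New()
	if _, err := io.WriteString(h, n.ID); err != nil {
		return "", err
	}
	b, err := tmtime.UTC().MarshalBinary()
	if err != nil {
		return "", err
	}
	if _, err := h.Write(b); err != nil {
		return "", err
	}
	// quoted hex digest, as expected in HTTP ETag headers
	return fmt.Sprintf(`"%x"`, h.Sum(nil)), nil
}
```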
+ // When the if-none-match header was set we need to check if any of the + // etags match before finishing the upload. + if ifNoneMatch, ok := upload.Info.MetaData["if-none-match"]; ok { + if ifNoneMatch == "*" { + return f, errtypes.Aborted("etag mismatch, resource exists") + } + for _, ifNoneMatchTag := range strings.Split(ifNoneMatch, ",") { + if ifNoneMatchTag == oldNodeEtag { + return f, errtypes.Aborted("etag mismatch") + } + } + } - targetPath := n.InternalPath() + // When the if-unmodified-since header was set we need to check whether the + // resource was modified after the given time before finishing the upload. + if ifUnmodifiedSince, ok := upload.Info.MetaData["if-unmodified-since"]; ok { + ifUnmodifiedSince, err := time.Parse(time.RFC3339Nano, ifUnmodifiedSince) + if err != nil { + return f, errtypes.InternalError(fmt.Sprintf("failed to parse if-unmodified-since time: %s", err)) + } - // write lock existing node before reading treesize or tree time - f, err := lockedfile.OpenFile(upload.lu.MetadataBackend().LockfilePath(targetPath), os.O_RDWR|os.O_CREATE, 0600) - if err != nil { - return nil, err + if oldNodeMtime.After(ifUnmodifiedSince) { + return f, errtypes.Aborted("if-unmodified-since mismatch") + } } + upload.versionsPath = upload.lu.InternalPath(spaceID, n.ID+node.RevisionIDDelimiter+oldNodeMtime.UTC().Format(time.RFC3339Nano)) + upload.SizeDiff = int64(fsize) - old.Blobsize + upload.Info.MetaData["versionsPath"] = upload.versionsPath + upload.Info.MetaData["sizeDiff"] = strconv.Itoa(int(upload.SizeDiff)) + // create version node if _, err := os.Create(upload.versionsPath); err != nil { return f, err } // copy blob metadata to version node - if err := upload.lu.CopyMetadataWithSourceLock(upload.Ctx, targetPath, upload.versionsPath, func(attributeName string) bool { - return strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || + if err := upload.lu.CopyMetadataWithSourceLock(upload.Ctx, targetPath, upload.versionsPath, func(attributeName string, value []byte) (newValue []byte, copy bool) { + return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || attributeName == prefixes.TypeAttr || attributeName == prefixes.BlobIDAttr || - attributeName == prefixes.BlobsizeAttr - }, f); err != nil { + attributeName == prefixes.BlobsizeAttr || + attributeName == prefixes.MTimeAttr + }, f, true); err != nil { return f, err } // keep mtime from previous version - if err := os.Chtimes(upload.versionsPath, tmtime, tmtime); err != nil { + if err := os.Chtimes(upload.versionsPath, oldNodeMtime, oldNodeMtime); err != nil { return f, errtypes.InternalError(fmt.Sprintf("failed to change mtime of version node: %s", err)) } - // update mtime of current version - mtime := time.Now() - if err := os.Chtimes(n.InternalPath(), mtime, mtime); err != nil { - return nil, err - } - return f, nil } diff --git a/pkg/storage/utils/decomposedfs/upload/processing_test.go b/pkg/storage/utils/decomposedfs/upload/processing_test.go index 75dd427548..facbf2cd53 100644 --- a/pkg/storage/utils/decomposedfs/upload/processing_test.go +++ b/pkg/storage/utils/decomposedfs/upload/processing_test.go @@ -33,16 +33,18 @@ func TestInitNewNode(t *testing.T) { } n := node.New("e48c4e7a-beac-4b82-b991-a5cff7b8c39c", "930b7a2e-b745-41e1-8a9b-712582021842", "e48c4e7a-beac-4b82-b991-a5cff7b8c39c", "newchild", 10, "26493c53-2634-45f8-949f-dc07b88df9b0", providerv1beta1.ResourceType_RESOURCE_TYPE_FILE, &userv1beta1.UserId{}, lookup) n.SpaceRoot = rootNode - _, err = initNewNode(&Upload{Ctx: context.Background(), lu: lookup, Info:
handler.FileInfo{MetaData: handler.MetaData{}}}, n, 10) + f, err := initNewNode(&Upload{Ctx: context.Background(), lu: lookup, Info: handler.FileInfo{MetaData: handler.MetaData{}}}, n, 10) if err != nil { t.Fatalf(err.Error()) } + defer f.Close() // try initializing the same new node again in case a concurrent requests tries to create a file with the same name n = node.New("e48c4e7a-beac-4b82-b991-a5cff7b8c39c", "a6ede986-cfcd-41c5-a820-6eee955a1c2b", "e48c4e7a-beac-4b82-b991-a5cff7b8c39c", "newchild", 10, "26493c53-2634-45f8-949f-dc07b88df9b0", providerv1beta1.ResourceType_RESOURCE_TYPE_FILE, &userv1beta1.UserId{}, lookup) n.SpaceRoot = rootNode - _, err = initNewNode(&Upload{Ctx: context.Background(), lu: lookup, Info: handler.FileInfo{MetaData: handler.MetaData{}}}, n, 10) + f2, err := initNewNode(&Upload{Ctx: context.Background(), lu: lookup, Info: handler.FileInfo{MetaData: handler.MetaData{}}}, n, 10) if _, ok := err.(errtypes.IsAlreadyExists); !ok { t.Fatalf(`initNewNode(with same 'newchild' name), %v, want %v`, err, errtypes.AlreadyExists("newchild")) } + f2.Close() } diff --git a/pkg/storage/utils/decomposedfs/upload/upload.go b/pkg/storage/utils/decomposedfs/upload/upload.go index 91342a3d76..89e4ab6597 100644 --- a/pkg/storage/utils/decomposedfs/upload/upload.go +++ b/pkg/storage/utils/decomposedfs/upload/upload.go @@ -401,12 +401,13 @@ func (upload *Upload) cleanup(cleanNode, cleanBin, cleanInfo bool) { upload.Node = nil default: - if err := upload.lu.CopyMetadata(upload.Ctx, p, upload.Node.InternalPath(), func(attributeName string) bool { - return strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || + if err := upload.lu.CopyMetadata(upload.Ctx, p, upload.Node.InternalPath(), func(attributeName string, value []byte) (newValue []byte, copy bool) { + return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || attributeName == prefixes.TypeAttr || attributeName == prefixes.BlobIDAttr || - attributeName == prefixes.BlobsizeAttr - }); err != nil { + attributeName == prefixes.BlobsizeAttr || + attributeName == prefixes.MTimeAttr + }, true); err != nil { upload.log.Info().Str("versionpath", p).Str("nodepath", upload.Node.InternalPath()).Err(err).Msg("renaming version node failed") } diff --git a/pkg/storage/utils/downloader/downloader.go b/pkg/storage/utils/downloader/downloader.go index b38ad80b3a..84b5729dbe 100644 --- a/pkg/storage/utils/downloader/downloader.go +++ b/pkg/storage/utils/downloader/downloader.go @@ -101,13 +101,8 @@ func (r *revaDownloader) Download(ctx context.Context, id *provider.ResourceId, } defer httpRes.Body.Close() - if httpRes.StatusCode != http.StatusOK { - switch httpRes.StatusCode { - case http.StatusNotFound: - return errtypes.NotFound(id.String()) - default: - return errtypes.InternalError(httpRes.Status) - } + if err := errtypes.NewErrtypeFromHTTPStatusCode(httpRes.StatusCode, id.String()); err != nil { + return err } _, err = io.Copy(dst, httpRes.Body) diff --git a/pkg/storage/utils/metadata/cs3.go b/pkg/storage/utils/metadata/cs3.go index 977066e0cb..2f0fed087e 100644 --- a/pkg/storage/utils/metadata/cs3.go +++ b/pkg/storage/utils/metadata/cs3.go @@ -33,6 +33,7 @@ import ( rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" + "github.com/cs3org/reva/v2/internal/http/services/owncloud/ocdav/net" ctxpkg "github.com/cs3org/reva/v2/pkg/ctx" "github.com/cs3org/reva/v2/pkg/errtypes" 
"github.com/cs3org/reva/v2/pkg/rgrpc/todo/pool" @@ -128,24 +129,25 @@ func (cs3 *CS3) SimpleUpload(ctx context.Context, uploadpath string, content []b ctx, span := tracer.Start(ctx, "SimpleUpload") defer span.End() - return cs3.Upload(ctx, UploadRequest{ + _, err := cs3.Upload(ctx, UploadRequest{ Path: uploadpath, Content: content, }) + return err } // Upload uploads a file to the metadata storage -func (cs3 *CS3) Upload(ctx context.Context, req UploadRequest) error { +func (cs3 *CS3) Upload(ctx context.Context, req UploadRequest) (*UploadResponse, error) { ctx, span := tracer.Start(ctx, "Upload") defer span.End() client, err := cs3.providerClient() if err != nil { - return err + return nil, err } ctx, err = cs3.getAuthContext(ctx) if err != nil { - return err + return nil, err } ifuReq := &provider.InitiateFileUploadRequest{ @@ -161,6 +163,14 @@ func (cs3 *CS3) Upload(ctx context.Context, req UploadRequest) error { IfMatch: req.IfMatchEtag, } } + if len(req.IfNoneMatch) > 0 { + if req.IfNoneMatch[0] == "*" { + ifuReq.Options = &provider.InitiateFileUploadRequest_IfNotExist{} + } + // else { + // the http upload will carry all if-not-match etags + // } + } if req.IfUnmodifiedSince != (time.Time{}) { ifuReq.Options = &provider.InitiateFileUploadRequest_IfUnmodifiedSince{ IfUnmodifiedSince: utils.TimeToTS(req.IfUnmodifiedSince), @@ -173,10 +183,10 @@ func (cs3 *CS3) Upload(ctx context.Context, req UploadRequest) error { res, err := client.InitiateFileUpload(ctx, ifuReq) if err != nil { - return err + return nil, err } if res.Status.Code != rpc.Code_CODE_OK { - return errtypes.NewErrtypeFromStatus(res.Status) + return nil, errtypes.NewErrtypeFromStatus(res.Status) } var endpoint string @@ -188,21 +198,34 @@ func (cs3 *CS3) Upload(ctx context.Context, req UploadRequest) error { } } if endpoint == "" { - return errors.New("metadata storage doesn't support the simple upload protocol") + return nil, errors.New("metadata storage doesn't support the simple upload protocol") } httpReq, err := http.NewRequest(http.MethodPut, endpoint, bytes.NewReader(req.Content)) if err != nil { - return err + return nil, err + } + for _, etag := range req.IfNoneMatch { + httpReq.Header.Add(net.HeaderIfNoneMatch, etag) } md, _ := metadata.FromOutgoingContext(ctx) httpReq.Header.Add(ctxpkg.TokenHeader, md.Get(ctxpkg.TokenHeader)[0]) resp, err := cs3.dataGatewayClient.Do(httpReq) if err != nil { - return err + return nil, err + } + defer resp.Body.Close() + if err := errtypes.NewErrtypeFromHTTPStatusCode(resp.StatusCode, httpReq.URL.Path); err != nil { + return nil, err + } + etag := resp.Header.Get("Etag") + if ocEtag := resp.Header.Get("OC-ETag"); ocEtag != "" { + etag = ocEtag } - return resp.Body.Close() + return &UploadResponse{ + Etag: etag, + }, nil } // Stat returns the metadata for the given path @@ -241,6 +264,17 @@ func (cs3 *CS3) Stat(ctx context.Context, path string) (*provider.ResourceInfo, func (cs3 *CS3) SimpleDownload(ctx context.Context, downloadpath string) (content []byte, err error) { ctx, span := tracer.Start(ctx, "SimpleDownload") defer span.End() + dres, err := cs3.Download(ctx, DownloadRequest{Path: downloadpath}) + if err != nil { + return nil, err + } + return dres.Content, nil +} + +// Download reads a file from the metadata storage +func (cs3 *CS3) Download(ctx context.Context, req DownloadRequest) (*DownloadResponse, error) { + ctx, span := tracer.Start(ctx, "Download") + defer span.End() client, err := cs3.providerClient() if err != nil { @@ -254,13 +288,18 @@ func (cs3 *CS3) 
@@ -254,13 +288,18 @@ func (cs3 *CS3) SimpleDownload(ctx context.Context, downloadpath string) (conten dreq := provider.InitiateFileDownloadRequest{ Ref: &provider.Reference{ ResourceId: cs3.SpaceRoot, - Path: utils.MakeRelativePath(downloadpath), + Path: utils.MakeRelativePath(req.Path), }, } + // FIXME add a dedicated property on the CS3 InitiateFileDownloadRequest message + // the gateway never forwards the initiate request to the storageprovider, so we have to send it in the actual GET request + // if len(req.IfNoneMatch) > 0 { + // dreq.Opaque = utils.AppendPlainToOpaque(dreq.Opaque, "if-none-match", strings.Join(req.IfNoneMatch, ",")) + // } res, err := client.InitiateFileDownload(ctx, &dreq) if err != nil { - return []byte{}, errtypes.NotFound(dreq.Ref.Path) + return nil, errtypes.NotFound(dreq.Ref.Path) } var endpoint string @@ -272,35 +311,46 @@ func (cs3 *CS3) SimpleDownload(ctx context.Context, downloadpath string) (conten } } if endpoint == "" { - return []byte{}, errors.New("metadata storage doesn't support the spaces download protocol") + return nil, errors.New("metadata storage doesn't support the spaces download protocol") } - req, err := http.NewRequest(http.MethodGet, endpoint, nil) + hreq, err := http.NewRequest(http.MethodGet, endpoint, nil) if err != nil { - return []byte{}, err + return nil, err + } + + for _, etag := range req.IfNoneMatch { + hreq.Header.Add(net.HeaderIfNoneMatch, etag) } md, _ := metadata.FromOutgoingContext(ctx) - req.Header.Add(ctxpkg.TokenHeader, md.Get(ctxpkg.TokenHeader)[0]) - resp, err := cs3.dataGatewayClient.Do(req) + hreq.Header.Add(ctxpkg.TokenHeader, md.Get(ctxpkg.TokenHeader)[0]) + resp, err := cs3.dataGatewayClient.Do(hreq) if err != nil { - return []byte{}, err + return nil, err } + defer resp.Body.Close() + + dres := DownloadResponse{} + + dres.Etag = resp.Header.Get("etag") + if ocEtag := resp.Header.Get("oc-etag"); ocEtag != "" { + dres.Etag = ocEtag // takes precedence + } - if resp.StatusCode != http.StatusOK { - return []byte{}, errtypes.NotFound(dreq.Ref.Path) + if err := errtypes.NewErrtypeFromHTTPStatusCode(resp.StatusCode, hreq.URL.Path); err != nil { + return nil, err } - b, err := io.ReadAll(resp.Body) + dres.Mtime, err = time.Parse(time.RFC1123Z, resp.Header.Get("last-modified")) if err != nil { - return []byte{}, err + return nil, err } - if err = resp.Body.Close(); err != nil { - return []byte{}, err + dres.Content, err = io.ReadAll(resp.Body) + if err != nil { + return nil, err } - return b, nil + return &dres, nil } // Delete deletes a path diff --git a/pkg/storage/utils/metadata/disk.go b/pkg/storage/utils/metadata/disk.go index 2e78d7240f..9aec25002c 100644 --- a/pkg/storage/utils/metadata/disk.go +++ b/pkg/storage/utils/metadata/disk.go @@ -22,6 +22,7 @@ import ( "context" "errors" "fmt" + "io" "io/fs" "os" "path/filepath" @@ -82,45 +83,94 @@ func (disk *Disk) Stat(ctx context.Context, path string) (*provider.ResourceInfo // SimpleUpload stores a file on disk func (disk *Disk) SimpleUpload(ctx context.Context, uploadpath string, content []byte) error { - return disk.Upload(ctx, UploadRequest{ + _, err := disk.Upload(ctx, UploadRequest{ Path: uploadpath, Content: content, }) + return err } // Upload stores a file on disk -func (disk *Disk) Upload(_ context.Context, req UploadRequest) error { +func (disk *Disk) Upload(_ context.Context, req UploadRequest) (*UploadResponse, error) { p := disk.targetPath(req.Path) if req.IfMatchEtag != "" { info, err := os.Stat(p) if err != nil && !errors.Is(err, os.ErrNotExist) { - return err + return nil, err } else if err == nil { etag, err :=
calcEtag(info.ModTime(), info.Size()) if err != nil { - return err + return nil, err } if etag != req.IfMatchEtag { - return errtypes.PreconditionFailed("etag mismatch") + return nil, errtypes.PreconditionFailed("etag mismatch") } } } if req.IfUnmodifiedSince != (time.Time{}) { info, err := os.Stat(p) if err != nil && !errors.Is(err, os.ErrNotExist) { - return err + return nil, err } else if err == nil { if info.ModTime().After(req.IfUnmodifiedSince) { - return errtypes.PreconditionFailed(fmt.Sprintf("resource has been modified, mtime: %s > since %s", info.ModTime(), req.IfUnmodifiedSince)) + return nil, errtypes.PreconditionFailed(fmt.Sprintf("resource has been modified, mtime: %s > since %s", info.ModTime(), req.IfUnmodifiedSince)) } } } - return os.WriteFile(p, req.Content, 0644) + err := os.WriteFile(p, req.Content, 0644) + if err != nil { + return nil, err + } + + info, err := os.Stat(disk.targetPath(req.Path)) + if err != nil { + return nil, err + } + res := &UploadResponse{} + res.Etag, err = calcEtag(info.ModTime(), info.Size()) + if err != nil { + return nil, err + } + return res, nil +} + +// Download reads a file from disk +func (disk *Disk) Download(_ context.Context, req DownloadRequest) (*DownloadResponse, error) { + var err error + + f, err := os.Open(disk.targetPath(req.Path)) + if err != nil { + var pathError *fs.PathError + if errors.As(err, &pathError) { + return nil, errtypes.NotFound("path not found: " + disk.targetPath(req.Path)) + } + return nil, err + } + defer f.Close() + + info, err := f.Stat() + if err != nil { + return nil, err + } + + res := DownloadResponse{} + res.Mtime = info.ModTime() + res.Etag, err = calcEtag(info.ModTime(), info.Size()) + if err != nil { + return nil, err + } + + res.Content, err = io.ReadAll(f) + if err != nil { + return nil, err + } + return &res, nil } // SimpleDownload reads a file from disk -func (disk *Disk) SimpleDownload(_ context.Context, downloadpath string) ([]byte, error) { - return os.ReadFile(disk.targetPath(downloadpath)) +func (disk *Disk) SimpleDownload(ctx context.Context, downloadpath string) ([]byte, error) { + res, err := disk.Download(ctx, DownloadRequest{Path: downloadpath}) + if err != nil { + return nil, err + } + return res.Content, nil } // Delete deletes a path diff --git a/pkg/storage/utils/metadata/mocks/Storage.go b/pkg/storage/utils/metadata/mocks/Storage.go index 0e5230d2fd..8523f3635a 100644 --- a/pkg/storage/utils/metadata/mocks/Storage.go +++ b/pkg/storage/utils/metadata/mocks/Storage.go @@ -76,6 +76,32 @@ func (_m *Storage) Delete(ctx context.Context, path string) error { return r0 } +// Download provides a mock function with given fields: ctx, req +func (_m *Storage) Download(ctx context.Context, req metadata.DownloadRequest) (*metadata.DownloadResponse, error) { + ret := _m.Called(ctx, req) + + var r0 *metadata.DownloadResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, metadata.DownloadRequest) (*metadata.DownloadResponse, error)); ok { + return rf(ctx, req) + } + if rf, ok := ret.Get(0).(func(context.Context, metadata.DownloadRequest) *metadata.DownloadResponse); ok { + r0 = rf(ctx, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*metadata.DownloadResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, metadata.DownloadRequest) error); ok { + r1 = rf(ctx, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // Init provides a mock function with given fields: ctx, name func (_m *Storage) Init(ctx context.Context, name string) error { ret := _m.Called(ctx, name)
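The regenerated mock keeps the usual mockery call surface, so tests can stub the new `Download` the same way as the other methods. For example (the path and payload are invented):

```go
// Stub Download for any context and a fixed request.
m := &mocks.Storage{}
m.On("Download", mock.Anything, metadata.DownloadRequest{Path: "/foo.json"}).
	Return(&metadata.DownloadResponse{
		Content: []byte(`{}`),
		Etag:    `"deadbeef"`,
	}, nil)
```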
@@ -247,17 +273,29 @@ func (_m *Storage) Stat(ctx context.Context, path string) (*providerv1beta1.Reso } // Upload provides a mock function with given fields: ctx, req -func (_m *Storage) Upload(ctx context.Context, req metadata.UploadRequest) error { +func (_m *Storage) Upload(ctx context.Context, req metadata.UploadRequest) (*metadata.UploadResponse, error) { ret := _m.Called(ctx, req) - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, metadata.UploadRequest) error); ok { + var r0 *metadata.UploadResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, metadata.UploadRequest) (*metadata.UploadResponse, error)); ok { + return rf(ctx, req) + } + if rf, ok := ret.Get(0).(func(context.Context, metadata.UploadRequest) *metadata.UploadResponse); ok { r0 = rf(ctx, req) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*metadata.UploadResponse) + } } - return r0 + if rf, ok := ret.Get(1).(func(context.Context, metadata.UploadRequest) error); ok { + r1 = rf(ctx, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } type mockConstructorTestingTNewStorage interface { diff --git a/pkg/storage/utils/metadata/storage.go b/pkg/storage/utils/metadata/storage.go index 2933f2a50a..7c6107cff0 100644 --- a/pkg/storage/utils/metadata/storage.go +++ b/pkg/storage/utils/metadata/storage.go @@ -36,16 +36,37 @@ type UploadRequest struct { Content []byte IfMatchEtag string + IfNoneMatch []string IfUnmodifiedSince time.Time MTime time.Time } +// UploadResponse represents an upload response +type UploadResponse struct { + Etag string +} + +// DownloadRequest represents a download request and its options +type DownloadRequest struct { + Path string + IfNoneMatch []string +} + +// DownloadResponse represents a download response +type DownloadResponse struct { + Content []byte + + Etag string + Mtime time.Time +} + // Storage is the interface to maintain metadata in a storage type Storage interface { Backend() string Init(ctx context.Context, name string) (err error) - Upload(ctx context.Context, req UploadRequest) error + Upload(ctx context.Context, req UploadRequest) (*UploadResponse, error) + Download(ctx context.Context, req DownloadRequest) (*DownloadResponse, error) SimpleUpload(ctx context.Context, uploadpath string, content []byte) error SimpleDownload(ctx context.Context, path string) ([]byte, error) Delete(ctx context.Context, path string) error diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index c67c50dbf9..45f62702c3 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -180,6 +180,18 @@ func MTimeToTS(v string) (ts types.Timestamp, err error) { return types.Timestamp{Seconds: sec, Nanos: uint32(nsec)}, err } +// MTimeToTime converts a string in the form "<unix sec>.<nsec>" into a go time.Time +func MTimeToTime(v string) (t time.Time, err error) { + p := strings.SplitN(v, ".", 2) + var sec, nsec int64 + if sec, err = strconv.ParseInt(p[0], 10, 64); err == nil { + if len(p) > 1 { + nsec, err = strconv.ParseInt(p[1], 10, 64) + } + } + return time.Unix(sec, nsec), err +} + // ExtractGranteeID returns the ID, user or group, set in the GranteeId object func ExtractGranteeID(grantee *provider.Grantee) (*userpb.UserId, *grouppb.GroupId) { switch t := grantee.Id.(type) {
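A closing note on the new `MTimeToTime` helper that feeds the mtime override in `CreateNodeForUpload`: it splits on the first dot and parses the fraction as a raw nanosecond count, so callers are expected to send nine fractional digits. A small self-contained usage sketch:

```go
package main

import (
	"fmt"

	"github.com/cs3org/reva/v2/pkg/utils"
)

func main() {
	// "<unix sec>.<nsec>" with a nine-digit fraction
	t, err := utils.MTimeToTime("1691160000.500000000")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.UTC()) // 2023-08-04 14:40:00.5 +0000 UTC

	// caveat: "1691160000.5" parses the fraction as 5ns, not half a second
}
```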