From b27ff41a32a39a9e588d7b8d3867e64791f15503 Mon Sep 17 00:00:00 2001 From: Andre Duffeck Date: Wed, 28 Jul 2021 18:00:03 +0200 Subject: [PATCH] owncloudsql fixes (#1932) --- changelog/unreleased/owncloudsql-fixes.md | 5 + .../fs/owncloudsql/filecache/filecache.go | 261 ++++- .../owncloudsql/filecache/filecache_test.go | 127 ++- pkg/storage/fs/owncloudsql/owncloudsql.go | 936 +++++++----------- .../fs/owncloudsql/owncloudsql_unix.go | 4 +- pkg/storage/fs/owncloudsql/upload.go | 34 +- 6 files changed, 721 insertions(+), 646 deletions(-) create mode 100644 changelog/unreleased/owncloudsql-fixes.md diff --git a/changelog/unreleased/owncloudsql-fixes.md b/changelog/unreleased/owncloudsql-fixes.md new file mode 100644 index 0000000000..4f62519d11 --- /dev/null +++ b/changelog/unreleased/owncloudsql-fixes.md @@ -0,0 +1,5 @@ +Bugfix: Numerous fixes to the owncloudsql storage driver + +The owncloudsql storage driver received numerous bugfixes and cleanups. + +https://github.com/cs3org/reva/pull/1932 diff --git a/pkg/storage/fs/owncloudsql/filecache/filecache.go b/pkg/storage/fs/owncloudsql/filecache/filecache.go index 44f6fbddcb..7408dd0480 100644 --- a/pkg/storage/fs/owncloudsql/filecache/filecache.go +++ b/pkg/storage/fs/owncloudsql/filecache/filecache.go @@ -70,7 +70,7 @@ func New(driver string, sqldb *sql.DB) (*Cache, error) { // GetNumericStorageID returns the database id for the given storage func (c *Cache) GetNumericStorageID(id string) (int, error) { - row := c.db.QueryRow("Select numeric_id from oc_storages where id = ?", id) + row := c.db.QueryRow("SELECT numeric_id FROM oc_storages WHERE id = ?", id) var nid int switch err := row.Scan(&nid); err { case nil: @@ -80,6 +80,79 @@ func (c *Cache) GetNumericStorageID(id string) (int, error) { } } +// CreateStorage creates a new storage and returns its numeric id +func (c *Cache) CreateStorage(id string) (int, error) { + tx, err := c.db.Begin() + if err != nil { + return -1, err + } + defer func() { _ = tx.Rollback() 
}() + + stmt, err := tx.Prepare("INSERT INTO oc_storages(id) VALUES(?)") + if err != nil { + return -1, err + } + defer stmt.Close() + + res, err := stmt.Exec(id) + if err != nil { + return -1, err + } + insertedID, err := res.LastInsertId() + if err != nil { + return -1, err + } + + data := map[string]interface{}{ + "path": "", + "etag": "", + "mimetype": "httpd/unix-directory", + } + _, err = c.doInsertOrUpdate(tx, int(insertedID), data, true) + if err != nil { + return -1, err + } + + err = tx.Commit() + if err != nil { + return -1, err + } + + return int(insertedID), err +} + +// GetStorageOwner returns the username of the owner of the given storage +func (c *Cache) GetStorageOwner(numericID interface{}) (string, error) { + numericID, err := toIntID(numericID) + if err != nil { + return "", err + } + row := c.db.QueryRow("SELECT id FROM oc_storages WHERE numeric_id = ?", numericID) + var id string + switch err := row.Scan(&id); err { + case nil: + return strings.TrimPrefix(id, "home::"), nil + default: + return "", err + } +} + +// GetStorageOwnerByFileID returns the username of the owner of the given entry +func (c *Cache) GetStorageOwnerByFileID(numericID interface{}) (string, error) { + numericID, err := toIntID(numericID) + if err != nil { + return "", err + } + row := c.db.QueryRow("SELECT id FROM oc_storages storages, oc_filecache cache WHERE storages.numeric_id = cache.storage AND cache.fileid = ?", numericID) + var id string + switch err := row.Scan(&id); err { + case nil: + return strings.TrimPrefix(id, "home::"), nil + default: + return "", err + } +} + // File represents an entry of the file cache type File struct { ID int @@ -87,6 +160,7 @@ type File struct { Parent int MimePart int MimeType int + MimeTypeString string Size int MTime int StorageMTime int @@ -114,9 +188,10 @@ type Scannable interface { } func (c *Cache) rowToFile(row Scannable) (*File, error) { - var fileid, storage, parent, mimetype, mimepart, size, mtime, storageMtime, encrypted, 
unencryptedSize, permissions int - var path, name, etag, checksum string - err := row.Scan(&fileid, &storage, &path, &parent, &permissions, &mimetype, &mimepart, &size, &mtime, &storageMtime, &encrypted, &unencryptedSize, &name, &etag, &checksum) + var fileid, storage, parent, mimetype, mimepart, size, mtime, storageMtime, encrypted, unencryptedSize int + var permissions sql.NullInt32 + var path, name, etag, checksum, mimetypestring sql.NullString + err := row.Scan(&fileid, &storage, &path, &parent, &permissions, &mimetype, &mimepart, &mimetypestring, &size, &mtime, &storageMtime, &encrypted, &unencryptedSize, &name, &etag, &checksum) if err != nil { return nil, err } @@ -124,19 +199,20 @@ func (c *Cache) rowToFile(row Scannable) (*File, error) { return &File{ ID: fileid, Storage: storage, - Path: path, + Path: path.String, Parent: parent, - Permissions: permissions, + Permissions: int(permissions.Int32), MimeType: mimetype, + MimeTypeString: mimetypestring.String, MimePart: mimepart, Size: size, MTime: mtime, StorageMTime: storageMtime, Encrypted: encrypted == 1, UnencryptedSize: unencryptedSize, - Name: name, - Etag: etag, - Checksum: checksum, + Name: name.String, + Etag: etag.String, + Checksum: checksum.String, }, nil } @@ -150,7 +226,14 @@ func (c *Cache) Get(s interface{}, p string) (*File, error) { phashBytes := md5.Sum([]byte(p)) phash := hex.EncodeToString(phashBytes[:]) - row := c.db.QueryRow("Select fileid, storage, path, parent, permissions, mimetype, mimepart, size, mtime, storage_mtime, encrypted, unencrypted_size, name, etag, checksum from oc_filecache where path_hash = ? and storage = ?", phash, storageID) + row := c.db.QueryRow(` + SELECT + fc.fileid, fc.storage, fc.path, fc.parent, fc.permissions, fc.mimetype, fc.mimepart, + mt.mimetype, fc.size, fc.mtime, fc.storage_mtime, fc.encrypted, fc.unencrypted_size, + fc.name, fc.etag, fc.checksum + FROM oc_filecache fc + LEFT JOIN oc_mimetypes mt ON fc.mimetype = mt.id + WHERE path_hash = ? 
AND storage = ?`, phash, storageID) return c.rowToFile(row) } @@ -161,7 +244,7 @@ func (c *Cache) Path(id interface{}) (string, error) { return "", err } - row := c.db.QueryRow("Select path from oc_filecache where fileid = ?", id) + row := c.db.QueryRow("SELECT path FROM oc_filecache WHERE fileid = ?", id) var path string err = row.Scan(&path) if err != nil { @@ -170,6 +253,38 @@ func (c *Cache) Path(id interface{}) (string, error) { return path, nil } +// List returns the list of entries below the given path +func (c *Cache) List(storage interface{}, p string) ([]*File, error) { + storageID, err := toIntID(storage) + if err != nil { + return nil, err + } + + rows, err := c.db.Query(` + SELECT + fc.fileid, fc.storage, fc.path, fc.parent, fc.permissions, fc.mimetype, fc.mimepart, + mt.mimetype, fc.size, fc.mtime, fc.storage_mtime, fc.encrypted, fc.unencrypted_size, + fc.name, fc.etag, fc.checksum + FROM oc_filecache fc + LEFT JOIN oc_mimetypes mt ON fc.mimetype = mt.id + WHERE path != '' AND path LIKE ? AND PATH NOT LIKE ? AND storage = ? 
+ `, p+"%", p+"%/%", storageID) + if err != nil { + return nil, err + } + defer rows.Close() + entries := []*File{} + for rows.Next() { + entry, err := c.rowToFile(rows) + if err != nil { + return nil, err + } + entries = append(entries, entry) + } + + return entries, nil +} + // Permissions returns the permissions for the specified storage/path func (c *Cache) Permissions(storage interface{}, p string) (*provider.ResourcePermissions, error) { entry, err := c.Get(storage, p) @@ -186,7 +301,27 @@ func (c *Cache) Permissions(storage interface{}, p string) (*provider.ResourcePe } // InsertOrUpdate creates or updates a cache entry -func (c *Cache) InsertOrUpdate(storage interface{}, data map[string]interface{}) (int, error) { +func (c *Cache) InsertOrUpdate(storage interface{}, data map[string]interface{}, allowEmptyParent bool) (int, error) { + tx, err := c.db.Begin() + if err != nil { + return -1, err + } + defer func() { _ = tx.Rollback() }() + + id, err := c.doInsertOrUpdate(tx, storage, data, allowEmptyParent) + if err != nil { + return -1, err + } + + err = tx.Commit() + if err != nil { + return -1, err + } + + return id, err +} + +func (c *Cache) doInsertOrUpdate(tx *sql.Tx, storage interface{}, data map[string]interface{}, allowEmptyParent bool) (int, error) { storageID, err := toIntID(storage) if err != nil { return -1, err @@ -208,10 +343,15 @@ func (c *Cache) InsertOrUpdate(storage interface{}, data map[string]interface{}) parentPath = "" } parent, err := c.Get(storageID, parentPath) - if err != nil { - return -1, fmt.Errorf("could not find parent %s, %s, %v, %w", parentPath, path, parent, err) + if err == nil { + data["parent"] = parent.ID + } else { + if allowEmptyParent { + data["parent"] = -1 + } else { + return -1, fmt.Errorf("could not find parent %s, %s, %v, %w", parentPath, path, parent, err) + } } - data["parent"] = parent.ID data["name"] = filepath.Base(path) if _, exists := data["checksum"]; !exists { data["checksum"] = "" @@ -235,10 +375,10 @@ 
func (c *Cache) InsertOrUpdate(storage interface{}, data map[string]interface{}) parts := strings.Split(v.(string), "/") columns = append(columns, "mimetype") values = append(values, v) - placeholders = append(placeholders, "(SELECT id from oc_mimetypes where mimetype=?)") + placeholders = append(placeholders, "(SELECT id FROM oc_mimetypes WHERE mimetype=?)") columns = append(columns, "mimepart") values = append(values, parts[0]) - placeholders = append(placeholders, "(SELECT id from oc_mimetypes where mimetype=?)") + placeholders = append(placeholders, "(SELECT id FROM oc_mimetypes WHERE mimetype=?)") continue } @@ -247,7 +387,7 @@ func (c *Cache) InsertOrUpdate(storage interface{}, data map[string]interface{}) placeholders = append(placeholders, "?") } - err = c.InsertMimetype(data["mimetype"].(string)) + err = c.insertMimetype(tx, data["mimetype"].(string)) if err != nil { return -1, err } @@ -268,7 +408,7 @@ func (c *Cache) InsertOrUpdate(storage interface{}, data map[string]interface{}) } query += strings.Join(updates, ",") - stmt, err := c.db.Prepare(query) + stmt, err := tx.Prepare(query) if err != nil { return -1, err } @@ -315,7 +455,7 @@ func (c *Cache) Copy(storage interface{}, sourcePath, targetPath string) (int, e "encrypted": source.Encrypted, "unencrypted_size": source.UnencryptedSize, } - return c.InsertOrUpdate(storage, data) + return c.InsertOrUpdate(storage, data, false) } // Move moves the specified entry to the target path @@ -339,7 +479,7 @@ func (c *Cache) Move(storage interface{}, sourcePath, targetPath string) error { return err } defer func() { _ = tx.Rollback() }() - stmt, err := tx.Prepare("UPDATE oc_filecache SET parent=?, path=?, name=?, path_hash=? WHERE storage = ? and fileid=?") + stmt, err := tx.Prepare("UPDATE oc_filecache SET parent=?, path=?, name=?, path_hash=? WHERE storage = ? 
AND fileid=?") if err != nil { return err } @@ -350,7 +490,7 @@ func (c *Cache) Move(storage interface{}, sourcePath, targetPath string) error { return err } - childRows, err := tx.Query("SELECT fileid, path from oc_filecache where parent = ?", source.ID) + childRows, err := tx.Query("SELECT fileid, path FROM oc_filecache WHERE parent = ?", source.ID) if err != nil { return err } @@ -380,6 +520,18 @@ func (c *Cache) Move(storage interface{}, sourcePath, targetPath string) error { return tx.Commit() } +// Purge removes the specified storage/path from the cache without putting it into the trash +func (c *Cache) Purge(storage interface{}, path string) error { + storageID, err := toIntID(storage) + if err != nil { + return err + } + phashBytes := md5.Sum([]byte(path)) + phash := hex.EncodeToString(phashBytes[:]) + _, err = c.db.Exec("DELETE FROM oc_filecache WHERE storage = ? and path_hash = ?", storageID, phash) + return err +} + // Delete removes the specified storage/path from the cache func (c *Cache) Delete(storage interface{}, user, path, trashPath string) error { err := c.Move(storage, path, trashPath) @@ -411,7 +563,7 @@ func (c *Cache) Delete(storage interface{}, user, path, trashPath string) error // GetRecycleItem returns the specified recycle item func (c *Cache) GetRecycleItem(user, path string, timestamp int) (*TrashItem, error) { - row := c.db.QueryRow("SELECT auto_id, id, location FROM oc_files_trash WHERE id = ? and user = ? and timestamp = ?", path, user, timestamp) + row := c.db.QueryRow("SELECT auto_id, id, location FROM oc_files_trash WHERE id = ? AND user = ? 
AND timestamp = ?", path, user, timestamp) var autoID int var id, location string err := row.Scan(&autoID, &id, &location) @@ -428,9 +580,31 @@ func (c *Cache) GetRecycleItem(user, path string, timestamp int) (*TrashItem, er }, nil } -// PurgeRecycleItem deletes the specified item from the cache -func (c *Cache) PurgeRecycleItem(user, path string, timestamp int) error { - row := c.db.QueryRow("Select auto_id, location from oc_files_trash where id = ? and user = ? and timestamp = ?", path, user, timestamp) +// EmptyRecycle clears the recycle bin for the given user +func (c *Cache) EmptyRecycle(user string) error { + _, err := c.db.Exec("DELETE FROM oc_files_trash WHERE user = ?", user) + if err != nil { + return err + } + + storage, err := c.GetNumericStorageID("home::" + user) + if err != nil { + return err + } + + _, err = c.db.Exec("DELETE FROM oc_filecache WHERE storage = ? AND PATH LIKE ?", storage, "files_trashbin/%") + return err +} + +// DeleteRecycleItem deletes the specified item from the trash +func (c *Cache) DeleteRecycleItem(user, path string, timestamp int) error { + _, err := c.db.Exec("DELETE FROM oc_files_trash WHERE id = ? AND user = ? AND timestamp = ?", path, user, timestamp) + return err +} + +// PurgeRecycleItem deletes the specified item from the filecache and the trash +func (c *Cache) PurgeRecycleItem(user, path string, timestamp int, isVersionFile bool) error { + row := c.db.QueryRow("SELECT auto_id, location FROM oc_files_trash WHERE id = ? AND user = ? 
AND timestamp = ?", path, user, timestamp) var autoID int var location string err := row.Scan(&autoID, &location) @@ -447,7 +621,11 @@ func (c *Cache) PurgeRecycleItem(user, path string, timestamp int) error { if err != nil { return err } - item, err := c.Get(storage, filepath.Join("files_trashbin", "files", location, path+".d"+strconv.Itoa(timestamp))) + trashType := "files" + if isVersionFile { + trashType = "versions" + } + item, err := c.Get(storage, filepath.Join("files_trashbin", trashType, path+".d"+strconv.Itoa(timestamp))) if err != nil { return err } @@ -466,7 +644,7 @@ func (c *Cache) SetEtag(storage interface{}, path, etag string) error { if err != nil { return errors.Wrap(err, "could not find source") } - stmt, err := c.db.Prepare("UPDATE oc_filecache SET etag=? WHERE storage = ? and fileid=?") + stmt, err := c.db.Prepare("UPDATE oc_filecache SET etag=? WHERE storage = ? AND fileid=?") if err != nil { return err } @@ -474,20 +652,27 @@ func (c *Cache) SetEtag(storage interface{}, path, etag string) error { return err } -// InsertMimetype adds a new mimetype to the database -func (c *Cache) InsertMimetype(mimetype string) error { - stmt, err := c.db.Prepare("INSERT INTO oc_mimetypes(mimetype) VALUES(?)") - if err != nil { - return err +func (c *Cache) insertMimetype(tx *sql.Tx, mimetype string) error { + insertPart := func(v string) error { + stmt, err := tx.Prepare("INSERT INTO oc_mimetypes(mimetype) VALUES(?)") + if err != nil { + return err + } + _, err = stmt.Exec(v) + if err != nil { + if strings.Contains(err.Error(), "UNIQUE") || strings.Contains(err.Error(), "Error 1062") { + return nil // Already exists + } + return err + } + return nil } - _, err = stmt.Exec(mimetype) + parts := strings.Split(mimetype, "/") + err := insertPart(parts[0]) if err != nil { - if strings.Contains(err.Error(), "UNIQUE") || strings.Contains(err.Error(), "Error 1062") { - return nil // Already exists - } return err } - return nil + return insertPart(mimetype) } func 
toIntID(rid interface{}) (int, error) { diff --git a/pkg/storage/fs/owncloudsql/filecache/filecache_test.go b/pkg/storage/fs/owncloudsql/filecache/filecache_test.go index 8df48d4470..b00cc981c5 100644 --- a/pkg/storage/fs/owncloudsql/filecache/filecache_test.go +++ b/pkg/storage/fs/owncloudsql/filecache/filecache_test.go @@ -71,6 +71,37 @@ var _ = Describe("Filecache", func() { }) }) + Describe("GetStorageOwner", func() { + It("returns the owner", func() { + owner, err := cache.GetStorageOwner("1") + Expect(err).ToNot(HaveOccurred()) + Expect(owner).To(Equal("admin")) + }) + }) + + Describe("CreateStorage", func() { + It("creates the storage and a root item", func() { + id, err := cache.CreateStorage("bar") + Expect(err).ToNot(HaveOccurred()) + Expect(id > 0).To(BeTrue()) + + owner, err := cache.GetStorageOwner(id) + Expect(err).ToNot(HaveOccurred()) + Expect(owner).To(Equal("bar")) + + file, err := cache.Get(1, "") + Expect(err).ToNot(HaveOccurred()) + Expect(file).ToNot(BeNil()) + }) + }) + Describe("GetStorageOwnerByFileID", func() { + It("returns the owner", func() { + owner, err := cache.GetStorageOwnerByFileID("10") + Expect(err).ToNot(HaveOccurred()) + Expect(owner).To(Equal("admin")) + }) + }) + Describe("Get", func() { It("gets existing files", func() { path := "files/Photos/Portugal.jpg" @@ -83,6 +114,7 @@ var _ = Describe("Filecache", func() { Expect(file.Parent).To(Equal(9)) Expect(file.MimeType).To(Equal(6)) Expect(file.MimePart).To(Equal(5)) + Expect(file.MimeTypeString).To(Equal("image/jpeg")) Expect(file.Size).To(Equal(243733)) Expect(file.MTime).To(Equal(1619007009)) Expect(file.StorageMTime).To(Equal(1619007009)) @@ -94,6 +126,26 @@ var _ = Describe("Filecache", func() { }) }) + Describe("List", func() { + It("lists all entries", func() { + list, err := cache.List(1, "") + Expect(err).ToNot(HaveOccurred()) + Expect(len(list)).To(Equal(3)) + }) + + It("filters", func() { + list, err := cache.List(1, "files_trashbin/") + 
Expect(err).ToNot(HaveOccurred()) + Expect(len(list)).To(Equal(3)) + }) + + It("filters deep", func() { + list, err := cache.List(1, "files/Photos/") + Expect(err).ToNot(HaveOccurred()) + Expect(len(list)).To(Equal(3)) + }) + }) + Describe("Path", func() { It("returns the path", func() { path, err := cache.Path(10) @@ -115,21 +167,21 @@ var _ = Describe("Filecache", func() { "mimetype": "httpd/unix-directory", "etag": "abcdefg", } - _, err := cache.InsertOrUpdate(3, data) + _, err := cache.InsertOrUpdate(3, data, false) Expect(err).To(MatchError("missing required data")) data = map[string]interface{}{ "path": "files/Photos/foo.jpg", "etag": "abcdefg", } - _, err = cache.InsertOrUpdate(3, data) + _, err = cache.InsertOrUpdate(3, data, false) Expect(err).To(MatchError("missing required data")) data = map[string]interface{}{ "path": "files/Photos/foo.jpg", "mimetype": "httpd/unix-directory", } - _, err = cache.InsertOrUpdate(3, data) + _, err = cache.InsertOrUpdate(3, data, false) Expect(err).To(MatchError("missing required data")) }) @@ -139,7 +191,7 @@ var _ = Describe("Filecache", func() { "mimetype": "httpd/unix-directory", "etag": "abcdefg", } - id, err := cache.InsertOrUpdate(1, data) + id, err := cache.InsertOrUpdate(1, data, false) Expect(err).ToNot(HaveOccurred()) Expect(id).To(Equal(18)) @@ -164,7 +216,7 @@ var _ = Describe("Filecache", func() { "encrypted": true, "unencrypted_size": 2000, } - _, err := cache.InsertOrUpdate(1, data) + _, err := cache.InsertOrUpdate(1, data, false) Expect(err).ToNot(HaveOccurred()) entry, err := cache.Get(1, "files/Photos/foo.jpg") @@ -189,7 +241,7 @@ var _ = Describe("Filecache", func() { "etag": "abcdefg", } - _, err := cache.InsertOrUpdate(1, data) + _, err := cache.InsertOrUpdate(1, data, false) Expect(err).ToNot(HaveOccurred()) entry, err := cache.Get(1, "files/Photos/foo.jpg") @@ -205,7 +257,7 @@ var _ = Describe("Filecache", func() { "storage_mtime": 1617702483, } - _, err := cache.InsertOrUpdate(1, data) + _, err := 
cache.InsertOrUpdate(1, data, false) Expect(err).ToNot(HaveOccurred()) entry, err := cache.Get(1, "files/Photos/foo.jpg") @@ -221,7 +273,7 @@ var _ = Describe("Filecache", func() { "mimetype": "image/jpeg", } - _, err := cache.InsertOrUpdate(1, data) + _, err := cache.InsertOrUpdate(1, data, false) Expect(err).ToNot(HaveOccurred()) entry, err := cache.Get(1, "files/Photos/foo.jpg") @@ -238,7 +290,7 @@ var _ = Describe("Filecache", func() { "mimetype": "image/tiff", } - _, err := cache.InsertOrUpdate(1, data) + _, err := cache.InsertOrUpdate(1, data, false) Expect(err).ToNot(HaveOccurred()) entry, err := cache.Get(1, "files/Photos/foo.tiff") @@ -259,7 +311,7 @@ var _ = Describe("Filecache", func() { "mimetype": "httpd/unix-directory", "etag": "abcdefg", } - _, err := cache.InsertOrUpdate(1, data) + _, err := cache.InsertOrUpdate(1, data, false) Expect(err).ToNot(HaveOccurred()) }) @@ -268,7 +320,7 @@ var _ = Describe("Filecache", func() { Expect(err).ToNot(HaveOccurred()) data["etag"] = "12345" - id, err := cache.InsertOrUpdate(1, data) + id, err := cache.InsertOrUpdate(1, data, false) Expect(err).ToNot(HaveOccurred()) Expect(id).To(Equal(recordBefore.ID)) @@ -349,11 +401,11 @@ var _ = Describe("Filecache", func() { } trashPathBase = "Portugal.jpg" trashPathTimestamp = 1619007109 - trashPath = "files_trashbin/files/Photos/" + trashPathBase + ".d" + strconv.Itoa(trashPathTimestamp) + trashPath = "files_trashbin/files/" + trashPathBase + ".d" + strconv.Itoa(trashPathTimestamp) ) BeforeEach(func() { - _, err := cache.InsertOrUpdate(1, data) + _, err := cache.InsertOrUpdate(1, data, false) Expect(err).ToNot(HaveOccurred()) }) @@ -364,7 +416,7 @@ var _ = Describe("Filecache", func() { _, err = cache.Get(1, "files/Photos/Portugal.jpg") Expect(err).To(HaveOccurred()) - _, err = cache.Get(1, "files_trashbin/files/Photos/Portugal.jpg.d1619007109") + _, err = cache.Get(1, "files_trashbin/files/Portugal.jpg.d1619007109") Expect(err).ToNot(HaveOccurred()) }) @@ -386,6 +438,43 
@@ var _ = Describe("Filecache", func() { }) }) + Describe("EmptyRecycle", func() { + It("clears the recycle bin", func() { + err := cache.Delete(1, "admin", filePath, trashPath) + Expect(err).ToNot(HaveOccurred()) + + err = cache.EmptyRecycle("admin") + Expect(err).ToNot(HaveOccurred()) + + _, err = cache.GetRecycleItem("admin", trashPathBase, trashPathTimestamp) + Expect(err).To(HaveOccurred()) + }) + }) + + Describe("DeleteRecycleItem", func() { + It("removes the item from the trash", func() { + err := cache.Delete(1, "admin", filePath, trashPath) + Expect(err).ToNot(HaveOccurred()) + + err = cache.DeleteRecycleItem("admin", trashPathBase, trashPathTimestamp) + Expect(err).ToNot(HaveOccurred()) + + _, err = cache.GetRecycleItem("admin", trashPathBase, trashPathTimestamp) + Expect(err).To(HaveOccurred()) + }) + + It("does not remove the item from the file cache", func() { + err := cache.Delete(1, "admin", filePath, trashPath) + Expect(err).ToNot(HaveOccurred()) + + err = cache.DeleteRecycleItem("admin", trashPathBase, trashPathTimestamp) + Expect(err).ToNot(HaveOccurred()) + + _, err = cache.Get(1, trashPath) + Expect(err).ToNot(HaveOccurred()) + }) + }) + Describe("PurgeRecycleItem", func() { It("removes the item from the database", func() { err := cache.Delete(1, "admin", filePath, trashPath) @@ -394,7 +483,7 @@ var _ = Describe("Filecache", func() { _, err = cache.GetRecycleItem("admin", trashPathBase, trashPathTimestamp) Expect(err).ToNot(HaveOccurred()) - err = cache.PurgeRecycleItem("admin", trashPathBase, trashPathTimestamp) + err = cache.PurgeRecycleItem("admin", trashPathBase, trashPathTimestamp, false) Expect(err).ToNot(HaveOccurred()) _, err = cache.GetRecycleItem("admin", trashPathBase, trashPathTimestamp) @@ -405,10 +494,10 @@ var _ = Describe("Filecache", func() { err := cache.Delete(1, "admin", filePath, trashPath) Expect(err).ToNot(HaveOccurred()) - err = cache.PurgeRecycleItem("admin", trashPathBase, trashPathTimestamp) + err = 
cache.PurgeRecycleItem("admin", trashPathBase, trashPathTimestamp, false) Expect(err).ToNot(HaveOccurred()) - _, err = cache.Get(1, "files_trashbin/files/Photos.d1619007109") + _, err = cache.Get(1, trashPath) Expect(err).To(HaveOccurred()) }) @@ -419,7 +508,7 @@ var _ = Describe("Filecache", func() { _, err = cache.Get(1, "files_trashbin/files/Photos.d1619007109/Portugal.jpg") Expect(err).ToNot(HaveOccurred()) - err = cache.PurgeRecycleItem("admin", "Photos", 1619007109) + err = cache.PurgeRecycleItem("admin", "Photos", 1619007109, false) Expect(err).ToNot(HaveOccurred()) _, err = cache.Get(1, "files_trashbin/files/Photos.d1619007109/Portugal.jpg") @@ -436,7 +525,7 @@ var _ = Describe("Filecache", func() { "mimetype": "httpd/unix-directory", "etag": "abcdefg", } - _, err := cache.InsertOrUpdate(1, parentData) + _, err := cache.InsertOrUpdate(1, parentData, false) Expect(err).ToNot(HaveOccurred()) } diff --git a/pkg/storage/fs/owncloudsql/owncloudsql.go b/pkg/storage/fs/owncloudsql/owncloudsql.go index 6a004f46e2..364977f333 100644 --- a/pkg/storage/fs/owncloudsql/owncloudsql.go +++ b/pkg/storage/fs/owncloudsql/owncloudsql.go @@ -22,6 +22,7 @@ import ( "context" "crypto/md5" "crypto/sha1" + "database/sql" "fmt" "hash/adler32" "io" @@ -40,6 +41,7 @@ import ( provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" "github.com/cs3org/reva/internal/grpc/services/storageprovider" + conversions "github.com/cs3org/reva/internal/http/services/owncloud/ocs/conversions" "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/logger" @@ -49,7 +51,6 @@ import ( "github.com/cs3org/reva/pkg/storage" "github.com/cs3org/reva/pkg/storage/fs/owncloudsql/filecache" "github.com/cs3org/reva/pkg/storage/fs/registry" - "github.com/cs3org/reva/pkg/storage/utils/ace" "github.com/cs3org/reva/pkg/storage/utils/chunking" "github.com/cs3org/reva/pkg/storage/utils/templates" 
"github.com/cs3org/reva/pkg/user" @@ -68,13 +69,10 @@ const ( // "user.oc." ocPrefix string = "user.oc." - // SharePrefix is the prefix for sharing related extended attributes - sharePrefix string = ocPrefix + "grant." // grants are similar to acls, but they are not propagated down the tree when being changed - trashOriginPrefix string = ocPrefix + "o" - mdPrefix string = ocPrefix + "md." // arbitrary metadata - favPrefix string = ocPrefix + "fav." // favorite flag, per user - etagPrefix string = ocPrefix + "etag." // allow overriding a calculated etag with one from the extended attributes - checksumsKey string = "http://owncloud.org/ns/checksums" + mdPrefix string = ocPrefix + "md." // arbitrary metadata + favPrefix string = ocPrefix + "fav." // favorite flag, per user + etagPrefix string = ocPrefix + "etag." // allow overriding a calculated etag with one from the extended attributes + checksumsKey string = "http://owncloud.org/ns/checksums" ) var defaultPermissions *provider.ResourcePermissions = &provider.ResourcePermissions{ @@ -183,28 +181,28 @@ func New(m map[string]interface{}) (storage.FS, error) { return nil, err } - return &ocfs{ + return &owncloudsqlfs{ c: c, chunkHandler: chunking.NewChunkHandler(c.UploadInfoDir), filecache: filecache, }, nil } -type ocfs struct { +type owncloudsqlfs struct { c *config chunkHandler *chunking.ChunkHandler filecache *filecache.Cache } -func (fs *ocfs) Shutdown(ctx context.Context) error { +func (fs *owncloudsqlfs) Shutdown(ctx context.Context) error { return nil } -// owncloud stores files in the files subfolder +// owncloudsql stores files in the files subfolder // the incoming path starts with /, so we need to insert the files subfolder into the path // and prefix the data directory // TODO the path handed to a storage provider should not contain the username -func (fs *ocfs) toInternalPath(ctx context.Context, sp string) (ip string) { +func (fs *owncloudsqlfs) toInternalPath(ctx context.Context, sp string) (ip string) 
{ if fs.c.EnableHome { u := user.ContextMustGetUser(ctx) layout := templates.WithUser(u, fs.c.UserLayout) @@ -242,48 +240,11 @@ func (fs *ocfs) toInternalPath(ctx context.Context, sp string) (ip string) { return } -func (fs *ocfs) toInternalShadowPath(ctx context.Context, sp string) (internal string) { - if fs.c.EnableHome { - u := user.ContextMustGetUser(ctx) - layout := templates.WithUser(u, fs.c.UserLayout) - internal = filepath.Join(fs.c.DataDirectory, layout, "shadow_files", sp) - } else { - // trim all / - sp = strings.Trim(sp, "/") - // p = "" or - // p = or - // p = /foo/bar.txt - segments := strings.SplitN(sp, "/", 2) - - if len(segments) == 1 && segments[0] == "" { - internal = fs.c.DataDirectory - return - } - - // parts[0] contains the username or userid. - u, err := fs.getUser(ctx, segments[0]) - if err != nil { - // TODO return invalid internal path? - return - } - layout := templates.WithUser(u, fs.c.UserLayout) - - if len(segments) == 1 { - // parts = "" - internal = filepath.Join(fs.c.DataDirectory, layout, "shadow_files") - } else { - // parts = "", "foo/bar.txt" - internal = filepath.Join(fs.c.DataDirectory, layout, "shadow_files", segments[1]) - } - } - return -} - -// ownloud stores versions in the files_versions subfolder +// owncloudsql stores versions in the files_versions subfolder // the incoming path starts with /, so we need to insert the files subfolder into the path // and prefix the data directory // TODO the path handed to a storage provider should not contain the username -func (fs *ocfs) getVersionsPath(ctx context.Context, ip string) string { +func (fs *owncloudsqlfs) getVersionsPath(ctx context.Context, ip string) string { // ip = /path/to/data//files/foo/bar.txt // remove data dir if fs.c.DataDirectory != "/" { @@ -314,18 +275,22 @@ func (fs *ocfs) getVersionsPath(ctx context.Context, ip string) string { } -// owncloud stores trashed items in the files_trashbin subfolder of a users home -func (fs *ocfs) getRecyclePath(ctx 
context.Context) (string, error) { +// owncloudsql stores trashed items in the files_trashbin subfolder of a users home +func (fs *owncloudsqlfs) getRecyclePath(ctx context.Context) (string, error) { u, ok := user.ContextGetUser(ctx) if !ok { err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") return "", err } layout := templates.WithUser(u, fs.c.UserLayout) - return filepath.Join(fs.c.DataDirectory, layout, "files_trashbin/files"), nil + return fs.getRecyclePathForUser(layout) } -func (fs *ocfs) getVersionRecyclePath(ctx context.Context) (string, error) { +func (fs *owncloudsqlfs) getRecyclePathForUser(user string) (string, error) { + return filepath.Join(fs.c.DataDirectory, user, "files_trashbin/files"), nil +} + +func (fs *owncloudsqlfs) getVersionRecyclePath(ctx context.Context) (string, error) { u, ok := user.ContextGetUser(ctx) if !ok { err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") @@ -335,17 +300,15 @@ func (fs *ocfs) getVersionRecyclePath(ctx context.Context) (string, error) { return filepath.Join(fs.c.DataDirectory, layout, "files_trashbin/versions"), nil } -func (fs *ocfs) toDatabasePath(ctx context.Context, ip string) string { - // TODO aduffeck: add support for non-home layout - u := user.ContextMustGetUser(ctx) - layout := templates.WithUser(u, fs.c.UserLayout) - trim := filepath.Join(fs.c.DataDirectory, layout) +func (fs *owncloudsqlfs) toDatabasePath(ip string) string { + owner := fs.getOwner(ip) + trim := filepath.Join(fs.c.DataDirectory, owner) p := strings.TrimPrefix(ip, trim) p = strings.TrimPrefix(p, "/") return p } -func (fs *ocfs) toStoragePath(ctx context.Context, ip string) (sp string) { +func (fs *owncloudsqlfs) toStoragePath(ctx context.Context, ip string) (sp string) { if fs.c.EnableHome { u := user.ContextMustGetUser(ctx) layout := templates.WithUser(u, fs.c.UserLayout) @@ -374,48 +337,16 @@ func (fs *ocfs) toStoragePath(ctx context.Context, ip string) (sp 
string) { case 3: sp = filepath.Join("/", segments[1]) default: - sp = filepath.Join("/", segments[1], segments[3]) + sp = filepath.Join(segments[1], segments[3]) } } log := appctx.GetLogger(ctx) - log.Debug().Str("driver", "ocfs").Str("ipath", ip).Str("spath", sp).Msg("toStoragePath") - return -} - -func (fs *ocfs) toStorageShadowPath(ctx context.Context, ip string) (sp string) { - if fs.c.EnableHome { - u := user.ContextMustGetUser(ctx) - layout := templates.WithUser(u, fs.c.UserLayout) - trim := filepath.Join(fs.c.DataDirectory, layout, "shadow_files") - sp = strings.TrimPrefix(ip, trim) - } else { - // ip = /data//shadow_files/foo/bar.txt - // remove data dir - if fs.c.DataDirectory != "/" { - // fs.c.DataDirectory is a clean path, so it never ends in / - ip = strings.TrimPrefix(ip, fs.c.DataDirectory) - // ip = //shadow_files/foo/bar.txt - } - - segments := strings.SplitN(ip, "/", 4) - // parts = "", "", "shadow_files", "foo/bar.txt" - switch len(segments) { - case 1: - sp = "/" - case 2: - sp = filepath.Join("/", segments[1]) - case 3: - sp = filepath.Join("/", segments[1]) - default: - sp = filepath.Join("/", segments[1], segments[3]) - } - } - appctx.GetLogger(ctx).Debug().Str("driver", "ocfs").Str("ipath", ip).Str("spath", sp).Msg("toStorageShadowPath") + log.Debug().Str("driver", "owncloudsql").Str("ipath", ip).Str("spath", sp).Msg("toStoragePath") return } // TODO the owner needs to come from a different place -func (fs *ocfs) getOwner(ip string) string { +func (fs *owncloudsqlfs) getOwner(ip string) string { ip = strings.TrimPrefix(ip, fs.c.DataDirectory) parts := strings.SplitN(ip, "/", 3) if len(parts) > 1 { @@ -425,7 +356,7 @@ func (fs *ocfs) getOwner(ip string) string { } // TODO cache user lookup -func (fs *ocfs) getUser(ctx context.Context, usernameOrID string) (id *userpb.User, err error) { +func (fs *owncloudsqlfs) getUser(ctx context.Context, usernameOrID string) (id *userpb.User, err error) { u := user.ContextMustGetUser(ctx) // check if 
username matches and id is set if u.Username == usernameOrID && u.Id != nil && u.Id.OpaqueId != "" { @@ -482,7 +413,7 @@ func (fs *ocfs) getUser(ctx context.Context, usernameOrID string) (id *userpb.Us } // permissionSet returns the permission set for the current user -func (fs *ocfs) permissionSet(ctx context.Context, owner *userpb.UserId) *provider.ResourcePermissions { +func (fs *owncloudsqlfs) permissionSet(ctx context.Context, owner *userpb.UserId) *provider.ResourcePermissions { if owner == nil { return &provider.ResourcePermissions{ Stat: true, @@ -545,26 +476,19 @@ func (fs *ocfs) permissionSet(ctx context.Context, owner *userpb.UserId) *provid } } -func (fs *ocfs) getUserStorage(ctx context.Context) (int, error) { - user, ok := user.ContextGetUser(ctx) - if !ok { - return -1, fmt.Errorf("could not get user for context") - } - return fs.filecache.GetNumericStorageID("home::" + user.Username) +func (fs *owncloudsqlfs) getStorage(ip string) (int, error) { + return fs.filecache.GetNumericStorageID("home::" + fs.getOwner(ip)) } -func (fs *ocfs) convertToResourceInfo(ctx context.Context, fi os.FileInfo, ip string, sp string, mdKeys []string) (*provider.ResourceInfo, error) { - storage, err := fs.getUserStorage(ctx) - if err != nil { - return nil, err - } - - p := fs.toDatabasePath(ctx, ip) - cacheEntry, err := fs.filecache.Get(storage, p) +func (fs *owncloudsqlfs) getUserStorage(user string) (int, error) { + id, err := fs.filecache.GetNumericStorageID("home::" + user) if err != nil { - return nil, err + id, err = fs.filecache.CreateStorage("home::" + user) } + return id, err +} +func (fs *owncloudsqlfs) convertToResourceInfo(ctx context.Context, entry *filecache.File, ip string, mdKeys []string) (*provider.ResourceInfo, error) { mdKeysMap := make(map[string]struct{}) for _, k := range mdKeys { mdKeysMap[k] = struct{}{} @@ -575,16 +499,16 @@ func (fs *ocfs) convertToResourceInfo(ctx context.Context, fi os.FileInfo, ip st returnAllKeys = true } + isDir := 
entry.MimeTypeString == "httpd/unix-directory" ri := &provider.ResourceInfo{ - Id: &provider.ResourceId{OpaqueId: strconv.Itoa(cacheEntry.ID)}, - Path: sp, - Type: getResourceType(fi.IsDir()), - Etag: cacheEntry.Etag, - MimeType: mime.Detect(fi.IsDir(), ip), - Size: uint64(fi.Size()), + Id: &provider.ResourceId{OpaqueId: strconv.Itoa(entry.ID)}, + Path: fs.toStoragePath(ctx, ip), + Type: getResourceType(isDir), + Etag: entry.Etag, + MimeType: entry.MimeTypeString, + Size: uint64(entry.Size), Mtime: &types.Timestamp{ - Seconds: uint64(fi.ModTime().Unix()), - // TODO read nanos from where? Nanos: fi.MTimeNanos, + Seconds: uint64(entry.MTime), }, ArbitraryMetadata: &provider.ArbitraryMetadata{ Metadata: map[string]string{}, // TODO aduffeck: which metadata needs to go in here? @@ -600,26 +524,20 @@ func (fs *ocfs) convertToResourceInfo(ctx context.Context, fi os.FileInfo, ip st ri.PermissionSet = fs.permissionSet(ctx, ri.Owner) // checksums - if !fi.IsDir() { + if !isDir { if _, checksumRequested := mdKeysMap[checksumsKey]; returnAllKeys || checksumRequested { // TODO which checksum was requested? sha1 adler32 or md5? for now hardcode sha1? 
- readChecksumIntoResourceChecksum(ctx, cacheEntry.Checksum, storageprovider.XSSHA1, ri) - readChecksumIntoOpaque(ctx, cacheEntry.Checksum, storageprovider.XSMD5, ri) + readChecksumIntoResourceChecksum(ctx, entry.Checksum, storageprovider.XSSHA1, ri) + readChecksumIntoOpaque(ctx, entry.Checksum, storageprovider.XSMD5, ri) readChecksumIntoOpaque(ctx, ip, storageprovider.XSAdler32, ri) } } return ri, nil } -func getResourceType(isDir bool) provider.ResourceType { - if isDir { - return provider.ResourceType_RESOURCE_TYPE_CONTAINER - } - return provider.ResourceType_RESOURCE_TYPE_FILE -} // GetPathByID returns the storage relative path for the file id, without the internal namespace -func (fs *ocfs) GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) { +func (fs *owncloudsqlfs) GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) { ip, err := fs.filecache.Path(id.OpaqueId) if err != nil { return "", err @@ -641,7 +559,7 @@ func (fs *ocfs) GetPathByID(ctx context.Context, id *provider.ResourceId) (strin } // resolve takes in a request path or request id and converts it to an internal path. 
-func (fs *ocfs) resolve(ctx context.Context, ref *provider.Reference) (string, error) { +func (fs *owncloudsqlfs) resolve(ctx context.Context, ref *provider.Reference) (string, error) { if ref.GetResourceId() != nil { p, err := fs.filecache.Path(ref.GetResourceId().OpaqueId) @@ -650,11 +568,11 @@ func (fs *ocfs) resolve(ctx context.Context, ref *provider.Reference) (string, e } p = strings.TrimPrefix(p, "files/") if !fs.c.EnableHome { - u, ok := user.ContextGetUser(ctx) - if !ok { - return "", fmt.Errorf("could not infer user from context") + owner, err := fs.filecache.GetStorageOwnerByFileID(ref.GetResourceId().OpaqueId) + if err != nil { + return "", err } - p = filepath.Join(u.Username, p) + p = filepath.Join(owner, p) } return fs.toInternalPath(ctx, p), nil @@ -668,73 +586,33 @@ func (fs *ocfs) resolve(ctx context.Context, ref *provider.Reference) (string, e return "", fmt.Errorf("invalid reference %+v", ref) } -func (fs *ocfs) AddGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return errors.Wrap(err, "owncloudsql: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.AddGrant { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "owncloudsql: error reading permissions") - } - - e := ace.FromGrant(g) - principal, value := e.Marshal() - if err := xattr.Set(ip, sharePrefix+principal, value); err != nil { - return err - } - return fs.propagate(ctx, ip) -} - -// extractACEsFromAttrs reads ACEs in the list of attrs from the file -func extractACEsFromAttrs(ctx context.Context, ip string, attrs []string) (entries []*ace.ACE) { - log := appctx.GetLogger(ctx) - entries = []*ace.ACE{} - for i := range attrs { - if strings.HasPrefix(attrs[i], sharePrefix) { - var value []byte - var err error - if value, err 
= xattr.Get(ip, attrs[i]); err != nil { - log.Error().Err(err).Str("attr", attrs[i]).Msg("could not read attribute") - continue - } - var e *ace.ACE - principal := attrs[i][len(sharePrefix):] - if e, err = ace.Unmarshal(principal, value); err != nil { - log.Error().Err(err).Str("principal", principal).Str("attr", attrs[i]).Msg("could not unmarshal ace") - continue - } - entries = append(entries, e) - } - } - return -} - -func (fs *ocfs) readPermissions(ctx context.Context, ip string) (p *provider.ResourcePermissions, err error) { +func (fs *owncloudsqlfs) readPermissions(ctx context.Context, ip string) (p *provider.ResourcePermissions, err error) { u, ok := user.ContextGetUser(ctx) if !ok { appctx.GetLogger(ctx).Debug().Str("ipath", ip).Msg("no user in context, returning default permissions") return defaultPermissions, nil } // check if the current user is the owner - if fs.getOwner(ip) == u.Username { + owner := fs.getOwner(ip) + if owner == u.Username { appctx.GetLogger(ctx).Debug().Str("ipath", ip).Msg("user is owner, returning owner permissions") return ownerPermissions, nil } - storageID, err := fs.getUserStorage(ctx) + // otherwise this is a share + ownerStorageID, err := fs.filecache.GetNumericStorageID("home::" + owner) + if err != nil { + return nil, err + } + entry, err := fs.filecache.Get(ownerStorageID, fs.toDatabasePath(ip)) + if err != nil { + return nil, err + } + perms, err := conversions.NewPermissions(entry.Permissions) if err != nil { return nil, err } - return fs.filecache.Permissions(storageID, fs.toDatabasePath(ctx, ip)) + return conversions.RoleFromOCSPermissions(perms).CS3ResourcePermissions(), nil } // The os not exists error is buried inside the xattr error, @@ -748,125 +626,46 @@ func isNotFound(err error) bool { return false } -func (fs *ocfs) ListGrants(ctx context.Context, ref *provider.Reference) (grants []*provider.Grant, err error) { - log := appctx.GetLogger(ctx) - var ip string - if ip, err = fs.resolve(ctx, ref); err != nil { - 
return nil, errors.Wrap(err, "owncloudsql: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListGrants { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return nil, errors.Wrap(err, "owncloudsql: error reading permissions") - } - - var attrs []string - if attrs, err = xattr.List(ip); err != nil { - // TODO err might be a not exists - log.Error().Err(err).Msg("error listing attributes") - return nil, err - } - - log.Debug().Interface("attrs", attrs).Msg("read attributes") - - aces := extractACEsFromAttrs(ctx, ip, attrs) - - grants = make([]*provider.Grant, 0, len(aces)) - for i := range aces { - grants = append(grants, aces[i].Grant()) - } - - return grants, nil +func (fs *owncloudsqlfs) ListGrants(ctx context.Context, ref *provider.Reference) (grants []*provider.Grant, err error) { + return []*provider.Grant{}, nil // nop } -func (fs *ocfs) RemoveGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { - - var ip string - if ip, err = fs.resolve(ctx, ref); err != nil { - return errors.Wrap(err, "owncloudsql: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListContainer { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "owncloudsql: error reading permissions") - } - - var attr string - if g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP { - attr = sharePrefix + "g:" + g.Grantee.GetGroupId().OpaqueId - } else { - attr = sharePrefix + "u:" + g.Grantee.GetUserId().OpaqueId - } - - if err = xattr.Remove(ip, attr); err != nil { - return - } - - return fs.propagate(ctx, ip) +func (fs *owncloudsqlfs) AddGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) 
error { + return nil // nop } -func (fs *ocfs) UpdateGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return errors.Wrap(err, "owncloudsql: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.UpdateGrant { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "owncloudsql: error reading permissions") - } +func (fs *owncloudsqlfs) RemoveGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { + return nil // nop +} - e := ace.FromGrant(g) - principal, value := e.Marshal() - if err := xattr.Set(ip, sharePrefix+principal, value); err != nil { - return err - } - return fs.propagate(ctx, ip) +func (fs *owncloudsqlfs) UpdateGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { + return nil // nop } -func (fs *ocfs) GetQuota(ctx context.Context) (uint64, uint64, error) { +func (fs *owncloudsqlfs) GetQuota(ctx context.Context) (uint64, uint64, error) { return 0, 0, nil } -func (fs *ocfs) CreateHome(ctx context.Context) error { +func (fs *owncloudsqlfs) CreateHome(ctx context.Context) error { u, ok := user.ContextGetUser(ctx) if !ok { err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") return err } - layout := templates.WithUser(u, fs.c.UserLayout) + return fs.createHomeForUser(ctx, templates.WithUser(u, fs.c.UserLayout)) +} +func (fs *owncloudsqlfs) createHomeForUser(ctx context.Context, user string) error { homePaths := []string{ - filepath.Join(fs.c.DataDirectory, layout, "files"), - filepath.Join(fs.c.DataDirectory, layout, "files_trashbin"), - filepath.Join(fs.c.DataDirectory, layout, "files_trashbin/files"), - filepath.Join(fs.c.DataDirectory, layout, "files_trashbin/versions"), - filepath.Join(fs.c.DataDirectory, 
layout, "uploads"), - filepath.Join(fs.c.DataDirectory, layout, "shadow_files"), + filepath.Join(fs.c.DataDirectory, user), + filepath.Join(fs.c.DataDirectory, user, "files"), + filepath.Join(fs.c.DataDirectory, user, "files_trashbin"), + filepath.Join(fs.c.DataDirectory, user, "files_trashbin/files"), + filepath.Join(fs.c.DataDirectory, user, "files_trashbin/versions"), + filepath.Join(fs.c.DataDirectory, user, "uploads"), } - storageID, err := fs.getUserStorage(ctx) + storageID, err := fs.getUserStorage(user) if err != nil { return err } @@ -880,28 +679,29 @@ func (fs *ocfs) CreateHome(ctx context.Context) error { return err } data := map[string]interface{}{ - "path": fs.toDatabasePath(ctx, v), + "path": fs.toDatabasePath(v), "etag": calcEtag(ctx, fi), "mimetype": "httpd/unix-directory", } - _, err = fs.filecache.InsertOrUpdate(storageID, data) + + allowEmptyParent := v == filepath.Join(fs.c.DataDirectory, user) // the root doesn't have a parent + _, err = fs.filecache.InsertOrUpdate(storageID, data, allowEmptyParent) if err != nil { return err } } - return nil } // If home is enabled, the relative home is always the empty string -func (fs *ocfs) GetHome(ctx context.Context) (string, error) { +func (fs *owncloudsqlfs) GetHome(ctx context.Context) (string, error) { if !fs.c.EnableHome { return "", errtypes.NotSupported("owncloudsql: get home not supported") } return "", nil } -func (fs *ocfs) CreateDir(ctx context.Context, sp string) (err error) { +func (fs *owncloudsqlfs) CreateDir(ctx context.Context, sp string) (err error) { ip := fs.toInternalPath(ctx, sp) // check permissions of parent dir @@ -929,62 +729,38 @@ func (fs *ocfs) CreateDir(ctx context.Context, sp string) (err error) { return err } mtime := time.Now().Unix() + + permissions := 31 // 1: READ, 2: UPDATE, 4: CREATE, 8: DELETE, 16: SHARE + if perm, err := fs.readPermissions(ctx, filepath.Dir(ip)); err == nil { + permissions = int(conversions.RoleFromResourcePermissions(perm).OCSPermissions()) // 
inherit permissions of parent + } data := map[string]interface{}{ - "path": fs.toDatabasePath(ctx, ip), + "path": fs.toDatabasePath(ip), "etag": calcEtag(ctx, fi), "mimetype": "httpd/unix-directory", - "permissions": 31, // 1: READ, 2: UPDATE, 4: CREATE, 8: DELETE, 16: SHARE + "permissions": permissions, "mtime": mtime, "storage_mtime": mtime, } - storageID, err := fs.getUserStorage(ctx) + storageID, err := fs.getStorage(ip) if err != nil { return err } - _, err = fs.filecache.InsertOrUpdate(storageID, data) + _, err = fs.filecache.InsertOrUpdate(storageID, data, false) if err != nil { if err != nil { return err } } - return fs.propagate(ctx, ip) -} - -func (fs *ocfs) isShareFolderChild(sp string) bool { - return strings.HasPrefix(sp, fs.c.ShareFolder) -} - -func (fs *ocfs) isShareFolderRoot(sp string) bool { - return sp == fs.c.ShareFolder + return fs.propagate(ctx, filepath.Dir(ip)) } -func (fs *ocfs) CreateReference(ctx context.Context, sp string, targetURI *url.URL) error { - if !fs.isShareFolderChild(sp) { - return errtypes.PermissionDenied("owncloudsql: cannot create references outside the share folder: share_folder=" + "/Shares" + " path=" + sp) - } - - ip := fs.toInternalShadowPath(ctx, sp) - // TODO check permission? 
- - dir, _ := filepath.Split(ip) - if err := os.MkdirAll(dir, 0700); err != nil { - return errors.Wrapf(err, "owncloudsql: error creating shadow path %s", dir) - } - - f, err := os.Create(ip) - if err != nil { - return errors.Wrapf(err, "owncloudsql: error creating shadow file %s", ip) - } - - err = xattr.FSet(f, mdPrefix+"target", []byte(targetURI.String())) - if err != nil { - return errors.Wrapf(err, "owncloudsql: error setting the target %s on the shadow file %s", targetURI.String(), ip) - } - return nil +func (fs *owncloudsqlfs) CreateReference(ctx context.Context, sp string, targetURI *url.URL) error { + return errtypes.NotSupported("owncloudsql: operation not supported") } -func (fs *ocfs) setMtime(ctx context.Context, ip string, mtime string) error { +func (fs *owncloudsqlfs) setMtime(ctx context.Context, ip string, mtime string) error { log := appctx.GetLogger(ctx) if mt, err := parseMTime(mtime); err == nil { // updating mtime also updates atime @@ -1004,7 +780,7 @@ func (fs *ocfs) setMtime(ctx context.Context, ip string, mtime string) error { } return nil } -func (fs *ocfs) SetArbitraryMetadata(ctx context.Context, ref *provider.Reference, md *provider.ArbitraryMetadata) (err error) { +func (fs *owncloudsqlfs) SetArbitraryMetadata(ctx context.Context, ref *provider.Reference, md *provider.ArbitraryMetadata) (err error) { log := appctx.GetLogger(ctx) var ip string @@ -1147,7 +923,7 @@ func parseMTime(v string) (t time.Time, err error) { return time.Unix(sec, nsec), err } -func (fs *ocfs) UnsetArbitraryMetadata(ctx context.Context, ref *provider.Reference, keys []string) (err error) { +func (fs *owncloudsqlfs) UnsetArbitraryMetadata(ctx context.Context, ref *provider.Reference, keys []string) (err error) { log := appctx.GetLogger(ctx) var ip string @@ -1242,7 +1018,7 @@ func (fs *ocfs) UnsetArbitraryMetadata(ctx context.Context, ref *provider.Refere // versions were not. 
// We will live with that compromise since this storage driver will be // deprecated soon. -func (fs *ocfs) Delete(ctx context.Context, ref *provider.Reference) (err error) { +func (fs *owncloudsqlfs) Delete(ctx context.Context, ref *provider.Reference) (err error) { var ip string if ip, err = fs.resolve(ctx, ref); err != nil { return errors.Wrap(err, "owncloudsql: error resolving reference") @@ -1268,7 +1044,8 @@ func (fs *ocfs) Delete(ctx context.Context, ref *provider.Reference) (err error) return errors.Wrap(err, "owncloudsql: error stating "+ip) } - rp, err := fs.getRecyclePath(ctx) + // Delete file into the owner's trash, not the user's (in case of shares) + rp, err := fs.getRecyclePathForUser(fs.getOwner(ip)) if err != nil { return errors.Wrap(err, "owncloudsql: error resolving recycle path") } @@ -1284,15 +1061,10 @@ func (fs *ocfs) Delete(ctx context.Context, ref *provider.Reference) (err error) if err != nil { return errors.Wrapf(err, "owncloudsql: error deleting file %s", ip) } - err = fs.trashVersions(ctx, ip, origin) - if err != nil { - return errors.Wrapf(err, "owncloudsql: error deleting versions of file %s", ip) - } return nil } -func (fs *ocfs) trash(ctx context.Context, ip string, rp string, origin string) error { - +func (fs *owncloudsqlfs) trash(ctx context.Context, ip string, rp string, origin string) error { // move to trash location dtime := time.Now().Unix() tgt := filepath.Join(rp, fmt.Sprintf("%s.d%d", filepath.Base(ip), dtime)) @@ -1307,19 +1079,35 @@ func (fs *ocfs) trash(ctx context.Context, ip string, rp string, origin string) } } - storage, err := fs.getUserStorage(ctx) + storage, err := fs.getStorage(ip) if err != nil { return err } - err = fs.filecache.Delete(storage, user.ContextMustGetUser(ctx).Username, fs.toDatabasePath(ctx, ip), fs.toDatabasePath(ctx, tgt)) + + tryDelete := func() error { + return fs.filecache.Delete(storage, fs.getOwner(ip), fs.toDatabasePath(ip), fs.toDatabasePath(tgt)) + } + err = tryDelete() if err != nil { 
- return err + err = fs.createHomeForUser(ctx, fs.getOwner(ip)) // Try setting up the owner's home (incl. trash) to fix the problem + if err != nil { + return err + } + err = tryDelete() + if err != nil { + return err + } + } + + err = fs.trashVersions(ctx, ip, origin, dtime) + if err != nil { + return errors.Wrapf(err, "owncloudsql: error deleting versions of file %s", ip) } return fs.propagate(ctx, filepath.Dir(ip)) } -func (fs *ocfs) trashVersions(ctx context.Context, ip string, origin string) error { +func (fs *owncloudsqlfs) trashVersions(ctx context.Context, ip string, origin string, dtime int64) error { vp := fs.getVersionsPath(ctx, ip) vrp, err := fs.getVersionRecyclePath(ctx) if err != nil { @@ -1332,8 +1120,26 @@ func (fs *ocfs) trashVersions(ctx context.Context, ip string, origin string) err // Ignore error since the only possible error is malformed pattern. versions, _ := filepath.Glob(vp + ".v*") + storage, err := fs.getStorage(ip) + if err != nil { + return err + } for _, v := range versions { - err := fs.trash(ctx, v, vrp, origin) + tgt := filepath.Join(vrp, fmt.Sprintf("%s.d%d", filepath.Base(v), dtime)) + if err := os.Rename(v, tgt); err != nil { + if os.IsExist(err) { + // timestamp collision, try again with higher value: + dtime++ + tgt = filepath.Join(vrp, fmt.Sprintf("%s.d%d", filepath.Base(v), dtime)) + if err := os.Rename(v, tgt); err != nil { + return errors.Wrap(err, "owncloudsql: could not move item to trash") + } + } + } + if err != nil { + return errors.Wrap(err, "owncloudsql: error deleting file "+v) + } + err = fs.filecache.Move(storage, fs.toDatabasePath(v), fs.toDatabasePath(tgt)) if err != nil { return errors.Wrap(err, "owncloudsql: error deleting file "+v) } @@ -1341,7 +1147,7 @@ func (fs *ocfs) trashVersions(ctx context.Context, ip string, origin string) err return nil } -func (fs *ocfs) Move(ctx context.Context, oldRef, newRef
*provider.Reference) (err error) { var oldIP string if oldIP, err = fs.resolve(ctx, oldRef); err != nil { return errors.Wrap(err, "owncloudsql: error resolving reference") @@ -1365,11 +1171,11 @@ func (fs *ocfs) Move(ctx context.Context, oldRef, newRef *provider.Reference) (e } // TODO check target permissions ... if it exists - storage, err := fs.getUserStorage(ctx) + storage, err := fs.getStorage(oldIP) if err != nil { return err } - err = fs.filecache.Move(storage, fs.toDatabasePath(ctx, oldIP), fs.toDatabasePath(ctx, newIP)) + err = fs.filecache.Move(storage, fs.toDatabasePath(oldIP), fs.toDatabasePath(newIP)) if err != nil { return err } @@ -1380,13 +1186,15 @@ func (fs *ocfs) Move(ctx context.Context, oldRef, newRef *provider.Reference) (e if err := fs.propagate(ctx, newIP); err != nil { return err } - if err := fs.propagate(ctx, filepath.Dir(oldIP)); err != nil { - return err + if filepath.Dir(newIP) != filepath.Dir(oldIP) { + if err := fs.propagate(ctx, filepath.Dir(oldIP)); err != nil { + return err + } } return nil } -func (fs *ocfs) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string) (*provider.ResourceInfo, error) { +func (fs *owncloudsqlfs) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string) (*provider.ResourceInfo, error) { ip, err := fs.resolve(ctx, ref) if err != nil { // TODO return correct errtype @@ -1397,44 +1205,12 @@ func (fs *ocfs) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []str } p := fs.toStoragePath(ctx, ip) - if fs.c.EnableHome { - if fs.isShareFolderChild(p) { - return fs.getMDShareFolder(ctx, p, mdKeys) - } - } - // If GetMD is called for a path shared with the user then the path is // already wrapped. 
(fs.resolve wraps the path) if strings.HasPrefix(p, fs.c.DataDirectory) { ip = p } - md, err := os.Stat(ip) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return nil, errors.Wrap(err, "owncloudsql: error stating "+ip) - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.Stat { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "owncloudsql: error reading permissions") - } - - return fs.convertToResourceInfo(ctx, md, ip, fs.toStoragePath(ctx, ip), mdKeys) -} - -func (fs *ocfs) getMDShareFolder(ctx context.Context, sp string, mdKeys []string) (*provider.ResourceInfo, error) { - ip := fs.toInternalShadowPath(ctx, sp) - // check permissions if perm, err := fs.readPermissions(ctx, ip); err == nil { if !perm.Stat { @@ -1447,34 +1223,22 @@ func (fs *ocfs) getMDShareFolder(ctx context.Context, sp string, mdKeys []string return nil, errors.Wrap(err, "owncloudsql: error reading permissions") } - md, err := os.Stat(ip) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(fs.toStorageShadowPath(ctx, ip)) - } - return nil, errors.Wrapf(err, "owncloudsql: error stating %s", ip) - } - m, err := fs.convertToResourceInfo(ctx, md, ip, fs.toStorageShadowPath(ctx, ip), mdKeys) + ownerStorageID, err := fs.filecache.GetNumericStorageID("home::" + fs.getOwner(ip)) if err != nil { return nil, err } - - if !fs.isShareFolderRoot(sp) { - m.Type = provider.ResourceType_RESOURCE_TYPE_REFERENCE - ref, err := xattr.Get(ip, mdPrefix+"target") - if err != nil { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStorageShadowPath(ctx, ip)) - } - return nil, err - } - m.Target = string(ref) + entry, err := fs.filecache.Get(ownerStorageID, fs.toDatabasePath(ip)) + switch { + case err == sql.ErrNoRows: + return nil, 
errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) + case err != nil: + return nil, err } - return m, nil + return fs.convertToResourceInfo(ctx, entry, ip, mdKeys) } -func (fs *ocfs) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys []string) ([]*provider.ResourceInfo, error) { +func (fs *owncloudsqlfs) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys []string) ([]*provider.ResourceInfo, error) { log := appctx.GetLogger(ctx) ip, err := fs.resolve(ctx, ref) @@ -1496,7 +1260,7 @@ func (fs *ocfs) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys return fs.listWithNominalHome(ctx, sp, mdKeys) } -func (fs *ocfs) listWithNominalHome(ctx context.Context, ip string, mdKeys []string) ([]*provider.ResourceInfo, error) { +func (fs *owncloudsqlfs) listWithNominalHome(ctx context.Context, ip string, mdKeys []string) ([]*provider.ResourceInfo, error) { // If a user wants to list a folder shared with him the path will already // be wrapped with the files directory path of the share owner. 
@@ -1517,14 +1281,22 @@ func (fs *ocfs) listWithNominalHome(ctx context.Context, ip string, mdKeys []str return nil, errors.Wrap(err, "owncloudsql: error reading permissions") } - mds, err := ioutil.ReadDir(ip) + storage, err := fs.getStorage(ip) + if err != nil { + return nil, err + } + entries, err := fs.filecache.List(storage, fs.toDatabasePath(ip)+"/") if err != nil { return nil, errors.Wrapf(err, "owncloudsql: error listing %s", ip) } + owner := fs.getOwner(ip) finfos := []*provider.ResourceInfo{} - for _, md := range mds { - cp := filepath.Join(ip, md.Name()) - m, err := fs.convertToResourceInfo(ctx, md, cp, fs.toStoragePath(ctx, cp), mdKeys) + for _, entry := range entries { + cp := filepath.Join(fs.c.DataDirectory, owner, entry.Path) + if err != nil { + return nil, err + } + m, err := fs.convertToResourceInfo(ctx, entry, cp, mdKeys) if err != nil { appctx.GetLogger(ctx).Error().Err(err).Str("path", cp).Msg("error converting to a resource info") } @@ -1533,27 +1305,18 @@ func (fs *ocfs) listWithNominalHome(ctx context.Context, ip string, mdKeys []str return finfos, nil } -func (fs *ocfs) listWithHome(ctx context.Context, home, p string, mdKeys []string) ([]*provider.ResourceInfo, error) { +func (fs *owncloudsqlfs) listWithHome(ctx context.Context, home, p string, mdKeys []string) ([]*provider.ResourceInfo, error) { log := appctx.GetLogger(ctx) if p == home { log.Debug().Msg("listing home") return fs.listHome(ctx, home, mdKeys) } - if fs.isShareFolderRoot(p) { - log.Debug().Msg("listing share folder root") - return fs.listShareFolderRoot(ctx, p, mdKeys) - } - - if fs.isShareFolderChild(p) { - return nil, errtypes.PermissionDenied("owncloudsql: error listing folders inside the shared folder, only file references are stored inside") - } - log.Debug().Msg("listing nominal home") return fs.listWithNominalHome(ctx, p, mdKeys) } -func (fs *ocfs) listHome(ctx context.Context, home string, mdKeys []string) ([]*provider.ResourceInfo, error) { +func (fs *owncloudsqlfs) 
listHome(ctx context.Context, home string, mdKeys []string) ([]*provider.ResourceInfo, error) { // list files ip := fs.toInternalPath(ctx, home) @@ -1569,81 +1332,28 @@ func (fs *ocfs) listHome(ctx context.Context, home string, mdKeys []string) ([]* return nil, errors.Wrap(err, "owncloudsql: error reading permissions") } - mds, err := ioutil.ReadDir(ip) - if err != nil { - return nil, errors.Wrap(err, "owncloudsql: error listing files") - } - - finfos := []*provider.ResourceInfo{} - for _, md := range mds { - cp := filepath.Join(ip, md.Name()) - m, err := fs.convertToResourceInfo(ctx, md, cp, fs.toStoragePath(ctx, cp), mdKeys) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("path", cp).Msg("error converting to a resource info") - } - finfos = append(finfos, m) - } - - // list shadow_files - ip = fs.toInternalShadowPath(ctx, home) - mds, err = ioutil.ReadDir(ip) + storage, err := fs.getStorage(ip) if err != nil { - return nil, errors.Wrap(err, "owncloudsql: error listing shadow_files") - } - for _, md := range mds { - cp := filepath.Join(ip, md.Name()) - m, err := fs.convertToResourceInfo(ctx, md, cp, fs.toStorageShadowPath(ctx, cp), mdKeys) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("path", cp).Msg("error converting to a resource info") - } - finfos = append(finfos, m) - } - return finfos, nil -} - -func (fs *ocfs) listShareFolderRoot(ctx context.Context, sp string, mdKeys []string) ([]*provider.ResourceInfo, error) { - ip := fs.toInternalShadowPath(ctx, sp) - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListContainer { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "owncloudsql: error reading permissions") + return nil, err } - - mds, err := ioutil.ReadDir(ip) + entries, err := fs.filecache.List(storage, fs.toDatabasePath(ip)+"/") if err != 
nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "owncloudsql: error listing shadow_files") + return nil, errors.Wrapf(err, "owncloudsql: error listing %s", ip) } - + owner := fs.getOwner(ip) finfos := []*provider.ResourceInfo{} - for _, md := range mds { - cp := filepath.Join(ip, md.Name()) - m, err := fs.convertToResourceInfo(ctx, md, cp, fs.toStorageShadowPath(ctx, cp), mdKeys) + for _, entry := range entries { + cp := filepath.Join(fs.c.DataDirectory, owner, entry.Path) + m, err := fs.convertToResourceInfo(ctx, entry, cp, mdKeys) if err != nil { appctx.GetLogger(ctx).Error().Err(err).Str("path", cp).Msg("error converting to a resource info") } - m.Type = provider.ResourceType_RESOURCE_TYPE_REFERENCE - ref, err := xattr.Get(cp, mdPrefix+"target") - if err != nil { - return nil, err - } - m.Target = string(ref) finfos = append(finfos, m) } - return finfos, nil } -func (fs *ocfs) archiveRevision(ctx context.Context, vbp string, ip string) error { +func (fs *owncloudsqlfs) archiveRevision(ctx context.Context, vbp string, ip string) error { // move existing file to versions dir vp := fmt.Sprintf("%s.v%d", vbp, time.Now().Unix()) if err := os.MkdirAll(filepath.Dir(vp), 0700); err != nil { @@ -1655,12 +1365,12 @@ func (fs *ocfs) archiveRevision(ctx context.Context, vbp string, ip string) erro return errors.Wrap(err, "owncloudsql: error renaming from "+ip+" to "+vp) } - storage, err := fs.getUserStorage(ctx) + storage, err := fs.getStorage(ip) if err != nil { return err } - vdp := fs.toDatabasePath(ctx, vp) + vdp := fs.toDatabasePath(vp) basePath := strings.TrimSuffix(vp, vdp) parts := strings.Split(filepath.Dir(vdp), "/") walkPath := "" @@ -1682,16 +1392,16 @@ func (fs *ocfs) archiveRevision(ctx context.Context, vbp string, ip string) erro "permissions": 31, // 1: READ, 2: UPDATE, 4: CREATE, 8: DELETE, 16: SHARE } - _, err = fs.filecache.InsertOrUpdate(storage, data) + _, err = 
fs.filecache.InsertOrUpdate(storage, data, false) if err != nil { return errors.Wrap(err, "could not create parent version directory") } } - _, err = fs.filecache.Copy(storage, fs.toDatabasePath(ctx, ip), vdp) + _, err = fs.filecache.Copy(storage, fs.toDatabasePath(ip), vdp) return err } -func (fs *ocfs) Download(ctx context.Context, ref *provider.Reference) (io.ReadCloser, error) { +func (fs *owncloudsqlfs) Download(ctx context.Context, ref *provider.Reference) (io.ReadCloser, error) { ip, err := fs.resolve(ctx, ref) if err != nil { return nil, errors.Wrap(err, "owncloudsql: error resolving reference") @@ -1719,7 +1429,7 @@ func (fs *ocfs) Download(ctx context.Context, ref *provider.Reference) (io.ReadC return r, nil } -func (fs *ocfs) ListRevisions(ctx context.Context, ref *provider.Reference) ([]*provider.FileVersion, error) { +func (fs *owncloudsqlfs) ListRevisions(ctx context.Context, ref *provider.Reference) ([]*provider.FileVersion, error) { ip, err := fs.resolve(ctx, ref) if err != nil { return nil, errors.Wrap(err, "owncloudsql: error resolving reference") @@ -1739,47 +1449,42 @@ func (fs *ocfs) ListRevisions(ctx context.Context, ref *provider.Reference) ([]* vp := fs.getVersionsPath(ctx, ip) bn := filepath.Base(ip) - - revisions := []*provider.FileVersion{} - mds, err := ioutil.ReadDir(filepath.Dir(vp)) + storageID, err := fs.getStorage(ip) if err != nil { - return nil, errors.Wrap(err, "owncloudsql: error reading"+filepath.Dir(vp)) + return nil, err } - for i := range mds { - rev := fs.filterAsRevision(ctx, bn, mds[i]) - if rev != nil { - revisions = append(revisions, rev) - } + entries, err := fs.filecache.List(storageID, filepath.Dir(fs.toDatabasePath(vp))+"/") + if err != nil { + return nil, err } - return revisions, nil -} - -func (fs *ocfs) filterAsRevision(ctx context.Context, bn string, md os.FileInfo) *provider.FileVersion { - if strings.HasPrefix(md.Name(), bn) { - // versions have filename.ext.v12345678 - version := md.Name()[len(bn)+2:] // 
truncate ".v" to get version mtime - mtime, err := strconv.Atoi(version) - if err != nil { - log := appctx.GetLogger(ctx) - log.Error().Err(err).Str("path", md.Name()).Msg("invalid version mtime") - return nil - } - // TODO(jfd) trashed versions are in the files_trashbin/versions folder ... not relevant here - return &provider.FileVersion{ - Key: version, - Size: uint64(md.Size()), - Mtime: uint64(mtime), - Etag: calcEtag(ctx, md), + revisions := []*provider.FileVersion{} + for _, entry := range entries { + if strings.HasPrefix(entry.Name, bn) { + // versions have filename.ext.v12345678 + version := entry.Name[len(bn)+2:] // truncate ".v" to get version mtime + mtime, err := strconv.Atoi(version) + if err != nil { + log := appctx.GetLogger(ctx) + log.Error().Err(err).Str("path", entry.Name).Msg("invalid version mtime") + return nil, err + } + revisions = append(revisions, &provider.FileVersion{ + Key: version, + Size: uint64(entry.Size), + Mtime: uint64(mtime), + Etag: entry.Etag, + }) } } - return nil + + return revisions, nil } -func (fs *ocfs) DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey string) (io.ReadCloser, error) { +func (fs *owncloudsqlfs) DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey string) (io.ReadCloser, error) { return nil, errtypes.NotSupported("download revision") } -func (fs *ocfs) RestoreRevision(ctx context.Context, ref *provider.Reference, revisionKey string) error { +func (fs *owncloudsqlfs) RestoreRevision(ctx context.Context, ref *provider.Reference, revisionKey string) error { ip, err := fs.resolve(ctx, ref) if err != nil { return errors.Wrap(err, "owncloudsql: error resolving reference") @@ -1843,7 +1548,7 @@ func (fs *ocfs) RestoreRevision(ctx context.Context, ref *provider.Reference, re } mtime := time.Now().Unix() data := map[string]interface{}{ - "path": fs.toDatabasePath(ctx, ip), + "path": fs.toDatabasePath(ip), "checksum": fmt.Sprintf("SHA1:%032x MD5:%032x ADLER32:%032x", 
sha1h, md5h, adler32h), "etag": calcEtag(ctx, fi), "size": fi.Size(), @@ -1851,11 +1556,11 @@ func (fs *ocfs) RestoreRevision(ctx context.Context, ref *provider.Reference, re "mtime": mtime, "storage_mtime": mtime, } - storageID, err := fs.getUserStorage(ctx) + storageID, err := fs.getStorage(ip) if err != nil { return err } - _, err = fs.filecache.InsertOrUpdate(storageID, data) + _, err = fs.filecache.InsertOrUpdate(storageID, data, false) if err != nil { return err } @@ -1864,11 +1569,12 @@ func (fs *ocfs) RestoreRevision(ctx context.Context, ref *provider.Reference, re return fs.propagate(ctx, ip) } -func (fs *ocfs) PurgeRecycleItem(ctx context.Context, key, path string) error { +func (fs *owncloudsqlfs) PurgeRecycleItem(ctx context.Context, key, path string) error { rp, err := fs.getRecyclePath(ctx) if err != nil { return errors.Wrap(err, "owncloudsql: error resolving recycle path") } + vp := filepath.Join(filepath.Dir(rp), "versions") ip := filepath.Join(rp, filepath.Clean(key)) // TODO check permission? 
@@ -1890,24 +1596,40 @@ func (fs *ocfs) PurgeRecycleItem(ctx context.Context, key, path string) error { if err != nil { return errors.Wrap(err, "owncloudsql: error deleting recycle item") } - err = os.RemoveAll(filepath.Join(filepath.Dir(rp), "versions", filepath.Clean(key))) + base, ttime, err := splitTrashKey(key) if err != nil { - return errors.Wrap(err, "owncloudsql: error deleting recycle item versions") + return err } - - base, ttime, err := splitTrashKey(key) + err = fs.filecache.PurgeRecycleItem(user.ContextMustGetUser(ctx).Username, base, ttime, false) if err != nil { return err } - err = fs.filecache.PurgeRecycleItem(user.ContextMustGetUser(ctx).Username, base, ttime) + + versionsGlob := filepath.Join(vp, base+".v*.d"+strconv.Itoa(ttime)) + versionFiles, err := filepath.Glob(versionsGlob) + if err != nil { + return errors.Wrap(err, "owncloudsql: error listing recycle item versions") + } + storageID, err := fs.getStorage(ip) if err != nil { return err } + for _, versionFile := range versionFiles { + err = os.Remove(versionFile) + if err != nil { + return errors.Wrap(err, "owncloudsql: error deleting recycle item versions") + } + err = fs.filecache.Purge(storageID, fs.toDatabasePath(versionFile)) + if err != nil { + return err + } + } + // TODO delete keyfiles, keys, share-keys return nil } -func (fs *ocfs) EmptyRecycle(ctx context.Context) error { +func (fs *owncloudsqlfs) EmptyRecycle(ctx context.Context) error { // TODO check permission? on what? user must be the owner rp, err := fs.getRecyclePath(ctx) if err != nil { @@ -1921,6 +1643,13 @@ func (fs *ocfs) EmptyRecycle(ctx context.Context) error { if err != nil { return errors.Wrap(err, "owncloudsql: error deleting recycle files versions") } + + u := user.ContextMustGetUser(ctx) + err = fs.filecache.EmptyRecycle(u.Username) + if err != nil { + return errors.Wrap(err, "owncloudsql: error deleting recycle items from the database") + } + // TODO delete keyfiles, keys, share-keys ... or just everything? 
return nil } @@ -1939,7 +1668,7 @@ func splitTrashKey(key string) (string, int, error) { return strings.TrimSuffix(filepath.Base(key), suffix), ttime, nil } -func (fs *ocfs) convertToRecycleItem(ctx context.Context, md os.FileInfo) *provider.RecycleItem { +func (fs *owncloudsqlfs) convertToRecycleItem(ctx context.Context, md os.FileInfo) *provider.RecycleItem { base, ttime, err := splitTrashKey(md.Name()) if err != nil { log := appctx.GetLogger(ctx) @@ -1972,7 +1701,7 @@ func (fs *ocfs) convertToRecycleItem(ctx context.Context, md os.FileInfo) *provi } } -func (fs *ocfs) ListRecycle(ctx context.Context, key, path string) ([]*provider.RecycleItem, error) { +func (fs *owncloudsqlfs) ListRecycle(ctx context.Context, key, path string) ([]*provider.RecycleItem, error) { // TODO check permission? on what? user must be the owner? rp, err := fs.getRecyclePath(ctx) if err != nil { @@ -1999,28 +1728,32 @@ func (fs *ocfs) ListRecycle(ctx context.Context, key, path string) ([]*provider. return items, nil } -func (fs *ocfs) RestoreRecycleItem(ctx context.Context, key, path string, restoreRef *provider.Reference) error { - // TODO check permission? on what? user must be the owner? 
+func (fs *owncloudsqlfs) RestoreRecycleItem(ctx context.Context, key, path string, restoreRef *provider.Reference) error { log := appctx.GetLogger(ctx) - rp, err := fs.getRecyclePath(ctx) + + base, ttime, err := splitTrashKey(key) if err != nil { - return errors.Wrap(err, "owncloudsql: error resolving recycle path") + log.Error().Str("path", key).Msg("invalid trash item key") + return fmt.Errorf("invalid trash item suffix") } - src := filepath.Join(rp, filepath.Clean(key)) - suffix := filepath.Ext(src) - if len(suffix) == 0 || !strings.HasPrefix(suffix, ".d") { - log.Error().Str("key", key).Str("path", src).Msg("invalid trash item suffix") - return nil + recyclePath, err := fs.getRecyclePath(ctx) + if err != nil { + return errors.Wrap(err, "owncloudsql: error resolving recycle path") } + src := filepath.Join(recyclePath, filepath.Clean(key)) if restoreRef.Path == "" { - v, err := xattr.Get(src, trashOriginPrefix) + u := user.ContextMustGetUser(ctx) + item, err := fs.filecache.GetRecycleItem(u.Username, base, ttime) if err != nil { - log.Error().Err(err).Str("key", key).Str("path", src).Msg("could not read origin") + log := appctx.GetLogger(ctx) + log.Error().Err(err).Str("path", key).Msg("could not get trash item") + return nil } - restoreRef.Path = filepath.Join("/", filepath.Clean(string(v)), strings.TrimSuffix(filepath.Base(src), suffix)) + restoreRef.Path = filepath.Join(item.Path, item.Name) } + tgt := fs.toInternalPath(ctx, restoreRef.Path) // move back to original location if err := os.Rename(src, tgt); err != nil { @@ -2028,28 +1761,74 @@ func (fs *ocfs) RestoreRecycleItem(ctx context.Context, key, path string, restor return errors.Wrap(err, "owncloudsql: could not restore item") } - storage, err := fs.getUserStorage(ctx) + storage, err := fs.getStorage(src) if err != nil { return err } - err = fs.filecache.Move(storage, fs.toDatabasePath(ctx, src), fs.toDatabasePath(ctx, tgt)) + err = fs.filecache.Move(storage, fs.toDatabasePath(src), 
fs.toDatabasePath(tgt)) + if err != nil { + return err + } + err = fs.filecache.DeleteRecycleItem(user.ContextMustGetUser(ctx).Username, base, ttime) + if err != nil { + return err + } + err = fs.RestoreRecycleItemVersions(ctx, key, tgt) if err != nil { return err } - - // TODO(jfd) restore versions return fs.propagate(ctx, tgt) } -func (fs *ocfs) propagate(ctx context.Context, leafPath string) error { +func (fs *owncloudsqlfs) RestoreRecycleItemVersions(ctx context.Context, key, target string) error { + base, ttime, err := splitTrashKey(key) + if err != nil { + return fmt.Errorf("invalid trash item suffix") + } + storage, err := fs.getStorage(target) + if err != nil { + return err + } + + recyclePath, err := fs.getRecyclePath(ctx) + if err != nil { + return errors.Wrap(err, "owncloudsql: error resolving recycle path") + } + versionsRecyclePath := filepath.Join(filepath.Dir(recyclePath), "versions") + + // Restore versions + deleteSuffix := ".d" + strconv.Itoa(ttime) + versionsGlob := filepath.Join(versionsRecyclePath, base+".v*"+deleteSuffix) + versionFiles, err := filepath.Glob(versionsGlob) + versionsRoot := filepath.Dir(fs.getVersionsPath(ctx, target)) + + if err != nil { + return errors.Wrap(err, "owncloudsql: error listing recycle item versions") + } + for _, versionFile := range versionFiles { + versionBase := strings.TrimSuffix(filepath.Base(versionFile), deleteSuffix) + versionsRestorePath := filepath.Join(versionsRoot, versionBase) + if err = os.Rename(versionFile, versionsRestorePath); err != nil { + return errors.Wrap(err, "owncloudsql: could not restore version file") + } + err = fs.filecache.Move(storage, fs.toDatabasePath(versionFile), fs.toDatabasePath(versionsRestorePath)) + if err != nil { + return err + } + } + return nil +} + +func (fs *owncloudsqlfs) propagate(ctx context.Context, leafPath string) error { var root string if fs.c.EnableHome { - root = fs.toInternalPath(ctx, "/") + root = filepath.Clean(fs.toInternalPath(ctx, "/")) } else { owner 
:= fs.getOwner(leafPath) - root = fs.toInternalPath(ctx, owner) + root = filepath.Clean(fs.toInternalPath(ctx, owner)) } + versionsRoot := filepath.Join(filepath.Dir(root), "files_versions") if !strings.HasPrefix(leafPath, root) { err := errors.New("internal path outside root") appctx.GetLogger(ctx).Error(). @@ -2070,47 +1849,51 @@ func (fs *ocfs) propagate(ctx context.Context, leafPath string) error { return err } - storageID, err := fs.getUserStorage(ctx) + storageID, err := fs.getStorage(leafPath) if err != nil { return err } - parts := strings.Split(strings.TrimPrefix(leafPath, root), "/") - // root never ends in / so the split returns an empty first element, which we can skip - // we do not need to chmod the last element because it is the leaf path (< and not <= comparison) - for i := 1; i < len(parts); i++ { + + currentPath := filepath.Clean(leafPath) + for currentPath != root && currentPath != versionsRoot { appctx.GetLogger(ctx).Debug(). Str("leafPath", leafPath). - Str("root", root). - Int("i", i). - Interface("parts", parts). + Str("currentPath", currentPath). Msg("propagating change") - if err := os.Chtimes(filepath.Join(root), fi.ModTime(), fi.ModTime()); err != nil { - appctx.GetLogger(ctx).Error(). - Err(err). - Str("leafPath", leafPath). - Str("root", root). - Msg("could not propagate change") + parentFi, err := os.Stat(filepath.Join(currentPath)) + if err != nil { return err } - fi, err := os.Stat(filepath.Join(root)) + if fi.ModTime().UnixNano() > parentFi.ModTime().UnixNano() { + if err := os.Chtimes(filepath.Join(currentPath), fi.ModTime(), fi.ModTime()); err != nil { + appctx.GetLogger(ctx).Error(). + Err(err). + Str("leafPath", leafPath). + Str("currentPath", currentPath). 
+ Msg("could not propagate change") + return err + } + } + fi, err = os.Stat(filepath.Join(currentPath)) if err != nil { return err } etag := calcEtag(ctx, fi) - if err := fs.filecache.SetEtag(storageID, fs.toDatabasePath(ctx, root), etag); err != nil { + if err := fs.filecache.SetEtag(storageID, fs.toDatabasePath(currentPath), etag); err != nil { appctx.GetLogger(ctx).Error(). Err(err). Str("leafPath", leafPath). - Str("root", root). + Str("currentPath", currentPath). Msg("could not set etag") return err } - root = filepath.Join(root, parts[i]) + + currentPath = filepath.Dir(currentPath) } return nil } -func (fs *ocfs) HashFile(path string) (string, string, string, error) { +func (fs *owncloudsqlfs) HashFile(path string) (string, string, string, error) { sha1h := sha1.New() md5h := md5.New() adler32h := adler32.New() @@ -2132,7 +1915,7 @@ func (fs *ocfs) HashFile(path string) (string, string, string, error) { } } -func (fs *ocfs) ListStorageSpaces(ctx context.Context, filter []*provider.ListStorageSpacesRequest_Filter) ([]*provider.StorageSpace, error) { +func (fs *owncloudsqlfs) ListStorageSpaces(ctx context.Context, filter []*provider.ListStorageSpacesRequest_Filter) ([]*provider.StorageSpace, error) { // TODO(corby): Implement return nil, errtypes.NotSupported("list storage spaces") } @@ -2176,5 +1959,12 @@ func readChecksumIntoOpaque(ctx context.Context, checksums, algo string, ri *pro } } +func getResourceType(isDir bool) provider.ResourceType { + if isDir { + return provider.ResourceType_RESOURCE_TYPE_CONTAINER + } + return provider.ResourceType_RESOURCE_TYPE_FILE +} + // TODO propagate etag and mtime or append event to history? propagate on disk ... // - but propagation is a separate task. only if upload was successful ... 
diff --git a/pkg/storage/fs/owncloudsql/owncloudsql_unix.go b/pkg/storage/fs/owncloudsql/owncloudsql_unix.go index 80006bfaf7..440e2c98ff 100755 --- a/pkg/storage/fs/owncloudsql/owncloudsql_unix.go +++ b/pkg/storage/fs/owncloudsql/owncloudsql_unix.go @@ -63,6 +63,6 @@ func calcEtag(ctx context.Context, fi os.FileInfo) string { if err != nil { log.Error().Err(err).Msg("error writing size") } - etag := fmt.Sprintf(`"%x"`, h.Sum(nil)) - return fmt.Sprintf("\"%s\"", strings.Trim(etag, "\"")) + etag := fmt.Sprintf("%x", h.Sum(nil)) + return strings.Trim(etag, "\"") } diff --git a/pkg/storage/fs/owncloudsql/upload.go b/pkg/storage/fs/owncloudsql/upload.go index 2c9a8d81a6..48bbd74baa 100644 --- a/pkg/storage/fs/owncloudsql/upload.go +++ b/pkg/storage/fs/owncloudsql/upload.go @@ -30,6 +30,7 @@ import ( userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + conversions "github.com/cs3org/reva/internal/http/services/owncloud/ocs/conversions" "github.com/cs3org/reva/pkg/appctx" "github.com/cs3org/reva/pkg/errtypes" "github.com/cs3org/reva/pkg/logger" @@ -45,7 +46,7 @@ import ( var defaultFilePerm = os.FileMode(0664) -func (fs *ocfs) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCloser) error { +func (fs *owncloudsqlfs) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCloser) error { upload, err := fs.GetUpload(ctx, ref.GetPath()) if err != nil { // Upload corresponding to this ID was not found. 
@@ -100,7 +101,7 @@ func (fs *ocfs) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCl // InitiateUpload returns upload ids corresponding to different protocols it supports // TODO read optional content for small files in this request -func (fs *ocfs) InitiateUpload(ctx context.Context, ref *provider.Reference, uploadLength int64, metadata map[string]string) (map[string]string, error) { +func (fs *owncloudsqlfs) InitiateUpload(ctx context.Context, ref *provider.Reference, uploadLength int64, metadata map[string]string) (map[string]string, error) { ip, err := fs.resolve(ctx, ref) if err != nil { return nil, errors.Wrap(err, "owncloudsql: error resolving reference") @@ -141,7 +142,7 @@ func (fs *ocfs) InitiateUpload(ctx context.Context, ref *provider.Reference, upl } // UseIn tells the tus upload middleware which extensions it supports. -func (fs *ocfs) UseIn(composer *tusd.StoreComposer) { +func (fs *owncloudsqlfs) UseIn(composer *tusd.StoreComposer) { composer.UseCore(fs) composer.UseTerminater(fs) composer.UseConcater(fs) @@ -152,7 +153,7 @@ func (fs *ocfs) UseIn(composer *tusd.StoreComposer) { // - the storage needs to implement NewUpload and GetUpload // - the upload needs to implement the tusd.Upload interface: WriteChunk, GetInfo, GetReader and FinishUpload -func (fs *ocfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd.Upload, err error) { +func (fs *owncloudsqlfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd.Upload, err error) { log := appctx.GetLogger(ctx) log.Debug().Interface("info", info).Msg("owncloudsql: NewUpload") @@ -201,7 +202,7 @@ func (fs *ocfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd. 
return nil, errors.Wrap(err, "owncloudsql: error resolving upload path") } usr := user.ContextMustGetUser(ctx) - storageID, err := fs.getUserStorage(ctx) + storageID, err := fs.getStorage(ip) if err != nil { return nil, err } @@ -209,6 +210,7 @@ func (fs *ocfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd. "Type": "OwnCloudStore", "BinPath": binPath, "InternalDestination": ip, + "Permissions": strconv.Itoa((int)(conversions.RoleFromResourcePermissions(perm).OCSPermissions())), "Idp": usr.Id.Idp, "UserId": usr.Id.OpaqueId, @@ -253,7 +255,7 @@ func (fs *ocfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd. return u, nil } -func (fs *ocfs) getUploadPath(ctx context.Context, uploadID string) (string, error) { +func (fs *owncloudsqlfs) getUploadPath(ctx context.Context, uploadID string) (string, error) { u, ok := user.ContextGetUser(ctx) if !ok { err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") @@ -264,7 +266,7 @@ func (fs *ocfs) getUploadPath(ctx context.Context, uploadID string) (string, err } // GetUpload returns the Upload for the given upload id -func (fs *ocfs) GetUpload(ctx context.Context, id string) (tusd.Upload, error) { +func (fs *owncloudsqlfs) GetUpload(ctx context.Context, id string) (tusd.Upload, error) { infoPath := filepath.Join(fs.c.UploadInfoDir, id+".info") info := tusd.FileInfo{} @@ -320,7 +322,7 @@ type fileUpload struct { // binPath is the path to the binary file (which has no extension) binPath string // only fs knows how to handle metadata and versions - fs *ocfs + fs *owncloudsqlfs // a context with a user // TODO add logger as well? 
ctx context.Context @@ -406,17 +408,21 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) error { return err } + perms, err := strconv.Atoi(upload.info.Storage["Permissions"]) + if err != nil { + return err + } data := map[string]interface{}{ - "path": upload.fs.toDatabasePath(upload.ctx, ip), + "path": upload.fs.toDatabasePath(ip), "checksum": fmt.Sprintf("SHA1:%032x MD5:%032x ADLER32:%032x", sha1h, md5h, adler32h), "etag": calcEtag(upload.ctx, fi), "size": upload.info.Size, "mimetype": mime.Detect(false, ip), - "permissions": 27, // 1: READ, 2: UPDATE, 4: CREATE, 8: DELETE, 16: SHARE + "permissions": perms, "mtime": upload.info.MetaData["mtime"], "storage_mtime": upload.info.MetaData["mtime"], } - _, err = upload.fs.filecache.InsertOrUpdate(upload.info.Storage["StorageId"], data) + _, err = upload.fs.filecache.InsertOrUpdate(upload.info.Storage["StorageId"], data, false) if err != nil { return err } @@ -437,7 +443,7 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) error { // - the upload needs to implement Terminate // AsTerminatableUpload returns a TerminatableUpload -func (fs *ocfs) AsTerminatableUpload(upload tusd.Upload) tusd.TerminatableUpload { +func (fs *owncloudsqlfs) AsTerminatableUpload(upload tusd.Upload) tusd.TerminatableUpload { return upload.(*fileUpload) } @@ -461,7 +467,7 @@ func (upload *fileUpload) Terminate(ctx context.Context) error { // - the upload needs to implement DeclareLength // AsLengthDeclarableUpload returns a LengthDeclarableUpload -func (fs *ocfs) AsLengthDeclarableUpload(upload tusd.Upload) tusd.LengthDeclarableUpload { +func (fs *owncloudsqlfs) AsLengthDeclarableUpload(upload tusd.Upload) tusd.LengthDeclarableUpload { return upload.(*fileUpload) } @@ -477,7 +483,7 @@ func (upload *fileUpload) DeclareLength(ctx context.Context, length int64) error // - the upload needs to implement ConcatUploads // AsConcatableUpload returns a ConcatableUpload -func (fs *ocfs) AsConcatableUpload(upload tusd.Upload) 
tusd.ConcatableUpload { +func (fs *owncloudsqlfs) AsConcatableUpload(upload tusd.Upload) tusd.ConcatableUpload { return upload.(*fileUpload) }