diff --git a/changelog/unreleased/fix-skip-early-empty-file-creation.md b/changelog/unreleased/fix-skip-early-empty-file-creation.md
new file mode 100644
index 0000000000..56edb6141e
--- /dev/null
+++ b/changelog/unreleased/fix-skip-early-empty-file-creation.md
@@ -0,0 +1,7 @@
+Bugfix: Remove early finish for zero byte file uploads
+
+We've fixed the upload of zero byte files by removing the
+early upload finishing mechanism.
+
+https://github.com/cs3org/reva/issues/2309
+https://github.com/owncloud/ocis/issues/2609
diff --git a/internal/http/services/owncloud/ocdav/tus.go b/internal/http/services/owncloud/ocdav/tus.go
index b1dbc073fd..8dea0a0fce 100644
--- a/internal/http/services/owncloud/ocdav/tus.go
+++ b/internal/http/services/owncloud/ocdav/tus.go
@@ -226,40 +226,36 @@ func (s *svc) handleTusPost(ctx context.Context, w http.ResponseWriter, r *http.
 
     var httpRes *http.Response
 
-    if length != 0 {
-        httpReq, err := rhttp.NewRequest(ctx, http.MethodPatch, ep, r.Body)
-        if err != nil {
-            log.Debug().Err(err).Msg("wrong request")
-            w.WriteHeader(http.StatusInternalServerError)
-            return
-        }
+    httpReq, err := rhttp.NewRequest(ctx, http.MethodPatch, ep, r.Body)
+    if err != nil {
+        log.Debug().Err(err).Msg("wrong request")
+        w.WriteHeader(http.StatusInternalServerError)
+        return
+    }
 
-        httpReq.Header.Set(HeaderContentType, r.Header.Get(HeaderContentType))
-        httpReq.Header.Set(HeaderContentLength, r.Header.Get(HeaderContentLength))
-        if r.Header.Get(HeaderUploadOffset) != "" {
-            httpReq.Header.Set(HeaderUploadOffset, r.Header.Get(HeaderUploadOffset))
-        } else {
-            httpReq.Header.Set(HeaderUploadOffset, "0")
-        }
-        httpReq.Header.Set(HeaderTusResumable, r.Header.Get(HeaderTusResumable))
+    httpReq.Header.Set(HeaderContentType, r.Header.Get(HeaderContentType))
+    httpReq.Header.Set(HeaderContentLength, r.Header.Get(HeaderContentLength))
+    if r.Header.Get(HeaderUploadOffset) != "" {
+        httpReq.Header.Set(HeaderUploadOffset, r.Header.Get(HeaderUploadOffset))
+    } else {
+        httpReq.Header.Set(HeaderUploadOffset, "0")
+    }
+    httpReq.Header.Set(HeaderTusResumable, r.Header.Get(HeaderTusResumable))
 
-        httpRes, err = s.client.Do(httpReq)
-        if err != nil {
-            log.Error().Err(err).Msg("error doing GET request to data service")
-            w.WriteHeader(http.StatusInternalServerError)
-            return
-        }
-        defer httpRes.Body.Close()
+    httpRes, err = s.client.Do(httpReq)
+    if err != nil {
+        log.Error().Err(err).Msg("error doing GET request to data service")
+        w.WriteHeader(http.StatusInternalServerError)
+        return
+    }
+    defer httpRes.Body.Close()
 
-        w.Header().Set(HeaderUploadOffset, httpRes.Header.Get(HeaderUploadOffset))
-        w.Header().Set(HeaderTusResumable, httpRes.Header.Get(HeaderTusResumable))
-        w.Header().Set(HeaderTusUploadExpires, httpRes.Header.Get(HeaderTusUploadExpires))
-        if httpRes.StatusCode != http.StatusNoContent {
-            w.WriteHeader(httpRes.StatusCode)
-            return
-        }
-    } else {
-        log.Debug().Msg("Skipping sending a Patch request as body is empty")
+    w.Header().Set(HeaderUploadOffset, httpRes.Header.Get(HeaderUploadOffset))
+    w.Header().Set(HeaderTusResumable, httpRes.Header.Get(HeaderTusResumable))
+    w.Header().Set(HeaderTusUploadExpires, httpRes.Header.Get(HeaderTusUploadExpires))
+    if httpRes.StatusCode != http.StatusNoContent {
+        w.WriteHeader(httpRes.StatusCode)
+        return
     }
 
     // check if upload was fully completed
diff --git a/pkg/storage/fs/owncloud/upload.go b/pkg/storage/fs/owncloud/upload.go
index 9a08ffb266..69a48d25d8 100644
--- a/pkg/storage/fs/owncloud/upload.go
+++ b/pkg/storage/fs/owncloud/upload.go
@@ -53,18 +53,7 @@ var defaultFilePerm = os.FileMode(0664)
 func (fs *ocfs) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCloser) error {
     upload, err := fs.GetUpload(ctx, ref.GetPath())
     if err != nil {
-        // Upload corresponding to this ID was not found.
-        // Assume that this corresponds to the resource path to which the file has to be uploaded.
-
-        // Set the length to 0 and set SizeIsDeferred to true
-        metadata := map[string]string{"sizedeferred": "true"}
-        uploadIDs, err := fs.InitiateUpload(ctx, ref, 0, metadata)
-        if err != nil {
-            return err
-        }
-        if upload, err = fs.GetUpload(ctx, uploadIDs["simple"]); err != nil {
-            return errors.Wrap(err, "ocfs: error retrieving upload")
-        }
+        return errors.Wrap(err, "ocfs: error retrieving upload")
     }
 
     uploadInfo := upload.(*fileUpload)
@@ -234,16 +223,6 @@ func (fs *ocfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd.
         ctx: ctx,
     }
 
-    if !info.SizeIsDeferred && info.Size == 0 {
-        log.Debug().Interface("info", info).Msg("ocfs: finishing upload for empty file")
-        // no need to create info file and finish directly
-        err := u.FinishUpload(ctx)
-        if err != nil {
-            return nil, err
-        }
-        return u, nil
-    }
-
     // writeInfo creates the file by itself if necessary
     err = u.writeInfo()
     if err != nil {
diff --git a/pkg/storage/fs/owncloudsql/upload.go b/pkg/storage/fs/owncloudsql/upload.go
index bb50ab2af0..3ebb2aeae3 100644
--- a/pkg/storage/fs/owncloudsql/upload.go
+++ b/pkg/storage/fs/owncloudsql/upload.go
@@ -49,18 +49,7 @@ var defaultFilePerm = os.FileMode(0664)
 func (fs *owncloudsqlfs) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCloser) error {
     upload, err := fs.GetUpload(ctx, ref.GetPath())
     if err != nil {
-        // Upload corresponding to this ID was not found.
-        // Assume that this corresponds to the resource path to which the file has to be uploaded.
-
-        // Set the length to 0 and set SizeIsDeferred to true
-        metadata := map[string]string{"sizedeferred": "true"}
-        uploadIDs, err := fs.InitiateUpload(ctx, ref, 0, metadata)
-        if err != nil {
-            return err
-        }
-        if upload, err = fs.GetUpload(ctx, uploadIDs["simple"]); err != nil {
-            return errors.Wrap(err, "owncloudsql: error retrieving upload")
-        }
+        return errors.Wrap(err, "owncloudsql: error retrieving upload")
     }
 
     uploadInfo := upload.(*fileUpload)
@@ -236,16 +225,6 @@ func (fs *owncloudsqlfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upl
         ctx: ctx,
     }
 
-    if !info.SizeIsDeferred && info.Size == 0 {
-        log.Debug().Interface("info", info).Msg("owncloudsql: finishing upload for empty file")
-        // no need to create info file and finish directly
-        err := u.FinishUpload(ctx)
-        if err != nil {
-            return nil, err
-        }
-        return u, nil
-    }
-
     // writeInfo creates the file by itself if necessary
     err = u.writeInfo()
     if err != nil {
diff --git a/pkg/storage/utils/decomposedfs/decomposedfs_concurrency_test.go b/pkg/storage/utils/decomposedfs/decomposedfs_concurrency_test.go
index 00d4de1e79..867d9ebaa4 100644
--- a/pkg/storage/utils/decomposedfs/decomposedfs_concurrency_test.go
+++ b/pkg/storage/utils/decomposedfs/decomposedfs_concurrency_test.go
@@ -20,7 +20,6 @@ package decomposedfs_test
 
 import (
     "context"
-    "fmt"
     "io/ioutil"
     "os"
     "path"
@@ -85,22 +84,10 @@ var _ = Describe("Decomposed", func() {
     Describe("concurrent", func() {
         Describe("Upload", func() {
             var (
-                f, f1 *os.File
+                r1 = []byte("test")
+                r2 = []byte("another run")
             )
 
-            BeforeEach(func() {
-                // Prepare two test files for upload
-                err := ioutil.WriteFile(fmt.Sprintf("%s/%s", tmpRoot, "f.lol"), []byte("test"), 0644)
-                Expect(err).ToNot(HaveOccurred())
-                f, err = os.Open(fmt.Sprintf("%s/%s", tmpRoot, "f.lol"))
-                Expect(err).ToNot(HaveOccurred())
-
-                err = ioutil.WriteFile(fmt.Sprintf("%s/%s", tmpRoot, "f1.lol"), []byte("another run"), 0644)
-                Expect(err).ToNot(HaveOccurred())
-                f1, err = os.Open(fmt.Sprintf("%s/%s", tmpRoot, "f1.lol"))
-                Expect(err).ToNot(HaveOccurred())
-            })
-
             PIt("generates two revisions", func() {
                 // runtime.GOMAXPROCS(1) // uncomment to remove concurrency and see revisions working.
                 wg := &sync.WaitGroup{}
@@ -108,13 +95,13 @@ var _ = Describe("Decomposed", func() {
 
                 // upload file with contents: "test"
                 go func(wg *sync.WaitGroup) {
-                    _ = fs.Upload(ctx, &provider.Reference{Path: "uploaded.txt"}, f)
+                    _ = helpers.Upload(ctx, fs, &provider.Reference{Path: "uploaded.txt"}, r1)
                     wg.Done()
                 }(wg)
 
                 // upload file with contents: "another run"
                 go func(wg *sync.WaitGroup) {
-                    _ = fs.Upload(ctx, &provider.Reference{Path: "uploaded.txt"}, f1)
+                    _ = helpers.Upload(ctx, fs, &provider.Reference{Path: "uploaded.txt"}, r2)
                     wg.Done()
                 }(wg)
 
diff --git a/pkg/storage/utils/decomposedfs/upload.go b/pkg/storage/utils/decomposedfs/upload.go
index cde8729dac..2ed872e6a1 100644
--- a/pkg/storage/utils/decomposedfs/upload.go
+++ b/pkg/storage/utils/decomposedfs/upload.go
@@ -57,18 +57,7 @@ var defaultFilePerm = os.FileMode(0664)
 func (fs *Decomposedfs) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCloser) (err error) {
     upload, err := fs.GetUpload(ctx, ref.GetPath())
     if err != nil {
-        // Upload corresponding to this ID was not found.
-        // Assume that this corresponds to the resource path to which the file has to be uploaded.
-
-        // Set the length to 0 and set SizeIsDeferred to true
-        metadata := map[string]string{"sizedeferred": "true"}
-        uploadIDs, err := fs.InitiateUpload(ctx, ref, 0, metadata)
-        if err != nil {
-            return err
-        }
-        if upload, err = fs.GetUpload(ctx, uploadIDs["simple"]); err != nil {
-            return errors.Wrap(err, "Decomposedfs: error retrieving upload")
-        }
+        return errors.Wrap(err, "Decomposedfs: error retrieving upload")
     }
 
     uploadInfo := upload.(*fileUpload)
@@ -297,16 +286,6 @@ func (fs *Decomposedfs) NewUpload(ctx context.Context, info tusd.FileInfo) (uplo
         ctx: ctx,
     }
 
-    if !info.SizeIsDeferred && info.Size == 0 {
-        log.Debug().Interface("info", info).Msg("Decomposedfs: finishing upload for empty file")
-        // no need to create info file and finish directly
-        err := u.FinishUpload(ctx)
-        if err != nil {
-            return nil, err
-        }
-        return u, nil
-    }
-
     // writeInfo creates the file by itself if necessary
     err = u.writeInfo()
     if err != nil {
diff --git a/pkg/storage/utils/decomposedfs/upload_test.go b/pkg/storage/utils/decomposedfs/upload_test.go
index 26d492d9a7..2b0c2eb681 100644
--- a/pkg/storage/utils/decomposedfs/upload_test.go
+++ b/pkg/storage/utils/decomposedfs/upload_test.go
@@ -28,20 +28,19 @@ import (
 
     userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
     provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
-    "github.com/cs3org/reva/pkg/errtypes"
-    "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node"
-    "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs"
-    "github.com/pkg/xattr"
-    "github.com/stretchr/testify/mock"
-
     ruser "github.com/cs3org/reva/pkg/ctx"
+    "github.com/cs3org/reva/pkg/errtypes"
     "github.com/cs3org/reva/pkg/storage"
     "github.com/cs3org/reva/pkg/storage/utils/decomposedfs"
     "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/mocks"
+    "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/node"
     "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/options"
     "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree"
     treemocks "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/tree/mocks"
+    "github.com/cs3org/reva/pkg/storage/utils/decomposedfs/xattrs"
     "github.com/cs3org/reva/tests/helpers"
+    "github.com/pkg/xattr"
+    "github.com/stretchr/testify/mock"
 
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
@@ -98,8 +97,8 @@ var _ = Describe("File uploads", func() {
         Expect(err).ToNot(HaveOccurred())
     })
 
-    Context("quota exceeded", func() {
-        Describe("InitiateUpload", func() {
+    Context("the user's quota is exceeded", func() {
+        When("the user wants to initiate a file upload", func() {
             It("fails", func() {
                 var originalFunc = node.CheckQuota
                 node.CheckQuota = func(spaceRoot *node.Node, fileSize uint64) (quotaSufficient bool, err error) {
@@ -112,12 +111,12 @@ var _ = Describe("File uploads", func() {
         })
     })
 
-    Context("with insufficient permissions", func() {
+    Context("the user has insufficient permissions", func() {
         BeforeEach(func() {
             permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(false, nil)
         })
 
-        Describe("InitiateUpload", func() {
+        When("the user wants to initiate a file upload", func() {
             It("fails", func() {
                 _, err := fs.InitiateUpload(ctx, ref, 10, map[string]string{})
                 Expect(err).To(MatchError("error: permission denied: root/foo"))
@@ -143,7 +142,7 @@ var _ = Describe("File uploads", func() {
             permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(false, nil)
         })
 
-        Describe("InitiateUpload", func() {
+        When("the user wants to initiate a file upload", func() {
             It("fails", func() {
                 h, err := lookup.HomeNode(ctx)
                 Expect(err).ToNot(HaveOccurred())
@@ -157,25 +156,61 @@ var _ = Describe("File uploads", func() {
     Context("with sufficient permissions", func() {
         BeforeEach(func() {
             permissions.On("HasPermission", mock.Anything, mock.Anything, mock.Anything).Return(true, nil)
+            permissions.On("AssemblePermissions", mock.Anything, mock.Anything).
+                Return(provider.ResourcePermissions{
+                    ListContainer: true,
+                }, nil)
         })
 
-        Describe("InitiateUpload", func() {
-            It("returns uploadIds for simple and tus uploads", func() {
+        When("the user initiates a non zero byte file upload", func() {
+            It("succeeds", func() {
                 uploadIds, err := fs.InitiateUpload(ctx, ref, 10, map[string]string{})
 
                 Expect(err).ToNot(HaveOccurred())
                 Expect(len(uploadIds)).To(Equal(2))
                 Expect(uploadIds["simple"]).ToNot(BeEmpty())
                 Expect(uploadIds["tus"]).ToNot(BeEmpty())
+
+                rootRef := &provider.Reference{Path: "/"}
+                resources, err := fs.ListFolder(ctx, rootRef, []string{})
+
+                Expect(err).ToNot(HaveOccurred())
+                Expect(len(resources)).To(Equal(0))
             })
         })
 
+        When("the user initiates a zero byte file upload", func() {
+            It("succeeds", func() {
+                uploadIds, err := fs.InitiateUpload(ctx, ref, 0, map[string]string{})
+
+                Expect(err).ToNot(HaveOccurred())
+                Expect(len(uploadIds)).To(Equal(2))
+                Expect(uploadIds["simple"]).ToNot(BeEmpty())
+                Expect(uploadIds["tus"]).ToNot(BeEmpty())
+
+                rootRef := &provider.Reference{Path: "/"}
+                resources, err := fs.ListFolder(ctx, rootRef, []string{})
+
+                Expect(err).ToNot(HaveOccurred())
+                Expect(len(resources)).To(Equal(0))
+            })
+        })
 
-        Describe("Upload", func() {
-            var (
-                fileContent = []byte("0123456789")
-            )
+        When("the user uploads a non zero byte file", func() {
+            It("succeeds", func() {
+                var (
+                    fileContent = []byte("0123456789")
+                )
+
+                uploadIds, err := fs.InitiateUpload(ctx, ref, 10, map[string]string{})
+
+                Expect(err).ToNot(HaveOccurred())
+                Expect(len(uploadIds)).To(Equal(2))
+                Expect(uploadIds["simple"]).ToNot(BeEmpty())
+                Expect(uploadIds["tus"]).ToNot(BeEmpty())
+
+                uploadRef := &provider.Reference{Path: "/" + uploadIds["simple"]}
 
-            It("stores the blob in the blobstore", func() {
                 bs.On("Upload", mock.AnythingOfType("string"), mock.AnythingOfType("*os.File")).
                     Return(nil).
                    Run(func(args mock.Arguments) {
@@ -186,11 +221,77 @@ var _ = Describe("File uploads", func() {
                         Expect(data).To(Equal([]byte("0123456789")))
                     })
 
-                err := fs.Upload(ctx, ref, ioutil.NopCloser(bytes.NewReader(fileContent)))
+                err = fs.Upload(ctx, uploadRef, ioutil.NopCloser(bytes.NewReader(fileContent)))
+
+                Expect(err).ToNot(HaveOccurred())
+                bs.AssertCalled(GinkgoT(), "Upload", mock.Anything, mock.Anything)
+
+                rootRef := &provider.Reference{Path: "/"}
+                resources, err := fs.ListFolder(ctx, rootRef, []string{})
+                Expect(err).ToNot(HaveOccurred())
+                Expect(len(resources)).To(Equal(1))
+                Expect(resources[0].Path).To(Equal(ref.Path))
+            })
+        })
+
+        When("the user uploads a zero byte file", func() {
+            It("succeeds", func() {
+                var (
+                    fileContent = []byte("")
+                )
+
+                uploadIds, err := fs.InitiateUpload(ctx, ref, 0, map[string]string{})
+
+                Expect(err).ToNot(HaveOccurred())
+                Expect(len(uploadIds)).To(Equal(2))
+                Expect(uploadIds["simple"]).ToNot(BeEmpty())
+                Expect(uploadIds["tus"]).ToNot(BeEmpty())
+
+                uploadRef := &provider.Reference{Path: "/" + uploadIds["simple"]}
+
+                bs.On("Upload", mock.AnythingOfType("string"), mock.AnythingOfType("*os.File")).
+                    Return(nil).
+                    Run(func(args mock.Arguments) {
+                        reader := args.Get(1).(io.Reader)
+                        data, err := ioutil.ReadAll(reader)
+                        Expect(err).ToNot(HaveOccurred())
+                        Expect(data).To(Equal([]byte("")))
+                    })
+
+                err = fs.Upload(ctx, uploadRef, ioutil.NopCloser(bytes.NewReader(fileContent)))
+
                 Expect(err).ToNot(HaveOccurred())
 
                 bs.AssertCalled(GinkgoT(), "Upload", mock.Anything, mock.Anything)
+
+                rootRef := &provider.Reference{Path: "/"}
+                resources, err := fs.ListFolder(ctx, rootRef, []string{})
+
+                Expect(err).ToNot(HaveOccurred())
+                Expect(len(resources)).To(Equal(1))
+                Expect(resources[0].Path).To(Equal(ref.Path))
+            })
+        })
+
+        When("the user tries to upload a file without initialising the upload", func() {
+            It("fails", func() {
+                var (
+                    fileContent = []byte("0123456789")
+                )
+
+                uploadRef := &provider.Reference{Path: "/some-non-existent-upload-reference"}
+                err := fs.Upload(ctx, uploadRef, ioutil.NopCloser(bytes.NewReader(fileContent)))
+
+                Expect(err).To(HaveOccurred())
+
+                rootRef := &provider.Reference{Path: "/"}
+                resources, err := fs.ListFolder(ctx, rootRef, []string{})
+
+                Expect(err).ToNot(HaveOccurred())
+                Expect(len(resources)).To(Equal(0))
             })
         })
+
     })
 })
diff --git a/pkg/storage/utils/localfs/upload.go b/pkg/storage/utils/localfs/upload.go
index 35a6d0052a..fb91a6b358 100644
--- a/pkg/storage/utils/localfs/upload.go
+++ b/pkg/storage/utils/localfs/upload.go
@@ -43,18 +43,7 @@ var defaultFilePerm = os.FileMode(0664)
 func (fs *localfs) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCloser) error {
     upload, err := fs.GetUpload(ctx, ref.GetPath())
     if err != nil {
-        // Upload corresponding to this ID was not found.
-        // Assume that this corresponds to the resource path to which the file has to be uploaded.
-
-        // Set the length to 0 and set SizeIsDeferred to true
-        metadata := map[string]string{"sizedeferred": "true"}
-        uploadIDs, err := fs.InitiateUpload(ctx, ref, 0, metadata)
-        if err != nil {
-            return err
-        }
-        if upload, err = fs.GetUpload(ctx, uploadIDs["simple"]); err != nil {
-            return errors.Wrap(err, "localfs: error retrieving upload")
-        }
+        return errors.Wrap(err, "localfs: error retrieving upload")
     }
 
     uploadInfo := upload.(*fileUpload)
@@ -201,16 +190,6 @@ func (fs *localfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tu
         ctx: ctx,
     }
 
-    if !info.SizeIsDeferred && info.Size == 0 {
-        log.Debug().Interface("info", info).Msg("localfs: finishing upload for empty file")
-        // no need to create info file and finish directly
-        err := u.FinishUpload(ctx)
-        if err != nil {
-            return nil, err
-        }
-        return u, nil
-    }
-
     // writeInfo creates the file by itself if necessary
     err = u.writeInfo()
     if err != nil {
diff --git a/tests/helpers/helpers.go b/tests/helpers/helpers.go
index 6b6ba37248..bff07340b2 100644
--- a/tests/helpers/helpers.go
+++ b/tests/helpers/helpers.go
@@ -19,10 +19,16 @@
 package helpers
 
 import (
+    "bytes"
+    "context"
+    "errors"
     "io/ioutil"
     "os"
     "path/filepath"
     "runtime"
+
+    provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
+    "github.com/cs3org/reva/pkg/storage"
 )
 
 // TempDir creates a temporary directory in tmp/ and returns its path
@@ -44,3 +50,18 @@
 
     return tmpRoot, nil
 }
+
+// Upload can be used to initiate an upload and do the upload to a storage.FS in one step
+func Upload(ctx context.Context, fs storage.FS, ref *provider.Reference, content []byte) error {
+    uploadIds, err := fs.InitiateUpload(ctx, ref, 0, map[string]string{})
+    if err != nil {
+        return err
+    }
+    uploadID, ok := uploadIds["simple"]
+    if !ok {
+        return errors.New("simple upload method not available")
+    }
+    uploadRef := &provider.Reference{Path: "/" + uploadID}
+    err = fs.Upload(ctx, uploadRef, ioutil.NopCloser(bytes.NewReader(content)))
+    return err
+}
diff --git a/tests/integration/grpc/storageprovider_test.go b/tests/integration/grpc/storageprovider_test.go
index 9c19da71cd..0a0eb3f6fa 100644
--- a/tests/integration/grpc/storageprovider_test.go
+++ b/tests/integration/grpc/storageprovider_test.go
@@ -19,9 +19,7 @@
 package grpc_test
 
 import (
-    "bytes"
     "context"
-    "io/ioutil"
     "os"
 
     "google.golang.org/grpc/metadata"
@@ -35,6 +33,7 @@ import (
     "github.com/cs3org/reva/pkg/storage/fs/ocis"
     "github.com/cs3org/reva/pkg/storage/fs/owncloud"
     jwt "github.com/cs3org/reva/pkg/token/manager/jwt"
+    "github.com/cs3org/reva/tests/helpers"
 
     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
@@ -507,16 +506,16 @@ var _ = Describe("storage providers", func() {
             })
             Expect(err).ToNot(HaveOccurred())
 
-            content1 := ioutil.NopCloser(bytes.NewReader([]byte("1")))
-            content2 := ioutil.NopCloser(bytes.NewReader([]byte("22")))
+            content1 := []byte("1")
+            content2 := []byte("22")
 
             ctx := ctxpkg.ContextSetUser(context.Background(), user)
             err = fs.CreateHome(ctx)
             Expect(err).ToNot(HaveOccurred())
 
-            err = fs.Upload(ctx, versionedFileRef, content1)
+            err = helpers.Upload(ctx, fs, versionedFileRef, content1)
             Expect(err).ToNot(HaveOccurred())
 
-            err = fs.Upload(ctx, versionedFileRef, content2)
+            err = helpers.Upload(ctx, fs, versionedFileRef, content2)
             Expect(err).ToNot(HaveOccurred())
         })
@@ -565,16 +564,16 @@ var _ = Describe("storage providers", func() {
             })
             Expect(err).ToNot(HaveOccurred())
 
-            content1 := ioutil.NopCloser(bytes.NewReader([]byte("1")))
-            content2 := ioutil.NopCloser(bytes.NewReader([]byte("22")))
+            content1 := []byte("1")
+            content2 := []byte("22")
 
             ctx := ctxpkg.ContextSetUser(context.Background(), user)
             err = fs.CreateHome(ctx)
             Expect(err).ToNot(HaveOccurred())
 
-            err = fs.Upload(ctx, versionedFileRef, content1)
+            err = helpers.Upload(ctx, fs, versionedFileRef, content1)
             Expect(err).ToNot(HaveOccurred())
 
-            err = fs.Upload(ctx, versionedFileRef, content2)
+            err = helpers.Upload(ctx, fs, versionedFileRef, content2)
             Expect(err).ToNot(HaveOccurred())
         })
@@ -633,16 +632,16 @@ var _ = Describe("storage providers", func() {
             })
             Expect(err).ToNot(HaveOccurred())
 
-            content1 := ioutil.NopCloser(bytes.NewReader([]byte("1")))
-            content2 := ioutil.NopCloser(bytes.NewReader([]byte("22")))
+            content1 := []byte("1")
+            content2 := []byte("22")
 
             ctx := ctxpkg.ContextSetUser(context.Background(), user)
             err = fs.CreateHome(ctx)
             Expect(err).ToNot(HaveOccurred())
 
-            err = fs.Upload(ctx, versionedFileRef, content1)
+            err = helpers.Upload(ctx, fs, versionedFileRef, content1)
             Expect(err).ToNot(HaveOccurred())
 
-            err = fs.Upload(ctx, versionedFileRef, content2)
+            err = helpers.Upload(ctx, fs, versionedFileRef, content2)
             Expect(err).ToNot(HaveOccurred())
         })