diff --git a/changelog/unreleased/range-requests.md b/changelog/unreleased/range-requests.md
new file mode 100644
index 0000000000..f8e7f07c40
--- /dev/null
+++ b/changelog/unreleased/range-requests.md
@@ -0,0 +1,6 @@
+Enhancement: Support Range header in GET requests
+
+To allow resuming a download, we now support GET requests with a Range header.
+
+https://github.com/cs3org/reva/pull/1388
+https://github.com/owncloud/ocis-reva/issues/12
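
Illustrative only, not part of this patch: a minimal Go client sketch of the resumable download described in the changelog entry above. It requests the missing suffix with a Range header and appends the 206 body to the local file; the server URL, WebDAV path, file name, and credentials are placeholders.

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Open the partially downloaded file and find out how much we already have.
	partial, err := os.OpenFile("report.pdf", os.O_APPEND|os.O_WRONLY, 0o644)
	if err != nil {
		panic(err)
	}
	defer partial.Close()
	offset, _ := partial.Seek(0, io.SeekEnd)

	req, _ := http.NewRequest("GET", "https://server.example/remote.php/webdav/report.pdf", nil)
	req.SetBasicAuth("user", "secret")
	// Request only the bytes we are still missing.
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	// 206 Partial Content means the server honored the Range header;
	// append the remaining bytes to the local file.
	if res.StatusCode == http.StatusPartialContent {
		_, _ = io.Copy(partial, res.Body)
	}
}
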
diff --git a/internal/http/services/datagateway/datagateway.go b/internal/http/services/datagateway/datagateway.go
index 64e92dcd35..9e79104a7e 100644
--- a/internal/http/services/datagateway/datagateway.go
+++ b/internal/http/services/datagateway/datagateway.go
@@ -24,6 +24,7 @@ import (
 	"net/http"
 	"net/url"
 	"path"
+	"strconv"
 	"time"
 
 	"github.com/cs3org/reva/pkg/appctx"
@@ -170,7 +171,7 @@ func (s *svc) doHead(w http.ResponseWriter, r *http.Request) {
 	claims, err := s.verify(ctx, r)
 	if err != nil {
 		err = errors.Wrap(err, "datagateway: error validating transfer token")
-		log.Err(err).Str("token", r.Header.Get(TokenTransportHeader)).Msg("invalid transfer token")
+		log.Error().Err(err).Str("token", r.Header.Get(TokenTransportHeader)).Msg("invalid transfer token")
 		w.WriteHeader(http.StatusForbidden)
 		return
 	}
@@ -180,7 +181,7 @@ func (s *svc) doHead(w http.ResponseWriter, r *http.Request) {
 	httpClient := s.client
 	httpReq, err := rhttp.NewRequest(ctx, "HEAD", claims.Target, nil)
 	if err != nil {
-		log.Err(err).Msg("wrong request")
+		log.Error().Err(err).Msg("wrong request")
 		w.WriteHeader(http.StatusInternalServerError)
 		return
 	}
@@ -188,7 +189,7 @@ func (s *svc) doHead(w http.ResponseWriter, r *http.Request) {
 
 	httpRes, err := httpClient.Do(httpReq)
 	if err != nil {
-		log.Err(err).Msg("error doing HEAD request to data service")
+		log.Error().Err(err).Msg("error doing HEAD request to data service")
 		w.WriteHeader(http.StatusInternalServerError)
 		return
 	}
@@ -211,7 +212,7 @@ func (s *svc) doGet(w http.ResponseWriter, r *http.Request) {
 	claims, err := s.verify(ctx, r)
 	if err != nil {
 		err = errors.Wrap(err, "datagateway: error validating transfer token")
-		log.Err(err).Str("token", r.Header.Get(TokenTransportHeader)).Msg("invalid transfer token")
+		log.Error().Err(err).Str("token", r.Header.Get(TokenTransportHeader)).Msg("invalid transfer token")
 		w.WriteHeader(http.StatusForbidden)
 		return
 	}
@@ -221,7 +222,7 @@ func (s *svc) doGet(w http.ResponseWriter, r *http.Request) {
 	httpClient := s.client
 	httpReq, err := rhttp.NewRequest(ctx, "GET", claims.Target, nil)
 	if err != nil {
-		log.Err(err).Msg("wrong request")
+		log.Error().Err(err).Msg("wrong request")
 		w.WriteHeader(http.StatusInternalServerError)
 		return
 	}
@@ -229,22 +230,32 @@ func (s *svc) doGet(w http.ResponseWriter, r *http.Request) {
 
 	httpRes, err := httpClient.Do(httpReq)
 	if err != nil {
-		log.Err(err).Msg("error doing GET request to data service")
+		log.Error().Err(err).Msg("error doing GET request to data service")
 		w.WriteHeader(http.StatusInternalServerError)
 		return
 	}
 	defer httpRes.Body.Close()
 
 	copyHeader(w.Header(), httpRes.Header)
-	if httpRes.StatusCode != http.StatusOK {
-		w.WriteHeader(httpRes.StatusCode)
+	// TODO why do we swallow the body?
+	w.WriteHeader(httpRes.StatusCode)
+	if httpRes.StatusCode != http.StatusOK && httpRes.StatusCode != http.StatusPartialContent {
 		return
 	}
 
-	w.WriteHeader(http.StatusOK)
-	_, err = io.Copy(w, httpRes.Body)
+	var c int64
+	c, err = io.Copy(w, httpRes.Body)
 	if err != nil {
-		log.Err(err).Msg("error writing body after headers were sent")
+		log.Error().Err(err).Msg("error writing body after headers were sent")
+	}
+	if httpRes.Header.Get("Content-Length") != "" {
+		i, err := strconv.ParseInt(httpRes.Header.Get("Content-Length"), 10, 64)
+		if err != nil {
+			log.Error().Err(err).Str("content-length", httpRes.Header.Get("Content-Length")).Msg("invalid content length in dataprovider response")
+		}
+		if i != c {
+			log.Error().Int64("content-length", i).Int64("transferred-bytes", c).Msg("content length vs transferred bytes mismatch")
+		}
 	}
 }
 
diff --git a/internal/http/services/owncloud/ocdav/get.go b/internal/http/services/owncloud/ocdav/get.go
index 04b294b5ca..35cf8be1a8 100644
--- a/internal/http/services/owncloud/ocdav/get.go
+++ b/internal/http/services/owncloud/ocdav/get.go
@@ -37,14 +37,14 @@ import (
 
 func (s *svc) handleGet(w http.ResponseWriter, r *http.Request, ns string) {
 	ctx := r.Context()
-	ctx, span := trace.StartSpan(ctx, "head")
+	ctx, span := trace.StartSpan(ctx, "get")
 	defer span.End()
 
 	ns = applyLayout(ctx, ns)
 
 	fn := path.Join(ns, r.URL.Path)
 
-	sublog := appctx.GetLogger(ctx).With().Str("path", fn).Logger()
+	sublog := appctx.GetLogger(ctx).With().Str("path", fn).Str("svc", "ocdav").Str("handler", "get").Logger()
 
 	client, err := s.getClient()
 	if err != nil {
@@ -109,6 +109,11 @@ func (s *svc) handleGet(w http.ResponseWriter, r *http.Request, ns string) {
 		return
 	}
 	httpReq.Header.Set(datagateway.TokenTransportHeader, token)
+
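+	// pass the client's Range header on to the datagateway so it can return partial content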
+	if r.Header.Get("Range") != "" {
+		httpReq.Header.Set("Range", r.Header.Get("Range"))
+	}
+
 	httpClient := s.client
 
 	httpRes, err := httpClient.Do(httpReq)
@@ -119,8 +124,8 @@ func (s *svc) handleGet(w http.ResponseWriter, r *http.Request, ns string) {
 	}
 	defer httpRes.Body.Close()
 
-	if httpRes.StatusCode != http.StatusOK {
-		w.WriteHeader(http.StatusInternalServerError)
+	if httpRes.StatusCode != http.StatusOK && httpRes.StatusCode != http.StatusPartialContent {
+		w.WriteHeader(httpRes.StatusCode)
 		return
 	}
 
@@ -133,13 +138,31 @@ func (s *svc) handleGet(w http.ResponseWriter, r *http.Request, ns string) {
 	t := utils.TSToTime(info.Mtime).UTC()
 	lastModifiedString := t.Format(time.RFC1123Z)
 	w.Header().Set("Last-Modified", lastModifiedString)
-	w.Header().Set("Content-Length", strconv.FormatUint(info.Size, 10))
+
+	if httpRes.StatusCode == http.StatusPartialContent {
+		w.Header().Set("Content-Range", httpRes.Header.Get("Content-Range"))
+		w.Header().Set("Content-Length", httpRes.Header.Get("Content-Length"))
+		w.WriteHeader(http.StatusPartialContent)
+	} else {
+		w.Header().Set("Content-Length", strconv.FormatUint(info.Size, 10))
+	}
 	/*
 		if md.Checksum != "" {
 			w.Header().Set("OC-Checksum", md.Checksum)
 		}
 	*/
-	if _, err := io.Copy(w, httpRes.Body); err != nil {
+	var c int64
+	if c, err = io.Copy(w, httpRes.Body); err != nil {
 		sublog.Error().Err(err).Msg("error finishing copying data to response")
 	}
+	if httpRes.Header.Get("Content-Length") != "" {
+		i, err := strconv.ParseInt(httpRes.Header.Get("Content-Length"), 10, 64)
+		if err != nil {
+			sublog.Error().Err(err).Str("content-length", httpRes.Header.Get("Content-Length")).Msg("invalid content length in datagateway response")
+		}
+		if i != c {
+			sublog.Error().Int64("content-length", i).Int64("transferred-bytes", c).Msg("content length vs transferred bytes mismatch")
+		}
+	}
+	// TODO we need to send the If-Match etag in the GET to the datagateway to prevent race conditions between stating and reading the file
 }
diff --git a/internal/http/services/owncloud/ocdav/head.go b/internal/http/services/owncloud/ocdav/head.go
index a0444bf0b0..29ff689fde 100644
--- a/internal/http/services/owncloud/ocdav/head.go
+++ b/internal/http/services/owncloud/ocdav/head.go
@@ -73,6 +73,9 @@ func (s *svc) handleHead(w http.ResponseWriter, r *http.Request, ns string) {
 	t := utils.TSToTime(info.Mtime).UTC()
 	lastModifiedString := t.Format(time.RFC1123Z)
 	w.Header().Set("Last-Modified", lastModifiedString)
-	w.WriteHeader(http.StatusOK)
 	w.Header().Set("Content-Length", strconv.FormatUint(info.Size, 10))
+	if info.Type != provider.ResourceType_RESOURCE_TYPE_CONTAINER {
+		w.Header().Set("Accept-Ranges", "bytes")
+	}
+	w.WriteHeader(http.StatusOK)
 }
diff --git a/pkg/rhttp/datatx/manager/simple/simple.go b/pkg/rhttp/datatx/manager/simple/simple.go
index 03700aefea..69e2fa985b 100644
--- a/pkg/rhttp/datatx/manager/simple/simple.go
+++ b/pkg/rhttp/datatx/manager/simple/simple.go
@@ -19,18 +19,17 @@
 package simple
 
 import (
-	"io"
 	"net/http"
 
-	"github.com/pkg/errors"
-
 	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
 	"github.com/cs3org/reva/pkg/appctx"
 	"github.com/cs3org/reva/pkg/errtypes"
 	"github.com/cs3org/reva/pkg/rhttp/datatx"
 	"github.com/cs3org/reva/pkg/rhttp/datatx/manager/registry"
+	"github.com/cs3org/reva/pkg/rhttp/datatx/utils/download"
 	"github.com/cs3org/reva/pkg/storage"
 	"github.com/mitchellh/mapstructure"
+	"github.com/pkg/errors"
 )
 
 func init() {
@@ -64,42 +63,13 @@ func New(m map[string]interface{}) (datatx.DataTX, error) {
 
 func (m *manager) Handler(fs storage.FS) (http.Handler, error) {
 	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		switch r.Method {
-		case "GET":
-			ctx := r.Context()
-			log := appctx.GetLogger(ctx)
-			var fn string
-			files, ok := r.URL.Query()["filename"]
-			if !ok || len(files[0]) < 1 {
-				fn = r.URL.Path
-			} else {
-				fn = files[0]
-			}
-
-			ref := &provider.Reference{Spec: &provider.Reference_Path{Path: fn}}
-
-			rc, err := fs.Download(ctx, ref)
-			if err != nil {
-				if _, ok := err.(errtypes.IsNotFound); ok {
-					log.Debug().Err(err).Msg("datasvc: file not found")
-					w.WriteHeader(http.StatusNotFound)
-				} else {
-					log.Err(err).Msg("datasvc: error downloading file")
-					w.WriteHeader(http.StatusInternalServerError)
-				}
-				return
-			}
-			defer rc.Close()
-
-			_, err = io.Copy(w, rc)
-			if err != nil {
-				log.Error().Err(err).Msg("error copying data to response")
-				return
-			}
+		ctx := r.Context()
+		sublog := appctx.GetLogger(ctx).With().Str("datatx", "simple").Logger()
 
+		switch r.Method {
+		case "GET", "HEAD":
+			download.GetOrHeadFile(w, r, fs)
 		case "PUT":
-			ctx := r.Context()
-			log := appctx.GetLogger(ctx)
 			fn := r.URL.Path
 			defer r.Body.Close()
 
@@ -111,7 +81,7 @@ func (m *manager) Handler(fs storage.FS) (http.Handler, error) {
 					w.WriteHeader(http.StatusPartialContent)
 					return
 				}
-				log.Error().Err(err).Msg("error uploading file")
+				sublog.Error().Err(err).Msg("error uploading file")
 				w.WriteHeader(http.StatusInternalServerError)
 				return
 			}
diff --git a/pkg/rhttp/datatx/manager/tus/tus.go b/pkg/rhttp/datatx/manager/tus/tus.go
index ea2b9df153..5803054df5 100644
--- a/pkg/rhttp/datatx/manager/tus/tus.go
+++ b/pkg/rhttp/datatx/manager/tus/tus.go
@@ -19,16 +19,14 @@
 package tus
 
 import (
-	"io"
 	"net/http"
 
 	"github.com/pkg/errors"
 
-	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
-	"github.com/cs3org/reva/pkg/appctx"
 	"github.com/cs3org/reva/pkg/errtypes"
 	"github.com/cs3org/reva/pkg/rhttp/datatx"
 	"github.com/cs3org/reva/pkg/rhttp/datatx/manager/registry"
+	"github.com/cs3org/reva/pkg/rhttp/datatx/utils/download"
 	"github.com/cs3org/reva/pkg/storage"
 	"github.com/mitchellh/mapstructure"
 	tusd "github.com/tus/tusd/pkg/handler"
@@ -88,8 +86,6 @@ func (m *manager) Handler(fs storage.FS) (http.Handler, error) {
 	}
 
 	h := handler.Middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		log := appctx.GetLogger(r.Context())
-		log.Info().Msgf("tusd routing: path=%s", r.URL.Path)
 
 		method := r.Method
 		// https://github.com/tus/tus-resumable-upload-protocol/blob/master/protocol.md#x-http-method-override
@@ -106,37 +102,8 @@ func (m *manager) Handler(fs storage.FS) (http.Handler, error) {
 			handler.PatchFile(w, r)
 		case "DELETE":
 			handler.DelFile(w, r)
-		// TODO(pvince81): allow for range-based requests?
 		case "GET":
-			ctx := r.Context()
-			log := appctx.GetLogger(ctx)
-			var fn string
-			files, ok := r.URL.Query()["filename"]
-			if !ok || len(files[0]) < 1 {
-				fn = r.URL.Path
-			} else {
-				fn = files[0]
-			}
-
-			ref := &provider.Reference{Spec: &provider.Reference_Path{Path: fn}}
-
-			rc, err := fs.Download(ctx, ref)
-			if err != nil {
-				if _, ok := err.(errtypes.IsNotFound); ok {
-					log.Debug().Err(err).Msg("datasvc: file not found")
-					w.WriteHeader(http.StatusNotFound)
-				} else {
-					log.Err(err).Msg("datasvc: error downloading file")
-					w.WriteHeader(http.StatusInternalServerError)
-				}
-				return
-			}
-
-			_, err = io.Copy(w, rc)
-			if err != nil {
-				log.Error().Err(err).Msg("error copying data to response")
-				return
-			}
+			download.GetOrHeadFile(w, r, fs)
 		default:
 			w.WriteHeader(http.StatusNotImplemented)
 		}
diff --git a/pkg/rhttp/datatx/utils/download/download.go b/pkg/rhttp/datatx/utils/download/download.go
new file mode 100644
index 0000000000..2c037addd0
--- /dev/null
+++ b/pkg/rhttp/datatx/utils/download/download.go
@@ -0,0 +1,194 @@
+// Copyright 2018-2020 CERN
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// In applying this license, CERN does not waive the privileges and immunities
+// granted to it by virtue of its status as an Intergovernmental Organization
+// or submit itself to any jurisdiction.
+
+// Package download provides a library to handle file download requests.
+package download
+
+import (
+	"fmt"
+	"io"
+	"mime/multipart"
+	"net/http"
+	"strconv"
+
+	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
+	"github.com/cs3org/reva/pkg/appctx"
+	"github.com/cs3org/reva/pkg/errtypes"
+	"github.com/cs3org/reva/pkg/storage"
+	"github.com/rs/zerolog"
+)
+
+// GetOrHeadFile writes the requested file content to the response. It honors
+// Range headers when the storage returns a seekable reader and sends only the
+// response headers for HEAD requests.
+func GetOrHeadFile(w http.ResponseWriter, r *http.Request, fs storage.FS) {
+	ctx := r.Context()
+	sublog := appctx.GetLogger(ctx).With().Str("svc", "datatx").Str("handler", "download").Logger()
+
+	var fn string
+	files, ok := r.URL.Query()["filename"]
+	if !ok || len(files[0]) < 1 {
+		fn = r.URL.Path
+	} else {
+		fn = files[0]
+	}
+
+	ref := &provider.Reference{Spec: &provider.Reference_Path{Path: fn}}
+
+	// TODO check preconditions like If-Range, If-Match ...
+
+	var md *provider.ResourceInfo
+	var err error
+
+	// do a stat to set a Content-Length header
+
+	if md, err = fs.GetMD(ctx, ref, nil); err != nil {
+		handleError(w, &sublog, err, "stat")
+		return
+	}
+
+	var ranges []HTTPRange
+
+	if r.Header.Get("Range") != "" {
+		ranges, err = ParseRange(r.Header.Get("Range"), int64(md.Size))
+		if err != nil {
+			if err == ErrNoOverlap {
+				w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", md.Size))
+			}
+			w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
+			fmt.Fprintln(w, err)
+			return
+		}
+		if SumRangesSize(ranges) > int64(md.Size) {
+			// The total number of bytes in all the ranges
+			// is larger than the size of the file by
+			// itself, so this is probably an attack, or a
+			// dumb client. Ignore the range request.
+			ranges = nil
+		}
+	}
+
+	content, err := fs.Download(ctx, ref)
+	if err != nil {
+		handleError(w, &sublog, err, "download")
+		return
+	}
+	defer content.Close()
+
+	code := http.StatusOK
+	sendSize := int64(md.Size)
+	var sendContent io.Reader = content
+
+	var s io.Seeker
+	if s, ok = content.(io.Seeker); ok {
+		// tell clients they can send range requests
+		w.Header().Set("Accept-Ranges", "bytes")
+	}
+	if len(ranges) > 0 {
+		sublog.Debug().Int64("start", ranges[0].Start).Int64("length", ranges[0].Length).Msg("range request")
+		if s == nil {
+			sublog.Error().Int64("start", ranges[0].Start).Int64("length", ranges[0].Length).Msg("ReadCloser is not seekable")
+			w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
+			return
+		}
+
+		switch {
+		case len(ranges) == 1:
+			// RFC 7233, Section 4.1:
+			// "If a single part is being transferred, the server
+			// generating the 206 response MUST generate a
+			// Content-Range header field, describing what range
+			// of the selected representation is enclosed, and a
+			// payload consisting of the range.
+			// ...
+			// A server MUST NOT generate a multipart response to
+			// a request for a single range, since a client that
+			// does not request multiple parts might not support
+			// multipart responses."
+			ra := ranges[0]
+			if _, err := s.Seek(ra.Start, io.SeekStart); err != nil {
+				sublog.Error().Err(err).Int64("start", ra.Start).Int64("length", ra.Length).Msg("content is not seekable")
+				w.WriteHeader(http.StatusRequestedRangeNotSatisfiable)
+				return
+			}
+			sendSize = ra.Length
+			code = http.StatusPartialContent
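+			// e.g. Content-Range: bytes 0-1023/4096 for a request of the first KiB of a 4 KiB file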
+			w.Header().Set("Content-Range", ra.ContentRange(int64(md.Size)))
+		case len(ranges) > 1:
+			sendSize = RangesMIMESize(ranges, md.MimeType, int64(md.Size))
+			code = http.StatusPartialContent
+
+			pr, pw := io.Pipe()
+			mw := multipart.NewWriter(pw)
+			w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
+			sendContent = pr
+			defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
+			go func() {
+				for _, ra := range ranges {
+					part, err := mw.CreatePart(ra.MimeHeader(md.MimeType, int64(md.Size)))
+					if err != nil {
+						_ = pw.CloseWithError(err) // CloseWithError always returns nil
+						return
+					}
+					if _, err := s.Seek(ra.Start, io.SeekStart); err != nil {
+						_ = pw.CloseWithError(err) // CloseWithError always returns nil
+						return
+					}
+					if _, err := io.CopyN(part, content, ra.Length); err != nil {
+						_ = pw.CloseWithError(err) // CloseWithError always returns nil
+						return
+					}
+				}
+				mw.Close()
+				pw.Close()
+			}()
+		}
+	}
+
+	if w.Header().Get("Content-Encoding") == "" {
+		w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
+	}
+
+	w.WriteHeader(code)
+
+	if r.Method != "HEAD" {
+		var c int64
+		c, err = io.CopyN(w, sendContent, sendSize)
+		if err != nil {
+			sublog.Error().Err(err).Msg("error copying data to response")
+			return
+		}
+		if c != sendSize {
+			sublog.Error().Int64("copied", c).Int64("size", sendSize).Msg("copied vs size mismatch")
+		}
+	}
+
+}
+
+func handleError(w http.ResponseWriter, log *zerolog.Logger, err error, action string) {
+	switch err.(type) {
+	case errtypes.IsNotFound:
+		log.Debug().Err(err).Str("action", action).Msg("file not found")
+		w.WriteHeader(http.StatusNotFound)
+	case errtypes.IsPermissionDenied:
+		log.Debug().Err(err).Str("action", action).Msg("permission denied")
+		w.WriteHeader(http.StatusForbidden)
+	default:
+		log.Error().Err(err).Str("action", action).Msg("unexpected error")
+		w.WriteHeader(http.StatusInternalServerError)
+	}
+}
diff --git a/pkg/rhttp/datatx/utils/download/range.go b/pkg/rhttp/datatx/utils/download/range.go
new file mode 100644
index 0000000000..987cb05548
--- /dev/null
+++ b/pkg/rhttp/datatx/utils/download/range.go
@@ -0,0 +1,160 @@
+// Copyright 2018-2020 CERN
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// In applying this license, CERN does not waive the privileges and immunities
+// granted to it by virtue of its status as an Intergovernmental Organization
+// or submit itself to any jurisdiction.
+
+package download
+
+import (
+	"errors"
+	"fmt"
+	"mime/multipart"
+	"net/textproto"
+	"strconv"
+	"strings"
+)
+
+// taken from https://golang.org/src/net/http/fs.go
+
+// ErrSeeker is returned when the content doesn't seek properly. The
+// underlying Seeker's error text isn't included so it's not sent over
+// HTTP to end users.
+var ErrSeeker = errors.New("seeker can't seek")
+
+// ErrNoOverlap is returned by ParseRange if the first-byte-pos of
+// all of the byte-range-spec values is greater than the content size.
+var ErrNoOverlap = errors.New("invalid range: failed to overlap")
+
+// HTTPRange specifies the byte range to be sent to the client.
+type HTTPRange struct {
+	Start, Length int64
+}
+
+// ContentRange formats a Content-Range header value as per RFC 7233.
+func (r HTTPRange) ContentRange(size int64) string {
+	return fmt.Sprintf("bytes %d-%d/%d", r.Start, r.Start+r.Length-1, size)
+}
+
+// MimeHeader creates the MIME header for one part of a multipart/byteranges response.
+func (r HTTPRange) MimeHeader(contentType string, size int64) textproto.MIMEHeader {
+	return textproto.MIMEHeader{
+		"Content-Range": {r.ContentRange(size)},
+		"Content-Type":  {contentType},
+	}
+}
+
+// ParseRange parses a Range header string as per RFC 7233.
+// ErrNoOverlap is returned if none of the ranges overlap with the content.
+func ParseRange(s string, size int64) ([]HTTPRange, error) {
+	if s == "" {
+		return nil, nil // header not present
+	}
+	const b = "bytes="
+	if !strings.HasPrefix(s, b) {
+		return nil, errors.New("invalid range")
+	}
+	ranges := []HTTPRange{}
+	noOverlap := false
+	for _, ra := range strings.Split(s[len(b):], ",") {
+		ra = textproto.TrimString(ra)
+		if ra == "" {
+			continue
+		}
+		i := strings.Index(ra, "-")
+		if i < 0 {
+			return nil, errors.New("invalid range")
+		}
+		start, end := textproto.TrimString(ra[:i]), textproto.TrimString(ra[i+1:])
+		var r HTTPRange
+		if start == "" {
+			// If no start is specified, end specifies the
+			// range start relative to the end of the file.
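+			// e.g. "bytes=-500" selects the final 500 bytes of the representation.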
+			i, err := strconv.ParseInt(end, 10, 64)
+			if err != nil {
+				return nil, errors.New("invalid range")
+			}
+			if i > size {
+				i = size
+			}
+			r.Start = size - i
+			r.Length = size - r.Start
+		} else {
+			i, err := strconv.ParseInt(start, 10, 64)
+			if err != nil || i < 0 {
+				return nil, errors.New("invalid range")
+			}
+			if i >= size {
+				// If the range begins after the size of the content,
+				// then it does not overlap.
+				noOverlap = true
+				continue
+			}
+			r.Start = i
+			if end == "" {
+				// If no end is specified, range extends to end of the file.
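+				// e.g. "bytes=500-" selects everything from byte 500 to the end.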
+				r.Length = size - r.Start
+			} else {
+				i, err := strconv.ParseInt(end, 10, 64)
+				if err != nil || r.Start > i {
+					return nil, errors.New("invalid range")
+				}
+				if i >= size {
+					i = size - 1
+				}
+				r.Length = i - r.Start + 1
+			}
+		}
+		ranges = append(ranges, r)
+	}
+	if noOverlap && len(ranges) == 0 {
+		// The specified ranges did not overlap with the content.
+		return nil, ErrNoOverlap
+	}
+	return ranges, nil
+}
+
+// countingWriter counts how many bytes have been written to it.
+type countingWriter int64
+
+func (w *countingWriter) Write(p []byte) (n int, err error) {
+	*w += countingWriter(len(p))
+	return len(p), nil
+}
+
+// RangesMIMESize returns the number of bytes it takes to encode the
+// provided ranges as a multipart response.
+func RangesMIMESize(ranges []HTTPRange, contentType string, contentSize int64) (encSize int64) {
+	var w countingWriter
+	mw := multipart.NewWriter(&w)
+	for _, ra := range ranges {
+		// CreatePart might return an error if the io.Copy for the boundaries fails.
+		// The parts are not filled here, so we assume for now that this will always succeed.
+		_, _ = mw.CreatePart(ra.MimeHeader(contentType, contentSize))
+		encSize += ra.Length
+	}
+	mw.Close()
+	encSize += int64(w)
+	return
+}
+
+// SumRangesSize adds up the length of all ranges
+func SumRangesSize(ranges []HTTPRange) (size int64) {
+	for _, ra := range ranges {
+		size += ra.Length
+	}
+	return
+}
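
Illustrative only, not part of this patch: a small sketch of how the helpers above interpret the common Range forms for a 1000-byte resource. The import path is taken from this diff; the expected values follow from ParseRange and ContentRange above.

package main

import (
	"fmt"

	"github.com/cs3org/reva/pkg/rhttp/datatx/utils/download"
)

func main() {
	const size = 1000

	// "0-499" -> Start 0,   Length 500 (the first 500 bytes)
	// "500-"  -> Start 500, Length 500 (from byte 500 to the end)
	// "-200"  -> Start 800, Length 200 (the final 200 bytes)
	ranges, err := download.ParseRange("bytes=0-499,500-,-200", size)
	if err != nil {
		panic(err)
	}
	for _, ra := range ranges {
		// Prints "bytes 0-499/1000", "bytes 500-999/1000", "bytes 800-999/1000".
		fmt.Println(ra.ContentRange(size))
	}

	// 1200 is larger than the resource itself, so GetOrHeadFile would
	// treat such a request as bogus and ignore the ranges.
	fmt.Println(download.SumRangesSize(ranges))
}
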
diff --git a/tests/acceptance/expected-failures-on-OCIS-storage.txt b/tests/acceptance/expected-failures-on-OCIS-storage.txt
index 06ce33b2e0..3d5cd23ebc 100644
--- a/tests/acceptance/expected-failures-on-OCIS-storage.txt
+++ b/tests/acceptance/expected-failures-on-OCIS-storage.txt
@@ -992,14 +992,6 @@ apiWebdavMove1/moveFileAsync.feature:234
 apiWebdavMove1/moveFileAsync.feature:235
 apiWebdavMove1/moveFileAsync.feature:240
 #
-# https://github.com/owncloud/ocis-reva/issues/12  Range Header is not obeyed when downloading a file
-#
-apiWebdavMove1/moveFileAsync.feature:156
-apiWebdavMove1/moveFileAsync.feature:173
-apiWebdavMove1/moveFileAsync.feature:174
-apiWebdavMove1/moveFileAsync.feature:184
-apiWebdavMove1/moveFileAsync.feature:185
-#
 # https://github.com/owncloud/product/issues/260 cannot set blacklisted file names
 #
 apiWebdavMove1/moveFileToBlacklistedNameAsync.feature:12
@@ -1094,20 +1086,10 @@ apiWebdavOperations/deleteFolder.feature:92
 # https://github.com/owncloud/ocis-reva/issues/12  Range Header is not obeyed when downloading a file
 # https://github.com/owncloud/core/issues/38006 Review and fix the tests that have sharing step to work with ocis
 #
-apiWebdavOperations/downloadFile.feature:29
-apiWebdavOperations/downloadFile.feature:30
 apiWebdavOperations/downloadFile.feature:72
 apiWebdavOperations/downloadFile.feature:73
 apiWebdavOperations/downloadFile.feature:84
 apiWebdavOperations/downloadFile.feature:85
-apiWebdavOperations/downloadFile.feature:144
-apiWebdavOperations/downloadFile.feature:145
-apiWebdavOperations/downloadFile.feature:179
-apiWebdavOperations/downloadFile.feature:180
-apiWebdavOperations/downloadFile.feature:189
-apiWebdavOperations/downloadFile.feature:190
-apiWebdavOperations/downloadFile.feature:198
-apiWebdavOperations/downloadFile.feature:199
 apiWebdavOperations/refuseAccess.feature:21
 apiWebdavOperations/refuseAccess.feature:22
 apiWebdavOperations/refuseAccess.feature:33
diff --git a/tests/acceptance/expected-failures-on-OWNCLOUD-storage.txt b/tests/acceptance/expected-failures-on-OWNCLOUD-storage.txt
index ceb6c8e826..c970d2e5e6 100644
--- a/tests/acceptance/expected-failures-on-OWNCLOUD-storage.txt
+++ b/tests/acceptance/expected-failures-on-OWNCLOUD-storage.txt
@@ -965,14 +965,6 @@ apiWebdavMove1/moveFileAsync.feature:234
 apiWebdavMove1/moveFileAsync.feature:235
 apiWebdavMove1/moveFileAsync.feature:240
 #
-# https://github.com/owncloud/ocis-reva/issues/12  Range Header is not obeyed when downloading a file
-#
-apiWebdavMove1/moveFileAsync.feature:156
-apiWebdavMove1/moveFileAsync.feature:173
-apiWebdavMove1/moveFileAsync.feature:174
-apiWebdavMove1/moveFileAsync.feature:184
-apiWebdavMove1/moveFileAsync.feature:185
-#
 # https://github.com/owncloud/product/issues/260 cannot set blacklisted file names
 #
 apiWebdavMove1/moveFileToBlacklistedNameAsync.feature:12
@@ -1067,20 +1059,10 @@ apiWebdavOperations/deleteFolder.feature:92
 # https://github.com/owncloud/ocis-reva/issues/12  Range Header is not obeyed when downloading a file
 # https://github.com/owncloud/core/issues/38006 Review and fix the tests that have sharing step to work with ocis
 #
-apiWebdavOperations/downloadFile.feature:29
-apiWebdavOperations/downloadFile.feature:30
 apiWebdavOperations/downloadFile.feature:72
 apiWebdavOperations/downloadFile.feature:73
 apiWebdavOperations/downloadFile.feature:84
 apiWebdavOperations/downloadFile.feature:85
-apiWebdavOperations/downloadFile.feature:144
-apiWebdavOperations/downloadFile.feature:145
-apiWebdavOperations/downloadFile.feature:179
-apiWebdavOperations/downloadFile.feature:180
-apiWebdavOperations/downloadFile.feature:189
-apiWebdavOperations/downloadFile.feature:190
-apiWebdavOperations/downloadFile.feature:198
-apiWebdavOperations/downloadFile.feature:199
 apiWebdavOperations/refuseAccess.feature:21
 apiWebdavOperations/refuseAccess.feature:22
 apiWebdavOperations/refuseAccess.feature:33